-rw-r--r--  Documentation/feature-removal-schedule.txt    38
-rw-r--r--  drivers/ieee1394/Kconfig                       26
-rw-r--r--  drivers/ieee1394/Makefile                       5
-rw-r--r--  drivers/ieee1394/csr.c                          8
-rw-r--r--  drivers/ieee1394/dv1394.c                      24
-rw-r--r--  drivers/ieee1394/eth1394.c                      4
-rw-r--r--  drivers/ieee1394/highlevel.h                    1
-rw-r--r--  drivers/ieee1394/hosts.c                       41
-rw-r--r--  drivers/ieee1394/ieee1394_core.c                4
-rw-r--r--  drivers/ieee1394/nodemgr.c                    465
-rw-r--r--  drivers/ieee1394/nodemgr.h                      7
-rw-r--r--  drivers/ieee1394/ohci1394.c                   140
-rw-r--r--  drivers/ieee1394/pcilynx.c                      3
-rw-r--r--  drivers/ieee1394/raw1394-private.h             10
-rw-r--r--  drivers/ieee1394/raw1394.c                     23
-rw-r--r--  drivers/ieee1394/sbp2.c                      2190
-rw-r--r--  drivers/ieee1394/sbp2.h                       311
-rw-r--r--  drivers/ieee1394/video1394.c                   54
18 files changed, 1444 insertions(+), 1910 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 226ecf2ffd56..46f2a559b27c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -30,11 +30,39 @@ Who: Adrian Bunk <bunk@stusta.de>
 ---------------------------
 
 What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
-When: November 2006
-Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
-	more efficient. You should really be using libraw1394 for raw1394
-	access anyway.
-Who: Jody McIntyre <scjody@modernduck.com>
+When: June 2007
+Why: Deprecated in favour of the more efficient and robust rawiso interface.
+	Affected are applications which use the deprecated part of libraw1394
+	(raw1394_iso_write, raw1394_start_iso_write, raw1394_start_iso_rcv,
+	raw1394_stop_iso_rcv) or bypass libraw1394.
+Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
+What: dv1394 driver (CONFIG_IEEE1394_DV1394)
+When: June 2007
+Why: Replaced by raw1394 + userspace libraries, notably libiec61883. This
+	shift of application support has been indicated on www.linux1394.org
+	and developers' mailinglists for quite some time. Major applications
+	have been converted, with the exception of ffmpeg and hence xine.
+	Piped output of dvgrab2 is a partial equivalent to dv1394.
+Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
+What: ieee1394 core's unused exports (CONFIG_IEEE1394_EXPORT_FULL_API)
+When: January 2007
+Why: There are no projects known to use these exported symbols, except
+	dfg1394 (uses one symbol whose functionality is core-internal now).
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
+What: ieee1394's *_oui sysfs attributes (CONFIG_IEEE1394_OUI_DB)
+When: January 2007
+Files: drivers/ieee1394/: oui.db, oui2c.sh
+Why: big size, little value
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
 
 ---------------------------
 
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 672b92ef9f21..e7d56573fe56 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -36,7 +36,7 @@ config IEEE1394_VERBOSEDEBUG
 	  else says N.
 
 config IEEE1394_OUI_DB
-	bool "OUI Database built-in"
+	bool "OUI Database built-in (deprecated)"
 	depends on IEEE1394
 	help
 	  If you say Y here, then an OUI list (vendor unique ID's) will be
@@ -67,16 +67,11 @@ config IEEE1394_CONFIG_ROM_IP1394
 	  eth1394 option below.
 
 config IEEE1394_EXPORT_FULL_API
-	bool "Export all symbols of ieee1394's API"
+	bool "Export all symbols of ieee1394's API (deprecated)"
 	depends on IEEE1394
 	default n
 	help
-	  Export all symbols of ieee1394's driver programming interface, even
-	  those that are not currently used by the standard IEEE 1394 drivers.
-
-	  This option does not affect the interface to userspace applications.
-	  Say Y here if you want to compile externally developed drivers that
-	  make extended use of ieee1394's API. It is otherwise safe to say N.
+	  This option will be removed soon. Don't worry, say N.
 
 comment "Device Drivers"
 	depends on IEEE1394
@@ -125,7 +120,7 @@ comment "SBP-2 support (for storage devices) requires SCSI"
 
 config IEEE1394_SBP2
 	tristate "SBP-2 support (Harddisks etc.)"
-	depends on IEEE1394 && SCSI && (PCI || BROKEN)
+	depends on IEEE1394 && SCSI
 	help
 	  This option enables you to use SBP-2 devices connected to an IEEE
 	  1394 bus. SBP-2 devices include storage devices like harddisks and
@@ -161,17 +156,12 @@ config IEEE1394_ETH1394
 	  MCAP, therefore multicast support is significantly limited.
 
 config IEEE1394_DV1394
-	tristate "OHCI-DV I/O support"
+	tristate "OHCI-DV I/O support (deprecated)"
 	depends on IEEE1394 && IEEE1394_OHCI1394
 	help
-	  This driver allows you to transmit and receive DV (digital video)
-	  streams on an OHCI-1394 card using a simple frame-oriented
-	  interface.
-
-	  The user-space API for dv1394 is documented in dv1394.h.
-
-	  To compile this driver as a module, say M here: the
-	  module will be called dv1394.
+	  The dv1394 driver will be removed from Linux in a future release.
+	  Its functionality is now provided by raw1394 together with libraries
+	  such as libiec61883.
 
 config IEEE1394_RAWIO
 	tristate "Raw IEEE1394 I/O support"
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index 6f53611fe255..d9650d3d77a0 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -3,8 +3,11 @@
 #
 
 ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
-		 highlevel.o csr.o nodemgr.o oui.o dma.o iso.o \
+		 highlevel.o csr.o nodemgr.o dma.o iso.o \
 		 csr1212.o config_roms.o
+ifdef CONFIG_IEEE1394_OUI_DB
+ieee1394-objs += oui.o
+endif
 
 obj-$(CONFIG_IEEE1394) += ieee1394.o
 obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index ab0c80f61b9d..52ac83e0ebee 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -158,12 +158,10 @@ static void host_reset(struct hpsb_host *host)
  */
 static inline void calculate_expire(struct csr_control *csr)
 {
-	unsigned long usecs =
-		(csr->split_timeout_hi & 0x07) * USEC_PER_SEC +
-		(csr->split_timeout_lo >> 19) * 125L;
-
-	csr->expire = usecs_to_jiffies(usecs > 100000L ? usecs : 100000L);
+	unsigned int usecs = (csr->split_timeout_hi & 7) * 1000000 +
+			     (csr->split_timeout_lo >> 19) * 125;
 
+	csr->expire = usecs_to_jiffies(usecs > 100000 ? usecs : 100000);
 	HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
 }
 
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 6c72f04b2b5d..1084da4d88a9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1536,27 +1536,20 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count
 
 static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct video_card *video;
+	struct video_card *video = file_to_video_card(file);
 	unsigned long flags;
 	int ret = -EINVAL;
 	void __user *argp = (void __user *)arg;
 
 	DECLARE_WAITQUEUE(wait, current);
 
-	lock_kernel();
-	video = file_to_video_card(file);
-
 	/* serialize this to prevent multi-threaded mayhem */
 	if (file->f_flags & O_NONBLOCK) {
-		if (!mutex_trylock(&video->mtx)) {
-			unlock_kernel();
+		if (!mutex_trylock(&video->mtx))
 			return -EAGAIN;
-		}
 	} else {
-		if (mutex_lock_interruptible(&video->mtx)) {
-			unlock_kernel();
+		if (mutex_lock_interruptible(&video->mtx))
 			return -ERESTARTSYS;
-		}
 	}
 
 	switch(cmd)
@@ -1780,7 +1773,6 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
  out:
 	mutex_unlock(&video->mtx);
-	unlock_kernel();
 	return ret;
 }
 
@@ -2188,12 +2180,8 @@ static struct ieee1394_device_id dv1394_id_table[] = {
 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
 
 static struct hpsb_protocol_driver dv1394_driver = {
-	.name = "DV/1394 Driver",
+	.name = "dv1394",
 	.id_table = dv1394_id_table,
-	.driver = {
-		.name = "dv1394",
-		.bus = &ieee1394_bus_type,
-	},
 };
 
 
@@ -2587,6 +2575,10 @@ static int __init dv1394_init_module(void)
 {
 	int ret;
 
+	printk(KERN_WARNING
+	       "WARNING: The dv1394 driver is unsupported and will be removed "
+	       "from Linux soon. Use raw1394 instead.\n");
+
 	cdev_init(&dv1394_cdev, &dv1394_fops);
 	dv1394_cdev.owner = THIS_MODULE;
 	kobject_set_name(&dv1394_cdev.kobj, "dv1394");
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 27d6c642415d..97e5c3dd044d 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -474,12 +474,10 @@ static struct ieee1394_device_id eth1394_id_table[] = {
 MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
 
 static struct hpsb_protocol_driver eth1394_proto_driver = {
-	.name = "IPv4 over 1394 Driver",
+	.name = ETH1394_DRIVER_NAME,
 	.id_table = eth1394_id_table,
 	.update = eth1394_update,
 	.driver = {
-		.name = ETH1394_DRIVER_NAME,
-		.bus = &ieee1394_bus_type,
 		.probe = eth1394_probe,
 		.remove = eth1394_remove,
 	},
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index 50f2dd2c7e20..4b330117067a 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -24,7 +24,6 @@ struct hpsb_address_serve {
 /* Only the following structures are of interest to actual highlevel drivers. */
 
 struct hpsb_highlevel {
-	struct module *owner;
 	const char *name;
 
 	/* Any of the following pointers can legally be NULL, except for
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index b935e08695a9..ee82a5320bf7 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -44,9 +44,10 @@ static void delayed_reset_bus(struct work_struct *work)
 
 	CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
 	if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
-		/* CSR image creation failed, reset generation field and do not
-		 * issue a bus reset. */
-		CSR_SET_BUS_INFO_GENERATION(host->csr.rom, host->csr.generation);
+		/* CSR image creation failed.
+		 * Reset generation field and do not issue a bus reset. */
+		CSR_SET_BUS_INFO_GENERATION(host->csr.rom,
+					    host->csr.generation);
 		return;
 	}
 
@@ -54,7 +55,8 @@ static void delayed_reset_bus(struct work_struct *work)
 
 	host->update_config_rom = 0;
 	if (host->driver->set_hw_config_rom)
-		host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
+		host->driver->set_hw_config_rom(host,
+						host->csr.rom->bus_info_data);
 
 	host->csr.gen_timestamp[host->csr.generation] = jiffies;
 	hpsb_reset_bus(host, SHORT_RESET);
@@ -70,7 +72,8 @@ static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
 	return -1;
 }
 
-static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
+static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
+			unsigned long arg)
 {
 	return -1;
 
@@ -128,10 +131,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 		return NULL;
 
 	h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
-	if (!h->csr.rom) {
-		kfree(h);
-		return NULL;
-	}
+	if (!h->csr.rom)
+		goto fail;
 
 	h->hostdata = h + 1;
 	h->driver = drv;
@@ -151,16 +152,15 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	init_timer(&h->timeout);
 	h->timeout.data = (unsigned long) h;
 	h->timeout.function = abort_timedouts;
-	h->timeout_interval = HZ / 20; // 50ms by default
+	h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */
 
 	h->topology_map = h->csr.topology_map + 3;
 	h->speed_map = (u8 *)(h->csr.speed_map + 2);
 
 	mutex_lock(&host_num_alloc);
-
 	while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
 		hostnum++;
-
+	mutex_unlock(&host_num_alloc);
 	h->id = hostnum;
 
 	memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
@@ -171,13 +171,19 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	h->class_dev.class = &hpsb_host_class;
 	snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
 
-	device_register(&h->device);
-	class_device_register(&h->class_dev);
+	if (device_register(&h->device))
+		goto fail;
+	if (class_device_register(&h->class_dev)) {
+		device_unregister(&h->device);
+		goto fail;
+	}
 	get_device(&h->device);
 
-	mutex_unlock(&host_num_alloc);
-
 	return h;
+
+fail:
+	kfree(h);
+	return NULL;
 }
 
 
@@ -229,7 +235,8 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
 	if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
 		/* Wait 60 seconds from the last time this generation number was
 		 * used. */
-		reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
+		reset_delay =
+			(60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
 	else
 		/* Wait 1 second in case some other code wants to change the
 		 * Config ROM in the near future. */
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 5fccf9f7a1d2..9a48ca20d1fd 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1237,10 +1237,10 @@ EXPORT_SYMBOL(highlevel_remove_host);
 /** nodemgr.c **/
 EXPORT_SYMBOL(hpsb_node_fill_packet);
 EXPORT_SYMBOL(hpsb_node_write);
-EXPORT_SYMBOL(hpsb_register_protocol);
+EXPORT_SYMBOL(__hpsb_register_protocol);
 EXPORT_SYMBOL(hpsb_unregister_protocol);
-EXPORT_SYMBOL(ieee1394_bus_type);
 #ifdef CONFIG_IEEE1394_EXPORT_FULL_API
+EXPORT_SYMBOL(ieee1394_bus_type);
 EXPORT_SYMBOL(nodemgr_for_each_host);
 #endif
 
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index e829c9336b3c..61307ca296ae 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/freezer.h>
 #include <asm/atomic.h>
@@ -67,7 +68,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
 {
 	quadlet_t q;
 	u8 i, *speed, old_speed, good_speed;
-	int ret;
+	int error;
 
 	speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]);
 	old_speed = *speed;
@@ -79,9 +80,9 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
 	 * just finished its initialization. */
 	for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
 		*speed = i;
-		ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
-				&q, sizeof(quadlet_t));
-		if (ret)
+		error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
+				  &q, sizeof(quadlet_t));
+		if (error)
 			break;
 		*buffer = q;
 		good_speed = i;
@@ -95,19 +96,19 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
 		return 0;
 	}
 	*speed = old_speed;
-	return ret;
+	return error;
 }
 
 static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
 			    void *buffer, void *__ci)
 {
 	struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
-	int i, ret;
+	int i, error;
 
 	for (i = 1; ; i++) {
-		ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
-				buffer, length);
-		if (!ret) {
+		error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
+				  buffer, length);
+		if (!error) {
 			ci->speed_unverified = 0;
 			break;
 		}
@@ -118,14 +119,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
 		/* The ieee1394_core guessed the node's speed capability from
 		 * the self ID. Check whether a lower speed works. */
 		if (ci->speed_unverified && length == sizeof(quadlet_t)) {
-			ret = nodemgr_check_speed(ci, addr, buffer);
-			if (!ret)
+			error = nodemgr_check_speed(ci, addr, buffer);
+			if (!error)
 				break;
 		}
 		if (msleep_interruptible(334))
 			return -EINTR;
 	}
-	return ret;
+	return error;
 }
 
 static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
@@ -260,9 +261,20 @@ static struct device nodemgr_dev_template_ne = {
 	.release = nodemgr_release_ne,
 };
 
+/* This dummy driver prevents the host devices from being scanned. We have no
+ * useful drivers for them yet, and there would be a deadlock possible if the
+ * driver core scans the host device while the host's low-level driver (i.e.
+ * the host's parent device) is being removed. */
+static struct device_driver nodemgr_mid_layer_driver = {
+	.bus = &ieee1394_bus_type,
+	.name = "nodemgr",
+	.owner = THIS_MODULE,
+};
+
 struct device nodemgr_dev_template_host = {
 	.bus = &ieee1394_bus_type,
 	.release = nodemgr_release_host,
+	.driver = &nodemgr_mid_layer_driver,
 };
 
 
@@ -307,8 +319,8 @@ static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
 	return sprintf(buf, format_string, (type)driver->field);\
 } \
 static struct driver_attribute driver_attr_drv_##field = { \
-	.attr = {.name = __stringify(field), .mode = S_IRUGO }, \
-	.show = fw_drv_show_##field, \
+	.attr = {.name = __stringify(field), .mode = S_IRUGO }, \
+	.show = fw_drv_show_##field, \
 };
 
 
@@ -362,7 +374,7 @@ static ssize_t fw_show_ne_tlabels_mask(struct device *dev,
 #endif
 	spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
 
-	return sprintf(buf, "0x%016llx\n", tm);
+	return sprintf(buf, "0x%016llx\n", (unsigned long long)tm);
 }
 static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
 #endif /* HPSB_DEBUG_TLABELS */
@@ -374,11 +386,11 @@ static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute
 	int state = simple_strtoul(buf, NULL, 10);
 
 	if (state == 1) {
-		down_write(&dev->bus->subsys.rwsem);
-		device_release_driver(dev);
 		ud->ignore_driver = 1;
-		up_write(&dev->bus->subsys.rwsem);
-	} else if (!state)
+		down_write(&ieee1394_bus_type.subsys.rwsem);
+		device_release_driver(dev);
+		up_write(&ieee1394_bus_type.subsys.rwsem);
+	} else if (state == 0)
 		ud->ignore_driver = 0;
 
 	return count;
@@ -413,11 +425,14 @@ static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
 static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
 
 
-static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count)
+static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf,
+			     size_t count)
 {
+	int error = 0;
+
 	if (simple_strtoul(buf, NULL, 10) == 1)
-		bus_rescan_devices(&ieee1394_bus_type);
-	return count;
+		error = bus_rescan_devices(&ieee1394_bus_type);
+	return error ? error : count;
 }
 static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
 {
@@ -433,7 +448,7 @@ static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size
 
 	if (state == 1)
 		ignore_drivers = 1;
-	else if (!state)
+	else if (state == 0)
 		ignore_drivers = 0;
 
 	return count;
@@ -526,7 +541,7 @@ static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
 	int length = 0;
 	char *scratch = buf;
 
-	driver = container_of(drv, struct hpsb_protocol_driver, driver);
+	driver = container_of(drv, struct hpsb_protocol_driver, driver);
 
 	for (id = driver->id_table; id->match_flags != 0; id++) {
 		int need_coma = 0;
@@ -583,7 +598,11 @@ static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
-		driver_create_file(drv, fw_drv_attrs[i]);
+		if (driver_create_file(drv, fw_drv_attrs[i]))
+			goto fail;
+	return;
+fail:
+	HPSB_ERR("Failed to add sysfs attribute for driver %s", driver->name);
 }
 
 
@@ -603,7 +622,12 @@ static void nodemgr_create_ne_dev_files(struct node_entry *ne)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
-		device_create_file(dev, fw_ne_attrs[i]);
+		if (device_create_file(dev, fw_ne_attrs[i]))
+			goto fail;
+	return;
+fail:
+	HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
+		 (unsigned long long)ne->guid);
 }
 
 
@@ -613,11 +637,16 @@ static void nodemgr_create_host_dev_files(struct hpsb_host *host)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
-		device_create_file(dev, fw_host_attrs[i]);
+		if (device_create_file(dev, fw_host_attrs[i]))
+			goto fail;
+	return;
+fail:
+	HPSB_ERR("Failed to add sysfs attribute for host %d", host->id);
 }
 
 
-static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid);
+static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
+					       nodeid_t nodeid);
 
 static void nodemgr_update_host_dev_links(struct hpsb_host *host)
 {
@@ -628,12 +657,18 @@ static void nodemgr_update_host_dev_links(struct hpsb_host *host)
 	sysfs_remove_link(&dev->kobj, "busmgr_id");
 	sysfs_remove_link(&dev->kobj, "host_id");
 
-	if ((ne = find_entry_by_nodeid(host, host->irm_id)))
-		sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id");
-	if ((ne = find_entry_by_nodeid(host, host->busmgr_id)))
-		sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id");
-	if ((ne = find_entry_by_nodeid(host, host->node_id)))
-		sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id");
+	if ((ne = find_entry_by_nodeid(host, host->irm_id)) &&
+	    sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id"))
+		goto fail;
+	if ((ne = find_entry_by_nodeid(host, host->busmgr_id)) &&
+	    sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id"))
+		goto fail;
+	if ((ne = find_entry_by_nodeid(host, host->node_id)) &&
+	    sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id"))
+		goto fail;
+	return;
+fail:
+	HPSB_ERR("Failed to update sysfs attributes for host %d", host->id);
 }
 
 static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
@@ -642,32 +677,39 @@ static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
-		device_create_file(dev, fw_ud_attrs[i]);
-
+		if (device_create_file(dev, fw_ud_attrs[i]))
+			goto fail;
 	if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
-		device_create_file(dev, &dev_attr_ud_specifier_id);
-
+		if (device_create_file(dev, &dev_attr_ud_specifier_id))
+			goto fail;
 	if (ud->flags & UNIT_DIRECTORY_VERSION)
-		device_create_file(dev, &dev_attr_ud_version);
-
+		if (device_create_file(dev, &dev_attr_ud_version))
+			goto fail;
 	if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
-		device_create_file(dev, &dev_attr_ud_vendor_id);
-		if (ud->vendor_name_kv)
-			device_create_file(dev, &dev_attr_ud_vendor_name_kv);
+		if (device_create_file(dev, &dev_attr_ud_vendor_id))
+			goto fail;
+		if (ud->vendor_name_kv &&
+		    device_create_file(dev, &dev_attr_ud_vendor_name_kv))
+			goto fail;
 	}
-
 	if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
-		device_create_file(dev, &dev_attr_ud_model_id);
-		if (ud->model_name_kv)
-			device_create_file(dev, &dev_attr_ud_model_name_kv);
+		if (device_create_file(dev, &dev_attr_ud_model_id))
+			goto fail;
+		if (ud->model_name_kv &&
+		    device_create_file(dev, &dev_attr_ud_model_name_kv))
+			goto fail;
 	}
+	return;
+fail:
+	HPSB_ERR("Failed to add sysfs attributes for unit %s",
+		 ud->device.bus_id);
 }
 
 
 static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
 {
 	struct hpsb_protocol_driver *driver;
 	struct unit_directory *ud;
 	struct ieee1394_device_id *id;
 
 	/* We only match unit directories */
@@ -675,55 +717,77 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
 		return 0;
 
 	ud = container_of(dev, struct unit_directory, device);
-	driver = container_of(drv, struct hpsb_protocol_driver, driver);
-
 	if (ud->ne->in_limbo || ud->ignore_driver)
 		return 0;
 
-	for (id = driver->id_table; id->match_flags != 0; id++) {
-		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
-		    id->vendor_id != ud->vendor_id)
-			continue;
+	/* We only match drivers of type hpsb_protocol_driver */
+	if (drv == &nodemgr_mid_layer_driver)
+		return 0;
 
-		if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
-		    id->model_id != ud->model_id)
-			continue;
+	driver = container_of(drv, struct hpsb_protocol_driver, driver);
+	for (id = driver->id_table; id->match_flags != 0; id++) {
+		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+		    id->vendor_id != ud->vendor_id)
+			continue;
 
-		if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
-		    id->specifier_id != ud->specifier_id)
+		if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
+		    id->model_id != ud->model_id)
 			continue;
 
-		if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
-		    id->version != ud->version)
+		if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
+		    id->specifier_id != ud->specifier_id)
 			continue;
+
+		if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
+		    id->version != ud->version)
+			continue;
 
 		return 1;
 	}
 
 	return 0;
 }
 
 
+static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
+
 static void nodemgr_remove_uds(struct node_entry *ne)
 {
-	struct class_device *cdev, *next;
-	struct unit_directory *ud;
+	struct class_device *cdev;
+	struct unit_directory *tmp, *ud;
 
-	list_for_each_entry_safe(cdev, next, &nodemgr_ud_class.children, node) {
-		ud = container_of(cdev, struct unit_directory, class_dev);
-
-		if (ud->ne != ne)
-			continue;
-
+	/* Iteration over nodemgr_ud_class.children has to be protected by
+	 * nodemgr_ud_class.sem, but class_device_unregister() will eventually
+	 * take nodemgr_ud_class.sem too. Therefore pick out one ud at a time,
+	 * release the semaphore, and then unregister the ud. Since this code
+	 * may be called from other contexts besides the knodemgrds, protect the
+	 * gap after release of the semaphore by nodemgr_serialize_remove_uds.
+	 */
+	mutex_lock(&nodemgr_serialize_remove_uds);
+	for (;;) {
+		ud = NULL;
+		down(&nodemgr_ud_class.sem);
+		list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
+			tmp = container_of(cdev, struct unit_directory,
+					   class_dev);
+			if (tmp->ne == ne) {
+				ud = tmp;
+				break;
+			}
+		}
+		up(&nodemgr_ud_class.sem);
+		if (ud == NULL)
+			break;
 		class_device_unregister(&ud->class_dev);
 		device_unregister(&ud->device);
 	}
+	mutex_unlock(&nodemgr_serialize_remove_uds);
 }
 
 
 static void nodemgr_remove_ne(struct node_entry *ne)
 {
-	struct device *dev = &ne->device;
+	struct device *dev;
 
 	dev = get_device(&ne->device);
 	if (!dev)
@@ -748,7 +812,7 @@ static int __nodemgr_remove_host_dev(struct device *dev, void *data)
 
 static void nodemgr_remove_host_dev(struct device *dev)
 {
-	device_for_each_child(dev, NULL, __nodemgr_remove_host_dev);
+	WARN_ON(device_for_each_child(dev, NULL, __nodemgr_remove_host_dev));
 	sysfs_remove_link(&dev->kobj, "irm_id");
 	sysfs_remove_link(&dev->kobj, "busmgr_id");
 	sysfs_remove_link(&dev->kobj, "host_id");
@@ -762,16 +826,16 @@ static void nodemgr_update_bus_options(struct node_entry *ne)
 #endif
 	quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
 
-	ne->busopt.irmc		= (busoptions >> 31) & 1;
-	ne->busopt.cmc		= (busoptions >> 30) & 1;
-	ne->busopt.isc		= (busoptions >> 29) & 1;
-	ne->busopt.bmc		= (busoptions >> 28) & 1;
-	ne->busopt.pmc		= (busoptions >> 27) & 1;
-	ne->busopt.cyc_clk_acc	= (busoptions >> 16) & 0xff;
-	ne->busopt.max_rec	= 1 << (((busoptions >> 12) & 0xf) + 1);
-	ne->busopt.max_rom	= (busoptions >> 8) & 0x3;
-	ne->busopt.generation	= (busoptions >> 4) & 0xf;
-	ne->busopt.lnkspd	= busoptions & 0x7;
+	ne->busopt.irmc		= (busoptions >> 31) & 1;
+	ne->busopt.cmc		= (busoptions >> 30) & 1;
+	ne->busopt.isc		= (busoptions >> 29) & 1;
+	ne->busopt.bmc		= (busoptions >> 28) & 1;
+	ne->busopt.pmc		= (busoptions >> 27) & 1;
+	ne->busopt.cyc_clk_acc	= (busoptions >> 16) & 0xff;
+	ne->busopt.max_rec	= 1 << (((busoptions >> 12) & 0xf) + 1);
+	ne->busopt.max_rom	= (busoptions >> 8) & 0x3;
+	ne->busopt.generation	= (busoptions >> 4) & 0xf;
+	ne->busopt.lnkspd	= busoptions & 0x7;
 
 	HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
 		     "cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
@@ -792,7 +856,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
 
 	ne = kzalloc(sizeof(*ne), GFP_KERNEL);
 	if (!ne)
-		return NULL;
+		goto fail_alloc;
 
 	ne->host = host;
 	ne->nodeid = nodeid;
@@ -815,12 +879,15 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
 	snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx",
 		 (unsigned long long)(ne->guid));
 
-	device_register(&ne->device);
-	class_device_register(&ne->class_dev);
+	if (device_register(&ne->device))
+		goto fail_devreg;
+	if (class_device_register(&ne->class_dev))
+		goto fail_classdevreg;
 	get_device(&ne->device);
 
-	if (ne->guid_vendor_oui)
-		device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui);
+	if (ne->guid_vendor_oui &&
+	    device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui))
+		goto fail_addoiu;
 	nodemgr_create_ne_dev_files(ne);
 
 	nodemgr_update_bus_options(ne);
@@ -830,17 +897,28 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
 		   NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
 
 	return ne;
+
+fail_addoiu:
+	put_device(&ne->device);
+fail_classdevreg:
+	device_unregister(&ne->device);
+fail_devreg:
+	kfree(ne);
+fail_alloc:
+	HPSB_ERR("Failed to create node ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
+		 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
+
+	return NULL;
 }
 
 
 static struct node_entry *find_entry_by_guid(u64 guid)
 {
-	struct class *class = &nodemgr_ne_class;
 	struct class_device *cdev;
 	struct node_entry *ne, *ret_ne = NULL;
 
-	down_read(&class->subsys.rwsem);
-	list_for_each_entry(cdev, &class->children, node) {
+	down(&nodemgr_ne_class.sem);
+	list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
 		ne = container_of(cdev, struct node_entry, class_dev);
 
 		if (ne->guid == guid) {
@@ -848,20 +926,20 @@ static struct node_entry *find_entry_by_guid(u64 guid)
 			break;
 		}
 	}
-	up_read(&class->subsys.rwsem);
+	up(&nodemgr_ne_class.sem);
 
 	return ret_ne;
 }
 
 
-static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid)
+static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
+					       nodeid_t nodeid)
 {
-	struct class *class = &nodemgr_ne_class;
 	struct class_device *cdev;
 	struct node_entry *ne, *ret_ne = NULL;
 
-	down_read(&class->subsys.rwsem);
-	list_for_each_entry(cdev, &class->children, node) {
+	down(&nodemgr_ne_class.sem);
+	list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
 		ne = container_of(cdev, struct node_entry, class_dev);
 
 		if (ne->host == host && ne->nodeid == nodeid) {
@@ -869,7 +947,7 @@ static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t
 			break;
 		}
 	}
-	up_read(&class->subsys.rwsem);
+	up(&nodemgr_ne_class.sem);
 
 	return ret_ne;
 }
@@ -891,13 +969,25 @@ static void nodemgr_register_device(struct node_entry *ne,
 	snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u",
 		 ne->device.bus_id, ud->id);
 
-	device_register(&ud->device);
-	class_device_register(&ud->class_dev);
+	if (device_register(&ud->device))
+		goto fail_devreg;
+	if (class_device_register(&ud->class_dev))
+		goto fail_classdevreg;
 	get_device(&ud->device);
 
-	if (ud->vendor_oui)
-		device_create_file(&ud->device, &dev_attr_ud_vendor_oui);
+	if (ud->vendor_oui &&
+	    device_create_file(&ud->device, &dev_attr_ud_vendor_oui))
+		goto fail_addoui;
 	nodemgr_create_ud_dev_files(ud);
+
+	return;
+
+fail_addoui:
+	put_device(&ud->device);
+fail_classdevreg:
+	device_unregister(&ud->device);
+fail_devreg:
+	HPSB_ERR("Failed to create unit %s", ud->device.bus_id);
 }
 
 
@@ -977,10 +1067,9 @@ static struct unit_directory *nodemgr_process_unit_directory
 		/* Logical Unit Number */
 		if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
 			if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
-				ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL);
+				ud_child = kmemdup(ud, sizeof(*ud_child), GFP_KERNEL);
 				if (!ud_child)
 					goto unit_directory_error;
-				memcpy(ud_child, ud, sizeof(*ud_child));
 				nodemgr_register_device(ne, ud_child, &ne->device);
 				ud_child = NULL;
 
@@ -1094,10 +1183,16 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
 		last_key_id = kv->key.id;
 	}
 
-	if (ne->vendor_oui)
-		device_create_file(&ne->device, &dev_attr_ne_vendor_oui);
-	if (ne->vendor_name_kv)
-		device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv);
+	if (ne->vendor_oui &&
+	    device_create_file(&ne->device, &dev_attr_ne_vendor_oui))
+		goto fail;
+	if (ne->vendor_name_kv &&
+	    device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv))
+		goto fail;
+	return;
+fail:
+	HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
+		 (unsigned long long)ne->guid);
 }
 
 #ifdef CONFIG_HOTPLUG
@@ -1161,16 +1256,20 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
 #endif /* CONFIG_HOTPLUG */
 
 
-int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
+int __hpsb_register_protocol(struct hpsb_protocol_driver *drv,
+			     struct module *owner)
 {
-	int ret;
+	int error;
 
-	/* This will cause a probe for devices */
-	ret = driver_register(&driver->driver);
-	if (!ret)
-		nodemgr_create_drv_files(driver);
+	drv->driver.bus = &ieee1394_bus_type;
+	drv->driver.owner = owner;
+	drv->driver.name = drv->name;
 
-	return ret;
+	/* This will cause a probe for devices */
+	error = driver_register(&drv->driver);
+	if (!error)
+		nodemgr_create_drv_files(drv);
+	return error;
 }
 
 void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
@@ -1298,26 +1397,25 @@ static void nodemgr_node_scan_one(struct host_info *hi,
 
 static void nodemgr_node_scan(struct host_info *hi, int generation)
 {
-	int count;
-	struct hpsb_host *host = hi->host;
-	struct selfid *sid = (struct selfid *)host->topology_map;
-	nodeid_t nodeid = LOCAL_BUS;
+	int count;
+	struct hpsb_host *host = hi->host;
+	struct selfid *sid = (struct selfid *)host->topology_map;
+	nodeid_t nodeid = LOCAL_BUS;
 
-	/* Scan each node on the bus */
-	for (count = host->selfid_count; count; count--, sid++) {
-		if (sid->extended)
-			continue;
+	/* Scan each node on the bus */
+	for (count = host->selfid_count; count; count--, sid++) {
+		if (sid->extended)
+			continue;
 
-		if (!sid->link_active) {
-			nodeid++;
-			continue;
-		}
-		nodemgr_node_scan_one(hi, nodeid++, generation);
-	}
+		if (!sid->link_active) {
+			nodeid++;
+			continue;
+		}
+		nodemgr_node_scan_one(hi, nodeid++, generation);
+	}
 }
 
 
-/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
 static void nodemgr_suspend_ne(struct node_entry *ne)
 {
 	struct class_device *cdev;
@@ -1327,21 +1425,22 @@ static void nodemgr_suspend_ne(struct node_entry *ne)
 		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
 
 	ne->in_limbo = 1;
-	device_create_file(&ne->device, &dev_attr_ne_in_limbo);
+	WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
 
-	down_write(&ne->device.bus->subsys.rwsem);
+	down(&nodemgr_ud_class.sem);
 	list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
 		ud = container_of(cdev, struct unit_directory, class_dev);
-
 		if (ud->ne != ne)
 			continue;
 
+		down_write(&ieee1394_bus_type.subsys.rwsem);
 		if (ud->device.driver &&
 		    (!ud->device.driver->suspend ||
 		     ud->device.driver->suspend(&ud->device, PMSG_SUSPEND)))
 			device_release_driver(&ud->device);
+		up_write(&ieee1394_bus_type.subsys.rwsem);
 	}
-	up_write(&ne->device.bus->subsys.rwsem);
+	up(&nodemgr_ud_class.sem);
 }
 
 
@@ -1353,45 +1452,47 @@ static void nodemgr_resume_ne(struct node_entry *ne)
 	ne->in_limbo = 0;
 	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
 
-	down_read(&nodemgr_ud_class.subsys.rwsem);
-	down_read(&ne->device.bus->subsys.rwsem);
+	down(&nodemgr_ud_class.sem);
 	list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
 		ud = container_of(cdev, struct unit_directory, class_dev);
-
 		if (ud->ne != ne)
 			continue;
 
+		down_read(&ieee1394_bus_type.subsys.rwsem);
 		if (ud->device.driver && ud->device.driver->resume)
 			ud->device.driver->resume(&ud->device);
+		up_read(&ieee1394_bus_type.subsys.rwsem);
 	}
-	up_read(&ne->device.bus->subsys.rwsem);
-	up_read(&nodemgr_ud_class.subsys.rwsem);
+	up(&nodemgr_ud_class.sem);
 
 	HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
 		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
 }
 
 
-/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
 static void nodemgr_update_pdrv(struct node_entry *ne)
 {
 	struct unit_directory *ud;
 	struct hpsb_protocol_driver *pdrv;
 	struct class_device *cdev;
 
+	down(&nodemgr_ud_class.sem);
 	list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
 		ud = container_of(cdev, struct unit_directory, class_dev);
-		if (ud->ne != ne || !ud->device.driver)
+		if (ud->ne != ne)
 			continue;
 
-		pdrv = container_of(ud->device.driver, struct hpsb_protocol_driver, driver);
-
-		if (pdrv->update && pdrv->update(ud)) {
-			down_write(&ud->device.bus->subsys.rwsem);
-			device_release_driver(&ud->device);
-			up_write(&ud->device.bus->subsys.rwsem);
+		down_write(&ieee1394_bus_type.subsys.rwsem);
+		if (ud->device.driver) {
+			pdrv = container_of(ud->device.driver,
+					    struct hpsb_protocol_driver,
+					    driver);
+			if (pdrv->update && pdrv->update(ud))
+				device_release_driver(&ud->device);
 		}
+		up_write(&ieee1394_bus_type.subsys.rwsem);
 	}
+	up(&nodemgr_ud_class.sem);
 }
 
 
@@ -1405,7 +1506,7 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
 {
 	const u64 bc_addr = (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL);
 	quadlet_t bc_remote, bc_local;
-	int ret;
+	int error;
 
 	if (!ne->host->is_irm || ne->generation != generation ||
 	    ne->nodeid == ne->host->node_id)
@@ -1414,16 +1515,14 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
 	bc_local = cpu_to_be32(ne->host->csr.broadcast_channel);
 
 	/* Check if the register is implemented and 1394a compliant. */
-	ret = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
-			sizeof(bc_remote));
-	if (!ret && bc_remote & cpu_to_be32(0x80000000) &&
+	error = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
+			  sizeof(bc_remote));
+	if (!error && bc_remote & cpu_to_be32(0x80000000) &&
 	    bc_remote != bc_local)
 		hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local));
 }
 
 
-/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader because the
- * calls to nodemgr_update_pdrv() and nodemgr_suspend_ne() here require it. */
 static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation)
 {
 	struct device *dev;
@@ -1456,7 +1555,6 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
 static void nodemgr_node_probe(struct host_info *hi, int generation)
 {
 	struct hpsb_host *host = hi->host;
-	struct class *class = &nodemgr_ne_class;
 	struct class_device *cdev;
 	struct node_entry *ne;
 
@@ -1469,18 +1567,18 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
 	 * while probes are time-consuming. (Well, those probes need some
 	 * improvement...) */
 
-	down_read(&class->subsys.rwsem);
-	list_for_each_entry(cdev, &class->children, node) {
+	down(&nodemgr_ne_class.sem);
+	list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
 		ne = container_of(cdev, struct node_entry, class_dev);
 		if (!ne->needs_probe)
 			nodemgr_probe_ne(hi, ne, generation);
 	}
-	list_for_each_entry(cdev, &class->children, node) {
+	list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
 		ne = container_of(cdev, struct node_entry, class_dev);
 		if (ne->needs_probe)
 			nodemgr_probe_ne(hi, ne, generation);
 	}
-	up_read(&class->subsys.rwsem);
+	up(&nodemgr_ne_class.sem);
 
 
 	/* If we had a bus reset while we were scanning the bus, it is
@@ -1498,15 +1596,14 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
 	 * just removed. */
 
 	if (generation == get_hpsb_generation(host))
-		bus_rescan_devices(&ieee1394_bus_type);
-
-	return;
+		if (bus_rescan_devices(&ieee1394_bus_type))
+			HPSB_DEBUG("bus_rescan_devices had an error");
 }
 
 static int nodemgr_send_resume_packet(struct hpsb_host *host)
 {
 	struct hpsb_packet *packet;
-	int ret = 1;
+	int error = -ENOMEM;
 
 	packet = hpsb_make_phypacket(host,
 				     EXTPHYPACKET_TYPE_RESUME |
@@ -1514,12 +1611,12 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host)
 	if (packet) {
 		packet->no_waiter = 1;
 		packet->generation = get_hpsb_generation(host);
-		ret = hpsb_send_packet(packet);
+		error = hpsb_send_packet(packet);
 	}
-	if (ret)
+	if (error)
 		HPSB_WARN("fw-host%d: Failed to broadcast resume packet",
 			  host->id);
-	return ret;
+	return error;
 }
 
 /* Perform a few high-level IRM responsibilities. */
@@ -1692,19 +1789,18 @@ exit:
1692 1789
1693int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)) 1790int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1694{ 1791{
1695 struct class *class = &hpsb_host_class;
1696 struct class_device *cdev; 1792 struct class_device *cdev;
1697 struct hpsb_host *host; 1793 struct hpsb_host *host;
1698 int error = 0; 1794 int error = 0;
1699 1795
1700 down_read(&class->subsys.rwsem); 1796 down(&hpsb_host_class.sem);
1701 list_for_each_entry(cdev, &class->children, node) { 1797 list_for_each_entry(cdev, &hpsb_host_class.children, node) {
1702 host = container_of(cdev, struct hpsb_host, class_dev); 1798 host = container_of(cdev, struct hpsb_host, class_dev);
1703 1799
1704 if ((error = cb(host, __data))) 1800 if ((error = cb(host, __data)))
1705 break; 1801 break;
1706 } 1802 }
1707 up_read(&class->subsys.rwsem); 1803 up(&hpsb_host_class.sem);
1708 1804
1709 return error; 1805 return error;
1710} 1806}
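
nodemgr_for_each_host() keeps its callback interface; only the locking underneath changes. A hedged usage sketch (callback, counter, and header location are assumptions made for illustration):

#include "nodemgr.h"

/* Sketch only: count the registered hosts via nodemgr_for_each_host(). */
static int example_count_one(struct hpsb_host *host, void *data)
{
        (*(int *)data)++;
        return 0;               /* a nonzero return stops the iteration */
}

static int example_count_hosts(void)
{
        int n = 0;
        int error = nodemgr_for_each_host(&n, example_count_one);

        return error ? error : n;
}
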
@@ -1726,10 +1822,10 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1726 1822
1727void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt) 1823void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
1728{ 1824{
1729 pkt->host = ne->host; 1825 pkt->host = ne->host;
1730 pkt->generation = ne->generation; 1826 pkt->generation = ne->generation;
1731 barrier(); 1827 barrier();
1732 pkt->node_id = ne->nodeid; 1828 pkt->node_id = ne->nodeid;
1733} 1829}
1734 1830
1735int hpsb_node_write(struct node_entry *ne, u64 addr, 1831int hpsb_node_write(struct node_entry *ne, u64 addr,
@@ -1789,26 +1885,25 @@ static struct hpsb_highlevel nodemgr_highlevel = {
1789 1885
1790int init_ieee1394_nodemgr(void) 1886int init_ieee1394_nodemgr(void)
1791{ 1887{
1792 int ret; 1888 int error;
1793 1889
1794 ret = class_register(&nodemgr_ne_class); 1890 error = class_register(&nodemgr_ne_class);
1795 if (ret < 0) 1891 if (error)
1796 return ret; 1892 return error;
1797 1893
1798 ret = class_register(&nodemgr_ud_class); 1894 error = class_register(&nodemgr_ud_class);
1799 if (ret < 0) { 1895 if (error) {
1800 class_unregister(&nodemgr_ne_class); 1896 class_unregister(&nodemgr_ne_class);
1801 return ret; 1897 return error;
1802 } 1898 }
1803 1899 error = driver_register(&nodemgr_mid_layer_driver);
1804 hpsb_register_highlevel(&nodemgr_highlevel); 1900 hpsb_register_highlevel(&nodemgr_highlevel);
1805
1806 return 0; 1901 return 0;
1807} 1902}
1808 1903
1809void cleanup_ieee1394_nodemgr(void) 1904void cleanup_ieee1394_nodemgr(void)
1810{ 1905{
1811 hpsb_unregister_highlevel(&nodemgr_highlevel); 1906 hpsb_unregister_highlevel(&nodemgr_highlevel);
1812 1907
1813 class_unregister(&nodemgr_ud_class); 1908 class_unregister(&nodemgr_ud_class);
1814 class_unregister(&nodemgr_ne_class); 1909 class_unregister(&nodemgr_ne_class);
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 0e1e7d930783..e25cbadb8be0 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -144,7 +144,12 @@ struct hpsb_protocol_driver {
144 struct device_driver driver; 144 struct device_driver driver;
145}; 145};
146 146
147int hpsb_register_protocol(struct hpsb_protocol_driver *driver); 147int __hpsb_register_protocol(struct hpsb_protocol_driver *, struct module *);
148static inline int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
149{
150 return __hpsb_register_protocol(driver, THIS_MODULE);
151}
152
148void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver); 153void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
149 154
150static inline int hpsb_node_entry_valid(struct node_entry *ne) 155static inline int hpsb_node_entry_valid(struct node_entry *ne)
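
With the inline wrapper above, protocol drivers keep calling hpsb_register_protocol() and implicitly pass their own THIS_MODULE; as the raw1394 and sbp2 hunks further below show, the embedded driver-core fields no longer need to be filled in by hand. A sketch of a hypothetical caller (driver name and id table are placeholders):

#include <linux/module.h>
#include "nodemgr.h"

static struct ieee1394_device_id example_id_table[] = { {} };

static struct hpsb_protocol_driver example_driver = {
        .name     = "example1394",
        .id_table = example_id_table,
};

static int __init example_init(void)
{
        /* expands to __hpsb_register_protocol(&example_driver, THIS_MODULE) */
        return hpsb_register_protocol(&example_driver);
}

static void __exit example_exit(void)
{
        hpsb_unregister_protocol(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
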
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index eae97d8dcf03..628130a58af3 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -468,7 +468,6 @@ static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
468/* Global initialization */ 468/* Global initialization */
469static void ohci_initialize(struct ti_ohci *ohci) 469static void ohci_initialize(struct ti_ohci *ohci)
470{ 470{
471 char irq_buf[16];
472 quadlet_t buf; 471 quadlet_t buf;
473 int num_ports, i; 472 int num_ports, i;
474 473
@@ -586,11 +585,10 @@ static void ohci_initialize(struct ti_ohci *ohci)
586 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); 585 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
587 586
588 buf = reg_read(ohci, OHCI1394_Version); 587 buf = reg_read(ohci, OHCI1394_Version);
589 sprintf (irq_buf, "%d", ohci->dev->irq); 588 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
590 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
591 "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]", 589 "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
592 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10), 590 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
593 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf, 591 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
594 (unsigned long long)pci_resource_start(ohci->dev, 0), 592 (unsigned long long)pci_resource_start(ohci->dev, 0),
595 (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1, 593 (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
596 ohci->max_packet_size, 594 ohci->max_packet_size,
@@ -3217,6 +3215,18 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3217 struct ti_ohci *ohci; /* shortcut to currently handled device */ 3215 struct ti_ohci *ohci; /* shortcut to currently handled device */
3218 resource_size_t ohci_base; 3216 resource_size_t ohci_base;
3219 3217
3218#ifdef CONFIG_PPC_PMAC
 3219 /* Necessary on some machines if ohci1394 was loaded/unloaded before */
3220 if (machine_is(powermac)) {
3221 struct device_node *ofn = pci_device_to_OF_node(dev);
3222
3223 if (ofn) {
3224 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3225 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3226 }
3227 }
3228#endif /* CONFIG_PPC_PMAC */
3229
3220 if (pci_enable_device(dev)) 3230 if (pci_enable_device(dev))
3221 FAIL(-ENXIO, "Failed to enable OHCI hardware"); 3231 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3222 pci_set_master(dev); 3232 pci_set_master(dev);
@@ -3505,17 +3515,14 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3505#endif 3515#endif
3506 3516
3507#ifdef CONFIG_PPC_PMAC 3517#ifdef CONFIG_PPC_PMAC
3508 /* On UniNorth, power down the cable and turn off the chip 3518 /* On UniNorth, power down the cable and turn off the chip clock
3509 * clock when the module is removed to save power on 3519 * to save power on laptops */
3510 * laptops. Turning it back ON is done by the arch code when 3520 if (machine_is(powermac)) {
3511 * pci_enable_device() is called */ 3521 struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3512 {
3513 struct device_node* of_node;
3514 3522
3515 of_node = pci_device_to_OF_node(ohci->dev); 3523 if (ofn) {
3516 if (of_node) { 3524 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3517 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0); 3525 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3518 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3519 } 3526 }
3520 } 3527 }
3521#endif /* CONFIG_PPC_PMAC */ 3528#endif /* CONFIG_PPC_PMAC */
@@ -3529,59 +3536,102 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3529} 3536}
3530 3537
3531#ifdef CONFIG_PM 3538#ifdef CONFIG_PM
3532static int ohci1394_pci_resume (struct pci_dev *pdev) 3539static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3533{
3534/* PowerMac resume code comes first */
3535#ifdef CONFIG_PPC_PMAC
3536 if (machine_is(powermac)) {
3537 struct device_node *of_node;
3538
3539 /* Re-enable 1394 */
3540 of_node = pci_device_to_OF_node (pdev);
3541 if (of_node)
3542 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3543 }
3544#endif /* CONFIG_PPC_PMAC */
3545
3546 pci_set_power_state(pdev, PCI_D0);
3547 pci_restore_state(pdev);
3548 return pci_enable_device(pdev);
3549}
3550
3551static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3552{ 3540{
3553 int err; 3541 int err;
3542 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3554 3543
3555 printk(KERN_INFO "%s does not fully support suspend and resume yet\n", 3544 printk(KERN_INFO "%s does not fully support suspend and resume yet\n",
3556 OHCI1394_DRIVER_NAME); 3545 OHCI1394_DRIVER_NAME);
3557 3546
3547 if (!ohci) {
3548 printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
3549 OHCI1394_DRIVER_NAME);
3550 return -ENXIO;
3551 }
3552 DBGMSG("suspend called");
3553
3554 /* Clear the async DMA contexts and stop using the controller */
3555 hpsb_bus_reset(ohci->host);
3556
3557 /* See ohci1394_pci_remove() for comments on this sequence */
3558 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3559 reg_write(ohci, OHCI1394_BusOptions,
3560 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3561 0x00ff0000);
3562 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3563 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3564 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3565 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3566 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3567 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3568 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3569 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3570 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3571 ohci_soft_reset(ohci);
3572
3558 err = pci_save_state(pdev); 3573 err = pci_save_state(pdev);
3559 if (err) { 3574 if (err) {
3560 printk(KERN_ERR "%s: pci_save_state failed with %d\n", 3575 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3561 OHCI1394_DRIVER_NAME, err);
3562 return err; 3576 return err;
3563 } 3577 }
3564 err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3578 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3565#ifdef OHCI1394_DEBUG
3566 if (err) 3579 if (err)
3567 printk(KERN_DEBUG "%s: pci_set_power_state failed with %d\n", 3580 DBGMSG("pci_set_power_state failed with %d", err);
3568 OHCI1394_DRIVER_NAME, err);
3569#endif /* OHCI1394_DEBUG */
3570 3581
3571/* PowerMac suspend code comes last */ 3582/* PowerMac suspend code comes last */
3572#ifdef CONFIG_PPC_PMAC 3583#ifdef CONFIG_PPC_PMAC
3573 if (machine_is(powermac)) { 3584 if (machine_is(powermac)) {
3574 struct device_node *of_node; 3585 struct device_node *ofn = pci_device_to_OF_node(pdev);
3575 3586
3576 /* Disable 1394 */ 3587 if (ofn)
3577 of_node = pci_device_to_OF_node (pdev); 3588 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3578 if (of_node)
3579 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3580 } 3589 }
3581#endif /* CONFIG_PPC_PMAC */ 3590#endif /* CONFIG_PPC_PMAC */
3582 3591
3583 return 0; 3592 return 0;
3584} 3593}
3594
3595static int ohci1394_pci_resume(struct pci_dev *pdev)
3596{
3597 int err;
3598 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3599
3600 if (!ohci) {
3601 printk(KERN_ERR "%s: tried to resume nonexisting host\n",
3602 OHCI1394_DRIVER_NAME);
3603 return -ENXIO;
3604 }
3605 DBGMSG("resume called");
3606
3607/* PowerMac resume code comes first */
3608#ifdef CONFIG_PPC_PMAC
3609 if (machine_is(powermac)) {
3610 struct device_node *ofn = pci_device_to_OF_node(pdev);
3611
3612 if (ofn)
3613 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3614 }
3615#endif /* CONFIG_PPC_PMAC */
3616
3617 pci_set_power_state(pdev, PCI_D0);
3618 pci_restore_state(pdev);
3619 err = pci_enable_device(pdev);
3620 if (err) {
3621 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3622 return err;
3623 }
3624
3625 /* See ohci1394_pci_probe() for comments on this sequence */
3626 ohci_soft_reset(ohci);
3627 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3628 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3629 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3630 mdelay(50);
3631 ohci_initialize(ohci);
3632
3633 return 0;
3634}
3585#endif /* CONFIG_PM */ 3635#endif /* CONFIG_PM */
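
The new suspend and resume handlers are wired into the driver through the usual struct pci_driver callbacks. A hedged skeleton of that wiring under the 2.6.20-era PCI PM interface (all names and the trivial bodies are placeholders, not ohci1394 code):

#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = { { } };

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        return pci_enable_device(dev);
}

static void example_remove(struct pci_dev *dev)
{
        pci_disable_device(dev);
}

#ifdef CONFIG_PM
static int example_suspend(struct pci_dev *dev, pm_message_t state)
{
        int err = pci_save_state(dev);

        return err ? err : pci_set_power_state(dev, pci_choose_state(dev, state));
}

static int example_resume(struct pci_dev *dev)
{
        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);
        return pci_enable_device(dev);
}
#endif

static struct pci_driver example_pci_driver = {
        .name     = "example1394",
        .id_table = example_pci_ids,
        .probe    = example_probe,
        .remove   = example_remove,
#ifdef CONFIG_PM
        .suspend  = example_suspend,
        .resume   = example_resume,
#endif
};
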
3586 3636
3587#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10) 3637#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 9cab1d661472..13a617917bf2 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1428,10 +1428,9 @@ static int __devinit add_card(struct pci_dev *dev,
1428 struct i2c_algo_bit_data i2c_adapter_data; 1428 struct i2c_algo_bit_data i2c_adapter_data;
1429 1429
1430 error = -ENOMEM; 1430 error = -ENOMEM;
1431 i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL); 1431 i2c_ad = kmemdup(&bit_ops, sizeof(*i2c_ad), GFP_KERNEL);
1432 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1432 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1433 1433
1434 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
1435 i2c_adapter_data = bit_data; 1434 i2c_adapter_data = bit_data;
1436 i2c_ad->algo_data = &i2c_adapter_data; 1435 i2c_ad->algo_data = &i2c_adapter_data;
1437 i2c_adapter_data.data = lynx; 1436 i2c_adapter_data.data = lynx;
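
kmemdup() collapses the former kmalloc()+memcpy() pair into a single call. A minimal hedged sketch of the idiom (the helper and its use of struct i2c_adapter are invented for illustration):

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: duplicate a template object in one step. */
static struct i2c_adapter *example_dup_adapter(const struct i2c_adapter *tmpl)
{
        /* equivalent to kmalloc(sizeof(*tmpl), GFP_KERNEL) plus memcpy() */
        return kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
}
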
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
index c7731d1bcd89..50daabf6e5fa 100644
--- a/drivers/ieee1394/raw1394-private.h
+++ b/drivers/ieee1394/raw1394-private.h
@@ -27,12 +27,12 @@ struct file_info {
27 27
28 struct hpsb_host *host; 28 struct hpsb_host *host;
29 29
30 struct list_head req_pending; 30 struct list_head req_pending; /* protected by reqlists_lock */
31 struct list_head req_complete; 31 struct list_head req_complete; /* protected by reqlists_lock */
32 spinlock_t reqlists_lock; 32 spinlock_t reqlists_lock;
33 wait_queue_head_t wait_complete; 33 wait_queue_head_t wait_complete;
34 34
35 struct list_head addr_list; 35 struct list_head addr_list; /* protected by host_info_lock */
36 36
37 u8 __user *fcp_buffer; 37 u8 __user *fcp_buffer;
38 38
@@ -63,7 +63,7 @@ struct arm_addr {
63 u8 client_transactions; 63 u8 client_transactions;
64 u64 recvb; 64 u64 recvb;
65 u16 rec_length; 65 u16 rec_length;
66 u8 *addr_space_buffer; /* accessed by read/write/lock */ 66 u8 *addr_space_buffer; /* accessed by read/write/lock requests */
67}; 67};
68 68
69struct pending_request { 69struct pending_request {
@@ -79,7 +79,7 @@ struct pending_request {
79struct host_info { 79struct host_info {
80 struct list_head list; 80 struct list_head list;
81 struct hpsb_host *host; 81 struct hpsb_host *host;
82 struct list_head file_info_list; 82 struct list_head file_info_list; /* protected by host_info_lock */
83}; 83};
84 84
85#endif /* IEEE1394_RAW1394_PRIVATE_H */ 85#endif /* IEEE1394_RAW1394_PRIVATE_H */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bf71e069eaf5..ad2108f27a04 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -99,6 +99,21 @@ static struct hpsb_address_ops arm_ops = {
99 99
100static void queue_complete_cb(struct pending_request *req); 100static void queue_complete_cb(struct pending_request *req);
101 101
102#include <asm/current.h>
103static void print_old_iso_deprecation(void)
104{
105 static pid_t p;
106
107 if (p == current->pid)
108 return;
109 p = current->pid;
110 printk(KERN_WARNING "raw1394: WARNING - Program \"%s\" uses unsupported"
111 " isochronous request types which will be removed in a next"
112 " kernel release\n", current->comm);
113 printk(KERN_WARNING "raw1394: Update your software to use libraw1394's"
114 " newer interface\n");
115}
116
102static struct pending_request *__alloc_pending_request(gfp_t flags) 117static struct pending_request *__alloc_pending_request(gfp_t flags)
103{ 118{
104 struct pending_request *req; 119 struct pending_request *req;
@@ -2292,6 +2307,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
2292 return sizeof(struct raw1394_request); 2307 return sizeof(struct raw1394_request);
2293 2308
2294 case RAW1394_REQ_ISO_SEND: 2309 case RAW1394_REQ_ISO_SEND:
2310 print_old_iso_deprecation();
2295 return handle_iso_send(fi, req, node); 2311 return handle_iso_send(fi, req, node);
2296 2312
2297 case RAW1394_REQ_ARM_REGISTER: 2313 case RAW1394_REQ_ARM_REGISTER:
@@ -2310,6 +2326,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
2310 return reset_notification(fi, req); 2326 return reset_notification(fi, req);
2311 2327
2312 case RAW1394_REQ_ISO_LISTEN: 2328 case RAW1394_REQ_ISO_LISTEN:
2329 print_old_iso_deprecation();
2313 handle_iso_listen(fi, req); 2330 handle_iso_listen(fi, req);
2314 return sizeof(struct raw1394_request); 2331 return sizeof(struct raw1394_request);
2315 2332
@@ -2970,12 +2987,8 @@ static struct ieee1394_device_id raw1394_id_table[] = {
2970MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table); 2987MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
2971 2988
2972static struct hpsb_protocol_driver raw1394_driver = { 2989static struct hpsb_protocol_driver raw1394_driver = {
2973 .name = "raw1394 Driver", 2990 .name = "raw1394",
2974 .id_table = raw1394_id_table, 2991 .id_table = raw1394_id_table,
2975 .driver = {
2976 .name = "raw1394",
2977 .bus = &ieee1394_bus_type,
2978 },
2979}; 2992};
2980 2993
2981/******************************************************************************/ 2994/******************************************************************************/
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index cd156d4e779e..e68b80b7340d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -29,13 +29,26 @@
29 * driver. It also registers as a SCSI lower-level driver in order to accept 29 * driver. It also registers as a SCSI lower-level driver in order to accept
30 * SCSI commands for transport using SBP-2. 30 * SCSI commands for transport using SBP-2.
31 * 31 *
32 * You may access any attached SBP-2 storage devices as if they were SCSI 32 * You may access any attached SBP-2 (usually storage devices) as regular
 33 * devices (e.g. mount /dev/sda1, fdisk, mkfs, etc.). 33 * SCSI devices. E.g. mount /dev/sda1, fdisk, mkfs, etc.
34 * 34 *
35 * Current Issues: 35 * See http://www.t10.org/drafts.htm#sbp2 for the final draft of the SBP-2
36 * specification and for where to purchase the official standard.
36 * 37 *
37 * - Error Handling: SCSI aborts and bus reset requests are handled somewhat 38 * TODO:
38 * but the code needs additional debugging. 39 * - look into possible improvements of the SCSI error handlers
40 * - handle Unit_Characteristics.mgt_ORB_timeout and .ORB_size
41 * - handle Logical_Unit_Number.ordered
42 * - handle src == 1 in status blocks
43 * - reimplement the DMA mapping in absence of physical DMA so that
44 * bus_to_virt is no longer required
45 * - debug the handling of absent physical DMA
46 * - replace CONFIG_IEEE1394_SBP2_PHYS_DMA by automatic detection
47 * (this is easy but depends on the previous two TODO items)
48 * - make the parameter serialize_io configurable per device
49 * - move all requests to fetch agent registers into non-atomic context,
50 * replace all usages of sbp2util_node_write_no_wait by true transactions
51 * Grep for inline FIXME comments below.
39 */ 52 */
40 53
41#include <linux/blkdev.h> 54#include <linux/blkdev.h>
@@ -49,7 +62,6 @@
49#include <linux/list.h> 62#include <linux/list.h>
50#include <linux/module.h> 63#include <linux/module.h>
51#include <linux/moduleparam.h> 64#include <linux/moduleparam.h>
52#include <linux/pci.h>
53#include <linux/slab.h> 65#include <linux/slab.h>
54#include <linux/spinlock.h> 66#include <linux/spinlock.h>
55#include <linux/stat.h> 67#include <linux/stat.h>
@@ -98,20 +110,20 @@
98 * (probably due to PCI latency/throughput issues with the part). You can 110 * (probably due to PCI latency/throughput issues with the part). You can
99 * bump down the speed if you are running into problems. 111 * bump down the speed if you are running into problems.
100 */ 112 */
101static int max_speed = IEEE1394_SPEED_MAX; 113static int sbp2_max_speed = IEEE1394_SPEED_MAX;
102module_param(max_speed, int, 0644); 114module_param_named(max_speed, sbp2_max_speed, int, 0644);
103MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)"); 115MODULE_PARM_DESC(max_speed, "Force max speed "
116 "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)");
104 117
105/* 118/*
106 * Set serialize_io to 1 if you'd like only one scsi command sent 119 * Set serialize_io to 1 if you'd like only one scsi command sent
107 * down to us at a time (debugging). This might be necessary for very 120 * down to us at a time (debugging). This might be necessary for very
108 * badly behaved sbp2 devices. 121 * badly behaved sbp2 devices.
109 *
110 * TODO: Make this configurable per device.
111 */ 122 */
112static int serialize_io = 1; 123static int sbp2_serialize_io = 1;
113module_param(serialize_io, int, 0444); 124module_param_named(serialize_io, sbp2_serialize_io, int, 0444);
114MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)"); 125MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers "
126 "(default = 1, faster = 0)");
115 127
116/* 128/*
117 * Bump up max_sectors if you'd like to support very large sized 129 * Bump up max_sectors if you'd like to support very large sized
@@ -121,10 +133,10 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
121 * the Oxsemi sbp2 chipsets have no problems supporting very large 133 * the Oxsemi sbp2 chipsets have no problems supporting very large
122 * transfer sizes. 134 * transfer sizes.
123 */ 135 */
124static int max_sectors = SBP2_MAX_SECTORS; 136static int sbp2_max_sectors = SBP2_MAX_SECTORS;
125module_param(max_sectors, int, 0444); 137module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
126MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = " 138MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
127 __stringify(SBP2_MAX_SECTORS) ")"); 139 "(default = " __stringify(SBP2_MAX_SECTORS) ")");
128 140
129/* 141/*
130 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should 142 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -139,9 +151,10 @@ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
139 * concurrent logins. Depending on firmware, four or two concurrent logins 151 * concurrent logins. Depending on firmware, four or two concurrent logins
140 * are possible on OXFW911 and newer Oxsemi bridges. 152 * are possible on OXFW911 and newer Oxsemi bridges.
141 */ 153 */
142static int exclusive_login = 1; 154static int sbp2_exclusive_login = 1;
143module_param(exclusive_login, int, 0644); 155module_param_named(exclusive_login, sbp2_exclusive_login, int, 0644);
144MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); 156MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
157 "(default = 1)");
145 158
146/* 159/*
147 * If any of the following workarounds is required for your device to work, 160 * If any of the following workarounds is required for your device to work,
@@ -179,123 +192,123 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
179 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 192 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
180 ", or a combination)"); 193 ", or a combination)");
181 194
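
The parameter conversions above use module_param_named(), which keeps the user-visible names (max_speed, serialize_io, max_sectors, exclusive_login) while the C variables gain an sbp2_ prefix. A minimal sketch of the idiom with a made-up parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: "retries" stays the name on the command line
 * (e.g. modprobe example retries=5) while the value lands in
 * example_retries. */
static int example_retries = 3;
module_param_named(retries, example_retries, int, 0644);
MODULE_PARM_DESC(retries, "Number of retries (default = 3)");
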
182/*
183 * Export information about protocols/devices supported by this driver.
184 */
185static struct ieee1394_device_id sbp2_id_table[] = {
186 {
187 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
188 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
189 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
190 {}
191};
192
193MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
194
195/*
196 * Debug levels, configured via kernel config, or enable here.
197 */
198
199#define CONFIG_IEEE1394_SBP2_DEBUG 0
200/* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
201/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
202/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
203/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
204/* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
205
206#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
207#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
208static u32 global_outstanding_command_orbs = 0;
209#define outstanding_orb_incr global_outstanding_command_orbs++
210#define outstanding_orb_decr global_outstanding_command_orbs--
211#else
212#define SBP2_ORB_DEBUG(fmt, args...) do {} while (0)
213#define outstanding_orb_incr do {} while (0)
214#define outstanding_orb_decr do {} while (0)
215#endif
216
217#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
218#define SBP2_DMA_ALLOC(fmt, args...) \
219 HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
220 ++global_outstanding_dmas, ## args)
221#define SBP2_DMA_FREE(fmt, args...) \
222 HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
223 --global_outstanding_dmas, ## args)
224static u32 global_outstanding_dmas = 0;
225#else
226#define SBP2_DMA_ALLOC(fmt, args...) do {} while (0)
227#define SBP2_DMA_FREE(fmt, args...) do {} while (0)
228#endif
229 195
230#if CONFIG_IEEE1394_SBP2_DEBUG >= 2 196#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
231#define SBP2_DEBUG(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args) 197#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
232#define SBP2_INFO(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
233#define SBP2_NOTICE(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
234#define SBP2_WARN(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
235#elif CONFIG_IEEE1394_SBP2_DEBUG == 1
236#define SBP2_DEBUG(fmt, args...) HPSB_DEBUG("sbp2: "fmt, ## args)
237#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
238#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
239#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
240#else
241#define SBP2_DEBUG(fmt, args...) do {} while (0)
242#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
243#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
244#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
245#endif
246
247#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
248#define SBP2_DEBUG_ENTER() SBP2_DEBUG("%s", __FUNCTION__)
249 198
250/* 199/*
251 * Globals 200 * Globals
252 */ 201 */
202static void sbp2scsi_complete_all_commands(struct sbp2_lu *, u32);
203static void sbp2scsi_complete_command(struct sbp2_lu *, u32, struct scsi_cmnd *,
204 void (*)(struct scsi_cmnd *));
205static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *);
206static int sbp2_start_device(struct sbp2_lu *);
207static void sbp2_remove_device(struct sbp2_lu *);
208static int sbp2_login_device(struct sbp2_lu *);
209static int sbp2_reconnect_device(struct sbp2_lu *);
210static int sbp2_logout_device(struct sbp2_lu *);
211static void sbp2_host_reset(struct hpsb_host *);
212static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *,
213 u64, size_t, u16);
214static int sbp2_agent_reset(struct sbp2_lu *, int);
215static void sbp2_parse_unit_directory(struct sbp2_lu *,
216 struct unit_directory *);
217static int sbp2_set_busy_timeout(struct sbp2_lu *);
218static int sbp2_max_speed_and_size(struct sbp2_lu *);
253 219
254static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
255 u32 status);
256
257static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
258 u32 scsi_status, struct scsi_cmnd *SCpnt,
259 void (*done)(struct scsi_cmnd *));
260
261static struct scsi_host_template scsi_driver_template;
262 220
263static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; 221static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
264 222
265static void sbp2_host_reset(struct hpsb_host *host);
266
267static int sbp2_probe(struct device *dev);
268static int sbp2_remove(struct device *dev);
269static int sbp2_update(struct unit_directory *ud);
270
271static struct hpsb_highlevel sbp2_highlevel = { 223static struct hpsb_highlevel sbp2_highlevel = {
272 .name = SBP2_DEVICE_NAME, 224 .name = SBP2_DEVICE_NAME,
273 .host_reset = sbp2_host_reset, 225 .host_reset = sbp2_host_reset,
274}; 226};
275 227
276static struct hpsb_address_ops sbp2_ops = { 228static struct hpsb_address_ops sbp2_ops = {
277 .write = sbp2_handle_status_write 229 .write = sbp2_handle_status_write
278}; 230};
279 231
280#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 232#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
233static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
234 u64, size_t, u16);
235static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
236 size_t, u16);
237
281static struct hpsb_address_ops sbp2_physdma_ops = { 238static struct hpsb_address_ops sbp2_physdma_ops = {
282 .read = sbp2_handle_physdma_read, 239 .read = sbp2_handle_physdma_read,
283 .write = sbp2_handle_physdma_write, 240 .write = sbp2_handle_physdma_write,
284}; 241};
285#endif 242#endif
286 243
244
245/*
246 * Interface to driver core and IEEE 1394 core
247 */
248static struct ieee1394_device_id sbp2_id_table[] = {
249 {
250 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
251 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
252 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
253 {}
254};
255MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
256
257static int sbp2_probe(struct device *);
258static int sbp2_remove(struct device *);
259static int sbp2_update(struct unit_directory *);
260
287static struct hpsb_protocol_driver sbp2_driver = { 261static struct hpsb_protocol_driver sbp2_driver = {
288 .name = "SBP2 Driver", 262 .name = SBP2_DEVICE_NAME,
289 .id_table = sbp2_id_table, 263 .id_table = sbp2_id_table,
290 .update = sbp2_update, 264 .update = sbp2_update,
291 .driver = { 265 .driver = {
292 .name = SBP2_DEVICE_NAME,
293 .bus = &ieee1394_bus_type,
294 .probe = sbp2_probe, 266 .probe = sbp2_probe,
295 .remove = sbp2_remove, 267 .remove = sbp2_remove,
296 }, 268 },
297}; 269};
298 270
271
272/*
273 * Interface to SCSI core
274 */
275static int sbp2scsi_queuecommand(struct scsi_cmnd *,
276 void (*)(struct scsi_cmnd *));
277static int sbp2scsi_abort(struct scsi_cmnd *);
278static int sbp2scsi_reset(struct scsi_cmnd *);
279static int sbp2scsi_slave_alloc(struct scsi_device *);
280static int sbp2scsi_slave_configure(struct scsi_device *);
281static void sbp2scsi_slave_destroy(struct scsi_device *);
282static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *,
283 struct device_attribute *, char *);
284
285static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
286
287static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
288 &dev_attr_ieee1394_id,
289 NULL
290};
291
292static struct scsi_host_template sbp2_shost_template = {
293 .module = THIS_MODULE,
294 .name = "SBP-2 IEEE-1394",
295 .proc_name = SBP2_DEVICE_NAME,
296 .queuecommand = sbp2scsi_queuecommand,
297 .eh_abort_handler = sbp2scsi_abort,
298 .eh_device_reset_handler = sbp2scsi_reset,
299 .slave_alloc = sbp2scsi_slave_alloc,
300 .slave_configure = sbp2scsi_slave_configure,
301 .slave_destroy = sbp2scsi_slave_destroy,
302 .this_id = -1,
303 .sg_tablesize = SG_ALL,
304 .use_clustering = ENABLE_CLUSTERING,
305 .cmd_per_lun = SBP2_MAX_CMDS,
306 .can_queue = SBP2_MAX_CMDS,
307 .emulated = 1,
308 .sdev_attrs = sbp2_sysfs_sdev_attrs,
309};
310
311
299/* 312/*
300 * List of devices with known bugs. 313 * List of devices with known bugs.
301 * 314 *
@@ -363,8 +376,6 @@ static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
363 376
364 for (length = (length >> 2); length--; ) 377 for (length = (length >> 2); length--; )
365 temp[length] = be32_to_cpu(temp[length]); 378 temp[length] = be32_to_cpu(temp[length]);
366
367 return;
368} 379}
369 380
370/* 381/*
@@ -376,8 +387,6 @@ static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
376 387
377 for (length = (length >> 2); length--; ) 388 for (length = (length >> 2); length--; )
378 temp[length] = cpu_to_be32(temp[length]); 389 temp[length] = cpu_to_be32(temp[length]);
379
380 return;
381} 390}
382#else /* BIG_ENDIAN */ 391#else /* BIG_ENDIAN */
383/* Why waste the cpu cycles? */ 392/* Why waste the cpu cycles? */
@@ -385,344 +394,246 @@ static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
385#define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0) 394#define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
386#endif 395#endif
387 396
388#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP 397static DECLARE_WAIT_QUEUE_HEAD(sbp2_access_wq);
389/*
390 * Debug packet dump routine. Length is in bytes.
391 */
392static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
393 u32 dump_phys_addr)
394{
395 int i;
396 unsigned char *dump = buffer;
397
398 if (!dump || !length || !dump_name)
399 return;
400
401 if (dump_phys_addr)
402 printk("[%s, 0x%x]", dump_name, dump_phys_addr);
403 else
404 printk("[%s]", dump_name);
405 for (i = 0; i < length; i++) {
406 if (i > 0x3f) {
407 printk("\n ...");
408 break;
409 }
410 if ((i & 0x3) == 0)
411 printk(" ");
412 if ((i & 0xf) == 0)
413 printk("\n ");
414 printk("%02x ", (int)dump[i]);
415 }
416 printk("\n");
417
418 return;
419}
420#else
421#define sbp2util_packet_dump(w,x,y,z) do {} while (0)
422#endif
423
424static DECLARE_WAIT_QUEUE_HEAD(access_wq);
425 398
426/* 399/*
427 * Waits for completion of an SBP-2 access request. 400 * Waits for completion of an SBP-2 access request.
428 * Returns nonzero if timed out or prematurely interrupted. 401 * Returns nonzero if timed out or prematurely interrupted.
429 */ 402 */
430static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id, 403static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
431 int timeout)
432{ 404{
433 long leftover = wait_event_interruptible_timeout( 405 long leftover;
434 access_wq, scsi_id->access_complete, timeout);
435 406
436 scsi_id->access_complete = 0; 407 leftover = wait_event_interruptible_timeout(
408 sbp2_access_wq, lu->access_complete, timeout);
409 lu->access_complete = 0;
437 return leftover <= 0; 410 return leftover <= 0;
438} 411}
439 412
440/* Frees an allocated packet */ 413static void sbp2_free_packet(void *packet)
441static void sbp2_free_packet(struct hpsb_packet *packet)
442{ 414{
443 hpsb_free_tlabel(packet); 415 hpsb_free_tlabel(packet);
444 hpsb_free_packet(packet); 416 hpsb_free_packet(packet);
445} 417}
446 418
447/* This is much like hpsb_node_write(), except it ignores the response 419/*
448 * subaction and returns immediately. Can be used from interrupts. 420 * This is much like hpsb_node_write(), except it ignores the response
421 * subaction and returns immediately. Can be used from atomic context.
449 */ 422 */
450static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, 423static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
451 quadlet_t *buffer, size_t length) 424 quadlet_t *buf, size_t len)
452{ 425{
453 struct hpsb_packet *packet; 426 struct hpsb_packet *packet;
454 427
455 packet = hpsb_make_writepacket(ne->host, ne->nodeid, 428 packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
456 addr, buffer, length);
457 if (!packet) 429 if (!packet)
458 return -ENOMEM; 430 return -ENOMEM;
459 431
460 hpsb_set_packet_complete_task(packet, 432 hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
461 (void (*)(void *))sbp2_free_packet,
462 packet);
463
464 hpsb_node_fill_packet(ne, packet); 433 hpsb_node_fill_packet(ne, packet);
465
466 if (hpsb_send_packet(packet) < 0) { 434 if (hpsb_send_packet(packet) < 0) {
467 sbp2_free_packet(packet); 435 sbp2_free_packet(packet);
468 return -EIO; 436 return -EIO;
469 } 437 }
470
471 return 0; 438 return 0;
472} 439}
473 440
474static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, 441static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
475 u64 offset, quadlet_t *data, size_t len) 442 quadlet_t *data, size_t len)
476{ 443{
477 /* 444 /* There is a small window after a bus reset within which the node
478 * There is a small window after a bus reset within which the node 445 * entry's generation is current but the reconnect wasn't completed. */
479 * entry's generation is current but the reconnect wasn't completed. 446 if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
480 */
481 if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
482 return; 447 return;
483 448
484 if (hpsb_node_write(scsi_id->ne, 449 if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
485 scsi_id->sbp2_command_block_agent_addr + offset,
486 data, len)) 450 data, len))
487 SBP2_ERR("sbp2util_notify_fetch_agent failed."); 451 SBP2_ERR("sbp2util_notify_fetch_agent failed.");
488 /* 452
489 * Now accept new SCSI commands, unless a bus reset happened during 453 /* Now accept new SCSI commands, unless a bus reset happened during
490 * hpsb_node_write. 454 * hpsb_node_write. */
491 */ 455 if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
492 if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET)) 456 scsi_unblock_requests(lu->shost);
493 scsi_unblock_requests(scsi_id->scsi_host);
494} 457}
495 458
496static void sbp2util_write_orb_pointer(struct work_struct *work) 459static void sbp2util_write_orb_pointer(struct work_struct *work)
497{ 460{
498 struct scsi_id_instance_data *scsi_id = 461 struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
499 container_of(work, struct scsi_id_instance_data,
500 protocol_work.work);
501 quadlet_t data[2]; 462 quadlet_t data[2];
502 463
503 data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id); 464 data[0] = ORB_SET_NODE_ID(lu->hi->host->node_id);
504 data[1] = scsi_id->last_orb_dma; 465 data[1] = lu->last_orb_dma;
505 sbp2util_cpu_to_be32_buffer(data, 8); 466 sbp2util_cpu_to_be32_buffer(data, 8);
506 sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8); 467 sbp2util_notify_fetch_agent(lu, SBP2_ORB_POINTER_OFFSET, data, 8);
507} 468}
508 469
509static void sbp2util_write_doorbell(struct work_struct *work) 470static void sbp2util_write_doorbell(struct work_struct *work)
510{ 471{
511 struct scsi_id_instance_data *scsi_id = 472 struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
512 container_of(work, struct scsi_id_instance_data, 473
513 protocol_work.work); 474 sbp2util_notify_fetch_agent(lu, SBP2_DOORBELL_OFFSET, NULL, 4);
514 sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
515} 475}
516 476
517/* 477static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
518 * This function is called to create a pool of command orbs used for
519 * command processing. It is called when a new sbp2 device is detected.
520 */
521static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
522{ 478{
523 struct sbp2scsi_host_info *hi = scsi_id->hi; 479 struct sbp2_fwhost_info *hi = lu->hi;
524 int i; 480 int i;
525 unsigned long flags, orbs; 481 unsigned long flags, orbs;
526 struct sbp2_command_info *command; 482 struct sbp2_command_info *cmd;
527 483
528 orbs = serialize_io ? 2 : SBP2_MAX_CMDS; 484 orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
529 485
530 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 486 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
531 for (i = 0; i < orbs; i++) { 487 for (i = 0; i < orbs; i++) {
532 command = kzalloc(sizeof(*command), GFP_ATOMIC); 488 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
533 if (!command) { 489 if (!cmd) {
534 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, 490 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
535 flags);
536 return -ENOMEM; 491 return -ENOMEM;
537 } 492 }
538 command->command_orb_dma = 493 cmd->command_orb_dma = dma_map_single(&hi->host->device,
539 pci_map_single(hi->host->pdev, &command->command_orb, 494 &cmd->command_orb,
540 sizeof(struct sbp2_command_orb), 495 sizeof(struct sbp2_command_orb),
541 PCI_DMA_TODEVICE); 496 DMA_TO_DEVICE);
542 SBP2_DMA_ALLOC("single command orb DMA"); 497 cmd->sge_dma = dma_map_single(&hi->host->device,
543 command->sge_dma = 498 &cmd->scatter_gather_element,
544 pci_map_single(hi->host->pdev, 499 sizeof(cmd->scatter_gather_element),
545 &command->scatter_gather_element, 500 DMA_BIDIRECTIONAL);
546 sizeof(command->scatter_gather_element), 501 INIT_LIST_HEAD(&cmd->list);
547 PCI_DMA_BIDIRECTIONAL); 502 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
548 SBP2_DMA_ALLOC("scatter_gather_element"); 503 }
549 INIT_LIST_HEAD(&command->list); 504 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
550 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
551 }
552 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
553 return 0; 505 return 0;
554} 506}
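
The ORB pool above now maps its buffers through the generic DMA API against &hi->host->device instead of the PCI-specific pci_map_single()/pci_unmap_single() wrappers. A hedged sketch of the corresponding map/unmap pair (helper names are invented):

#include <linux/dma-mapping.h>

/* Sketch only: map one ORB for device reads and undo the mapping later. */
static dma_addr_t example_map_orb(struct device *dev, void *orb, size_t len)
{
        return dma_map_single(dev, orb, len, DMA_TO_DEVICE);
}

static void example_unmap_orb(struct device *dev, dma_addr_t bus, size_t len)
{
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}
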
555 507
556/* 508static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
557 * This function is called to delete a pool of command orbs.
558 */
559static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
560{ 509{
561 struct hpsb_host *host = scsi_id->hi->host; 510 struct hpsb_host *host = lu->hi->host;
562 struct list_head *lh, *next; 511 struct list_head *lh, *next;
563 struct sbp2_command_info *command; 512 struct sbp2_command_info *cmd;
564 unsigned long flags; 513 unsigned long flags;
565 514
566 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 515 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
567 if (!list_empty(&scsi_id->sbp2_command_orb_completed)) { 516 if (!list_empty(&lu->cmd_orb_completed))
568 list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) { 517 list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
569 command = list_entry(lh, struct sbp2_command_info, list); 518 cmd = list_entry(lh, struct sbp2_command_info, list);
570 519 dma_unmap_single(&host->device, cmd->command_orb_dma,
571 /* Release our generic DMA's */
572 pci_unmap_single(host->pdev, command->command_orb_dma,
573 sizeof(struct sbp2_command_orb), 520 sizeof(struct sbp2_command_orb),
574 PCI_DMA_TODEVICE); 521 DMA_TO_DEVICE);
575 SBP2_DMA_FREE("single command orb DMA"); 522 dma_unmap_single(&host->device, cmd->sge_dma,
576 pci_unmap_single(host->pdev, command->sge_dma, 523 sizeof(cmd->scatter_gather_element),
577 sizeof(command->scatter_gather_element), 524 DMA_BIDIRECTIONAL);
578 PCI_DMA_BIDIRECTIONAL); 525 kfree(cmd);
579 SBP2_DMA_FREE("scatter_gather_element");
580
581 kfree(command);
582 } 526 }
583 } 527 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
584 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
585 return; 528 return;
586} 529}
587 530
588/* 531/*
589 * This function finds the sbp2_command for a given outstanding command 532 * Finds the sbp2_command for a given outstanding command ORB.
590 * orb.Only looks at the inuse list. 533 * Only looks at the in-use list.
591 */ 534 */
592static struct sbp2_command_info *sbp2util_find_command_for_orb( 535static struct sbp2_command_info *sbp2util_find_command_for_orb(
593 struct scsi_id_instance_data *scsi_id, dma_addr_t orb) 536 struct sbp2_lu *lu, dma_addr_t orb)
594{ 537{
595 struct sbp2_command_info *command; 538 struct sbp2_command_info *cmd;
596 unsigned long flags; 539 unsigned long flags;
597 540
598 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 541 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
599 if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) { 542 if (!list_empty(&lu->cmd_orb_inuse))
600 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 543 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
601 if (command->command_orb_dma == orb) { 544 if (cmd->command_orb_dma == orb) {
602 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 545 spin_unlock_irqrestore(
603 return command; 546 &lu->cmd_orb_lock, flags);
547 return cmd;
604 } 548 }
605 } 549 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
606 }
607 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
608
609 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
610
611 return NULL; 550 return NULL;
612} 551}
613 552
614/* 553/*
615 * This function finds the sbp2_command for a given outstanding SCpnt. 554 * Finds the sbp2_command for a given outstanding SCpnt.
616 * Only looks at the inuse list. 555 * Only looks at the in-use list.
617 * Must be called with scsi_id->sbp2_command_orb_lock held. 556 * Must be called with lu->cmd_orb_lock held.
618 */ 557 */
619static struct sbp2_command_info *sbp2util_find_command_for_SCpnt( 558static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
620 struct scsi_id_instance_data *scsi_id, void *SCpnt) 559 struct sbp2_lu *lu, void *SCpnt)
621{ 560{
622 struct sbp2_command_info *command; 561 struct sbp2_command_info *cmd;
623 562
624 if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) 563 if (!list_empty(&lu->cmd_orb_inuse))
625 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) 564 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
626 if (command->Current_SCpnt == SCpnt) 565 if (cmd->Current_SCpnt == SCpnt)
627 return command; 566 return cmd;
628 return NULL; 567 return NULL;
629} 568}
630 569
631/*
632 * This function allocates a command orb used to send a scsi command.
633 */
634static struct sbp2_command_info *sbp2util_allocate_command_orb( 570static struct sbp2_command_info *sbp2util_allocate_command_orb(
635 struct scsi_id_instance_data *scsi_id, 571 struct sbp2_lu *lu,
636 struct scsi_cmnd *Current_SCpnt, 572 struct scsi_cmnd *Current_SCpnt,
637 void (*Current_done)(struct scsi_cmnd *)) 573 void (*Current_done)(struct scsi_cmnd *))
638{ 574{
639 struct list_head *lh; 575 struct list_head *lh;
640 struct sbp2_command_info *command = NULL; 576 struct sbp2_command_info *cmd = NULL;
641 unsigned long flags; 577 unsigned long flags;
642 578
643 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 579 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
644 if (!list_empty(&scsi_id->sbp2_command_orb_completed)) { 580 if (!list_empty(&lu->cmd_orb_completed)) {
645 lh = scsi_id->sbp2_command_orb_completed.next; 581 lh = lu->cmd_orb_completed.next;
646 list_del(lh); 582 list_del(lh);
647 command = list_entry(lh, struct sbp2_command_info, list); 583 cmd = list_entry(lh, struct sbp2_command_info, list);
648 command->Current_done = Current_done; 584 cmd->Current_done = Current_done;
649 command->Current_SCpnt = Current_SCpnt; 585 cmd->Current_SCpnt = Current_SCpnt;
650 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse); 586 list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
651 } else { 587 } else
652 SBP2_ERR("%s: no orbs available", __FUNCTION__); 588 SBP2_ERR("%s: no orbs available", __FUNCTION__);
653 } 589 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
654 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 590 return cmd;
655 return command;
656}
657
658/* Free our DMA's */
659static void sbp2util_free_command_dma(struct sbp2_command_info *command)
660{
661 struct scsi_id_instance_data *scsi_id =
662 (struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
663 struct hpsb_host *host;
664
665 if (!scsi_id) {
666 SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__);
667 return;
668 }
669
670 host = scsi_id->ud->ne->host;
671
672 if (command->cmd_dma) {
673 if (command->dma_type == CMD_DMA_SINGLE) {
674 pci_unmap_single(host->pdev, command->cmd_dma,
675 command->dma_size, command->dma_dir);
676 SBP2_DMA_FREE("single bulk");
677 } else if (command->dma_type == CMD_DMA_PAGE) {
678 pci_unmap_page(host->pdev, command->cmd_dma,
679 command->dma_size, command->dma_dir);
680 SBP2_DMA_FREE("single page");
681 } /* XXX: Check for CMD_DMA_NONE bug */
682 command->dma_type = CMD_DMA_NONE;
683 command->cmd_dma = 0;
684 }
685
686 if (command->sge_buffer) {
687 pci_unmap_sg(host->pdev, command->sge_buffer,
688 command->dma_size, command->dma_dir);
689 SBP2_DMA_FREE("scatter list");
690 command->sge_buffer = NULL;
691 }
692} 591}
693 592
694/* 593/*
695 * This function moves a command to the completed orb list. 594 * Unmaps the DMAs of a command and moves the command to the completed ORB list.
696 * Must be called with scsi_id->sbp2_command_orb_lock held. 595 * Must be called with lu->cmd_orb_lock held.
697 */ 596 */
698static void sbp2util_mark_command_completed( 597static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
699 struct scsi_id_instance_data *scsi_id, 598 struct sbp2_command_info *cmd)
700 struct sbp2_command_info *command)
701{ 599{
702 list_del(&command->list); 600 struct hpsb_host *host = lu->ud->ne->host;
703 sbp2util_free_command_dma(command); 601
704 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed); 602 if (cmd->cmd_dma) {
603 if (cmd->dma_type == CMD_DMA_SINGLE)
604 dma_unmap_single(&host->device, cmd->cmd_dma,
605 cmd->dma_size, cmd->dma_dir);
606 else if (cmd->dma_type == CMD_DMA_PAGE)
607 dma_unmap_page(&host->device, cmd->cmd_dma,
608 cmd->dma_size, cmd->dma_dir);
609 /* XXX: Check for CMD_DMA_NONE bug */
610 cmd->dma_type = CMD_DMA_NONE;
611 cmd->cmd_dma = 0;
612 }
613 if (cmd->sge_buffer) {
614 dma_unmap_sg(&host->device, cmd->sge_buffer,
615 cmd->dma_size, cmd->dma_dir);
616 cmd->sge_buffer = NULL;
617 }
618 list_move_tail(&cmd->list, &lu->cmd_orb_completed);
705} 619}
706 620
707/* 621/*
708 * Is scsi_id valid? Is the 1394 node still present? 622 * Is lu valid? Is the 1394 node still present?
709 */ 623 */
710static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id) 624static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
711{ 625{
712 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; 626 return lu && lu->ne && !lu->ne->in_limbo;
713} 627}
714 628
715/********************************************* 629/*********************************************
716 * IEEE-1394 core driver stack related section 630 * IEEE-1394 core driver stack related section
717 *********************************************/ 631 *********************************************/
718static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
719 632
720static int sbp2_probe(struct device *dev) 633static int sbp2_probe(struct device *dev)
721{ 634{
722 struct unit_directory *ud; 635 struct unit_directory *ud;
723 struct scsi_id_instance_data *scsi_id; 636 struct sbp2_lu *lu;
724
725 SBP2_DEBUG_ENTER();
726 637
727 ud = container_of(dev, struct unit_directory, device); 638 ud = container_of(dev, struct unit_directory, device);
728 639
@@ -731,67 +642,58 @@ static int sbp2_probe(struct device *dev)
731 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY) 642 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
732 return -ENODEV; 643 return -ENODEV;
733 644
734 scsi_id = sbp2_alloc_device(ud); 645 lu = sbp2_alloc_device(ud);
735 646 if (!lu)
736 if (!scsi_id)
737 return -ENOMEM; 647 return -ENOMEM;
738 648
739 sbp2_parse_unit_directory(scsi_id, ud); 649 sbp2_parse_unit_directory(lu, ud);
740 650 return sbp2_start_device(lu);
741 return sbp2_start_device(scsi_id);
742} 651}
743 652
744static int sbp2_remove(struct device *dev) 653static int sbp2_remove(struct device *dev)
745{ 654{
746 struct unit_directory *ud; 655 struct unit_directory *ud;
747 struct scsi_id_instance_data *scsi_id; 656 struct sbp2_lu *lu;
748 struct scsi_device *sdev; 657 struct scsi_device *sdev;
749 658
750 SBP2_DEBUG_ENTER();
751
752 ud = container_of(dev, struct unit_directory, device); 659 ud = container_of(dev, struct unit_directory, device);
753 scsi_id = ud->device.driver_data; 660 lu = ud->device.driver_data;
754 if (!scsi_id) 661 if (!lu)
755 return 0; 662 return 0;
756 663
757 if (scsi_id->scsi_host) { 664 if (lu->shost) {
758 /* Get rid of enqueued commands if there is no chance to 665 /* Get rid of enqueued commands if there is no chance to
759 * send them. */ 666 * send them. */
760 if (!sbp2util_node_is_available(scsi_id)) 667 if (!sbp2util_node_is_available(lu))
761 sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); 668 sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
762 /* scsi_remove_device() will trigger shutdown functions of SCSI 669 /* scsi_remove_device() may trigger shutdown functions of SCSI
763 * highlevel drivers which would deadlock if blocked. */ 670 * highlevel drivers which would deadlock if blocked. */
764 atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN); 671 atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
765 scsi_unblock_requests(scsi_id->scsi_host); 672 scsi_unblock_requests(lu->shost);
766 } 673 }
767 sdev = scsi_id->sdev; 674 sdev = lu->sdev;
768 if (sdev) { 675 if (sdev) {
769 scsi_id->sdev = NULL; 676 lu->sdev = NULL;
770 scsi_remove_device(sdev); 677 scsi_remove_device(sdev);
771 } 678 }
772 679
773 sbp2_logout_device(scsi_id); 680 sbp2_logout_device(lu);
774 sbp2_remove_device(scsi_id); 681 sbp2_remove_device(lu);
775 682
776 return 0; 683 return 0;
777} 684}
778 685
779static int sbp2_update(struct unit_directory *ud) 686static int sbp2_update(struct unit_directory *ud)
780{ 687{
781 struct scsi_id_instance_data *scsi_id = ud->device.driver_data; 688 struct sbp2_lu *lu = ud->device.driver_data;
782
783 SBP2_DEBUG_ENTER();
784 689
785 if (sbp2_reconnect_device(scsi_id)) { 690 if (sbp2_reconnect_device(lu)) {
691 /* Reconnect has failed. Perhaps we didn't reconnect fast
692 * enough. Try a regular login, but first log out just in
693 * case of any weirdness. */
694 sbp2_logout_device(lu);
786 695
787 /* 696 if (sbp2_login_device(lu)) {
788 * Ok, reconnect has failed. Perhaps we didn't
789 * reconnect fast enough. Try doing a regular login, but
790 * first do a logout just in case of any weirdness.
791 */
792 sbp2_logout_device(scsi_id);
793
794 if (sbp2_login_device(scsi_id)) {
795 /* Login failed too, just fail, and the backend 697 /* Login failed too, just fail, and the backend
796 * will call our sbp2_remove for us */ 698 * will call our sbp2_remove for us */
797 SBP2_ERR("Failed to reconnect to sbp2 device!"); 699 SBP2_ERR("Failed to reconnect to sbp2 device!");
@@ -799,69 +701,59 @@ static int sbp2_update(struct unit_directory *ud)
799 } 701 }
800 } 702 }
801 703
802 /* Set max retries to something large on the device. */ 704 sbp2_set_busy_timeout(lu);
803 sbp2_set_busy_timeout(scsi_id); 705 sbp2_agent_reset(lu, 1);
706 sbp2_max_speed_and_size(lu);
804 707
805 /* Do a SBP-2 fetch agent reset. */ 708 /* Complete any pending commands with busy (so they get retried)
806 sbp2_agent_reset(scsi_id, 1); 709 * and remove them from our queue. */
807 710 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
808 /* Get the max speed and packet size that we can use. */
809 sbp2_max_speed_and_size(scsi_id);
810
811 /* Complete any pending commands with busy (so they get
812 * retried) and remove them from our queue
813 */
814 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
815 711
816 /* Accept new commands unless there was another bus reset in the 712 /* Accept new commands unless there was another bus reset in the
817 * meantime. */ 713 * meantime. */
818 if (hpsb_node_entry_valid(scsi_id->ne)) { 714 if (hpsb_node_entry_valid(lu->ne)) {
819 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 715 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
820 scsi_unblock_requests(scsi_id->scsi_host); 716 scsi_unblock_requests(lu->shost);
821 } 717 }
822 return 0; 718 return 0;
823} 719}
824 720
825/* This functions is called by the sbp2_probe, for each new device. We now 721static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
826 * allocate one scsi host for each scsi_id (unit directory). */
827static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
828{ 722{
829 struct sbp2scsi_host_info *hi; 723 struct sbp2_fwhost_info *hi;
830 struct Scsi_Host *scsi_host = NULL; 724 struct Scsi_Host *shost = NULL;
831 struct scsi_id_instance_data *scsi_id = NULL; 725 struct sbp2_lu *lu = NULL;
832 726
833 SBP2_DEBUG_ENTER(); 727 lu = kzalloc(sizeof(*lu), GFP_KERNEL);
834 728 if (!lu) {
835 scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL); 729 SBP2_ERR("failed to create lu");
836 if (!scsi_id) {
837 SBP2_ERR("failed to create scsi_id");
838 goto failed_alloc; 730 goto failed_alloc;
839 } 731 }
840 732
841 scsi_id->ne = ud->ne; 733 lu->ne = ud->ne;
842 scsi_id->ud = ud; 734 lu->ud = ud;
843 scsi_id->speed_code = IEEE1394_SPEED_100; 735 lu->speed_code = IEEE1394_SPEED_100;
844 scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100]; 736 lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
845 scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE; 737 lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
846 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse); 738 INIT_LIST_HEAD(&lu->cmd_orb_inuse);
847 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); 739 INIT_LIST_HEAD(&lu->cmd_orb_completed);
848 INIT_LIST_HEAD(&scsi_id->scsi_list); 740 INIT_LIST_HEAD(&lu->lu_list);
849 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 741 spin_lock_init(&lu->cmd_orb_lock);
850 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 742 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
851 INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL); 743 INIT_WORK(&lu->protocol_work, NULL);
852 744
853 ud->device.driver_data = scsi_id; 745 ud->device.driver_data = lu;
854 746
855 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host); 747 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
856 if (!hi) { 748 if (!hi) {
857 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi)); 749 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
750 sizeof(*hi));
858 if (!hi) { 751 if (!hi) {
859 SBP2_ERR("failed to allocate hostinfo"); 752 SBP2_ERR("failed to allocate hostinfo");
860 goto failed_alloc; 753 goto failed_alloc;
861 } 754 }
862 SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
863 hi->host = ud->ne->host; 755 hi->host = ud->ne->host;
864 INIT_LIST_HEAD(&hi->scsi_ids); 756 INIT_LIST_HEAD(&hi->logical_units);
865 757
866#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 758#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
867 /* Handle data movement if physical dma is not 759 /* Handle data movement if physical dma is not
@@ -881,9 +773,9 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
881 goto failed_alloc; 773 goto failed_alloc;
882 } 774 }
883 775
884 scsi_id->hi = hi; 776 lu->hi = hi;
885 777
886 list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids); 778 list_add_tail(&lu->lu_list, &hi->logical_units);
887 779
888 /* Register the status FIFO address range. We could use the same FIFO 780 /* Register the status FIFO address range. We could use the same FIFO
889 * for targets at different nodes. However we need different FIFOs per 781 * for targets at different nodes. However we need different FIFOs per
@@ -893,302 +785,214 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
893 * then be performed as unified transactions. This slightly reduces 785 * then be performed as unified transactions. This slightly reduces
894 * bandwidth usage, and some Prolific based devices seem to require it. 786 * bandwidth usage, and some Prolific based devices seem to require it.
895 */ 787 */
896 scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( 788 lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
897 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 789 &sbp2_highlevel, ud->ne->host, &sbp2_ops,
898 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 790 sizeof(struct sbp2_status_block), sizeof(quadlet_t),
899 ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END); 791 ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
900 if (scsi_id->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) { 792 if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
901 SBP2_ERR("failed to allocate status FIFO address range"); 793 SBP2_ERR("failed to allocate status FIFO address range");
902 goto failed_alloc; 794 goto failed_alloc;
903 } 795 }
904 796
905 /* Register our host with the SCSI stack. */ 797 shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
906 scsi_host = scsi_host_alloc(&scsi_driver_template, 798 if (!shost) {
907 sizeof(unsigned long));
908 if (!scsi_host) {
909 SBP2_ERR("failed to register scsi host"); 799 SBP2_ERR("failed to register scsi host");
910 goto failed_alloc; 800 goto failed_alloc;
911 } 801 }
912 802
913 scsi_host->hostdata[0] = (unsigned long)scsi_id; 803 shost->hostdata[0] = (unsigned long)lu;
914 804
915 if (!scsi_add_host(scsi_host, &ud->device)) { 805 if (!scsi_add_host(shost, &ud->device)) {
916 scsi_id->scsi_host = scsi_host; 806 lu->shost = shost;
917 return scsi_id; 807 return lu;
918 } 808 }
919 809
920 SBP2_ERR("failed to add scsi host"); 810 SBP2_ERR("failed to add scsi host");
921 scsi_host_put(scsi_host); 811 scsi_host_put(shost);
922 812
923failed_alloc: 813failed_alloc:
924 sbp2_remove_device(scsi_id); 814 sbp2_remove_device(lu);
925 return NULL; 815 return NULL;
926} 816}
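The single unsigned long of hostdata requested from scsi_host_alloc() above is where the lu pointer travels. A minimal sketch of the round trip; the helper name is hypothetical, only the hostdata[0] assignment comes from the patch:

	/* Hypothetical accessor: recover the logical unit stored in
	 * shost->hostdata[0] by sbp2_alloc_device(). */
	static inline struct sbp2_lu *shost_to_lu(struct Scsi_Host *shost)
	{
		return (struct sbp2_lu *)shost->hostdata[0];
	}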
927 817
928static void sbp2_host_reset(struct hpsb_host *host) 818static void sbp2_host_reset(struct hpsb_host *host)
929{ 819{
930 struct sbp2scsi_host_info *hi; 820 struct sbp2_fwhost_info *hi;
931 struct scsi_id_instance_data *scsi_id; 821 struct sbp2_lu *lu;
932 822
933 hi = hpsb_get_hostinfo(&sbp2_highlevel, host); 823 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
934 if (!hi) 824 if (!hi)
935 return; 825 return;
936 list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list) 826 list_for_each_entry(lu, &hi->logical_units, lu_list)
937 if (likely(atomic_read(&scsi_id->state) != 827 if (likely(atomic_read(&lu->state) !=
938 SBP2LU_STATE_IN_SHUTDOWN)) { 828 SBP2LU_STATE_IN_SHUTDOWN)) {
939 atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET); 829 atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
940 scsi_block_requests(scsi_id->scsi_host); 830 scsi_block_requests(lu->shost);
941 } 831 }
942} 832}
943 833
944/* 834static int sbp2_start_device(struct sbp2_lu *lu)
945 * This function is where we first pull the node unique ids, and then
946 * allocate memory and register a SBP-2 device.
947 */
948static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
949{ 835{
950 struct sbp2scsi_host_info *hi = scsi_id->hi; 836 struct sbp2_fwhost_info *hi = lu->hi;
951 int error; 837 int error;
952 838
953 SBP2_DEBUG_ENTER(); 839 lu->login_response = dma_alloc_coherent(&hi->host->device,
954
955 /* Login FIFO DMA */
956 scsi_id->login_response =
957 pci_alloc_consistent(hi->host->pdev,
958 sizeof(struct sbp2_login_response), 840 sizeof(struct sbp2_login_response),
959 &scsi_id->login_response_dma); 841 &lu->login_response_dma, GFP_KERNEL);
960 if (!scsi_id->login_response) 842 if (!lu->login_response)
961 goto alloc_fail; 843 goto alloc_fail;
962 SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
963 844
964 /* Query logins ORB DMA */ 845 lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
965 scsi_id->query_logins_orb =
966 pci_alloc_consistent(hi->host->pdev,
967 sizeof(struct sbp2_query_logins_orb), 846 sizeof(struct sbp2_query_logins_orb),
968 &scsi_id->query_logins_orb_dma); 847 &lu->query_logins_orb_dma, GFP_KERNEL);
969 if (!scsi_id->query_logins_orb) 848 if (!lu->query_logins_orb)
970 goto alloc_fail; 849 goto alloc_fail;
971 SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
972 850
973 /* Query logins response DMA */ 851 lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
974 scsi_id->query_logins_response =
975 pci_alloc_consistent(hi->host->pdev,
976 sizeof(struct sbp2_query_logins_response), 852 sizeof(struct sbp2_query_logins_response),
977 &scsi_id->query_logins_response_dma); 853 &lu->query_logins_response_dma, GFP_KERNEL);
978 if (!scsi_id->query_logins_response) 854 if (!lu->query_logins_response)
979 goto alloc_fail; 855 goto alloc_fail;
980 SBP2_DMA_ALLOC("consistent DMA region for query logins response");
981 856
982 /* Reconnect ORB DMA */ 857 lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
983 scsi_id->reconnect_orb =
984 pci_alloc_consistent(hi->host->pdev,
985 sizeof(struct sbp2_reconnect_orb), 858 sizeof(struct sbp2_reconnect_orb),
986 &scsi_id->reconnect_orb_dma); 859 &lu->reconnect_orb_dma, GFP_KERNEL);
987 if (!scsi_id->reconnect_orb) 860 if (!lu->reconnect_orb)
988 goto alloc_fail; 861 goto alloc_fail;
989 SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
990 862
991 /* Logout ORB DMA */ 863 lu->logout_orb = dma_alloc_coherent(&hi->host->device,
992 scsi_id->logout_orb =
993 pci_alloc_consistent(hi->host->pdev,
994 sizeof(struct sbp2_logout_orb), 864 sizeof(struct sbp2_logout_orb),
995 &scsi_id->logout_orb_dma); 865 &lu->logout_orb_dma, GFP_KERNEL);
996 if (!scsi_id->logout_orb) 866 if (!lu->logout_orb)
997 goto alloc_fail; 867 goto alloc_fail;
998 SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
999 868
1000 /* Login ORB DMA */ 869 lu->login_orb = dma_alloc_coherent(&hi->host->device,
1001 scsi_id->login_orb =
1002 pci_alloc_consistent(hi->host->pdev,
1003 sizeof(struct sbp2_login_orb), 870 sizeof(struct sbp2_login_orb),
1004 &scsi_id->login_orb_dma); 871 &lu->login_orb_dma, GFP_KERNEL);
1005 if (!scsi_id->login_orb) 872 if (!lu->login_orb)
1006 goto alloc_fail; 873 goto alloc_fail;
1007 SBP2_DMA_ALLOC("consistent DMA region for login ORB");
1008
1009 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
1010 874
1011 /* 875 if (sbp2util_create_command_orb_pool(lu)) {
1012 * Create our command orb pool
1013 */
1014 if (sbp2util_create_command_orb_pool(scsi_id)) {
1015 SBP2_ERR("sbp2util_create_command_orb_pool failed!"); 876 SBP2_ERR("sbp2util_create_command_orb_pool failed!");
1016 sbp2_remove_device(scsi_id); 877 sbp2_remove_device(lu);
1017 return -ENOMEM; 878 return -ENOMEM;
1018 } 879 }
1019 880
1020 /* Schedule a timeout here. The reason is that we may be so close 881 /* Wait a second before trying to log in. Previously logged in
1021 * to a bus reset, that the device is not available for logins. 882 * initiators need a chance to reconnect. */
1022 * This can happen when the bus reset is caused by the host
1023 * connected to the sbp2 device being removed. That host would
1024 * have a certain amount of time to relogin before the sbp2 device
1025 * allows someone else to login instead. One second makes sense. */
1026 if (msleep_interruptible(1000)) { 883 if (msleep_interruptible(1000)) {
1027 sbp2_remove_device(scsi_id); 884 sbp2_remove_device(lu);
1028 return -EINTR; 885 return -EINTR;
1029 } 886 }
1030 887
1031 /* 888 if (sbp2_login_device(lu)) {
1032 * Login to the sbp-2 device 889 sbp2_remove_device(lu);
1033 */
1034 if (sbp2_login_device(scsi_id)) {
1035 /* Login failed, just remove the device. */
1036 sbp2_remove_device(scsi_id);
1037 return -EBUSY; 890 return -EBUSY;
1038 } 891 }
1039 892
1040 /* 893 sbp2_set_busy_timeout(lu);
1041 * Set max retries to something large on the device 894 sbp2_agent_reset(lu, 1);
1042 */ 895 sbp2_max_speed_and_size(lu);
1043 sbp2_set_busy_timeout(scsi_id);
1044
1045 /*
1046 * Do a SBP-2 fetch agent reset
1047 */
1048 sbp2_agent_reset(scsi_id, 1);
1049 896
1050 /* 897 error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
1051 * Get the max speed and packet size that we can use
1052 */
1053 sbp2_max_speed_and_size(scsi_id);
1054
1055 /* Add this device to the scsi layer now */
1056 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
1057 if (error) { 898 if (error) {
1058 SBP2_ERR("scsi_add_device failed"); 899 SBP2_ERR("scsi_add_device failed");
1059 sbp2_logout_device(scsi_id); 900 sbp2_logout_device(lu);
1060 sbp2_remove_device(scsi_id); 901 sbp2_remove_device(lu);
1061 return error; 902 return error;
1062 } 903 }
1063 904
1064 return 0; 905 return 0;
1065 906
1066alloc_fail: 907alloc_fail:
1067 SBP2_ERR("Could not allocate memory for scsi_id"); 908 SBP2_ERR("Could not allocate memory for lu");
1068 sbp2_remove_device(scsi_id); 909 sbp2_remove_device(lu);
1069 return -ENOMEM; 910 return -ENOMEM;
1070} 911}
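The buffer allocations above move from the PCI wrappers to the generic DMA API on the FireWire host's struct device. A hedged sketch of the equivalence; at the time pci_alloc_consistent() was a thin wrapper around dma_alloc_coherent() with GFP_ATOMIC, while the new call sites may sleep and therefore pass GFP_KERNEL:

	/* Illustrative only: allocate one coherent ORB buffer the new way. */
	static void *example_alloc_orb(struct sbp2_fwhost_info *hi, size_t size,
				       dma_addr_t *dma)
	{
		/* old: pci_alloc_consistent(hi->host->pdev, size, dma) */
		return dma_alloc_coherent(&hi->host->device, size, dma,
					  GFP_KERNEL);
	}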
1071 912
1072/* 913static void sbp2_remove_device(struct sbp2_lu *lu)
1073 * This function removes an sbp2 device from the sbp2scsi_host_info struct.
1074 */
1075static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
1076{ 914{
1077 struct sbp2scsi_host_info *hi; 915 struct sbp2_fwhost_info *hi;
1078
1079 SBP2_DEBUG_ENTER();
1080 916
1081 if (!scsi_id) 917 if (!lu)
1082 return; 918 return;
1083 919
1084 hi = scsi_id->hi; 920 hi = lu->hi;
1085 921
1086 /* This will remove our scsi device as well */ 922 if (lu->shost) {
1087 if (scsi_id->scsi_host) { 923 scsi_remove_host(lu->shost);
1088 scsi_remove_host(scsi_id->scsi_host); 924 scsi_host_put(lu->shost);
1089 scsi_host_put(scsi_id->scsi_host);
1090 } 925 }
1091 flush_scheduled_work(); 926 flush_scheduled_work();
1092 sbp2util_remove_command_orb_pool(scsi_id); 927 sbp2util_remove_command_orb_pool(lu);
1093 928
1094 list_del(&scsi_id->scsi_list); 929 list_del(&lu->lu_list);
1095 930
1096 if (scsi_id->login_response) { 931 if (lu->login_response)
1097 pci_free_consistent(hi->host->pdev, 932 dma_free_coherent(&hi->host->device,
1098 sizeof(struct sbp2_login_response), 933 sizeof(struct sbp2_login_response),
1099 scsi_id->login_response, 934 lu->login_response,
1100 scsi_id->login_response_dma); 935 lu->login_response_dma);
1101 SBP2_DMA_FREE("single login FIFO"); 936 if (lu->login_orb)
1102 } 937 dma_free_coherent(&hi->host->device,
1103
1104 if (scsi_id->login_orb) {
1105 pci_free_consistent(hi->host->pdev,
1106 sizeof(struct sbp2_login_orb), 938 sizeof(struct sbp2_login_orb),
1107 scsi_id->login_orb, 939 lu->login_orb,
1108 scsi_id->login_orb_dma); 940 lu->login_orb_dma);
1109 SBP2_DMA_FREE("single login ORB"); 941 if (lu->reconnect_orb)
1110 } 942 dma_free_coherent(&hi->host->device,
1111
1112 if (scsi_id->reconnect_orb) {
1113 pci_free_consistent(hi->host->pdev,
1114 sizeof(struct sbp2_reconnect_orb), 943 sizeof(struct sbp2_reconnect_orb),
1115 scsi_id->reconnect_orb, 944 lu->reconnect_orb,
1116 scsi_id->reconnect_orb_dma); 945 lu->reconnect_orb_dma);
1117 SBP2_DMA_FREE("single reconnect orb"); 946 if (lu->logout_orb)
1118 } 947 dma_free_coherent(&hi->host->device,
1119
1120 if (scsi_id->logout_orb) {
1121 pci_free_consistent(hi->host->pdev,
1122 sizeof(struct sbp2_logout_orb), 948 sizeof(struct sbp2_logout_orb),
1123 scsi_id->logout_orb, 949 lu->logout_orb,
1124 scsi_id->logout_orb_dma); 950 lu->logout_orb_dma);
1125 SBP2_DMA_FREE("single logout orb"); 951 if (lu->query_logins_orb)
1126 } 952 dma_free_coherent(&hi->host->device,
1127
1128 if (scsi_id->query_logins_orb) {
1129 pci_free_consistent(hi->host->pdev,
1130 sizeof(struct sbp2_query_logins_orb), 953 sizeof(struct sbp2_query_logins_orb),
1131 scsi_id->query_logins_orb, 954 lu->query_logins_orb,
1132 scsi_id->query_logins_orb_dma); 955 lu->query_logins_orb_dma);
1133 SBP2_DMA_FREE("single query logins orb"); 956 if (lu->query_logins_response)
1134 } 957 dma_free_coherent(&hi->host->device,
1135
1136 if (scsi_id->query_logins_response) {
1137 pci_free_consistent(hi->host->pdev,
1138 sizeof(struct sbp2_query_logins_response), 958 sizeof(struct sbp2_query_logins_response),
1139 scsi_id->query_logins_response, 959 lu->query_logins_response,
1140 scsi_id->query_logins_response_dma); 960 lu->query_logins_response_dma);
1141 SBP2_DMA_FREE("single query logins data");
1142 }
1143 961
1144 if (scsi_id->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE) 962 if (lu->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
1145 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host, 963 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
1146 scsi_id->status_fifo_addr); 964 lu->status_fifo_addr);
1147 965
1148 scsi_id->ud->device.driver_data = NULL; 966 lu->ud->device.driver_data = NULL;
1149 967
1150 if (hi) 968 if (hi)
1151 module_put(hi->host->driver->owner); 969 module_put(hi->host->driver->owner);
1152 970
1153 SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id); 971 kfree(lu);
1154
1155 kfree(scsi_id);
1156} 972}
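Per-LU state is attached to the unit directory's struct device: sbp2_alloc_device() sets ud->device.driver_data and sbp2_remove_device() clears it, which is how the probe, update and remove paths find the lu again. A small accessor, hypothetical and for illustration only:

	static inline struct sbp2_lu *ud_to_lu(struct unit_directory *ud)
	{
		/* set in sbp2_alloc_device(), cleared in sbp2_remove_device() */
		return ud->device.driver_data;
	}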
1157 973
1158#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 974#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
1159/* 975/*
1160 * This function deals with physical dma write requests (for adapters that do not support 976 * Deal with write requests on adapters which do not support physical DMA or
1161 * physical dma in hardware). Mostly just here for debugging... 977 * have it switched off.
1162 */ 978 */
1163static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, 979static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
1164 int destid, quadlet_t *data, u64 addr, 980 int destid, quadlet_t *data, u64 addr,
1165 size_t length, u16 flags) 981 size_t length, u16 flags)
1166{ 982{
1167
1168 /*
1169 * Manually put the data in the right place.
1170 */
1171 memcpy(bus_to_virt((u32) addr), data, length); 983 memcpy(bus_to_virt((u32) addr), data, length);
1172 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
1173 (u32) addr);
1174 return RCODE_COMPLETE; 984 return RCODE_COMPLETE;
1175} 985}
1176 986
1177/* 987/*
1178 * This function deals with physical dma read requests (for adapters that do not support 988 * Deal with read requests on adapters which do not support physical DMA or
1179 * physical dma in hardware). Mostly just here for debugging... 989 * have it switched off.
1180 */ 990 */
1181static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, 991static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1182 quadlet_t *data, u64 addr, size_t length, 992 quadlet_t *data, u64 addr, size_t length,
1183 u16 flags) 993 u16 flags)
1184{ 994{
1185
1186 /*
1187 * Grab data from memory and send a read response.
1188 */
1189 memcpy(data, bus_to_virt((u32) addr), length); 995 memcpy(data, bus_to_virt((u32) addr), length);
1190 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
1191 (u32) addr);
1192 return RCODE_COMPLETE; 996 return RCODE_COMPLETE;
1193} 997}
1194#endif 998#endif
@@ -1197,74 +1001,69 @@ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1197 * SBP-2 protocol related section 1001 * SBP-2 protocol related section
1198 **************************************/ 1002 **************************************/
1199 1003
1200/* 1004static int sbp2_query_logins(struct sbp2_lu *lu)
1201 * This function queries the device for the maximum concurrent logins it
1202 * supports.
1203 */
1204static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1205{ 1005{
1206 struct sbp2scsi_host_info *hi = scsi_id->hi; 1006 struct sbp2_fwhost_info *hi = lu->hi;
1207 quadlet_t data[2]; 1007 quadlet_t data[2];
1208 int max_logins; 1008 int max_logins;
1209 int active_logins; 1009 int active_logins;
1210 1010
1211 SBP2_DEBUG_ENTER(); 1011 lu->query_logins_orb->reserved1 = 0x0;
1212 1012 lu->query_logins_orb->reserved2 = 0x0;
1213 scsi_id->query_logins_orb->reserved1 = 0x0;
1214 scsi_id->query_logins_orb->reserved2 = 0x0;
1215
1216 scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
1217 scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1218 1013
1219 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST); 1014 lu->query_logins_orb->query_response_lo = lu->query_logins_response_dma;
1220 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1); 1015 lu->query_logins_orb->query_response_hi =
1221 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun); 1016 ORB_SET_NODE_ID(hi->host->node_id);
1017 lu->query_logins_orb->lun_misc =
1018 ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1019 lu->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1020 lu->query_logins_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1222 1021
1223 scsi_id->query_logins_orb->reserved_resp_length = 1022 lu->query_logins_orb->reserved_resp_length =
1224 ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response)); 1023 ORB_SET_QUERY_LOGINS_RESP_LENGTH(
1024 sizeof(struct sbp2_query_logins_response));
1225 1025
1226 scsi_id->query_logins_orb->status_fifo_hi = 1026 lu->query_logins_orb->status_fifo_hi =
1227 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1027 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1228 scsi_id->query_logins_orb->status_fifo_lo = 1028 lu->query_logins_orb->status_fifo_lo =
1229 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1029 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1230 1030
1231 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb)); 1031 sbp2util_cpu_to_be32_buffer(lu->query_logins_orb,
1032 sizeof(struct sbp2_query_logins_orb));
1232 1033
1233 sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb), 1034 memset(lu->query_logins_response, 0,
1234 "sbp2 query logins orb", scsi_id->query_logins_orb_dma); 1035 sizeof(struct sbp2_query_logins_response));
1235
1236 memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
1237 1036
1238 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1037 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1239 data[1] = scsi_id->query_logins_orb_dma; 1038 data[1] = lu->query_logins_orb_dma;
1240 sbp2util_cpu_to_be32_buffer(data, 8); 1039 sbp2util_cpu_to_be32_buffer(data, 8);
1241 1040
1242 hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); 1041 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1243 1042
1244 if (sbp2util_access_timeout(scsi_id, 2*HZ)) { 1043 if (sbp2util_access_timeout(lu, 2*HZ)) {
1245 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1044 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1246 return -EIO; 1045 return -EIO;
1247 } 1046 }
1248 1047
1249 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) { 1048 if (lu->status_block.ORB_offset_lo != lu->query_logins_orb_dma) {
1250 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1049 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1251 return -EIO; 1050 return -EIO;
1252 } 1051 }
1253 1052
1254 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1053 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1255 SBP2_INFO("Error querying logins to SBP-2 device - failed"); 1054 SBP2_INFO("Error querying logins to SBP-2 device - failed");
1256 return -EIO; 1055 return -EIO;
1257 } 1056 }
1258 1057
1259 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response)); 1058 sbp2util_cpu_to_be32_buffer(lu->query_logins_response,
1260 1059 sizeof(struct sbp2_query_logins_response));
1261 SBP2_DEBUG("length_max_logins = %x",
1262 (unsigned int)scsi_id->query_logins_response->length_max_logins);
1263 1060
1264 max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins); 1061 max_logins = RESPONSE_GET_MAX_LOGINS(
1062 lu->query_logins_response->length_max_logins);
1265 SBP2_INFO("Maximum concurrent logins supported: %d", max_logins); 1063 SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
1266 1064
1267 active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins); 1065 active_logins = RESPONSE_GET_ACTIVE_LOGINS(
1066 lu->query_logins_response->length_max_logins);
1268 SBP2_INFO("Number of active logins: %d", active_logins); 1067 SBP2_INFO("Number of active logins: %d", active_logins);
1269 1068
1270 if (active_logins >= max_logins) { 1069 if (active_logins >= max_logins) {
@@ -1274,332 +1073,231 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1274 return 0; 1073 return 0;
1275} 1074}
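The ORB_SET_*() and RESPONSE_GET_*() helpers above pack and unpack bitfields of the management ORB and its response; the real definitions live in sbp2.h. The pattern, with made-up shifts and masks for illustration:

	/* Illustrative only, not the driver's actual field layout. */
	#define EXAMPLE_ORB_SET_FUNCTION(v)		(((v) & 0xf) << 16)
	#define EXAMPLE_ORB_SET_NOTIFY(v)		(((v) & 0x1) << 31)
	#define EXAMPLE_RESPONSE_GET_MAX_LOGINS(v)	((v) & 0xffff)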
1276 1075
1277/* 1076static int sbp2_login_device(struct sbp2_lu *lu)
1278 * This function is called in order to login to a particular SBP-2 device,
1279 * after a bus reset.
1280 */
1281static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1282{ 1077{
1283 struct sbp2scsi_host_info *hi = scsi_id->hi; 1078 struct sbp2_fwhost_info *hi = lu->hi;
1284 quadlet_t data[2]; 1079 quadlet_t data[2];
1285 1080
1286 SBP2_DEBUG_ENTER(); 1081 if (!lu->login_orb)
1287
1288 if (!scsi_id->login_orb) {
1289 SBP2_DEBUG("%s: login_orb not alloc'd!", __FUNCTION__);
1290 return -EIO; 1082 return -EIO;
1291 }
1292 1083
1293 if (!exclusive_login) { 1084 if (!sbp2_exclusive_login && sbp2_query_logins(lu)) {
1294 if (sbp2_query_logins(scsi_id)) { 1085 SBP2_INFO("Device does not support any more concurrent logins");
1295 SBP2_INFO("Device does not support any more concurrent logins"); 1086 return -EIO;
1296 return -EIO;
1297 }
1298 } 1087 }
1299 1088
1300 /* Set-up login ORB, assume no password */ 1089 /* assume no password */
1301 scsi_id->login_orb->password_hi = 0; 1090 lu->login_orb->password_hi = 0;
1302 scsi_id->login_orb->password_lo = 0; 1091 lu->login_orb->password_lo = 0;
1303 1092
1304 scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma; 1093 lu->login_orb->login_response_lo = lu->login_response_dma;
1305 scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id); 1094 lu->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1095 lu->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
1306 1096
1307 scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST); 1097 /* one second reconnect time */
1308 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */ 1098 lu->login_orb->lun_misc |= ORB_SET_RECONNECT(0);
1309 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */ 1099 lu->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login);
1310 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */ 1100 lu->login_orb->lun_misc |= ORB_SET_NOTIFY(1);
1311 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun); 1101 lu->login_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1312 1102
1313 scsi_id->login_orb->passwd_resp_lengths = 1103 lu->login_orb->passwd_resp_lengths =
1314 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response)); 1104 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
1315 1105
1316 scsi_id->login_orb->status_fifo_hi = 1106 lu->login_orb->status_fifo_hi =
1317 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1107 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1318 scsi_id->login_orb->status_fifo_lo = 1108 lu->login_orb->status_fifo_lo =
1319 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1109 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1320 1110
1321 sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb)); 1111 sbp2util_cpu_to_be32_buffer(lu->login_orb,
1112 sizeof(struct sbp2_login_orb));
1322 1113
1323 sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb), 1114 memset(lu->login_response, 0, sizeof(struct sbp2_login_response));
1324 "sbp2 login orb", scsi_id->login_orb_dma);
1325
1326 memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
1327 1115
1328 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1116 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1329 data[1] = scsi_id->login_orb_dma; 1117 data[1] = lu->login_orb_dma;
1330 sbp2util_cpu_to_be32_buffer(data, 8); 1118 sbp2util_cpu_to_be32_buffer(data, 8);
1331 1119
1332 hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); 1120 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1333 1121
1334 /* 1122 /* wait up to 20 seconds for login status */
1335 * Wait for login status (up to 20 seconds)... 1123 if (sbp2util_access_timeout(lu, 20*HZ)) {
1336 */
1337 if (sbp2util_access_timeout(scsi_id, 20*HZ)) {
1338 SBP2_ERR("Error logging into SBP-2 device - timed out"); 1124 SBP2_ERR("Error logging into SBP-2 device - timed out");
1339 return -EIO; 1125 return -EIO;
1340 } 1126 }
1341 1127
1342 /* 1128 /* make sure that the returned status matches the login ORB */
1343 * Sanity. Make sure status returned matches login orb. 1129 if (lu->status_block.ORB_offset_lo != lu->login_orb_dma) {
1344 */
1345 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
1346 SBP2_ERR("Error logging into SBP-2 device - timed out"); 1130 SBP2_ERR("Error logging into SBP-2 device - timed out");
1347 return -EIO; 1131 return -EIO;
1348 } 1132 }
1349 1133
1350 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1134 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1351 SBP2_ERR("Error logging into SBP-2 device - failed"); 1135 SBP2_ERR("Error logging into SBP-2 device - failed");
1352 return -EIO; 1136 return -EIO;
1353 } 1137 }
1354 1138
1355 /* 1139 sbp2util_cpu_to_be32_buffer(lu->login_response,
1356 * Byte swap the login response, for use when reconnecting or 1140 sizeof(struct sbp2_login_response));
1357 * logging out. 1141 lu->command_block_agent_addr =
1358 */ 1142 ((u64)lu->login_response->command_block_agent_hi) << 32;
1359 sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response)); 1143 lu->command_block_agent_addr |=
1360 1144 ((u64)lu->login_response->command_block_agent_lo);
1361 /* 1145 lu->command_block_agent_addr &= 0x0000ffffffffffffULL;
1362 * Grab our command block agent address from the login response.
1363 */
1364 SBP2_DEBUG("command_block_agent_hi = %x",
1365 (unsigned int)scsi_id->login_response->command_block_agent_hi);
1366 SBP2_DEBUG("command_block_agent_lo = %x",
1367 (unsigned int)scsi_id->login_response->command_block_agent_lo);
1368
1369 scsi_id->sbp2_command_block_agent_addr =
1370 ((u64)scsi_id->login_response->command_block_agent_hi) << 32;
1371 scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
1372 scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
1373 1146
1374 SBP2_INFO("Logged into SBP-2 device"); 1147 SBP2_INFO("Logged into SBP-2 device");
1375 return 0; 1148 return 0;
1376} 1149}
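hpsb_node_write() only posts the ORB pointer to the management agent; the result arrives later as a block write to the status FIFO registered in sbp2_alloc_device(), and sbp2util_access_timeout() sleeps until that write shows up. A hedged sketch of such a helper; the wait queue and flag names are assumptions, the real one is defined earlier in sbp2.c:

	static int example_access_timeout(struct sbp2_lu *lu, long timeout)
	{
		long left = wait_event_interruptible_timeout(sbp2_access_wq,
						lu->access_complete, timeout);

		lu->access_complete = 0;
		return left <= 0 ? -ETIMEDOUT : 0;	/* 0 on success, as used above */
	}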
1377 1150
1378/* 1151static int sbp2_logout_device(struct sbp2_lu *lu)
1379 * This function is called in order to log out from a particular SBP-2
1380 * device, usually called during driver unload.
1381 */
1382static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1383{ 1152{
1384 struct sbp2scsi_host_info *hi = scsi_id->hi; 1153 struct sbp2_fwhost_info *hi = lu->hi;
1385 quadlet_t data[2]; 1154 quadlet_t data[2];
1386 int error; 1155 int error;
1387 1156
1388 SBP2_DEBUG_ENTER(); 1157 lu->logout_orb->reserved1 = 0x0;
1389 1158 lu->logout_orb->reserved2 = 0x0;
1390 /* 1159 lu->logout_orb->reserved3 = 0x0;
1391 * Set-up logout ORB 1160 lu->logout_orb->reserved4 = 0x0;
1392 */
1393 scsi_id->logout_orb->reserved1 = 0x0;
1394 scsi_id->logout_orb->reserved2 = 0x0;
1395 scsi_id->logout_orb->reserved3 = 0x0;
1396 scsi_id->logout_orb->reserved4 = 0x0;
1397
1398 scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1399 scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1400
1401 /* Notify us when complete */
1402 scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1403 1161
1404 scsi_id->logout_orb->reserved5 = 0x0; 1162 lu->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1405 scsi_id->logout_orb->status_fifo_hi = 1163 lu->logout_orb->login_ID_misc |=
1406 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1164 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1407 scsi_id->logout_orb->status_fifo_lo = 1165 lu->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1408 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1409 1166
1410 /* 1167 lu->logout_orb->reserved5 = 0x0;
1411 * Byte swap ORB if necessary 1168 lu->logout_orb->status_fifo_hi =
1412 */ 1169 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1413 sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb)); 1170 lu->logout_orb->status_fifo_lo =
1171 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1414 1172
1415 sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb), 1173 sbp2util_cpu_to_be32_buffer(lu->logout_orb,
1416 "sbp2 logout orb", scsi_id->logout_orb_dma); 1174 sizeof(struct sbp2_logout_orb));
1417 1175
1418 /*
1419 * Ok, let's write to the target's management agent register
1420 */
1421 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1176 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1422 data[1] = scsi_id->logout_orb_dma; 1177 data[1] = lu->logout_orb_dma;
1423 sbp2util_cpu_to_be32_buffer(data, 8); 1178 sbp2util_cpu_to_be32_buffer(data, 8);
1424 1179
1425 error = hpsb_node_write(scsi_id->ne, 1180 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1426 scsi_id->sbp2_management_agent_addr, data, 8);
1427 if (error) 1181 if (error)
1428 return error; 1182 return error;
1429 1183
1430 /* Wait for device to logout...1 second. */ 1184 /* wait up to 1 second for the device to complete logout */
1431 if (sbp2util_access_timeout(scsi_id, HZ)) 1185 if (sbp2util_access_timeout(lu, HZ))
1432 return -EIO; 1186 return -EIO;
1433 1187
1434 SBP2_INFO("Logged out of SBP-2 device"); 1188 SBP2_INFO("Logged out of SBP-2 device");
1435 return 0; 1189 return 0;
1436} 1190}
1437 1191
1438/* 1192static int sbp2_reconnect_device(struct sbp2_lu *lu)
1439 * This function is called in order to reconnect to a particular SBP-2
1440 * device, after a bus reset.
1441 */
1442static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1443{ 1193{
1444 struct sbp2scsi_host_info *hi = scsi_id->hi; 1194 struct sbp2_fwhost_info *hi = lu->hi;
1445 quadlet_t data[2]; 1195 quadlet_t data[2];
1446 int error; 1196 int error;
1447 1197
1448 SBP2_DEBUG_ENTER(); 1198 lu->reconnect_orb->reserved1 = 0x0;
1199 lu->reconnect_orb->reserved2 = 0x0;
1200 lu->reconnect_orb->reserved3 = 0x0;
1201 lu->reconnect_orb->reserved4 = 0x0;
1449 1202
1450 /* 1203 lu->reconnect_orb->login_ID_misc =
1451 * Set-up reconnect ORB 1204 ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1452 */ 1205 lu->reconnect_orb->login_ID_misc |=
1453 scsi_id->reconnect_orb->reserved1 = 0x0; 1206 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1454 scsi_id->reconnect_orb->reserved2 = 0x0; 1207 lu->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1455 scsi_id->reconnect_orb->reserved3 = 0x0;
1456 scsi_id->reconnect_orb->reserved4 = 0x0;
1457
1458 scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1459 scsi_id->reconnect_orb->login_ID_misc |=
1460 ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1461 1208
1462 /* Notify us when complete */ 1209 lu->reconnect_orb->reserved5 = 0x0;
1463 scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1); 1210 lu->reconnect_orb->status_fifo_hi =
1211 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1212 lu->reconnect_orb->status_fifo_lo =
1213 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1464 1214
1465 scsi_id->reconnect_orb->reserved5 = 0x0; 1215 sbp2util_cpu_to_be32_buffer(lu->reconnect_orb,
1466 scsi_id->reconnect_orb->status_fifo_hi = 1216 sizeof(struct sbp2_reconnect_orb));
1467 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
1468 scsi_id->reconnect_orb->status_fifo_lo =
1469 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1470
1471 /*
1472 * Byte swap ORB if necessary
1473 */
1474 sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
1475
1476 sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
1477 "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
1478 1217
1479 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1218 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1480 data[1] = scsi_id->reconnect_orb_dma; 1219 data[1] = lu->reconnect_orb_dma;
1481 sbp2util_cpu_to_be32_buffer(data, 8); 1220 sbp2util_cpu_to_be32_buffer(data, 8);
1482 1221
1483 error = hpsb_node_write(scsi_id->ne, 1222 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1484 scsi_id->sbp2_management_agent_addr, data, 8);
1485 if (error) 1223 if (error)
1486 return error; 1224 return error;
1487 1225
1488 /* 1226 /* wait up to 1 second for reconnect status */
1489 * Wait for reconnect status (up to 1 second)... 1227 if (sbp2util_access_timeout(lu, HZ)) {
1490 */
1491 if (sbp2util_access_timeout(scsi_id, HZ)) {
1492 SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); 1228 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1493 return -EIO; 1229 return -EIO;
1494 } 1230 }
1495 1231
1496 /* 1232 /* make sure that the returned status matches the reconnect ORB */
1497 * Sanity. Make sure status returned matches reconnect orb. 1233 if (lu->status_block.ORB_offset_lo != lu->reconnect_orb_dma) {
1498 */
1499 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
1500 SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); 1234 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1501 return -EIO; 1235 return -EIO;
1502 } 1236 }
1503 1237
1504 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1238 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1505 SBP2_ERR("Error reconnecting to SBP-2 device - failed"); 1239 SBP2_ERR("Error reconnecting to SBP-2 device - failed");
1506 return -EIO; 1240 return -EIO;
1507 } 1241 }
1508 1242
1509 HPSB_DEBUG("Reconnected to SBP-2 device"); 1243 SBP2_INFO("Reconnected to SBP-2 device");
1510 return 0; 1244 return 0;
1511} 1245}
1512 1246
1513/* 1247/*
1514 * This function is called in order to set the busy timeout (number of 1248 * Set the target node's Single Phase Retry limit. Affects the target's retry
1515 * retries to attempt) on the sbp2 device. 1249 * behaviour if our node is too busy to accept requests.
1516 */ 1250 */
1517static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id) 1251static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
1518{ 1252{
1519 quadlet_t data; 1253 quadlet_t data;
1520 1254
1521 SBP2_DEBUG_ENTER();
1522
1523 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE); 1255 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1524 if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) 1256 if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
1525 SBP2_ERR("%s error", __FUNCTION__); 1257 SBP2_ERR("%s error", __FUNCTION__);
1526 return 0; 1258 return 0;
1527} 1259}
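The quadlet written above lands in the target's serial-bus BUSY_TIMEOUT register, whose low nibble is the single-phase retry limit. The driver's real constants are in sbp2.h and csr.h; the values below are assumptions given for illustration:

	/* Assumed layout per IEEE 1212/1394: BUSY_TIMEOUT at CSR offset 0x210,
	 * retry_limit in bits 3..0. */
	#define EXAMPLE_BUSY_TIMEOUT_ADDRESS	(CSR_REGISTER_BASE + 0x210)
	#define EXAMPLE_BUSY_TIMEOUT_VALUE	0xf	/* retry busied requests 15 times */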
1528 1260
1529/* 1261static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1530 * This function is called to parse sbp2 device's config rom unit
1531 * directory. Used to determine things like sbp2 management agent offset,
1532 * and command set used (SCSI or RBC).
1533 */
1534static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1535 struct unit_directory *ud) 1262 struct unit_directory *ud)
1536{ 1263{
1537 struct csr1212_keyval *kv; 1264 struct csr1212_keyval *kv;
1538 struct csr1212_dentry *dentry; 1265 struct csr1212_dentry *dentry;
1539 u64 management_agent_addr; 1266 u64 management_agent_addr;
1540 u32 command_set_spec_id, command_set, unit_characteristics, 1267 u32 unit_characteristics, firmware_revision;
1541 firmware_revision;
1542 unsigned workarounds; 1268 unsigned workarounds;
1543 int i; 1269 int i;
1544 1270
1545 SBP2_DEBUG_ENTER(); 1271 management_agent_addr = 0;
1272 unit_characteristics = 0;
1273 firmware_revision = 0;
1546 1274
1547 management_agent_addr = 0x0;
1548 command_set_spec_id = 0x0;
1549 command_set = 0x0;
1550 unit_characteristics = 0x0;
1551 firmware_revision = 0x0;
1552
1553 /* Handle different fields in the unit directory, based on keys */
1554 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) { 1275 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
1555 switch (kv->key.id) { 1276 switch (kv->key.id) {
1556 case CSR1212_KV_ID_DEPENDENT_INFO: 1277 case CSR1212_KV_ID_DEPENDENT_INFO:
1557 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) { 1278 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET)
1558 /* Save off the management agent address */
1559 management_agent_addr = 1279 management_agent_addr =
1560 CSR1212_REGISTER_SPACE_BASE + 1280 CSR1212_REGISTER_SPACE_BASE +
1561 (kv->value.csr_offset << 2); 1281 (kv->value.csr_offset << 2);
1562 1282
1563 SBP2_DEBUG("sbp2_management_agent_addr = %x", 1283 else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
1564 (unsigned int)management_agent_addr); 1284 lu->lun = ORB_SET_LUN(kv->value.immediate);
1565 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1566 scsi_id->sbp2_lun =
1567 ORB_SET_LUN(kv->value.immediate);
1568 }
1569 break;
1570
1571 case SBP2_COMMAND_SET_SPEC_ID_KEY:
1572 /* Command spec organization */
1573 command_set_spec_id = kv->value.immediate;
1574 SBP2_DEBUG("sbp2_command_set_spec_id = %x",
1575 (unsigned int)command_set_spec_id);
1576 break;
1577
1578 case SBP2_COMMAND_SET_KEY:
1579 /* Command set used by sbp2 device */
1580 command_set = kv->value.immediate;
1581 SBP2_DEBUG("sbp2_command_set = %x",
1582 (unsigned int)command_set);
1583 break; 1285 break;
1584 1286
1585 case SBP2_UNIT_CHARACTERISTICS_KEY: 1287 case SBP2_UNIT_CHARACTERISTICS_KEY:
1586 /* 1288 /* FIXME: This is ignored so far.
1587 * Unit characteristics (orb related stuff 1289 * See SBP-2 clause 7.4.8. */
1588 * that I'm not yet paying attention to)
1589 */
1590 unit_characteristics = kv->value.immediate; 1290 unit_characteristics = kv->value.immediate;
1591 SBP2_DEBUG("sbp2_unit_characteristics = %x",
1592 (unsigned int)unit_characteristics);
1593 break; 1291 break;
1594 1292
1595 case SBP2_FIRMWARE_REVISION_KEY: 1293 case SBP2_FIRMWARE_REVISION_KEY:
1596 /* Firmware revision */
1597 firmware_revision = kv->value.immediate; 1294 firmware_revision = kv->value.immediate;
1598 SBP2_DEBUG("sbp2_firmware_revision = %x",
1599 (unsigned int)firmware_revision);
1600 break; 1295 break;
1601 1296
1602 default: 1297 default:
1298 /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
1299 * Its "ordered" bit has consequences for command ORB
1300 * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
1603 break; 1301 break;
1604 } 1302 }
1605 } 1303 }
@@ -1631,28 +1329,24 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1631 /* We would need one SCSI host template for each target to adjust 1329 /* We would need one SCSI host template for each target to adjust
1632 * max_sectors on the fly, therefore warn only. */ 1330 * max_sectors on the fly, therefore warn only. */
1633 if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && 1331 if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
1634 (max_sectors * 512) > (128 * 1024)) 1332 (sbp2_max_sectors * 512) > (128 * 1024))
1635 SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB " 1333 SBP2_INFO("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
1636 "max transfer size. WARNING: Current max_sectors " 1334 "max transfer size. WARNING: Current max_sectors "
1637 "setting is larger than 128KB (%d sectors)", 1335 "setting is larger than 128KB (%d sectors)",
1638 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1336 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1639 max_sectors); 1337 sbp2_max_sectors);
1640 1338
1641 /* If this is a logical unit directory entry, process the parent 1339 /* If this is a logical unit directory entry, process the parent
1642 * to get the values. */ 1340 * to get the values. */
1643 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) { 1341 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
1644 struct unit_directory *parent_ud = 1342 struct unit_directory *parent_ud = container_of(
1645 container_of(ud->device.parent, struct unit_directory, device); 1343 ud->device.parent, struct unit_directory, device);
1646 sbp2_parse_unit_directory(scsi_id, parent_ud); 1344 sbp2_parse_unit_directory(lu, parent_ud);
1647 } else { 1345 } else {
1648 scsi_id->sbp2_management_agent_addr = management_agent_addr; 1346 lu->management_agent_addr = management_agent_addr;
1649 scsi_id->sbp2_command_set_spec_id = command_set_spec_id; 1347 lu->workarounds = workarounds;
1650 scsi_id->sbp2_command_set = command_set;
1651 scsi_id->sbp2_unit_characteristics = unit_characteristics;
1652 scsi_id->sbp2_firmware_revision = firmware_revision;
1653 scsi_id->workarounds = workarounds;
1654 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) 1348 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1655 scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun); 1349 lu->lun = ORB_SET_LUN(ud->lun);
1656 } 1350 }
1657} 1351}
1658 1352
@@ -1667,133 +1361,114 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1667 * the speed that it needs to use, and the max_rec the host supports, and 1361 * the speed that it needs to use, and the max_rec the host supports, and
1668 * it takes care of the rest. 1362 * it takes care of the rest.
1669 */ 1363 */
1670static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id) 1364static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
1671{ 1365{
1672 struct sbp2scsi_host_info *hi = scsi_id->hi; 1366 struct sbp2_fwhost_info *hi = lu->hi;
1673 u8 payload; 1367 u8 payload;
1674 1368
1675 SBP2_DEBUG_ENTER(); 1369 lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];
1676
1677 scsi_id->speed_code =
1678 hi->host->speed[NODEID_TO_NODE(scsi_id->ne->nodeid)];
1679 1370
1680 /* Bump down our speed if the user requested it */ 1371 if (lu->speed_code > sbp2_max_speed) {
1681 if (scsi_id->speed_code > max_speed) { 1372 lu->speed_code = sbp2_max_speed;
1682 scsi_id->speed_code = max_speed; 1373 SBP2_INFO("Reducing speed to %s",
1683 SBP2_ERR("Forcing SBP-2 max speed down to %s", 1374 hpsb_speedto_str[sbp2_max_speed]);
1684 hpsb_speedto_str[scsi_id->speed_code]);
1685 } 1375 }
1686 1376
1687 /* Payload size is the lesser of what our speed supports and what 1377 /* Payload size is the lesser of what our speed supports and what
1688 * our host supports. */ 1378 * our host supports. */
1689 payload = min(sbp2_speedto_max_payload[scsi_id->speed_code], 1379 payload = min(sbp2_speedto_max_payload[lu->speed_code],
1690 (u8) (hi->host->csr.max_rec - 1)); 1380 (u8) (hi->host->csr.max_rec - 1));
1691 1381
1692 /* If physical DMA is off, work around limitation in ohci1394: 1382 /* If physical DMA is off, work around limitation in ohci1394:
1693 * packet size must not exceed PAGE_SIZE */ 1383 * packet size must not exceed PAGE_SIZE */
1694 if (scsi_id->ne->host->low_addr_space < (1ULL << 32)) 1384 if (lu->ne->host->low_addr_space < (1ULL << 32))
1695 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE && 1385 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
1696 payload) 1386 payload)
1697 payload--; 1387 payload--;
1698 1388
1699 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]", 1389 SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1700 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid), 1390 NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
1701 hpsb_speedto_str[scsi_id->speed_code], 1391 hpsb_speedto_str[lu->speed_code],
1702 SBP2_PAYLOAD_TO_BYTES(payload)); 1392 SBP2_PAYLOAD_TO_BYTES(payload));
1703 1393
1704 scsi_id->max_payload_size = payload; 1394 lu->max_payload_size = payload;
1705 return 0; 1395 return 0;
1706} 1396}
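The payload value computed above is an encoded size: a payload code p stands for 2^(p+2) bytes per packet, and a host's max_rec advertises acceptance of 2^(max_rec+1) bytes, which is why max_rec - 1 can be compared directly against the speed table. A one-line sketch of the conversion, assuming SBP2_PAYLOAD_TO_BYTES() is the shift shown here:

	static inline unsigned int example_payload_to_bytes(u8 payload_code)
	{
		/* e.g. code 9 gives 2048 bytes, the S400 async maximum */
		return 1u << (payload_code + 2);
	}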
1707 1397
1708/* 1398static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
1709 * This function is called in order to perform an SBP-2 agent reset.
1710 */
1711static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1712{ 1399{
1713 quadlet_t data; 1400 quadlet_t data;
1714 u64 addr; 1401 u64 addr;
1715 int retval; 1402 int retval;
1716 unsigned long flags; 1403 unsigned long flags;
1717 1404
1718 SBP2_DEBUG_ENTER(); 1405 /* flush lu->protocol_work */
1719
1720 cancel_delayed_work(&scsi_id->protocol_work);
1721 if (wait) 1406 if (wait)
1722 flush_scheduled_work(); 1407 flush_scheduled_work();
1723 1408
1724 data = ntohl(SBP2_AGENT_RESET_DATA); 1409 data = ntohl(SBP2_AGENT_RESET_DATA);
1725 addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET; 1410 addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
1726 1411
1727 if (wait) 1412 if (wait)
1728 retval = hpsb_node_write(scsi_id->ne, addr, &data, 4); 1413 retval = hpsb_node_write(lu->ne, addr, &data, 4);
1729 else 1414 else
1730 retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4); 1415 retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);
1731 1416
1732 if (retval < 0) { 1417 if (retval < 0) {
1733 SBP2_ERR("hpsb_node_write failed.\n"); 1418 SBP2_ERR("hpsb_node_write failed.\n");
1734 return -EIO; 1419 return -EIO;
1735 } 1420 }
1736 1421
1737 /* 1422 /* make sure that the ORB_POINTER is written on next command */
1738 * Need to make sure orb pointer is written on next command 1423 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1739 */ 1424 lu->last_orb = NULL;
1740 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 1425 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1741 scsi_id->last_orb = NULL;
1742 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
1743 1426
1744 return 0; 1427 return 0;
1745} 1428}
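AGENT_RESET is one of the per-login fetch agent registers based at command_block_agent_addr; clearing lu->last_orb afterwards forces the next command to be posted through ORB_POINTER instead of the DOORBELL. The offsets below follow SBP-2 and are listed for illustration, the driver's own constants are in sbp2.h:

	enum example_fetch_agent_registers {
		EXAMPLE_AGENT_STATE		= 0x00,
		EXAMPLE_AGENT_RESET		= 0x04,
		EXAMPLE_ORB_POINTER		= 0x08,
		EXAMPLE_DOORBELL		= 0x10,
		EXAMPLE_UNSOLICITED_STATUS_ENABLE = 0x14,
	};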
1746 1429
1747static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb, 1430static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1748 struct sbp2scsi_host_info *hi, 1431 struct sbp2_fwhost_info *hi,
1749 struct sbp2_command_info *command, 1432 struct sbp2_command_info *cmd,
1750 unsigned int scsi_use_sg, 1433 unsigned int scsi_use_sg,
1751 struct scatterlist *sgpnt, 1434 struct scatterlist *sgpnt,
1752 u32 orb_direction, 1435 u32 orb_direction,
1753 enum dma_data_direction dma_dir) 1436 enum dma_data_direction dma_dir)
1754{ 1437{
1755 command->dma_dir = dma_dir; 1438 cmd->dma_dir = dma_dir;
1756 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); 1439 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1757 orb->misc |= ORB_SET_DIRECTION(orb_direction); 1440 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1758 1441
1759 /* Special case if only one element (and less than 64KB in size) */ 1442 /* special case if only one element (and less than 64KB in size) */
1760 if ((scsi_use_sg == 1) && 1443 if ((scsi_use_sg == 1) &&
1761 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) { 1444 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1762 1445
1763 SBP2_DEBUG("Only one s/g element"); 1446 cmd->dma_size = sgpnt[0].length;
1764 command->dma_size = sgpnt[0].length; 1447 cmd->dma_type = CMD_DMA_PAGE;
1765 command->dma_type = CMD_DMA_PAGE; 1448 cmd->cmd_dma = dma_map_page(&hi->host->device,
1766 command->cmd_dma = pci_map_page(hi->host->pdev, 1449 sgpnt[0].page, sgpnt[0].offset,
1767 sgpnt[0].page, 1450 cmd->dma_size, cmd->dma_dir);
1768 sgpnt[0].offset,
1769 command->dma_size,
1770 command->dma_dir);
1771 SBP2_DMA_ALLOC("single page scatter element");
1772 1451
1773 orb->data_descriptor_lo = command->cmd_dma; 1452 orb->data_descriptor_lo = cmd->cmd_dma;
1774 orb->misc |= ORB_SET_DATA_SIZE(command->dma_size); 1453 orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
1775 1454
1776 } else { 1455 } else {
1777 struct sbp2_unrestricted_page_table *sg_element = 1456 struct sbp2_unrestricted_page_table *sg_element =
1778 &command->scatter_gather_element[0]; 1457 &cmd->scatter_gather_element[0];
1779 u32 sg_count, sg_len; 1458 u32 sg_count, sg_len;
1780 dma_addr_t sg_addr; 1459 dma_addr_t sg_addr;
1781 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, 1460 int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
1782 dma_dir); 1461 dma_dir);
1783 1462
1784 SBP2_DMA_ALLOC("scatter list"); 1463 cmd->dma_size = scsi_use_sg;
1785 1464 cmd->sge_buffer = sgpnt;
1786 command->dma_size = scsi_use_sg;
1787 command->sge_buffer = sgpnt;
1788 1465
1789 /* use page tables (s/g) */ 1466 /* use page tables (s/g) */
1790 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); 1467 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1791 orb->data_descriptor_lo = command->sge_dma; 1468 orb->data_descriptor_lo = cmd->sge_dma;
1792 1469
1793 /* 1470 /* loop through and fill out our SBP-2 page tables
1794 * Loop through and fill out our sbp-2 page tables 1471 * (and split up anything too large) */
1795 * (and split up anything too large)
1796 */
1797 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) { 1472 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1798 sg_len = sg_dma_len(sgpnt); 1473 sg_len = sg_dma_len(sgpnt);
1799 sg_addr = sg_dma_address(sgpnt); 1474 sg_addr = sg_dma_address(sgpnt);
@@ -1813,70 +1488,53 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1813 } 1488 }
1814 } 1489 }
1815 1490
1816 /* Number of page table (s/g) elements */
1817 orb->misc |= ORB_SET_DATA_SIZE(sg_count); 1491 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1818 1492
1819 sbp2util_packet_dump(sg_element,
1820 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1821 "sbp2 s/g list", command->sge_dma);
1822
1823 /* Byte swap page tables if necessary */
1824 sbp2util_cpu_to_be32_buffer(sg_element, 1493 sbp2util_cpu_to_be32_buffer(sg_element,
1825 (sizeof(struct sbp2_unrestricted_page_table)) * 1494 (sizeof(struct sbp2_unrestricted_page_table)) *
1826 sg_count); 1495 sg_count);
1827 } 1496 }
1828} 1497}
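The loop elided by the hunk header above walks the mapped scatterlist and splits any element larger than SBP2_MAX_SG_ELEMENT_LENGTH across several page-table entries. A condensed sketch of that splitting step; the length macro and hi-word field name are assumptions about sbp2.h:

	while (sg_len) {
		u32 chunk = min_t(u32, sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);

		sg_element[sg_count].segment_base_lo = sg_addr;
		/* the hi word also carries the segment length */
		sg_element[sg_count].length_segment_base_hi =
					EXAMPLE_SET_SEGMENT_LENGTH(chunk);
		sg_addr += chunk;
		sg_len -= chunk;
		sg_count++;
	}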
1829 1498
1830static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb, 1499static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1831 struct sbp2scsi_host_info *hi, 1500 struct sbp2_fwhost_info *hi,
1832 struct sbp2_command_info *command, 1501 struct sbp2_command_info *cmd,
1833 struct scatterlist *sgpnt, 1502 struct scatterlist *sgpnt,
1834 u32 orb_direction, 1503 u32 orb_direction,
1835 unsigned int scsi_request_bufflen, 1504 unsigned int scsi_request_bufflen,
1836 void *scsi_request_buffer, 1505 void *scsi_request_buffer,
1837 enum dma_data_direction dma_dir) 1506 enum dma_data_direction dma_dir)
1838{ 1507{
1839 command->dma_dir = dma_dir; 1508 cmd->dma_dir = dma_dir;
1840 command->dma_size = scsi_request_bufflen; 1509 cmd->dma_size = scsi_request_bufflen;
1841 command->dma_type = CMD_DMA_SINGLE; 1510 cmd->dma_type = CMD_DMA_SINGLE;
1842 command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer, 1511 cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
1843 command->dma_size, command->dma_dir); 1512 cmd->dma_size, cmd->dma_dir);
1844 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); 1513 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1845 orb->misc |= ORB_SET_DIRECTION(orb_direction); 1514 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1846 1515
1847 SBP2_DMA_ALLOC("single bulk"); 1516 /* handle case where we get a command w/o s/g enabled
1848 1517 * (but check for transfers larger than 64K) */
1849 /*
1850 * Handle case where we get a command w/o s/g enabled (but
1851 * check for transfers larger than 64K)
1852 */
1853 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) { 1518 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1854 1519
1855 orb->data_descriptor_lo = command->cmd_dma; 1520 orb->data_descriptor_lo = cmd->cmd_dma;
1856 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen); 1521 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1857 1522
1858 } else { 1523 } else {
1524 /* The buffer is too large. Turn this into page tables. */
1525
1859 struct sbp2_unrestricted_page_table *sg_element = 1526 struct sbp2_unrestricted_page_table *sg_element =
1860 &command->scatter_gather_element[0]; 1527 &cmd->scatter_gather_element[0];
1861 u32 sg_count, sg_len; 1528 u32 sg_count, sg_len;
1862 dma_addr_t sg_addr; 1529 dma_addr_t sg_addr;
1863 1530
1864 /* 1531 orb->data_descriptor_lo = cmd->sge_dma;
1865 * Need to turn this into page tables, since the
1866 * buffer is too large.
1867 */
1868 orb->data_descriptor_lo = command->sge_dma;
1869
1870 /* Use page tables (s/g) */
1871 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); 1532 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1872 1533
1873 /* 1534 /* fill out our SBP-2 page tables; split up the large buffer */
1874 * fill out our sbp-2 page tables (and split up
1875 * the large buffer)
1876 */
1877 sg_count = 0; 1535 sg_count = 0;
1878 sg_len = scsi_request_bufflen; 1536 sg_len = scsi_request_bufflen;
1879 sg_addr = command->cmd_dma; 1537 sg_addr = cmd->cmd_dma;
1880 while (sg_len) { 1538 while (sg_len) {
1881 sg_element[sg_count].segment_base_lo = sg_addr; 1539 sg_element[sg_count].segment_base_lo = sg_addr;
1882 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { 1540 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
@@ -1892,50 +1550,40 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1892 sg_count++; 1550 sg_count++;
1893 } 1551 }
1894 1552
1895 /* Number of page table (s/g) elements */
1896 orb->misc |= ORB_SET_DATA_SIZE(sg_count); 1553 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1897 1554
1898 sbp2util_packet_dump(sg_element,
1899 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1900 "sbp2 s/g list", command->sge_dma);
1901
1902 /* Byte swap page tables if necessary */
1903 sbp2util_cpu_to_be32_buffer(sg_element, 1555 sbp2util_cpu_to_be32_buffer(sg_element,
1904 (sizeof(struct sbp2_unrestricted_page_table)) * 1556 (sizeof(struct sbp2_unrestricted_page_table)) *
1905 sg_count); 1557 sg_count);
1906 } 1558 }
1907} 1559}
1908 1560
1909/* 1561static void sbp2_create_command_orb(struct sbp2_lu *lu,
1910 * This function is called to create the actual command orb and s/g list 1562 struct sbp2_command_info *cmd,
1911 * out of the scsi command itself.
1912 */
1913static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1914 struct sbp2_command_info *command,
1915 unchar *scsi_cmd, 1563 unchar *scsi_cmd,
1916 unsigned int scsi_use_sg, 1564 unsigned int scsi_use_sg,
1917 unsigned int scsi_request_bufflen, 1565 unsigned int scsi_request_bufflen,
1918 void *scsi_request_buffer, 1566 void *scsi_request_buffer,
1919 enum dma_data_direction dma_dir) 1567 enum dma_data_direction dma_dir)
1920{ 1568{
1921 struct sbp2scsi_host_info *hi = scsi_id->hi; 1569 struct sbp2_fwhost_info *hi = lu->hi;
1922 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer; 1570 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
1923 struct sbp2_command_orb *command_orb = &command->command_orb; 1571 struct sbp2_command_orb *orb = &cmd->command_orb;
1924 u32 orb_direction; 1572 u32 orb_direction;
1925 1573
1926 /* 1574 /*
1927 * Set-up our command ORB.. 1575 * Set-up our command ORB.
1928 * 1576 *
1929 * NOTE: We're doing unrestricted page tables (s/g), as this is 1577 * NOTE: We're doing unrestricted page tables (s/g), as this is
1930 * best performance (at least with the devices I have). This means 1578 * best performance (at least with the devices I have). This means
1931 * that data_size becomes the number of s/g elements, and 1579 * that data_size becomes the number of s/g elements, and
1932 * page_size should be zero (for unrestricted). 1580 * page_size should be zero (for unrestricted).
1933 */ 1581 */
1934 command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1); 1582 orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
1935 command_orb->next_ORB_lo = 0x0; 1583 orb->next_ORB_lo = 0x0;
1936 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size); 1584 orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
1937 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); 1585 orb->misc |= ORB_SET_SPEED(lu->speed_code);
1938 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ 1586 orb->misc |= ORB_SET_NOTIFY(1);
1939 1587
1940 if (dma_dir == DMA_NONE) 1588 if (dma_dir == DMA_NONE)
1941 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; 1589 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
@@ -1944,66 +1592,51 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1944 else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen) 1592 else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
1945 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA; 1593 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1946 else { 1594 else {
1947 SBP2_WARN("Falling back to DMA_NONE"); 1595 SBP2_INFO("Falling back to DMA_NONE");
1948 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; 1596 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1949 } 1597 }
1950 1598
1951 /* Set-up our pagetable stuff */ 1599 /* set up our page table stuff */
1952 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) { 1600 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1953 SBP2_DEBUG("No data transfer"); 1601 orb->data_descriptor_hi = 0x0;
1954 command_orb->data_descriptor_hi = 0x0; 1602 orb->data_descriptor_lo = 0x0;
1955 command_orb->data_descriptor_lo = 0x0; 1603 orb->misc |= ORB_SET_DIRECTION(1);
1956 command_orb->misc |= ORB_SET_DIRECTION(1); 1604 } else if (scsi_use_sg)
1957 } else if (scsi_use_sg) { 1605 sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
1958 SBP2_DEBUG("Use scatter/gather"); 1606 orb_direction, dma_dir);
1959 sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg, 1607 else
1960 sgpnt, orb_direction, dma_dir); 1608 sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
1961 } else { 1609 scsi_request_bufflen,
1962 SBP2_DEBUG("No scatter/gather");
1963 sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
1964 orb_direction, scsi_request_bufflen,
1965 scsi_request_buffer, dma_dir); 1610 scsi_request_buffer, dma_dir);
1966 }
1967 1611
1968 /* Byte swap command ORB if necessary */ 1612 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
1969 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
1970 1613
1971 /* Put our scsi command in the command ORB */ 1614 memset(orb->cdb, 0, 12);
1972 memset(command_orb->cdb, 0, 12); 1615 memcpy(orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1973 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1974} 1616}
1975 1617
1976/* 1618static void sbp2_link_orb_command(struct sbp2_lu *lu,
1977 * This function is called in order to begin a regular SBP-2 command. 1619 struct sbp2_command_info *cmd)
1978 */
1979static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1980 struct sbp2_command_info *command)
1981{ 1620{
1982 struct sbp2scsi_host_info *hi = scsi_id->hi; 1621 struct sbp2_fwhost_info *hi = lu->hi;
1983 struct sbp2_command_orb *command_orb = &command->command_orb;
1984 struct sbp2_command_orb *last_orb; 1622 struct sbp2_command_orb *last_orb;
1985 dma_addr_t last_orb_dma; 1623 dma_addr_t last_orb_dma;
1986 u64 addr = scsi_id->sbp2_command_block_agent_addr; 1624 u64 addr = lu->command_block_agent_addr;
1987 quadlet_t data[2]; 1625 quadlet_t data[2];
1988 size_t length; 1626 size_t length;
1989 unsigned long flags; 1627 unsigned long flags;
1990 1628
1991 outstanding_orb_incr; 1629 dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
1992 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", 1630 sizeof(struct sbp2_command_orb),
1993 command_orb, global_outstanding_command_orbs); 1631 DMA_TO_DEVICE);
1994 1632 dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
1995 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, 1633 sizeof(cmd->scatter_gather_element),
1996 sizeof(struct sbp2_command_orb), 1634 DMA_BIDIRECTIONAL);
1997 PCI_DMA_TODEVICE); 1635
1998 pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma, 1636 /* check to see if there are any previous orbs to use */
1999 sizeof(command->scatter_gather_element), 1637 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2000 PCI_DMA_BIDIRECTIONAL); 1638 last_orb = lu->last_orb;
2001 /* 1639 last_orb_dma = lu->last_orb_dma;
2002 * Check to see if there are any previous orbs to use
2003 */
2004 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2005 last_orb = scsi_id->last_orb;
2006 last_orb_dma = scsi_id->last_orb_dma;
2007 if (!last_orb) { 1640 if (!last_orb) {
2008 /* 1641 /*
2009 * last_orb == NULL means: We know that the target's fetch agent 1642 * last_orb == NULL means: We know that the target's fetch agent
@@ -2011,7 +1644,7 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2011 */ 1644 */
2012 addr += SBP2_ORB_POINTER_OFFSET; 1645 addr += SBP2_ORB_POINTER_OFFSET;
2013 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1646 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
2014 data[1] = command->command_orb_dma; 1647 data[1] = cmd->command_orb_dma;
2015 sbp2util_cpu_to_be32_buffer(data, 8); 1648 sbp2util_cpu_to_be32_buffer(data, 8);
2016 length = 8; 1649 length = 8;
2017 } else { 1650 } else {
@@ -2022,27 +1655,25 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2022 * The target's fetch agent may or may not have read this 1655 * The target's fetch agent may or may not have read this
2023 * previous ORB yet. 1656 * previous ORB yet.
2024 */ 1657 */
2025 pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma, 1658 dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
2026 sizeof(struct sbp2_command_orb), 1659 sizeof(struct sbp2_command_orb),
2027 PCI_DMA_TODEVICE); 1660 DMA_TO_DEVICE);
2028 last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma); 1661 last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
2029 wmb(); 1662 wmb();
2030 /* Tells hardware that this pointer is valid */ 1663 /* Tells hardware that this pointer is valid */
2031 last_orb->next_ORB_hi = 0; 1664 last_orb->next_ORB_hi = 0;
2032 pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma, 1665 dma_sync_single_for_device(&hi->host->device, last_orb_dma,
2033 sizeof(struct sbp2_command_orb), 1666 sizeof(struct sbp2_command_orb),
2034 PCI_DMA_TODEVICE); 1667 DMA_TO_DEVICE);
2035 addr += SBP2_DOORBELL_OFFSET; 1668 addr += SBP2_DOORBELL_OFFSET;
2036 data[0] = 0; 1669 data[0] = 0;
2037 length = 4; 1670 length = 4;
2038 } 1671 }
2039 scsi_id->last_orb = command_orb; 1672 lu->last_orb = &cmd->command_orb;
2040 scsi_id->last_orb_dma = command->command_orb_dma; 1673 lu->last_orb_dma = cmd->command_orb_dma;
2041 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 1674 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2042 1675
2043 SBP2_ORB_DEBUG("write to %s register, command orb %p", 1676 if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
2044 last_orb ? "DOORBELL" : "ORB_POINTER", command_orb);
2045 if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) {
2046 /* 1677 /*
2047 * sbp2util_node_write_no_wait failed. We certainly ran out 1678 * sbp2util_node_write_no_wait failed. We certainly ran out
2048 * of transaction labels, perhaps just because there were no 1679 * of transaction labels, perhaps just because there were no
@@ -2051,51 +1682,29 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2051 * the workqueue job will sleep to guaranteedly get a tlabel. 1682 * the workqueue job will sleep to guaranteedly get a tlabel.
2052 * We do not accept new commands until the job is over. 1683 * We do not accept new commands until the job is over.
2053 */ 1684 */
2054 scsi_block_requests(scsi_id->scsi_host); 1685 scsi_block_requests(lu->shost);
2055 PREPARE_DELAYED_WORK(&scsi_id->protocol_work, 1686 PREPARE_WORK(&lu->protocol_work,
2056 last_orb ? sbp2util_write_doorbell: 1687 last_orb ? sbp2util_write_doorbell:
2057 sbp2util_write_orb_pointer); 1688 sbp2util_write_orb_pointer);
2058 schedule_delayed_work(&scsi_id->protocol_work, 0); 1689 schedule_work(&lu->protocol_work);
2059 } 1690 }
2060} 1691}
2061 1692
2062/* 1693static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
2063 * This function is called in order to begin a regular SBP-2 command.
2064 */
2065static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2066 struct scsi_cmnd *SCpnt,
2067 void (*done)(struct scsi_cmnd *)) 1694 void (*done)(struct scsi_cmnd *))
2068{ 1695{
2069 unchar *cmd = (unchar *) SCpnt->cmnd; 1696 unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
2070 unsigned int request_bufflen = SCpnt->request_bufflen; 1697 unsigned int request_bufflen = SCpnt->request_bufflen;
2071 struct sbp2_command_info *command; 1698 struct sbp2_command_info *cmd;
2072 1699
2073 SBP2_DEBUG_ENTER(); 1700 cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
2074 SBP2_DEBUG("SCSI transfer size = %x", request_bufflen); 1701 if (!cmd)
2075 SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
2076
2077 /*
2078 * Allocate a command orb and s/g structure
2079 */
2080 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
2081 if (!command) {
2082 return -EIO; 1702 return -EIO;
2083 }
2084 1703
2085 /* 1704 sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
2086 * Now actually fill in the comamnd orb and sbp2 s/g list
2087 */
2088 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
2089 request_bufflen, SCpnt->request_buffer, 1705 request_bufflen, SCpnt->request_buffer,
2090 SCpnt->sc_data_direction); 1706 SCpnt->sc_data_direction);
2091 1707 sbp2_link_orb_command(lu, cmd);
2092 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
2093 "sbp2 command orb", command->command_orb_dma);
2094
2095 /*
2096 * Link up the orb, and ring the doorbell if needed
2097 */
2098 sbp2_link_orb_command(scsi_id, command);
2099 1708
2100 return 0; 1709 return 0;
2101} 1710}
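For orientation: when a request arrives without a scatter/gather list but is larger than SBP2_MAX_SG_ELEMENT_LENGTH, sbp2_prep_command_orb_no_sg() (above) splits the single DMA region into unrestricted page table elements. A minimal sketch of that split, assuming the structures and macros declared in sbp2.h; the helper name fill_page_table() is invented for this illustration and is not part of the driver:

static unsigned int fill_page_table(struct sbp2_unrestricted_page_table *pt,
				    dma_addr_t addr, u32 len)
{
	unsigned int n = 0;

	while (len) {
		u32 chunk = len > SBP2_MAX_SG_ELEMENT_LENGTH ?
			    SBP2_MAX_SG_ELEMENT_LENGTH : len;

		pt[n].segment_base_lo = addr;
		pt[n].length_segment_base_hi =
			PAGE_TABLE_SET_SEGMENT_LENGTH(chunk);
		addr += chunk;
		len -= chunk;
		n++;
	}
	/* the caller records n via ORB_SET_DATA_SIZE() and byte-swaps the
	 * table with sbp2util_cpu_to_be32_buffer() before submitting it */
	return n;
}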
@@ -2103,13 +1712,10 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2103/* 1712/*
2104 * Translates SBP-2 status into SCSI sense data for check conditions 1713 * Translates SBP-2 status into SCSI sense data for check conditions
2105 */ 1714 */
2106static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data) 1715static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
1716 unchar *sense_data)
2107{ 1717{
2108 SBP2_DEBUG_ENTER(); 1718 /* OK, it's pretty ugly... ;-) */
2109
2110 /*
2111 * Ok, it's pretty ugly... ;-)
2112 */
2113 sense_data[0] = 0x70; 1719 sense_data[0] = 0x70;
2114 sense_data[1] = 0x0; 1720 sense_data[1] = 0x0;
2115 sense_data[2] = sbp2_status[9]; 1721 sense_data[2] = sbp2_status[9];
@@ -2127,28 +1733,21 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
2127 sense_data[14] = sbp2_status[20]; 1733 sense_data[14] = sbp2_status[20];
2128 sense_data[15] = sbp2_status[21]; 1734 sense_data[15] = sbp2_status[21];
2129 1735
2130 return sbp2_status[8] & 0x3f; /* return scsi status */ 1736 return sbp2_status[8] & 0x3f;
2131} 1737}
2132 1738
2133/*
2134 * This function deals with status writes from the SBP-2 device
2135 */
2136static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, 1739static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
2137 int destid, quadlet_t *data, u64 addr, 1740 int destid, quadlet_t *data, u64 addr,
2138 size_t length, u16 fl) 1741 size_t length, u16 fl)
2139{ 1742{
2140 struct sbp2scsi_host_info *hi; 1743 struct sbp2_fwhost_info *hi;
2141 struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; 1744 struct sbp2_lu *lu = NULL, *lu_tmp;
2142 struct scsi_cmnd *SCpnt = NULL; 1745 struct scsi_cmnd *SCpnt = NULL;
2143 struct sbp2_status_block *sb; 1746 struct sbp2_status_block *sb;
2144 u32 scsi_status = SBP2_SCSI_STATUS_GOOD; 1747 u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
2145 struct sbp2_command_info *command; 1748 struct sbp2_command_info *cmd;
2146 unsigned long flags; 1749 unsigned long flags;
2147 1750
2148 SBP2_DEBUG_ENTER();
2149
2150 sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
2151
2152 if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) { 1751 if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
2153 SBP2_ERR("Wrong size of status block"); 1752 SBP2_ERR("Wrong size of status block");
2154 return RCODE_ADDRESS_ERROR; 1753 return RCODE_ADDRESS_ERROR;
@@ -2162,131 +1761,97 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
2162 SBP2_ERR("host info is NULL - this is bad!"); 1761 SBP2_ERR("host info is NULL - this is bad!");
2163 return RCODE_ADDRESS_ERROR; 1762 return RCODE_ADDRESS_ERROR;
2164 } 1763 }
2165 /* 1764
2166 * Find our scsi_id structure by looking at the status fifo address 1765 /* Find the unit which wrote the status. */
2167 * written to by the sbp2 device. 1766 list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
2168 */ 1767 if (lu_tmp->ne->nodeid == nodeid &&
2169 list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) { 1768 lu_tmp->status_fifo_addr == addr) {
2170 if (scsi_id_tmp->ne->nodeid == nodeid && 1769 lu = lu_tmp;
2171 scsi_id_tmp->status_fifo_addr == addr) {
2172 scsi_id = scsi_id_tmp;
2173 break; 1770 break;
2174 } 1771 }
2175 } 1772 }
2176 if (unlikely(!scsi_id)) { 1773 if (unlikely(!lu)) {
2177 SBP2_ERR("scsi_id is NULL - device is gone?"); 1774 SBP2_ERR("lu is NULL - device is gone?");
2178 return RCODE_ADDRESS_ERROR; 1775 return RCODE_ADDRESS_ERROR;
2179 } 1776 }
2180 1777
2181 /* 1778 /* Put response into lu status fifo buffer. The first two bytes
2182 * Put response into scsi_id status fifo buffer. The first two bytes
2183 * come in big endian bit order. Often the target writes only a 1779 * come in big endian bit order. Often the target writes only a
2184 * truncated status block, minimally the first two quadlets. The rest 1780 * truncated status block, minimally the first two quadlets. The rest
2185 * is implied to be zeros. 1781 * is implied to be zeros. */
2186 */ 1782 sb = &lu->status_block;
2187 sb = &scsi_id->status_block;
2188 memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent)); 1783 memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
2189 memcpy(sb, data, length); 1784 memcpy(sb, data, length);
2190 sbp2util_be32_to_cpu_buffer(sb, 8); 1785 sbp2util_be32_to_cpu_buffer(sb, 8);
2191 1786
2192 /* 1787 /* Ignore unsolicited status. Handle command ORB status. */
2193 * Ignore unsolicited status. Handle command ORB status.
2194 */
2195 if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2)) 1788 if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
2196 command = NULL; 1789 cmd = NULL;
2197 else 1790 else
2198 command = sbp2util_find_command_for_orb(scsi_id, 1791 cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
2199 sb->ORB_offset_lo); 1792 if (cmd) {
2200 if (command) { 1793 dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
2201 SBP2_DEBUG("Found status for command ORB"); 1794 sizeof(struct sbp2_command_orb),
2202 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, 1795 DMA_TO_DEVICE);
2203 sizeof(struct sbp2_command_orb), 1796 dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
2204 PCI_DMA_TODEVICE); 1797 sizeof(cmd->scatter_gather_element),
2205 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, 1798 DMA_BIDIRECTIONAL);
2206 sizeof(command->scatter_gather_element), 1799 /* Grab SCSI command pointers and check status. */
2207 PCI_DMA_BIDIRECTIONAL);
2208
2209 SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
2210 outstanding_orb_decr;
2211
2212 /*
2213 * Matched status with command, now grab scsi command pointers
2214 * and check status.
2215 */
2216 /* 1800 /*
2217 * FIXME: If the src field in the status is 1, the ORB DMA must 1801 * FIXME: If the src field in the status is 1, the ORB DMA must
2218 * not be reused until status for a subsequent ORB is received. 1802 * not be reused until status for a subsequent ORB is received.
2219 */ 1803 */
2220 SCpnt = command->Current_SCpnt; 1804 SCpnt = cmd->Current_SCpnt;
2221 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 1805 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2222 sbp2util_mark_command_completed(scsi_id, command); 1806 sbp2util_mark_command_completed(lu, cmd);
2223 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 1807 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2224 1808
2225 if (SCpnt) { 1809 if (SCpnt) {
2226 u32 h = sb->ORB_offset_hi_misc; 1810 u32 h = sb->ORB_offset_hi_misc;
2227 u32 r = STATUS_GET_RESP(h); 1811 u32 r = STATUS_GET_RESP(h);
2228 1812
2229 if (r != RESP_STATUS_REQUEST_COMPLETE) { 1813 if (r != RESP_STATUS_REQUEST_COMPLETE) {
2230 SBP2_WARN("resp 0x%x, sbp_status 0x%x", 1814 SBP2_INFO("resp 0x%x, sbp_status 0x%x",
2231 r, STATUS_GET_SBP_STATUS(h)); 1815 r, STATUS_GET_SBP_STATUS(h));
2232 scsi_status = 1816 scsi_status =
2233 r == RESP_STATUS_TRANSPORT_FAILURE ? 1817 r == RESP_STATUS_TRANSPORT_FAILURE ?
2234 SBP2_SCSI_STATUS_BUSY : 1818 SBP2_SCSI_STATUS_BUSY :
2235 SBP2_SCSI_STATUS_COMMAND_TERMINATED; 1819 SBP2_SCSI_STATUS_COMMAND_TERMINATED;
2236 } 1820 }
2237 /* 1821
2238 * See if the target stored any scsi status information. 1822 if (STATUS_GET_LEN(h) > 1)
2239 */
2240 if (STATUS_GET_LEN(h) > 1) {
2241 SBP2_DEBUG("CHECK CONDITION");
2242 scsi_status = sbp2_status_to_sense_data( 1823 scsi_status = sbp2_status_to_sense_data(
2243 (unchar *)sb, SCpnt->sense_buffer); 1824 (unchar *)sb, SCpnt->sense_buffer);
2244 } 1825
2245 /* 1826 if (STATUS_TEST_DEAD(h))
2246 * Check to see if the dead bit is set. If so, we'll 1827 sbp2_agent_reset(lu, 0);
2247 * have to initiate a fetch agent reset.
2248 */
2249 if (STATUS_TEST_DEAD(h)) {
2250 SBP2_DEBUG("Dead bit set - "
2251 "initiating fetch agent reset");
2252 sbp2_agent_reset(scsi_id, 0);
2253 }
2254 SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
2255 } 1828 }
2256 1829
2257 /* 1830 /* Check here to see if there are no commands in-use. If there
2258 * Check here to see if there are no commands in-use. If there
2259 * are none, we know that the fetch agent left the active state 1831 * are none, we know that the fetch agent left the active state
2260 * _and_ that we did not reactivate it yet. Therefore clear 1832 * _and_ that we did not reactivate it yet. Therefore clear
2261 * last_orb so that next time we write directly to the 1833 * last_orb so that next time we write directly to the
2262 * ORB_POINTER register. That way the fetch agent does not need 1834 * ORB_POINTER register. That way the fetch agent does not need
2263 * to refetch the next_ORB. 1835 * to refetch the next_ORB. */
2264 */ 1836 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2265 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 1837 if (list_empty(&lu->cmd_orb_inuse))
2266 if (list_empty(&scsi_id->sbp2_command_orb_inuse)) 1838 lu->last_orb = NULL;
2267 scsi_id->last_orb = NULL; 1839 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2268 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2269 1840
2270 } else { 1841 } else {
2271 /* 1842 /* It's probably status after a management request. */
2272 * It's probably a login/logout/reconnect status. 1843 if ((sb->ORB_offset_lo == lu->reconnect_orb_dma) ||
2273 */ 1844 (sb->ORB_offset_lo == lu->login_orb_dma) ||
2274 if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) || 1845 (sb->ORB_offset_lo == lu->query_logins_orb_dma) ||
2275 (sb->ORB_offset_lo == scsi_id->login_orb_dma) || 1846 (sb->ORB_offset_lo == lu->logout_orb_dma)) {
2276 (sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) || 1847 lu->access_complete = 1;
2277 (sb->ORB_offset_lo == scsi_id->logout_orb_dma)) { 1848 wake_up_interruptible(&sbp2_access_wq);
2278 scsi_id->access_complete = 1;
2279 wake_up_interruptible(&access_wq);
2280 } 1849 }
2281 } 1850 }
2282 1851
2283 if (SCpnt) { 1852 if (SCpnt)
2284 SBP2_DEBUG("Completing SCSI command"); 1853 sbp2scsi_complete_command(lu, scsi_status, SCpnt,
2285 sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt, 1854 cmd->Current_done);
2286 command->Current_done);
2287 SBP2_ORB_DEBUG("command orb completed");
2288 }
2289
2290 return RCODE_COMPLETE; 1855 return RCODE_COMPLETE;
2291} 1856}
2292 1857
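The status write handler above keys almost everything off the first quadlet of the status block, using the STATUS_* accessors defined in sbp2.h. A simplified sketch of that decode (the function name decode_status_quadlet() is invented for this illustration):

static void decode_status_quadlet(struct sbp2_status_block *sb)
{
	/* ORB_offset_hi_misc has already been converted to CPU byte order */
	u32 h = sb->ORB_offset_hi_misc;

	u32 src        = STATUS_GET_SRC(h);        /* 2 means unsolicited status */
	u32 resp       = STATUS_GET_RESP(h);       /* e.g. RESP_STATUS_REQUEST_COMPLETE */
	u32 len        = STATUS_GET_LEN(h);        /* >1 means sense data is present */
	u32 sbp_status = STATUS_GET_SBP_STATUS(h);
	int dead       = STATUS_TEST_DEAD(h) != 0; /* fetch agent needs a reset */

	printk(KERN_DEBUG "sbp2 status: src %u resp %u len %u status 0x%02x dead %d\n",
	       src, resp, len, sbp_status, dead);
}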
@@ -2294,77 +1859,57 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
2294 * SCSI interface related section 1859 * SCSI interface related section
2295 **************************************/ 1860 **************************************/
2296 1861
2297/*
2298 * This routine is the main request entry routine for doing I/O. It is
2299 * called from the scsi stack directly.
2300 */
2301static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, 1862static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2302 void (*done)(struct scsi_cmnd *)) 1863 void (*done)(struct scsi_cmnd *))
2303{ 1864{
2304 struct scsi_id_instance_data *scsi_id = 1865 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2305 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 1866 struct sbp2_fwhost_info *hi;
2306 struct sbp2scsi_host_info *hi;
2307 int result = DID_NO_CONNECT << 16; 1867 int result = DID_NO_CONNECT << 16;
2308 1868
2309 SBP2_DEBUG_ENTER(); 1869 if (unlikely(!sbp2util_node_is_available(lu)))
2310#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
2311 scsi_print_command(SCpnt);
2312#endif
2313
2314 if (!sbp2util_node_is_available(scsi_id))
2315 goto done; 1870 goto done;
2316 1871
2317 hi = scsi_id->hi; 1872 hi = lu->hi;
2318 1873
2319 if (!hi) { 1874 if (unlikely(!hi)) {
2320 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!"); 1875 SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!");
2321 goto done; 1876 goto done;
2322 } 1877 }
2323 1878
2324 /* 1879 /* Multiple units are currently represented to the SCSI core as separate
2325 * Until we handle multiple luns, just return selection time-out 1880 * targets, not as one target with multiple LUs. Therefore return
2326 * to any IO directed at non-zero LUNs 1881 * selection time-out to any IO directed at non-zero LUNs. */
2327 */ 1882 if (unlikely(SCpnt->device->lun))
2328 if (SCpnt->device->lun)
2329 goto done; 1883 goto done;
2330 1884
2331 /* 1885 /* handle the request sense command here (auto-request sense) */
2332 * Check for request sense command, and handle it here
2333 * (autorequest sense)
2334 */
2335 if (SCpnt->cmnd[0] == REQUEST_SENSE) { 1886 if (SCpnt->cmnd[0] == REQUEST_SENSE) {
2336 SBP2_DEBUG("REQUEST_SENSE"); 1887 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer,
2337 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen); 1888 SCpnt->request_bufflen);
2338 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 1889 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
2339 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done); 1890 sbp2scsi_complete_command(lu, SBP2_SCSI_STATUS_GOOD, SCpnt,
1891 done);
2340 return 0; 1892 return 0;
2341 } 1893 }
2342 1894
2343 /* 1895 if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
2344 * Check to see if we are in the middle of a bus reset.
2345 */
2346 if (!hpsb_node_entry_valid(scsi_id->ne)) {
2347 SBP2_ERR("Bus reset in progress - rejecting command"); 1896 SBP2_ERR("Bus reset in progress - rejecting command");
2348 result = DID_BUS_BUSY << 16; 1897 result = DID_BUS_BUSY << 16;
2349 goto done; 1898 goto done;
2350 } 1899 }
2351 1900
2352 /* 1901 /* Bidirectional commands are not yet implemented,
2353 * Bidirectional commands are not yet implemented, 1902 * and unknown transfer direction not handled. */
2354 * and unknown transfer direction not handled. 1903 if (unlikely(SCpnt->sc_data_direction == DMA_BIDIRECTIONAL)) {
2355 */
2356 if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
2357 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); 1904 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
2358 result = DID_ERROR << 16; 1905 result = DID_ERROR << 16;
2359 goto done; 1906 goto done;
2360 } 1907 }
2361 1908
2362 /* 1909 if (sbp2_send_command(lu, SCpnt, done)) {
2363 * Try and send our SCSI command
2364 */
2365 if (sbp2_send_command(scsi_id, SCpnt, done)) {
2366 SBP2_ERR("Error sending SCSI command"); 1910 SBP2_ERR("Error sending SCSI command");
2367 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, 1911 sbp2scsi_complete_command(lu,
1912 SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
2368 SCpnt, done); 1913 SCpnt, done);
2369 } 1914 }
2370 return 0; 1915 return 0;
@@ -2375,75 +1920,46 @@ done:
2375 return 0; 1920 return 0;
2376} 1921}
2377 1922
2378/* 1923static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
2379 * This function is called in order to complete all outstanding SBP-2
2380 * commands (in case of resets, etc.).
2381 */
2382static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
2383 u32 status)
2384{ 1924{
2385 struct sbp2scsi_host_info *hi = scsi_id->hi; 1925 struct sbp2_fwhost_info *hi = lu->hi;
2386 struct list_head *lh; 1926 struct list_head *lh;
2387 struct sbp2_command_info *command; 1927 struct sbp2_command_info *cmd;
2388 unsigned long flags; 1928 unsigned long flags;
2389 1929
2390 SBP2_DEBUG_ENTER(); 1930 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2391 1931 while (!list_empty(&lu->cmd_orb_inuse)) {
2392 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 1932 lh = lu->cmd_orb_inuse.next;
2393 while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) { 1933 cmd = list_entry(lh, struct sbp2_command_info, list);
2394 SBP2_DEBUG("Found pending command to complete"); 1934 dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
2395 lh = scsi_id->sbp2_command_orb_inuse.next; 1935 sizeof(struct sbp2_command_orb),
2396 command = list_entry(lh, struct sbp2_command_info, list); 1936 DMA_TO_DEVICE);
2397 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, 1937 dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
2398 sizeof(struct sbp2_command_orb), 1938 sizeof(cmd->scatter_gather_element),
2399 PCI_DMA_TODEVICE); 1939 DMA_BIDIRECTIONAL);
2400 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, 1940 sbp2util_mark_command_completed(lu, cmd);
2401 sizeof(command->scatter_gather_element), 1941 if (cmd->Current_SCpnt) {
2402 PCI_DMA_BIDIRECTIONAL); 1942 cmd->Current_SCpnt->result = status << 16;
2403 sbp2util_mark_command_completed(scsi_id, command); 1943 cmd->Current_done(cmd->Current_SCpnt);
2404 if (command->Current_SCpnt) {
2405 command->Current_SCpnt->result = status << 16;
2406 command->Current_done(command->Current_SCpnt);
2407 } 1944 }
2408 } 1945 }
2409 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 1946 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2410 1947
2411 return; 1948 return;
2412} 1949}
2413 1950
2414/* 1951/*
2415 * This function is called in order to complete a regular SBP-2 command. 1952 * Complete a regular SCSI command. Can be called in atomic context.
2416 *
2417 * This can be called in interrupt context.
2418 */ 1953 */
2419static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, 1954static void sbp2scsi_complete_command(struct sbp2_lu *lu, u32 scsi_status,
2420 u32 scsi_status, struct scsi_cmnd *SCpnt, 1955 struct scsi_cmnd *SCpnt,
2421 void (*done)(struct scsi_cmnd *)) 1956 void (*done)(struct scsi_cmnd *))
2422{ 1957{
2423 SBP2_DEBUG_ENTER();
2424
2425 /*
2426 * Sanity
2427 */
2428 if (!SCpnt) { 1958 if (!SCpnt) {
2429 SBP2_ERR("SCpnt is NULL"); 1959 SBP2_ERR("SCpnt is NULL");
2430 return; 1960 return;
2431 } 1961 }
2432 1962
2433 /*
2434 * If a bus reset is in progress and there was an error, don't
2435 * complete the command, just let it get retried at the end of the
2436 * bus reset.
2437 */
2438 if (!hpsb_node_entry_valid(scsi_id->ne)
2439 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2440 SBP2_ERR("Bus reset in progress - retry command later");
2441 return;
2442 }
2443
2444 /*
2445 * Switch on scsi status
2446 */
2447 switch (scsi_status) { 1963 switch (scsi_status) {
2448 case SBP2_SCSI_STATUS_GOOD: 1964 case SBP2_SCSI_STATUS_GOOD:
2449 SCpnt->result = DID_OK << 16; 1965 SCpnt->result = DID_OK << 16;
@@ -2455,12 +1971,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2455 break; 1971 break;
2456 1972
2457 case SBP2_SCSI_STATUS_CHECK_CONDITION: 1973 case SBP2_SCSI_STATUS_CHECK_CONDITION:
2458 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
2459 SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16; 1974 SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
2460#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
2461 scsi_print_command(SCpnt);
2462 scsi_print_sense(SBP2_DEVICE_NAME, SCpnt);
2463#endif
2464 break; 1975 break;
2465 1976
2466 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT: 1977 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
@@ -2482,118 +1993,88 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2482 SCpnt->result = DID_ERROR << 16; 1993 SCpnt->result = DID_ERROR << 16;
2483 } 1994 }
2484 1995
2485 /* 1996 /* If a bus reset is in progress and there was an error, complete
2486 * If a bus reset is in progress and there was an error, complete 1997 * the command as busy so that it will get retried. */
2487 * the command as busy so that it will get retried. 1998 if (!hpsb_node_entry_valid(lu->ne)
2488 */
2489 if (!hpsb_node_entry_valid(scsi_id->ne)
2490 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 1999 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2491 SBP2_ERR("Completing command with busy (bus reset)"); 2000 SBP2_ERR("Completing command with busy (bus reset)");
2492 SCpnt->result = DID_BUS_BUSY << 16; 2001 SCpnt->result = DID_BUS_BUSY << 16;
2493 } 2002 }
2494 2003
2495 /* 2004 /* Tell the SCSI stack that we're done with this command. */
2496 * If a unit attention occurs, return busy status so it gets
2497 * retried... it could have happened because of a 1394 bus reset
2498 * or hot-plug...
2499 * XXX DID_BUS_BUSY is actually a bad idea because it will defy
2500 * the scsi layer's retry logic.
2501 */
2502#if 0
2503 if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
2504 (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
2505 SBP2_DEBUG("UNIT ATTENTION - return busy");
2506 SCpnt->result = DID_BUS_BUSY << 16;
2507 }
2508#endif
2509
2510 /*
2511 * Tell scsi stack that we're done with this command
2512 */
2513 done(SCpnt); 2005 done(SCpnt);
2514} 2006}
2515 2007
2516static int sbp2scsi_slave_alloc(struct scsi_device *sdev) 2008static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2517{ 2009{
2518 struct scsi_id_instance_data *scsi_id = 2010 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2519 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2520 2011
2521 scsi_id->sdev = sdev; 2012 lu->sdev = sdev;
2522 sdev->allow_restart = 1; 2013 sdev->allow_restart = 1;
2523 2014
2524 if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) 2015 if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
2525 sdev->inquiry_len = 36; 2016 sdev->inquiry_len = 36;
2526 return 0; 2017 return 0;
2527} 2018}
2528 2019
2529static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2020static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2530{ 2021{
2531 struct scsi_id_instance_data *scsi_id = 2022 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2532 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2533 2023
2534 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2024 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2535 sdev->use_10_for_rw = 1; 2025 sdev->use_10_for_rw = 1;
2536 2026
2537 if (sdev->type == TYPE_DISK && 2027 if (sdev->type == TYPE_DISK &&
2538 scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 2028 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2539 sdev->skip_ms_page_8 = 1; 2029 sdev->skip_ms_page_8 = 1;
2540 if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) 2030 if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
2541 sdev->fix_capacity = 1; 2031 sdev->fix_capacity = 1;
2542 return 0; 2032 return 0;
2543} 2033}
2544 2034
2545static void sbp2scsi_slave_destroy(struct scsi_device *sdev) 2035static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2546{ 2036{
2547 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; 2037 ((struct sbp2_lu *)sdev->host->hostdata[0])->sdev = NULL;
2548 return; 2038 return;
2549} 2039}
2550 2040
2551/* 2041/*
2552 * Called by scsi stack when something has really gone wrong. Usually 2042 * Called by scsi stack when something has really gone wrong.
2553 * called when a command has timed-out for some reason. 2043 * Usually called when a command has timed-out for some reason.
2554 */ 2044 */
2555static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) 2045static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2556{ 2046{
2557 struct scsi_id_instance_data *scsi_id = 2047 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2558 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2048 struct sbp2_fwhost_info *hi = lu->hi;
2559 struct sbp2scsi_host_info *hi = scsi_id->hi; 2049 struct sbp2_command_info *cmd;
2560 struct sbp2_command_info *command;
2561 unsigned long flags; 2050 unsigned long flags;
2562 2051
2563 SBP2_ERR("aborting sbp2 command"); 2052 SBP2_INFO("aborting sbp2 command");
2564 scsi_print_command(SCpnt); 2053 scsi_print_command(SCpnt);
2565 2054
2566 if (sbp2util_node_is_available(scsi_id)) { 2055 if (sbp2util_node_is_available(lu)) {
2567 2056 sbp2_agent_reset(lu, 1);
2568 /* 2057
2569 * Right now, just return any matching command structures 2058 /* Return a matching command structure to the free pool. */
2570 * to the free pool. 2059 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2571 */ 2060 cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
2572 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 2061 if (cmd) {
2573 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt); 2062 dma_sync_single_for_cpu(&hi->host->device,
2574 if (command) { 2063 cmd->command_orb_dma,
2575 SBP2_DEBUG("Found command to abort"); 2064 sizeof(struct sbp2_command_orb),
2576 pci_dma_sync_single_for_cpu(hi->host->pdev, 2065 DMA_TO_DEVICE);
2577 command->command_orb_dma, 2066 dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
2578 sizeof(struct sbp2_command_orb), 2067 sizeof(cmd->scatter_gather_element),
2579 PCI_DMA_TODEVICE); 2068 DMA_BIDIRECTIONAL);
2580 pci_dma_sync_single_for_cpu(hi->host->pdev, 2069 sbp2util_mark_command_completed(lu, cmd);
2581 command->sge_dma, 2070 if (cmd->Current_SCpnt) {
2582 sizeof(command->scatter_gather_element), 2071 cmd->Current_SCpnt->result = DID_ABORT << 16;
2583 PCI_DMA_BIDIRECTIONAL); 2072 cmd->Current_done(cmd->Current_SCpnt);
2584 sbp2util_mark_command_completed(scsi_id, command);
2585 if (command->Current_SCpnt) {
2586 command->Current_SCpnt->result = DID_ABORT << 16;
2587 command->Current_done(command->Current_SCpnt);
2588 } 2073 }
2589 } 2074 }
2590 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 2075 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2591 2076
2592 /* 2077 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
2593 * Initiate a fetch agent reset.
2594 */
2595 sbp2_agent_reset(scsi_id, 1);
2596 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
2597 } 2078 }
2598 2079
2599 return SUCCESS; 2080 return SUCCESS;
@@ -2604,14 +2085,13 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2604 */ 2085 */
2605static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) 2086static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2606{ 2087{
2607 struct scsi_id_instance_data *scsi_id = 2088 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2608 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2609 2089
2610 SBP2_ERR("reset requested"); 2090 SBP2_INFO("reset requested");
2611 2091
2612 if (sbp2util_node_is_available(scsi_id)) { 2092 if (sbp2util_node_is_available(lu)) {
2613 SBP2_ERR("Generating sbp2 fetch agent reset"); 2093 SBP2_INFO("generating sbp2 fetch agent reset");
2614 sbp2_agent_reset(scsi_id, 1); 2094 sbp2_agent_reset(lu, 1);
2615 } 2095 }
2616 2096
2617 return SUCCESS; 2097 return SUCCESS;
@@ -2622,90 +2102,50 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2622 char *buf) 2102 char *buf)
2623{ 2103{
2624 struct scsi_device *sdev; 2104 struct scsi_device *sdev;
2625 struct scsi_id_instance_data *scsi_id; 2105 struct sbp2_lu *lu;
2626 int lun;
2627 2106
2628 if (!(sdev = to_scsi_device(dev))) 2107 if (!(sdev = to_scsi_device(dev)))
2629 return 0; 2108 return 0;
2630 2109
2631 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0])) 2110 if (!(lu = (struct sbp2_lu *)sdev->host->hostdata[0]))
2632 return 0; 2111 return 0;
2633 2112
2634 lun = ORB_SET_LUN(scsi_id->sbp2_lun); 2113 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)lu->ne->guid,
2635 2114 lu->ud->id, ORB_SET_LUN(lu->lun));
2636 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
2637 scsi_id->ud->id, lun);
2638} 2115}
2639static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
2640
2641static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
2642 &dev_attr_ieee1394_id,
2643 NULL
2644};
2645 2116
2646MODULE_AUTHOR("Ben Collins <bcollins@debian.org>"); 2117MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
2647MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver"); 2118MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
2648MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME); 2119MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
2649MODULE_LICENSE("GPL"); 2120MODULE_LICENSE("GPL");
2650 2121
2651/* SCSI host template */
2652static struct scsi_host_template scsi_driver_template = {
2653 .module = THIS_MODULE,
2654 .name = "SBP-2 IEEE-1394",
2655 .proc_name = SBP2_DEVICE_NAME,
2656 .queuecommand = sbp2scsi_queuecommand,
2657 .eh_abort_handler = sbp2scsi_abort,
2658 .eh_device_reset_handler = sbp2scsi_reset,
2659 .slave_alloc = sbp2scsi_slave_alloc,
2660 .slave_configure = sbp2scsi_slave_configure,
2661 .slave_destroy = sbp2scsi_slave_destroy,
2662 .this_id = -1,
2663 .sg_tablesize = SG_ALL,
2664 .use_clustering = ENABLE_CLUSTERING,
2665 .cmd_per_lun = SBP2_MAX_CMDS,
2666 .can_queue = SBP2_MAX_CMDS,
2667 .emulated = 1,
2668 .sdev_attrs = sbp2_sysfs_sdev_attrs,
2669};
2670
2671static int sbp2_module_init(void) 2122static int sbp2_module_init(void)
2672{ 2123{
2673 int ret; 2124 int ret;
2674 2125
2675 SBP2_DEBUG_ENTER(); 2126 if (sbp2_serialize_io) {
2676 2127 sbp2_shost_template.can_queue = 1;
2677 /* Module load debug option to force one command at a time (serializing I/O) */ 2128 sbp2_shost_template.cmd_per_lun = 1;
2678 if (serialize_io) {
2679 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
2680 SBP2_INFO("Try serialize_io=0 for better performance");
2681 scsi_driver_template.can_queue = 1;
2682 scsi_driver_template.cmd_per_lun = 1;
2683 } 2129 }
2684 2130
2685 if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && 2131 if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
2686 (max_sectors * 512) > (128 * 1024)) 2132 (sbp2_max_sectors * 512) > (128 * 1024))
2687 max_sectors = 128 * 1024 / 512; 2133 sbp2_max_sectors = 128 * 1024 / 512;
2688 scsi_driver_template.max_sectors = max_sectors; 2134 sbp2_shost_template.max_sectors = sbp2_max_sectors;
2689 2135
2690 /* Register our high level driver with 1394 stack */
2691 hpsb_register_highlevel(&sbp2_highlevel); 2136 hpsb_register_highlevel(&sbp2_highlevel);
2692
2693 ret = hpsb_register_protocol(&sbp2_driver); 2137 ret = hpsb_register_protocol(&sbp2_driver);
2694 if (ret) { 2138 if (ret) {
2695 SBP2_ERR("Failed to register protocol"); 2139 SBP2_ERR("Failed to register protocol");
2696 hpsb_unregister_highlevel(&sbp2_highlevel); 2140 hpsb_unregister_highlevel(&sbp2_highlevel);
2697 return ret; 2141 return ret;
2698 } 2142 }
2699
2700 return 0; 2143 return 0;
2701} 2144}
2702 2145
2703static void __exit sbp2_module_exit(void) 2146static void __exit sbp2_module_exit(void)
2704{ 2147{
2705 SBP2_DEBUG_ENTER();
2706
2707 hpsb_unregister_protocol(&sbp2_driver); 2148 hpsb_unregister_protocol(&sbp2_driver);
2708
2709 hpsb_unregister_highlevel(&sbp2_highlevel); 2149 hpsb_unregister_highlevel(&sbp2_highlevel);
2710} 2150}
2711 2151
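Throughout sbp2.c the patch replaces the pci_dma_sync_single_* calls with the generic DMA API. The underlying ownership handoff stays the same: sync for the device before the controller may read a streaming mapping, sync for the CPU before the driver looks at it again. A minimal sketch of that pattern, where dev and orb_dma stand in for the host's struct device and an ORB's bus address:

	/* CPU has finished writing the ORB: pass ownership to the device */
	dma_sync_single_for_device(dev, orb_dma,
				   sizeof(struct sbp2_command_orb),
				   DMA_TO_DEVICE);
	/* ... point the target's fetch agent at orb_dma, or ring the doorbell ... */

	/* Status came back: reclaim ownership before the CPU reads or
	 * updates the ORB (e.g. its next_ORB pointer) */
	dma_sync_single_for_cpu(dev, orb_dma,
				sizeof(struct sbp2_command_orb),
				DMA_TO_DEVICE);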
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 1b16d6b9cf11..9ae842329bf3 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -25,25 +25,25 @@
25#define SBP2_DEVICE_NAME "sbp2" 25#define SBP2_DEVICE_NAME "sbp2"
26 26
27/* 27/*
28 * SBP2 specific structures and defines 28 * SBP-2 specific definitions
29 */ 29 */
30 30
31#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0 31#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
32#define ORB_DIRECTION_READ_FROM_MEDIA 0x1 32#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
33#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2 33#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
34 34
35#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31) 35#define ORB_SET_NULL_PTR(v) (((v) & 0x1) << 31)
36#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31) 36#define ORB_SET_NOTIFY(v) (((v) & 0x1) << 31)
37#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */ 37#define ORB_SET_RQ_FMT(v) (((v) & 0x3) << 29)
38#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16) 38#define ORB_SET_NODE_ID(v) (((v) & 0xffff) << 16)
39#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id)) 39#define ORB_SET_STATUS_FIFO_HI(v, id) ((v) >> 32 | ORB_SET_NODE_ID(id))
40#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff) 40#define ORB_SET_STATUS_FIFO_LO(v) ((v) & 0xffffffff)
41#define ORB_SET_DATA_SIZE(value) (value & 0xffff) 41#define ORB_SET_DATA_SIZE(v) ((v) & 0xffff)
42#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16) 42#define ORB_SET_PAGE_SIZE(v) (((v) & 0x7) << 16)
43#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19) 43#define ORB_SET_PAGE_TABLE_PRESENT(v) (((v) & 0x1) << 19)
44#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20) 44#define ORB_SET_MAX_PAYLOAD(v) (((v) & 0xf) << 20)
45#define ORB_SET_SPEED(value) ((value & 0x7) << 24) 45#define ORB_SET_SPEED(v) (((v) & 0x7) << 24)
46#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27) 46#define ORB_SET_DIRECTION(v) (((v) & 0x1) << 27)
47 47
48struct sbp2_command_orb { 48struct sbp2_command_orb {
49 u32 next_ORB_hi; 49 u32 next_ORB_hi;
@@ -64,12 +64,12 @@ struct sbp2_command_orb {
64#define SBP2_LOGICAL_UNIT_RESET 0xe 64#define SBP2_LOGICAL_UNIT_RESET 0xe
65#define SBP2_TARGET_RESET_REQUEST 0xf 65#define SBP2_TARGET_RESET_REQUEST 0xf
66 66
67#define ORB_SET_LUN(value) (value & 0xffff) 67#define ORB_SET_LUN(v) ((v) & 0xffff)
68#define ORB_SET_FUNCTION(value) ((value & 0xf) << 16) 68#define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16)
69#define ORB_SET_RECONNECT(value) ((value & 0xf) << 20) 69#define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20)
70#define ORB_SET_EXCLUSIVE(value) ((value & 0x1) << 28) 70#define ORB_SET_EXCLUSIVE(v) (((v) & 0x1) << 28)
71#define ORB_SET_LOGIN_RESP_LENGTH(value) (value & 0xffff) 71#define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff)
72#define ORB_SET_PASSWD_LENGTH(value) ((value & 0xffff) << 16) 72#define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16)
73 73
74struct sbp2_login_orb { 74struct sbp2_login_orb {
75 u32 password_hi; 75 u32 password_hi;
@@ -82,9 +82,9 @@ struct sbp2_login_orb {
82 u32 status_fifo_lo; 82 u32 status_fifo_lo;
83} __attribute__((packed)); 83} __attribute__((packed));
84 84
85#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff) 85#define RESPONSE_GET_LOGIN_ID(v) ((v) & 0xffff)
86#define RESPONSE_GET_LENGTH(value) ((value >> 16) & 0xffff) 86#define RESPONSE_GET_LENGTH(v) (((v) >> 16) & 0xffff)
87#define RESPONSE_GET_RECONNECT_HOLD(value) (value & 0xffff) 87#define RESPONSE_GET_RECONNECT_HOLD(v) ((v) & 0xffff)
88 88
89struct sbp2_login_response { 89struct sbp2_login_response {
90 u32 length_login_ID; 90 u32 length_login_ID;
@@ -93,9 +93,8 @@ struct sbp2_login_response {
93 u32 reconnect_hold; 93 u32 reconnect_hold;
94} __attribute__((packed)); 94} __attribute__((packed));
95 95
96#define ORB_SET_LOGIN_ID(value) (value & 0xffff) 96#define ORB_SET_LOGIN_ID(v) ((v) & 0xffff)
97 97#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(v) ((v) & 0xffff)
98#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(value) (value & 0xffff)
99 98
100struct sbp2_query_logins_orb { 99struct sbp2_query_logins_orb {
101 u32 reserved1; 100 u32 reserved1;
@@ -108,8 +107,8 @@ struct sbp2_query_logins_orb {
108 u32 status_fifo_lo; 107 u32 status_fifo_lo;
109} __attribute__((packed)); 108} __attribute__((packed));
110 109
111#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff) 110#define RESPONSE_GET_MAX_LOGINS(v) ((v) & 0xffff)
112#define RESPONSE_GET_ACTIVE_LOGINS(value) ((RESPONSE_GET_LENGTH(value) - 4) / 12) 111#define RESPONSE_GET_ACTIVE_LOGINS(v) ((RESPONSE_GET_LENGTH((v)) - 4) / 12)
113 112
114struct sbp2_query_logins_response { 113struct sbp2_query_logins_response {
115 u32 length_max_logins; 114 u32 length_max_logins;
@@ -140,8 +139,8 @@ struct sbp2_logout_orb {
140 u32 status_fifo_lo; 139 u32 status_fifo_lo;
141} __attribute__((packed)); 140} __attribute__((packed));
142 141
143#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff) 142#define PAGE_TABLE_SET_SEGMENT_BASE_HI(v) ((v) & 0xffff)
144#define PAGE_TABLE_SET_SEGMENT_LENGTH(value) ((value & 0xffff) << 16) 143#define PAGE_TABLE_SET_SEGMENT_LENGTH(v) (((v) & 0xffff) << 16)
145 144
146struct sbp2_unrestricted_page_table { 145struct sbp2_unrestricted_page_table {
147 u32 length_segment_base_hi; 146 u32 length_segment_base_hi;
@@ -171,23 +170,14 @@ struct sbp2_unrestricted_page_table {
171#define SFMT_DEFERRED_ERROR 0x1 170#define SFMT_DEFERRED_ERROR 0x1
172#define SFMT_VENDOR_DEPENDENT_STATUS 0x3 171#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
173 172
174#define SBP2_SCSI_STATUS_GOOD 0x0 173#define STATUS_GET_SRC(v) (((v) >> 30) & 0x3)
175#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2 174#define STATUS_GET_RESP(v) (((v) >> 28) & 0x3)
176#define SBP2_SCSI_STATUS_CONDITION_MET 0x4 175#define STATUS_GET_LEN(v) (((v) >> 24) & 0x7)
177#define SBP2_SCSI_STATUS_BUSY 0x8 176#define STATUS_GET_SBP_STATUS(v) (((v) >> 16) & 0xff)
178#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18 177#define STATUS_GET_ORB_OFFSET_HI(v) ((v) & 0x0000ffff)
179#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22 178#define STATUS_TEST_DEAD(v) ((v) & 0x08000000)
180
181#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
182
183#define STATUS_GET_SRC(value) (((value) >> 30) & 0x3)
184#define STATUS_GET_RESP(value) (((value) >> 28) & 0x3)
185#define STATUS_GET_LEN(value) (((value) >> 24) & 0x7)
186#define STATUS_GET_SBP_STATUS(value) (((value) >> 16) & 0xff)
187#define STATUS_GET_ORB_OFFSET_HI(value) ((value) & 0x0000ffff)
188#define STATUS_TEST_DEAD(value) ((value) & 0x08000000)
189/* test 'resp' | 'dead' | 'sbp2_status' */ 179/* test 'resp' | 'dead' | 'sbp2_status' */
190#define STATUS_TEST_RDS(value) ((value) & 0x38ff0000) 180#define STATUS_TEST_RDS(v) ((v) & 0x38ff0000)
191 181
192struct sbp2_status_block { 182struct sbp2_status_block {
193 u32 ORB_offset_hi_misc; 183 u32 ORB_offset_hi_misc;
@@ -195,66 +185,70 @@ struct sbp2_status_block {
195 u8 command_set_dependent[24]; 185 u8 command_set_dependent[24];
196} __attribute__((packed)); 186} __attribute__((packed));
197 187
188
198/* 189/*
199 * Miscellaneous SBP2 related config rom defines 190 * SBP2 related configuration ROM definitions
200 */ 191 */
201 192
202#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1 193#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
203#define SBP2_CSR_OFFSET_KEY 0x54 194#define SBP2_CSR_OFFSET_KEY 0x54
204#define SBP2_UNIT_SPEC_ID_KEY 0x12 195#define SBP2_UNIT_SPEC_ID_KEY 0x12
205#define SBP2_UNIT_SW_VERSION_KEY 0x13 196#define SBP2_UNIT_SW_VERSION_KEY 0x13
206#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38 197#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
207#define SBP2_COMMAND_SET_KEY 0x39 198#define SBP2_COMMAND_SET_KEY 0x39
208#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a 199#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
209#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14 200#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
210#define SBP2_FIRMWARE_REVISION_KEY 0x3c 201#define SBP2_FIRMWARE_REVISION_KEY 0x3c
211 202
212#define SBP2_AGENT_STATE_OFFSET 0x00ULL 203#define SBP2_AGENT_STATE_OFFSET 0x00ULL
213#define SBP2_AGENT_RESET_OFFSET 0x04ULL 204#define SBP2_AGENT_RESET_OFFSET 0x04ULL
214#define SBP2_ORB_POINTER_OFFSET 0x08ULL 205#define SBP2_ORB_POINTER_OFFSET 0x08ULL
215#define SBP2_DOORBELL_OFFSET 0x10ULL 206#define SBP2_DOORBELL_OFFSET 0x10ULL
216#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL 207#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
217#define SBP2_UNSOLICITED_STATUS_VALUE 0xf 208#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
218 209
219#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL 210#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
220#define SBP2_BUSY_TIMEOUT_VALUE 0xf 211/* biggest possible value for Single Phase Retry count is 0xf */
212#define SBP2_BUSY_TIMEOUT_VALUE 0xf
221 213
222#define SBP2_AGENT_RESET_DATA 0xf 214#define SBP2_AGENT_RESET_DATA 0xf
223 215
224/* 216#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
225 * Unit spec id and sw version entry for SBP-2 devices 217#define SBP2_SW_VERSION_ENTRY 0x00010483
226 */
227 218
228#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
229#define SBP2_SW_VERSION_ENTRY 0x00010483
230 219
231/* 220/*
232 * SCSI specific stuff 221 * SCSI specific definitions
233 */ 222 */
234 223
235#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 224#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
236#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 225#define SBP2_MAX_SECTORS 255
237#define SBP2_MAX_CMDS 8 /* This should be safe */ 226/* There is no real limitation of the queue depth (i.e. length of the linked
227 * list of command ORBs) at the target. The chosen depth is merely an
228 * implementation detail of the sbp2 driver. */
229#define SBP2_MAX_CMDS 8
230
231#define SBP2_SCSI_STATUS_GOOD 0x0
232#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
233#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
234#define SBP2_SCSI_STATUS_BUSY 0x8
235#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
236#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
237#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
238 238
239/* Flags for detected oddities and brokeness */
240#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
241#define SBP2_WORKAROUND_INQUIRY_36 0x2
242#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
243#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
244#define SBP2_WORKAROUND_OVERRIDE 0x100
245 239
246/* This is the two dma types we use for cmd_dma below */ 240/*
247enum cmd_dma_types { 241 * Representations of commands and devices
242 */
243
244enum sbp2_dma_types {
248 CMD_DMA_NONE, 245 CMD_DMA_NONE,
249 CMD_DMA_PAGE, 246 CMD_DMA_PAGE,
250 CMD_DMA_SINGLE 247 CMD_DMA_SINGLE
251}; 248};
252 249
253/* 250/* Per SCSI command */
254 * Encapsulates all the info necessary for an outstanding command.
255 */
256struct sbp2_command_info { 251struct sbp2_command_info {
257
258 struct list_head list; 252 struct list_head list;
259 struct sbp2_command_orb command_orb ____cacheline_aligned; 253 struct sbp2_command_orb command_orb ____cacheline_aligned;
260 dma_addr_t command_orb_dma ____cacheline_aligned; 254 dma_addr_t command_orb_dma ____cacheline_aligned;
@@ -262,25 +256,25 @@ struct sbp2_command_info {
262 void (*Current_done)(struct scsi_cmnd *); 256 void (*Current_done)(struct scsi_cmnd *);
263 257
264 /* Also need s/g structure for each sbp2 command */ 258 /* Also need s/g structure for each sbp2 command */
265 struct sbp2_unrestricted_page_table scatter_gather_element[SG_ALL] ____cacheline_aligned; 259 struct sbp2_unrestricted_page_table
260 scatter_gather_element[SG_ALL] ____cacheline_aligned;
266 dma_addr_t sge_dma ____cacheline_aligned; 261 dma_addr_t sge_dma ____cacheline_aligned;
267 void *sge_buffer; 262 void *sge_buffer;
268 dma_addr_t cmd_dma; 263 dma_addr_t cmd_dma;
269 enum cmd_dma_types dma_type; 264 enum sbp2_dma_types dma_type;
270 unsigned long dma_size; 265 unsigned long dma_size;
271 int dma_dir; 266 enum dma_data_direction dma_dir;
272
273}; 267};
274 268
275struct sbp2scsi_host_info; 269/* Per FireWire host */
270struct sbp2_fwhost_info {
271 struct hpsb_host *host;
272 struct list_head logical_units;
273};
276 274
277/* 275/* Per logical unit */
278 * Information needed on a per scsi id basis (one for each sbp2 device) 276struct sbp2_lu {
279 */ 277 /* Operation request blocks */
280struct scsi_id_instance_data {
281 /*
282 * Various sbp2 specific structures
283 */
284 struct sbp2_command_orb *last_orb; 278 struct sbp2_command_orb *last_orb;
285 dma_addr_t last_orb_dma; 279 dma_addr_t last_orb_dma;
286 struct sbp2_login_orb *login_orb; 280 struct sbp2_login_orb *login_orb;
@@ -297,116 +291,59 @@ struct scsi_id_instance_data {
297 dma_addr_t logout_orb_dma; 291 dma_addr_t logout_orb_dma;
298 struct sbp2_status_block status_block; 292 struct sbp2_status_block status_block;
299 293
300 /* 294 /* How to talk to the unit */
301 * Stuff we need to know about the sbp2 device itself 295 u64 management_agent_addr;
302 */ 296 u64 command_block_agent_addr;
303 u64 sbp2_management_agent_addr;
304 u64 sbp2_command_block_agent_addr;
305 u32 speed_code; 297 u32 speed_code;
306 u32 max_payload_size; 298 u32 max_payload_size;
299 u16 lun;
307 300
308 /* 301 /* Address for the unit to write status blocks to */
309 * Values pulled from the device's unit directory
310 */
311 u32 sbp2_command_set_spec_id;
312 u32 sbp2_command_set;
313 u32 sbp2_unit_characteristics;
314 u32 sbp2_lun;
315 u32 sbp2_firmware_revision;
316
317 /*
318 * Address for the device to write status blocks to
319 */
320 u64 status_fifo_addr; 302 u64 status_fifo_addr;
321 303
322 /* 304 /* Waitqueue flag for logins, reconnects, logouts, query logins */
323 * Waitqueue flag for logins, reconnects, logouts, query logins 305 unsigned int access_complete:1;
324 */
325 int access_complete:1;
326 306
327 /* 307 /* Pool of command ORBs for this logical unit */
328 * Pool of command orbs, so we can have more than overlapped command per id 308 spinlock_t cmd_orb_lock;
329 */ 309 struct list_head cmd_orb_inuse;
330 spinlock_t sbp2_command_orb_lock; 310 struct list_head cmd_orb_completed;
331 struct list_head sbp2_command_orb_inuse;
332 struct list_head sbp2_command_orb_completed;
333 311
334 struct list_head scsi_list; 312 /* Backlink to FireWire host; list of units attached to the host */
313 struct sbp2_fwhost_info *hi;
314 struct list_head lu_list;
335 315
336 /* Node entry, as retrieved from NodeMgr entries */ 316 /* IEEE 1394 core's device representations */
337 struct node_entry *ne; 317 struct node_entry *ne;
338 struct unit_directory *ud; 318 struct unit_directory *ud;
339 319
340 /* A backlink to our host_info */ 320 /* SCSI core's device representations */
341 struct sbp2scsi_host_info *hi;
342
343 /* SCSI related pointers */
344 struct scsi_device *sdev; 321 struct scsi_device *sdev;
345 struct Scsi_Host *scsi_host; 322 struct Scsi_Host *shost;
346 323
347 /* Device specific workarounds/brokeness */ 324 /* Device specific workarounds/brokeness */
348 unsigned workarounds; 325 unsigned workarounds;
349 326
327 /* Connection state */
350 atomic_t state; 328 atomic_t state;
351 struct delayed_work protocol_work; 329
330 /* For deferred requests to the fetch agent */
331 struct work_struct protocol_work;
352}; 332};
353 333
354/* For use in scsi_id_instance_data.state */ 334/* For use in sbp2_lu.state */
355enum sbp2lu_state_types { 335enum sbp2lu_state_types {
356 SBP2LU_STATE_RUNNING, /* all normal */ 336 SBP2LU_STATE_RUNNING, /* all normal */
357 SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */ 337 SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */
358 SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */ 338 SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */
359}; 339};
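The connection state lives in an atomic_t so it can be inspected without taking a lock. A minimal sketch of how a request path might react to the three states above (helper name and return codes are illustrative assumptions):

/* Sketch: decide whether a new request may proceed right now. */
static int sbp2util_check_state(struct sbp2_lu *lu)
{
	switch (atomic_read(&lu->state)) {
	case SBP2LU_STATE_RUNNING:	/* all normal */
		return 0;
	case SBP2LU_STATE_IN_RESET:	/* wait until reconnect completes */
		return -EAGAIN;
	default:			/* SBP2LU_STATE_IN_SHUTDOWN */
		return -ENODEV;
	}
}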
360 340
361/* Sbp2 host data structure (one per IEEE1394 host) */ 341/* For use in sbp2_lu.workarounds and in the corresponding
362struct sbp2scsi_host_info { 342 * module load parameter */
363 struct hpsb_host *host; /* IEEE1394 host */ 343#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
364 struct list_head scsi_ids; /* List of scsi ids on this host */ 344#define SBP2_WORKAROUND_INQUIRY_36 0x2
365}; 345#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
366 346#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
367/* 347#define SBP2_WORKAROUND_OVERRIDE 0x100
368 * Function prototypes
369 */
370
371/*
372 * Various utility prototypes
373 */
374static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id);
375static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id);
376static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
377static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
378static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
379 struct scsi_cmnd *Current_SCpnt,
380 void (*Current_done)(struct scsi_cmnd *));
381static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
382 struct sbp2_command_info *command);
383
384
385static int sbp2_start_device(struct scsi_id_instance_data *scsi_id);
386static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id);
387
388#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
389static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
390 u64 addr, size_t length, u16 flags);
391static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
392 u64 addr, size_t length, u16 flags);
393#endif
394
395/*
396 * SBP-2 protocol related prototypes
397 */
398static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id);
399static int sbp2_login_device(struct scsi_id_instance_data *scsi_id);
400static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
401static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
402static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
403 quadlet_t *data, u64 addr, size_t length, u16 flags);
404static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
405static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
406 unchar *sense_data);
407static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
408 struct unit_directory *ud);
409static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id);
410static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id);
411 348
412#endif /* SBP2_H */ 349#endif /* SBP2_H */
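The comment above says the SBP2_WORKAROUND_* bits serve both sbp2_lu.workarounds and the corresponding module load parameter. A plausible way to combine the two (the function name, parameter names, and the exact OVERRIDE semantics are assumptions for illustration only):

/* Sketch: merge per-device quirks with a module-wide setting. */
static unsigned sbp2_effective_workarounds(unsigned from_blacklist,
					   unsigned from_module_param)
{
	if (from_module_param & SBP2_WORKAROUND_OVERRIDE)
		return from_module_param;	/* parameter replaces quirk table */
	return from_blacklist | from_module_param;
}

The result would then be kept in sbp2_lu.workarounds and consulted when building commands, e.g. capping transfer size when SBP2_WORKAROUND_128K_MAX_TRANS is set.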
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 9bc65059cc69..598b19fc5989 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -714,8 +714,8 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
714 return ret; 714 return ret;
715} 715}
716 716
717static int __video1394_ioctl(struct file *file, 717static long video1394_ioctl(struct file *file,
718 unsigned int cmd, unsigned long arg) 718 unsigned int cmd, unsigned long arg)
719{ 719{
720 struct file_ctx *ctx = (struct file_ctx *)file->private_data; 720 struct file_ctx *ctx = (struct file_ctx *)file->private_data;
721 struct ti_ohci *ohci = ctx->ohci; 721 struct ti_ohci *ohci = ctx->ohci;
@@ -884,13 +884,14 @@ static int __video1394_ioctl(struct file *file,
884 struct dma_iso_ctx *d; 884 struct dma_iso_ctx *d;
885 int next_prg; 885 int next_prg;
886 886
887 if (copy_from_user(&v, argp, sizeof(v))) 887 if (unlikely(copy_from_user(&v, argp, sizeof(v))))
888 return -EFAULT; 888 return -EFAULT;
889 889
890 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel); 890 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
891 if (d == NULL) return -EFAULT; 891 if (unlikely(d == NULL))
892 return -EFAULT;
892 893
893 if ((v.buffer<0) || (v.buffer>=d->num_desc - 1)) { 894 if (unlikely((v.buffer<0) || (v.buffer>=d->num_desc - 1))) {
894 PRINT(KERN_ERR, ohci->host->id, 895 PRINT(KERN_ERR, ohci->host->id,
895 "Buffer %d out of range",v.buffer); 896 "Buffer %d out of range",v.buffer);
896 return -EINVAL; 897 return -EINVAL;
@@ -898,7 +899,7 @@ static int __video1394_ioctl(struct file *file,
898 899
899 spin_lock_irqsave(&d->lock,flags); 900 spin_lock_irqsave(&d->lock,flags);
900 901
901 if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) { 902 if (unlikely(d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED)) {
902 PRINT(KERN_ERR, ohci->host->id, 903 PRINT(KERN_ERR, ohci->host->id,
903 "Buffer %d is already used",v.buffer); 904 "Buffer %d is already used",v.buffer);
904 spin_unlock_irqrestore(&d->lock,flags); 905 spin_unlock_irqrestore(&d->lock,flags);
@@ -949,13 +950,14 @@ static int __video1394_ioctl(struct file *file,
949 struct dma_iso_ctx *d; 950 struct dma_iso_ctx *d;
950 int i = 0; 951 int i = 0;
951 952
952 if (copy_from_user(&v, argp, sizeof(v))) 953 if (unlikely(copy_from_user(&v, argp, sizeof(v))))
953 return -EFAULT; 954 return -EFAULT;
954 955
955 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel); 956 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
956 if (d == NULL) return -EFAULT; 957 if (unlikely(d == NULL))
958 return -EFAULT;
957 959
958 if ((v.buffer<0) || (v.buffer>d->num_desc - 1)) { 960 if (unlikely((v.buffer<0) || (v.buffer>d->num_desc - 1))) {
959 PRINT(KERN_ERR, ohci->host->id, 961 PRINT(KERN_ERR, ohci->host->id,
960 "Buffer %d out of range",v.buffer); 962 "Buffer %d out of range",v.buffer);
961 return -EINVAL; 963 return -EINVAL;
@@ -1008,7 +1010,7 @@ static int __video1394_ioctl(struct file *file,
1008 spin_unlock_irqrestore(&d->lock, flags); 1010 spin_unlock_irqrestore(&d->lock, flags);
1009 1011
1010 v.buffer=i; 1012 v.buffer=i;
1011 if (copy_to_user(argp, &v, sizeof(v))) 1013 if (unlikely(copy_to_user(argp, &v, sizeof(v))))
1012 return -EFAULT; 1014 return -EFAULT;
1013 1015
1014 return 0; 1016 return 0;
@@ -1156,15 +1158,6 @@ static int __video1394_ioctl(struct file *file,
1156 } 1158 }
1157} 1159}
1158 1160
1159static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1160{
1161 int err;
1162 lock_kernel();
1163 err = __video1394_ioctl(file, cmd, arg);
1164 unlock_kernel();
1165 return err;
1166}
1167
1168/* 1161/*
1169 * This maps the vmalloced and reserved buffer to user space. 1162 * This maps the vmalloced and reserved buffer to user space.
1170 * 1163 *
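With the lock_kernel()/unlock_kernel() wrapper removed, video1394_ioctl is wired up directly as the unlocked_ioctl method. A sketch of the corresponding file_operations table (the open, release and compat entries are assumptions about the rest of the driver, included only to make the snippet complete):

/* Sketch: BKL-free entry points for the character device. */
static struct file_operations video1394_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video1394_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= video1394_compat_ioctl,
#endif
	.poll		= video1394_poll,
	.mmap		= video1394_mmap,
	.open		= video1394_open,
	.release	= video1394_release,
};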
@@ -1177,17 +1170,14 @@ static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long a
1177static int video1394_mmap(struct file *file, struct vm_area_struct *vma) 1170static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
1178{ 1171{
1179 struct file_ctx *ctx = (struct file_ctx *)file->private_data; 1172 struct file_ctx *ctx = (struct file_ctx *)file->private_data;
1180 int res = -EINVAL;
1181 1173
1182 lock_kernel();
1183 if (ctx->current_ctx == NULL) { 1174 if (ctx->current_ctx == NULL) {
1184 PRINT(KERN_ERR, ctx->ohci->host->id, 1175 PRINT(KERN_ERR, ctx->ohci->host->id,
1185 "Current iso context not set"); 1176 "Current iso context not set");
1186 } else 1177 return -EINVAL;
1187 res = dma_region_mmap(&ctx->current_ctx->dma, file, vma); 1178 }
1188 unlock_kernel();
1189 1179
1190 return res; 1180 return dma_region_mmap(&ctx->current_ctx->dma, file, vma);
1191} 1181}
1192 1182
1193static unsigned int video1394_poll(struct file *file, poll_table *pt) 1183static unsigned int video1394_poll(struct file *file, poll_table *pt)
@@ -1198,14 +1188,12 @@ static unsigned int video1394_poll(struct file *file, poll_table *pt)
1198 struct dma_iso_ctx *d; 1188 struct dma_iso_ctx *d;
1199 int i; 1189 int i;
1200 1190
1201 lock_kernel();
1202 ctx = file->private_data; 1191 ctx = file->private_data;
1203 d = ctx->current_ctx; 1192 d = ctx->current_ctx;
1204 if (d == NULL) { 1193 if (d == NULL) {
1205 PRINT(KERN_ERR, ctx->ohci->host->id, 1194 PRINT(KERN_ERR, ctx->ohci->host->id,
1206 "Current iso context not set"); 1195 "Current iso context not set");
1207 mask = POLLERR; 1196 return POLLERR;
1208 goto done;
1209 } 1197 }
1210 1198
1211 poll_wait(file, &d->waitq, pt); 1199 poll_wait(file, &d->waitq, pt);
@@ -1218,8 +1206,6 @@ static unsigned int video1394_poll(struct file *file, poll_table *pt)
1218 } 1206 }
1219 } 1207 }
1220 spin_unlock_irqrestore(&d->lock, flags); 1208 spin_unlock_irqrestore(&d->lock, flags);
1221done:
1222 unlock_kernel();
1223 1209
1224 return mask; 1210 return mask;
1225} 1211}
@@ -1255,7 +1241,6 @@ static int video1394_release(struct inode *inode, struct file *file)
1255 struct list_head *lh, *next; 1241 struct list_head *lh, *next;
1256 u64 mask; 1242 u64 mask;
1257 1243
1258 lock_kernel();
1259 list_for_each_safe(lh, next, &ctx->context_list) { 1244 list_for_each_safe(lh, next, &ctx->context_list) {
1260 struct dma_iso_ctx *d; 1245 struct dma_iso_ctx *d;
1261 d = list_entry(lh, struct dma_iso_ctx, link); 1246 d = list_entry(lh, struct dma_iso_ctx, link);
@@ -1276,7 +1261,6 @@ static int video1394_release(struct inode *inode, struct file *file)
1276 kfree(ctx); 1261 kfree(ctx);
1277 file->private_data = NULL; 1262 file->private_data = NULL;
1278 1263
1279 unlock_kernel();
1280 return 0; 1264 return 0;
1281} 1265}
1282 1266
@@ -1324,12 +1308,8 @@ static struct ieee1394_device_id video1394_id_table[] = {
1324MODULE_DEVICE_TABLE(ieee1394, video1394_id_table); 1308MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
1325 1309
1326static struct hpsb_protocol_driver video1394_driver = { 1310static struct hpsb_protocol_driver video1394_driver = {
1327 .name = "1394 Digital Camera Driver", 1311 .name = VIDEO1394_DRIVER_NAME,
1328 .id_table = video1394_id_table, 1312 .id_table = video1394_id_table,
1329 .driver = {
1330 .name = VIDEO1394_DRIVER_NAME,
1331 .bus = &ieee1394_bus_type,
1332 },
1333}; 1313};
1334 1314
1335 1315
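The embedded struct device_driver initialisation (.name, .bus) is gone, leaving only the protocol name and ID table; presumably the ieee1394 core now fills in the bus binding at registration time. A sketch of the registration path under that assumption (error handling trimmed; hpsb_register_protocol() is assumed to be the entry point used elsewhere in the stack):

/* Sketch: register the slimmed-down protocol driver at module init. */
static int __init video1394_init_module(void)
{
	int ret;

	ret = hpsb_register_protocol(&video1394_driver);
	if (ret)
		printk(KERN_ERR "video1394: failed to register protocol driver\n");
	return ret;
}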