Diffstat (limited to 'drivers/firewire')
 -rw-r--r--  drivers/firewire/Kconfig           |  50
 -rw-r--r--  drivers/firewire/fw-card.c         |  61
 -rw-r--r--  drivers/firewire/fw-cdev.c         |  17
 -rw-r--r--  drivers/firewire/fw-device.c       |  69
 -rw-r--r--  drivers/firewire/fw-device.h       |  18
 -rw-r--r--  drivers/firewire/fw-ohci.c         | 108
 -rw-r--r--  drivers/firewire/fw-sbp2.c         | 428
 -rw-r--r--  drivers/firewire/fw-topology.c     |   4
 -rw-r--r--  drivers/firewire/fw-transaction.c  |   2
 -rw-r--r--  drivers/firewire/fw-transaction.h  |   8
10 files changed, 551 insertions, 214 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index fe9e768cfbc4..25bdc2dd9ce1 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,5 +1,3 @@ | |||
1 | # -*- shell-script -*- | ||
2 | |||
3 | comment "An alternative FireWire stack is available with EXPERIMENTAL=y" | 1 | comment "An alternative FireWire stack is available with EXPERIMENTAL=y" |
4 | depends on EXPERIMENTAL=n | 2 | depends on EXPERIMENTAL=n |
5 | 3 | ||
@@ -21,27 +19,7 @@ config FIREWIRE | |||
21 | NOTE: | 19 | NOTE: |
22 | 20 | ||
23 | You should only build ONE of the stacks, unless you REALLY know what | 21 | You should only build ONE of the stacks, unless you REALLY know what |
24 | you are doing. If you install both, you should configure them only as | 22 | you are doing. |
25 | modules rather than link them statically, and you should blacklist one | ||
26 | of the concurrent low-level drivers in /etc/modprobe.conf. Add either | ||
27 | |||
28 | blacklist firewire-ohci | ||
29 | or | ||
30 | blacklist ohci1394 | ||
31 | |||
32 | there depending on which driver you DON'T want to have auto-loaded. | ||
33 | You can optionally do the same with the other IEEE 1394/ FireWire | ||
34 | drivers. | ||
35 | |||
36 | If you have an old modprobe which doesn't implement the blacklist | ||
37 | directive, use either | ||
38 | |||
39 | install firewire-ohci /bin/true | ||
40 | or | ||
41 | install ohci1394 /bin/true | ||
42 | |||
43 | and so on, depending on which modules you DON't want to have | ||
44 | auto-loaded. | ||
45 | 23 | ||
46 | config FIREWIRE_OHCI | 24 | config FIREWIRE_OHCI |
47 | tristate "Support for OHCI FireWire host controllers" | 25 | tristate "Support for OHCI FireWire host controllers" |
@@ -57,8 +35,24 @@ config FIREWIRE_OHCI | |||
57 | 35 | ||
58 | NOTE: | 36 | NOTE: |
59 | 37 | ||
60 | If you also build ohci1394 of the classic stack, blacklist either | 38 | You should only build ohci1394 or firewire-ohci, but not both. |
61 | ohci1394 or firewire-ohci to let hotplug load only the desired driver. | 39 | If you nevertheless want to install both, you should configure them |
40 | only as modules and blacklist the driver(s) which you don't want to | ||
41 | have auto-loaded. Add either | ||
42 | |||
43 | blacklist firewire-ohci | ||
44 | or | ||
45 | blacklist ohci1394 | ||
46 | blacklist video1394 | ||
47 | blacklist dv1394 | ||
48 | |||
49 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | ||
50 | depending on your distribution. The latter two modules should be | ||
51 | blacklisted together with ohci1394 because they depend on ohci1394. | ||
52 | |||
53 | If you have an old modprobe which doesn't implement the blacklist | ||
54 | directive, use "install modulename /bin/true" for the modules to be | ||
55 | blacklisted. | ||
62 | 56 | ||
63 | config FIREWIRE_SBP2 | 57 | config FIREWIRE_SBP2 |
64 | tristate "Support for storage devices (SBP-2 protocol driver)" | 58 | tristate "Support for storage devices (SBP-2 protocol driver)" |
@@ -75,9 +69,3 @@ config FIREWIRE_SBP2 | |||
75 | 69 | ||
76 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | 70 | You should also enable support for disks, CD-ROMs, etc. in the SCSI |
77 | configuration section. | 71 | configuration section. |
78 | |||
79 | NOTE: | ||
80 | |||
81 | If you also build sbp2 of the classic stack, blacklist either sbp2 | ||
82 | or firewire-sbp2 to let hotplug load only the desired driver. | ||
83 | |||
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 3e9719948a8e..a03462750b95 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/crc-itu-t.h> | 24 | #include <linux/crc-itu-t.h> |
@@ -214,17 +215,29 @@ static void | |||
214 | fw_card_bm_work(struct work_struct *work) | 215 | fw_card_bm_work(struct work_struct *work) |
215 | { | 216 | { |
216 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 217 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
217 | struct fw_device *root; | 218 | struct fw_device *root_device; |
219 | struct fw_node *root_node, *local_node; | ||
218 | struct bm_data bmd; | 220 | struct bm_data bmd; |
219 | unsigned long flags; | 221 | unsigned long flags; |
220 | int root_id, new_root_id, irm_id, gap_count, generation, grace; | 222 | int root_id, new_root_id, irm_id, gap_count, generation, grace; |
221 | int do_reset = 0; | 223 | int do_reset = 0; |
222 | 224 | ||
223 | spin_lock_irqsave(&card->lock, flags); | 225 | spin_lock_irqsave(&card->lock, flags); |
226 | local_node = card->local_node; | ||
227 | root_node = card->root_node; | ||
228 | |||
229 | if (local_node == NULL) { | ||
230 | spin_unlock_irqrestore(&card->lock, flags); | ||
231 | return; | ||
232 | } | ||
233 | fw_node_get(local_node); | ||
234 | fw_node_get(root_node); | ||
224 | 235 | ||
225 | generation = card->generation; | 236 | generation = card->generation; |
226 | root = card->root_node->data; | 237 | root_device = root_node->data; |
227 | root_id = card->root_node->node_id; | 238 | if (root_device) |
239 | fw_device_get(root_device); | ||
240 | root_id = root_node->node_id; | ||
228 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); | 241 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); |
229 | 242 | ||
230 | if (card->bm_generation + 1 == generation || | 243 | if (card->bm_generation + 1 == generation || |
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) | |||
243 | 256 | ||
244 | irm_id = card->irm_node->node_id; | 257 | irm_id = card->irm_node->node_id; |
245 | if (!card->irm_node->link_on) { | 258 | if (!card->irm_node->link_on) { |
246 | new_root_id = card->local_node->node_id; | 259 | new_root_id = local_node->node_id; |
247 | fw_notify("IRM has link off, making local node (%02x) root.\n", | 260 | fw_notify("IRM has link off, making local node (%02x) root.\n", |
248 | new_root_id); | 261 | new_root_id); |
249 | goto pick_me; | 262 | goto pick_me; |
250 | } | 263 | } |
251 | 264 | ||
252 | bmd.lock.arg = cpu_to_be32(0x3f); | 265 | bmd.lock.arg = cpu_to_be32(0x3f); |
253 | bmd.lock.data = cpu_to_be32(card->local_node->node_id); | 266 | bmd.lock.data = cpu_to_be32(local_node->node_id); |
254 | 267 | ||
255 | spin_unlock_irqrestore(&card->lock, flags); | 268 | spin_unlock_irqrestore(&card->lock, flags); |
256 | 269 | ||
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) | |||
267 | * Another bus reset happened. Just return, | 280 | * Another bus reset happened. Just return, |
268 | * the BM work has been rescheduled. | 281 | * the BM work has been rescheduled. |
269 | */ | 282 | */ |
270 | return; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) | 286 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) |
274 | /* Somebody else is BM, let them do the work. */ | 287 | /* Somebody else is BM, let them do the work. */ |
275 | return; | 288 | goto out; |
276 | 289 | ||
277 | spin_lock_irqsave(&card->lock, flags); | 290 | spin_lock_irqsave(&card->lock, flags); |
278 | if (bmd.rcode != RCODE_COMPLETE) { | 291 | if (bmd.rcode != RCODE_COMPLETE) { |
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) | |||
282 | * do a bus reset and pick the local node as | 295 | * do a bus reset and pick the local node as |
283 | * root, and thus, IRM. | 296 | * root, and thus, IRM. |
284 | */ | 297 | */ |
285 | new_root_id = card->local_node->node_id; | 298 | new_root_id = local_node->node_id; |
286 | fw_notify("BM lock failed, making local node (%02x) root.\n", | 299 | fw_notify("BM lock failed, making local node (%02x) root.\n", |
287 | new_root_id); | 300 | new_root_id); |
288 | goto pick_me; | 301 | goto pick_me; |
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) | |||
295 | */ | 308 | */ |
296 | spin_unlock_irqrestore(&card->lock, flags); | 309 | spin_unlock_irqrestore(&card->lock, flags); |
297 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); | 310 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); |
298 | return; | 311 | goto out; |
299 | } | 312 | } |
300 | 313 | ||
301 | /* | 314 | /* |
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) | |||
305 | */ | 318 | */ |
306 | card->bm_generation = generation; | 319 | card->bm_generation = generation; |
307 | 320 | ||
308 | if (root == NULL) { | 321 | if (root_device == NULL) { |
309 | /* | 322 | /* |
310 | * Either link_on is false, or we failed to read the | 323 | * Either link_on is false, or we failed to read the |
311 | * config rom. In either case, pick another root. | 324 | * config rom. In either case, pick another root. |
312 | */ | 325 | */ |
313 | new_root_id = card->local_node->node_id; | 326 | new_root_id = local_node->node_id; |
314 | } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { | 327 | } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { |
315 | /* | 328 | /* |
316 | * If we haven't probed this device yet, bail out now | 329 | * If we haven't probed this device yet, bail out now |
317 | * and let's try again once that's done. | 330 | * and let's try again once that's done. |
318 | */ | 331 | */ |
319 | spin_unlock_irqrestore(&card->lock, flags); | 332 | spin_unlock_irqrestore(&card->lock, flags); |
320 | return; | 333 | goto out; |
321 | } else if (root->config_rom[2] & BIB_CMC) { | 334 | } else if (root_device->config_rom[2] & BIB_CMC) { |
322 | /* | 335 | /* |
323 | * FIXME: I suppose we should set the cmstr bit in the | 336 | * FIXME: I suppose we should set the cmstr bit in the |
324 | * STATE_CLEAR register of this node, as described in | 337 | * STATE_CLEAR register of this node, as described in |
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) | |||
332 | * successfully read the config rom, but it's not | 345 | * successfully read the config rom, but it's not |
333 | * cycle master capable. | 346 | * cycle master capable. |
334 | */ | 347 | */ |
335 | new_root_id = card->local_node->node_id; | 348 | new_root_id = local_node->node_id; |
336 | } | 349 | } |
337 | 350 | ||
338 | pick_me: | 351 | pick_me: |
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) | |||
341 | * the typically much larger 1394b beta repeater delays though. | 354 | * the typically much larger 1394b beta repeater delays though. |
342 | */ | 355 | */ |
343 | if (!card->beta_repeaters_present && | 356 | if (!card->beta_repeaters_present && |
344 | card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 357 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
345 | gap_count = gap_count_table[card->root_node->max_hops]; | 358 | gap_count = gap_count_table[root_node->max_hops]; |
346 | else | 359 | else |
347 | gap_count = 63; | 360 | gap_count = 63; |
348 | 361 | ||
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) | |||
364 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 377 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
365 | fw_core_initiate_bus_reset(card, 1); | 378 | fw_core_initiate_bus_reset(card, 1); |
366 | } | 379 | } |
380 | out: | ||
381 | if (root_device) | ||
382 | fw_device_put(root_device); | ||
383 | fw_node_put(root_node); | ||
384 | fw_node_put(local_node); | ||
367 | } | 385 | } |
368 | 386 | ||
369 | static void | 387 | static void |
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | |||
381 | static atomic_t index = ATOMIC_INIT(-1); | 399 | static atomic_t index = ATOMIC_INIT(-1); |
382 | 400 | ||
383 | kref_init(&card->kref); | 401 | kref_init(&card->kref); |
402 | atomic_set(&card->device_count, 0); | ||
384 | card->index = atomic_inc_return(&index); | 403 | card->index = atomic_inc_return(&index); |
385 | card->driver = driver; | 404 | card->driver = driver; |
386 | card->device = device; | 405 | card->device = device; |
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) | |||
511 | card->driver = &dummy_driver; | 530 | card->driver = &dummy_driver; |
512 | 531 | ||
513 | fw_destroy_nodes(card); | 532 | fw_destroy_nodes(card); |
514 | flush_scheduled_work(); | 533 | /* |
534 | * Wait for all device workqueue jobs to finish. Otherwise the | ||
535 | * firewire-core module could be unloaded before the jobs ran. | ||
536 | */ | ||
537 | while (atomic_read(&card->device_count) > 0) | ||
538 | msleep(100); | ||
515 | 539 | ||
540 | cancel_delayed_work_sync(&card->work); | ||
516 | fw_flush_transactions(card); | 541 | fw_flush_transactions(card); |
517 | del_timer_sync(&card->flush_timer); | 542 | del_timer_sync(&card->flush_timer); |
518 | 543 | ||
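
The fw-card.c hunks above make fw_card_bm_work() snapshot card->local_node, card->root_node, and the root device while card->lock is held, take a reference on each, and release them at a single out: label so that every early return stays balanced; fw_core_remove_card() then waits for card->device_count to reach zero so the module cannot be unloaded while device work is still running. Below is a minimal userspace sketch of that snapshot-under-lock, single-exit pattern; the card/node types and node_get()/node_put()/bm_work() names are illustrative stand-ins, not the kernel API.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		atomic_int refcount;
		int id;
	};

	struct card {
		pthread_mutex_t lock;
		struct node *local_node;	/* may be NULL before a topology exists */
		struct node *root_node;
	};

	static struct node *node_get(struct node *n)
	{
		atomic_fetch_add(&n->refcount, 1);
		return n;
	}

	static void node_put(struct node *n)
	{
		if (atomic_fetch_sub(&n->refcount, 1) == 1)
			free(n);
	}

	static void bm_work(struct card *card)
	{
		struct node *local, *root;

		pthread_mutex_lock(&card->lock);
		local = card->local_node;
		root  = card->root_node;
		if (!local) {
			pthread_mutex_unlock(&card->lock);
			return;		/* nothing referenced yet, a plain return is fine */
		}
		node_get(local);	/* references taken under the lock */
		node_get(root);
		pthread_mutex_unlock(&card->lock);

		/* ... work that may bail out early ... */
		if (root->id < 0)
			goto out;	/* early exits still drop the references */

		printf("managing bus, root node %d\n", root->id);
	out:
		node_put(root);
		node_put(local);
	}

	int main(void)
	{
		struct card card = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct node *n0 = calloc(1, sizeof(*n0));
		struct node *n1 = calloc(1, sizeof(*n1));

		atomic_init(&n0->refcount, 1);
		atomic_init(&n1->refcount, 1);
		n1->id = 1;
		card.local_node = n0;
		card.root_node = n1;

		bm_work(&card);

		node_put(card.root_node);
		node_put(card.local_node);
		return 0;
	}

The point of funnelling every exit through out: is that a later change which adds another early bail-out cannot forget to drop the node and device references.
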
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 7e73cbaa4121..46bc197a047f 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
109 | struct client *client; | 109 | struct client *client; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | 111 | ||
112 | device = fw_device_from_devt(inode->i_rdev); | 112 | device = fw_device_get_by_devt(inode->i_rdev); |
113 | if (device == NULL) | 113 | if (device == NULL) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 116 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
117 | if (client == NULL) | 117 | if (client == NULL) { |
118 | fw_device_put(device); | ||
118 | return -ENOMEM; | 119 | return -ENOMEM; |
120 | } | ||
119 | 121 | ||
120 | client->device = fw_device_get(device); | 122 | client->device = device; |
121 | INIT_LIST_HEAD(&client->event_list); | 123 | INIT_LIST_HEAD(&client->event_list); |
122 | INIT_LIST_HEAD(&client->resource_list); | 124 | INIT_LIST_HEAD(&client->resource_list); |
123 | spin_lock_init(&client->lock); | 125 | spin_lock_init(&client->lock); |
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) | |||
644 | struct fw_cdev_create_iso_context *request = buffer; | 646 | struct fw_cdev_create_iso_context *request = buffer; |
645 | struct fw_iso_context *context; | 647 | struct fw_iso_context *context; |
646 | 648 | ||
649 | /* We only support one context at this time. */ | ||
650 | if (client->iso_context != NULL) | ||
651 | return -EBUSY; | ||
652 | |||
647 | if (request->channel > 63) | 653 | if (request->channel > 63) |
648 | return -EINVAL; | 654 | return -EINVAL; |
649 | 655 | ||
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) | |||
790 | { | 796 | { |
791 | struct fw_cdev_start_iso *request = buffer; | 797 | struct fw_cdev_start_iso *request = buffer; |
792 | 798 | ||
793 | if (request->handle != 0) | 799 | if (client->iso_context == NULL || request->handle != 0) |
794 | return -EINVAL; | 800 | return -EINVAL; |
801 | |||
795 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 802 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
796 | if (request->tags == 0 || request->tags > 15) | 803 | if (request->tags == 0 || request->tags > 15) |
797 | return -EINVAL; | 804 | return -EINVAL; |
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) | |||
808 | { | 815 | { |
809 | struct fw_cdev_stop_iso *request = buffer; | 816 | struct fw_cdev_stop_iso *request = buffer; |
810 | 817 | ||
811 | if (request->handle != 0) | 818 | if (client->iso_context == NULL || request->handle != 0) |
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | 820 | ||
814 | return fw_iso_context_stop(client->iso_context); | 821 | return fw_iso_context_stop(client->iso_context); |
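
The fw-cdev.c change renames fw_device_from_devt() to fw_device_get_by_devt(), which now returns with a reference already held (taken while the idr lock is still held), so fw_device_op_open() keeps that reference for client->device and must drop it on its kzalloc() failure path. A rough userspace sketch of that rule follows, using illustrative names (obj, obj_get_by_id, open_client) rather than the kernel API; it is a compile-only fragment, not a full program.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	#define TABLE_SIZE 16

	struct obj {
		atomic_int refcount;
	};

	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct obj *table[TABLE_SIZE];

	static struct obj *obj_get(struct obj *o)
	{
		atomic_fetch_add(&o->refcount, 1);
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (atomic_fetch_sub(&o->refcount, 1) == 1)
			free(o);
	}

	/* Returns with one reference held, or NULL if the id is unknown. */
	static struct obj *obj_get_by_id(unsigned int id)
	{
		struct obj *o = NULL;

		pthread_rwlock_rdlock(&table_lock);
		if (id < TABLE_SIZE && table[id])
			o = obj_get(table[id]);	/* taken under the lock, no free/use window */
		pthread_rwlock_unlock(&table_lock);

		return o;
	}

	/* Caller pattern mirroring fw_device_op_open(): drop the reference on error. */
	static int open_client(unsigned int id, void **out)
	{
		struct obj *o = obj_get_by_id(id);
		void *client;

		if (!o)
			return -1;
		client = calloc(1, 64);
		if (!client) {
			obj_put(o);	/* balanced error path */
			return -1;
		}
		*out = client;		/* the client keeps the reference it was handed */
		return 0;
	}
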
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index de9066e69adf..870125a3638e 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { | |||
150 | }; | 150 | }; |
151 | EXPORT_SYMBOL(fw_bus_type); | 151 | EXPORT_SYMBOL(fw_bus_type); |
152 | 152 | ||
153 | struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
165 | static void fw_device_release(struct device *dev) | 153 | static void fw_device_release(struct device *dev) |
166 | { | 154 | { |
167 | struct fw_device *device = fw_device(dev); | 155 | struct fw_device *device = fw_device(dev); |
156 | struct fw_card *card = device->card; | ||
168 | unsigned long flags; | 157 | unsigned long flags; |
169 | 158 | ||
170 | /* | 159 | /* |
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) | |||
176 | spin_unlock_irqrestore(&device->card->lock, flags); | 165 | spin_unlock_irqrestore(&device->card->lock, flags); |
177 | 166 | ||
178 | fw_node_put(device->node); | 167 | fw_node_put(device->node); |
179 | fw_card_put(device->card); | ||
180 | kfree(device->config_rom); | 168 | kfree(device->config_rom); |
181 | kfree(device); | 169 | kfree(device); |
170 | atomic_dec(&card->device_count); | ||
182 | } | 171 | } |
183 | 172 | ||
184 | int fw_device_enable_phys_dma(struct fw_device *device) | 173 | int fw_device_enable_phys_dma(struct fw_device *device) |
@@ -358,12 +347,9 @@ static ssize_t | |||
358 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 347 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) |
359 | { | 348 | { |
360 | struct fw_device *device = fw_device(dev); | 349 | struct fw_device *device = fw_device(dev); |
361 | u64 guid; | ||
362 | |||
363 | guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; | ||
364 | 350 | ||
365 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | 351 | return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", |
366 | (unsigned long long)guid); | 352 | device->config_rom[3], device->config_rom[4]); |
367 | } | 353 | } |
368 | 354 | ||
369 | static struct device_attribute fw_device_attributes[] = { | 355 | static struct device_attribute fw_device_attributes[] = { |
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); | |||
610 | static DEFINE_IDR(fw_device_idr); | 596 | static DEFINE_IDR(fw_device_idr); |
611 | int fw_cdev_major; | 597 | int fw_cdev_major; |
612 | 598 | ||
613 | struct fw_device *fw_device_from_devt(dev_t devt) | 599 | struct fw_device *fw_device_get_by_devt(dev_t devt) |
614 | { | 600 | { |
615 | struct fw_device *device; | 601 | struct fw_device *device; |
616 | 602 | ||
617 | down_read(&idr_rwsem); | 603 | down_read(&idr_rwsem); |
618 | device = idr_find(&fw_device_idr, MINOR(devt)); | 604 | device = idr_find(&fw_device_idr, MINOR(devt)); |
605 | if (device) | ||
606 | fw_device_get(device); | ||
619 | up_read(&idr_rwsem); | 607 | up_read(&idr_rwsem); |
620 | 608 | ||
621 | return device; | 609 | return device; |
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) | |||
627 | container_of(work, struct fw_device, work.work); | 615 | container_of(work, struct fw_device, work.work); |
628 | int minor = MINOR(device->device.devt); | 616 | int minor = MINOR(device->device.devt); |
629 | 617 | ||
630 | down_write(&idr_rwsem); | ||
631 | idr_remove(&fw_device_idr, minor); | ||
632 | up_write(&idr_rwsem); | ||
633 | |||
634 | fw_device_cdev_remove(device); | 618 | fw_device_cdev_remove(device); |
635 | device_for_each_child(&device->device, NULL, shutdown_unit); | 619 | device_for_each_child(&device->device, NULL, shutdown_unit); |
636 | device_unregister(&device->device); | 620 | device_unregister(&device->device); |
621 | |||
622 | down_write(&idr_rwsem); | ||
623 | idr_remove(&fw_device_idr, minor); | ||
624 | up_write(&idr_rwsem); | ||
625 | fw_device_put(device); | ||
637 | } | 626 | } |
638 | 627 | ||
639 | static struct device_type fw_device_type = { | 628 | static struct device_type fw_device_type = { |
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) | |||
668 | */ | 657 | */ |
669 | 658 | ||
670 | if (read_bus_info_block(device, device->generation) < 0) { | 659 | if (read_bus_info_block(device, device->generation) < 0) { |
671 | if (device->config_rom_retries < MAX_RETRIES) { | 660 | if (device->config_rom_retries < MAX_RETRIES && |
661 | atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { | ||
672 | device->config_rom_retries++; | 662 | device->config_rom_retries++; |
673 | schedule_delayed_work(&device->work, RETRY_DELAY); | 663 | schedule_delayed_work(&device->work, RETRY_DELAY); |
674 | } else { | 664 | } else { |
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) | |||
682 | } | 672 | } |
683 | 673 | ||
684 | err = -ENOMEM; | 674 | err = -ENOMEM; |
675 | |||
676 | fw_device_get(device); | ||
685 | down_write(&idr_rwsem); | 677 | down_write(&idr_rwsem); |
686 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) | 678 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) |
687 | err = idr_get_new(&fw_device_idr, device, &minor); | 679 | err = idr_get_new(&fw_device_idr, device, &minor); |
688 | up_write(&idr_rwsem); | 680 | up_write(&idr_rwsem); |
681 | |||
689 | if (err < 0) | 682 | if (err < 0) |
690 | goto error; | 683 | goto error; |
691 | 684 | ||
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) | |||
717 | */ | 710 | */ |
718 | if (atomic_cmpxchg(&device->state, | 711 | if (atomic_cmpxchg(&device->state, |
719 | FW_DEVICE_INITIALIZING, | 712 | FW_DEVICE_INITIALIZING, |
720 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) | 713 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { |
721 | fw_device_shutdown(&device->work.work); | 714 | fw_device_shutdown(&device->work.work); |
722 | else | 715 | } else { |
723 | fw_notify("created new fw device %s " | 716 | if (device->config_rom_retries) |
724 | "(%d config rom retries, S%d00)\n", | 717 | fw_notify("created device %s: GUID %08x%08x, S%d00, " |
725 | device->device.bus_id, device->config_rom_retries, | 718 | "%d config ROM retries\n", |
726 | 1 << device->max_speed); | 719 | device->device.bus_id, |
720 | device->config_rom[3], device->config_rom[4], | ||
721 | 1 << device->max_speed, | ||
722 | device->config_rom_retries); | ||
723 | else | ||
724 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
725 | device->device.bus_id, | ||
726 | device->config_rom[3], device->config_rom[4], | ||
727 | 1 << device->max_speed); | ||
728 | } | ||
727 | 729 | ||
728 | /* | 730 | /* |
729 | * Reschedule the IRM work if we just finished reading the | 731 | * Reschedule the IRM work if we just finished reading the |
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) | |||
741 | idr_remove(&fw_device_idr, minor); | 743 | idr_remove(&fw_device_idr, minor); |
742 | up_write(&idr_rwsem); | 744 | up_write(&idr_rwsem); |
743 | error: | 745 | error: |
744 | put_device(&device->device); | 746 | fw_device_put(device); /* fw_device_idr's reference */ |
747 | |||
748 | put_device(&device->device); /* our reference */ | ||
745 | } | 749 | } |
746 | 750 | ||
747 | static int update_unit(struct device *dev, void *data) | 751 | static int update_unit(struct device *dev, void *data) |
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
791 | */ | 795 | */ |
792 | device_initialize(&device->device); | 796 | device_initialize(&device->device); |
793 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); | 797 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); |
794 | device->card = fw_card_get(card); | 798 | atomic_inc(&card->device_count); |
799 | device->card = card; | ||
795 | device->node = fw_node_get(node); | 800 | device->node = fw_node_get(node); |
796 | device->node_id = node->node_id; | 801 | device->node_id = node->node_id; |
797 | device->generation = card->generation; | 802 | device->generation = card->generation; |
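
One small change in fw-device.c above switches guid_show() from composing a u64 and printing it with "%016llx" to printing the two config ROM quadlets back to back with "%08x%08x"; the new fw_notify() lines in fw_device_init() use the same format. The two forms print identically, which the short standalone program below checks (the config ROM values are made up for the demonstration).

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int config_rom_3 = 0x0814c3ba;	/* made-up quadlets */
		unsigned int config_rom_4 = 0x00f2e48c;
		uint64_t guid = ((uint64_t)config_rom_3 << 32) | config_rom_4;
		char old_fmt[32], new_fmt[32];

		/* old guid_show(): compose a 64-bit value, print with %016llx */
		snprintf(old_fmt, sizeof(old_fmt), "0x%016llx",
			 (unsigned long long)guid);
		/* new guid_show(): print the two quadlets back to back */
		snprintf(new_fmt, sizeof(new_fmt), "0x%08x%08x",
			 config_rom_3, config_rom_4);

		assert(strcmp(old_fmt, new_fmt) == 0);
		puts(new_fmt);
		return 0;
	}
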
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 0854fe2bc110..78ecd3991b7f 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) | |||
76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | 76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; |
77 | } | 77 | } |
78 | 78 | ||
79 | struct fw_device *fw_device_get(struct fw_device *device); | 79 | static inline struct fw_device * |
80 | void fw_device_put(struct fw_device *device); | 80 | fw_device_get(struct fw_device *device) |
81 | { | ||
82 | get_device(&device->device); | ||
83 | |||
84 | return device; | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | fw_device_put(struct fw_device *device) | ||
89 | { | ||
90 | put_device(&device->device); | ||
91 | } | ||
92 | |||
93 | struct fw_device *fw_device_get_by_devt(dev_t devt); | ||
81 | int fw_device_enable_phys_dma(struct fw_device *device); | 94 | int fw_device_enable_phys_dma(struct fw_device *device); |
82 | 95 | ||
83 | void fw_device_cdev_update(struct fw_device *device); | 96 | void fw_device_cdev_update(struct fw_device *device); |
84 | void fw_device_cdev_remove(struct fw_device *device); | 97 | void fw_device_cdev_remove(struct fw_device *device); |
85 | 98 | ||
86 | struct fw_device *fw_device_from_devt(dev_t devt); | ||
87 | extern int fw_cdev_major; | 99 | extern int fw_cdev_major; |
88 | 100 | ||
89 | struct fw_unit { | 101 | struct fw_unit { |
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 7ebad3c14cb8..996d61f0d460 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -33,6 +33,10 @@ | |||
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | #include <asm/system.h> | 34 | #include <asm/system.h> |
35 | 35 | ||
36 | #ifdef CONFIG_PPC_PMAC | ||
37 | #include <asm/pmac_feature.h> | ||
38 | #endif | ||
39 | |||
36 | #include "fw-ohci.h" | 40 | #include "fw-ohci.h" |
37 | #include "fw-transaction.h" | 41 | #include "fw-transaction.h" |
38 | 42 | ||
@@ -175,6 +179,7 @@ struct fw_ohci { | |||
175 | int generation; | 179 | int generation; |
176 | int request_generation; | 180 | int request_generation; |
177 | u32 bus_seconds; | 181 | u32 bus_seconds; |
182 | bool old_uninorth; | ||
178 | 183 | ||
179 | /* | 184 | /* |
180 | * Spinlock for accessing fw_ohci data. Never call out of | 185 | * Spinlock for accessing fw_ohci data. Never call out of |
@@ -276,19 +281,13 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
276 | { | 281 | { |
277 | struct device *dev = ctx->ohci->card.device; | 282 | struct device *dev = ctx->ohci->card.device; |
278 | struct ar_buffer *ab; | 283 | struct ar_buffer *ab; |
279 | dma_addr_t ab_bus; | 284 | dma_addr_t uninitialized_var(ab_bus); |
280 | size_t offset; | 285 | size_t offset; |
281 | 286 | ||
282 | ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC); | 287 | ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); |
283 | if (ab == NULL) | 288 | if (ab == NULL) |
284 | return -ENOMEM; | 289 | return -ENOMEM; |
285 | 290 | ||
286 | ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
287 | if (dma_mapping_error(ab_bus)) { | ||
288 | free_page((unsigned long) ab); | ||
289 | return -ENOMEM; | ||
290 | } | ||
291 | |||
292 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); | 291 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); |
293 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | 292 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | |
294 | DESCRIPTOR_STATUS | | 293 | DESCRIPTOR_STATUS | |
@@ -299,8 +298,6 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
299 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); | 298 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); |
300 | ab->descriptor.branch_address = 0; | 299 | ab->descriptor.branch_address = 0; |
301 | 300 | ||
302 | dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
303 | |||
304 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); | 301 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); |
305 | ctx->last_buffer->next = ab; | 302 | ctx->last_buffer->next = ab; |
306 | ctx->last_buffer = ab; | 303 | ctx->last_buffer = ab; |
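
The ar_context hunks above replace __get_free_page() plus dma_map_single()/dma_sync_single_for_device() with a single dma_alloc_coherent() call, and the tasklet later frees the page with dma_free_coherent() using the bus address recovered from the descriptor. The kernel-style fragment below (not a standalone program; my_ctx is an illustrative type, not the driver's) shows the coherent alloc/free pairing and why the explicit sync call goes away:

	#include <asm/page.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	struct my_ctx {
		struct device *dev;
		void *buffer;
		dma_addr_t buffer_bus;
	};

	static int my_ctx_add_page(struct my_ctx *ctx)
	{
		/* One call returns both the CPU address and the bus address. */
		ctx->buffer = dma_alloc_coherent(ctx->dev, PAGE_SIZE,
						 &ctx->buffer_bus, GFP_ATOMIC);
		if (!ctx->buffer)
			return -ENOMEM;

		/* No dma_sync_single_for_device(): the mapping is coherent. */
		return 0;
	}

	static void my_ctx_free_page(struct my_ctx *ctx)
	{
		/* The free takes the same size and bus address as the allocation. */
		dma_free_coherent(ctx->dev, PAGE_SIZE, ctx->buffer, ctx->buffer_bus);
	}
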
@@ -311,15 +308,22 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
311 | return 0; | 308 | return 0; |
312 | } | 309 | } |
313 | 310 | ||
311 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) | ||
312 | #define cond_le32_to_cpu(v) \ | ||
313 | (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) | ||
314 | #else | ||
315 | #define cond_le32_to_cpu(v) le32_to_cpu(v) | ||
316 | #endif | ||
317 | |||
314 | static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | 318 | static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) |
315 | { | 319 | { |
316 | struct fw_ohci *ohci = ctx->ohci; | 320 | struct fw_ohci *ohci = ctx->ohci; |
317 | struct fw_packet p; | 321 | struct fw_packet p; |
318 | u32 status, length, tcode; | 322 | u32 status, length, tcode; |
319 | 323 | ||
320 | p.header[0] = le32_to_cpu(buffer[0]); | 324 | p.header[0] = cond_le32_to_cpu(buffer[0]); |
321 | p.header[1] = le32_to_cpu(buffer[1]); | 325 | p.header[1] = cond_le32_to_cpu(buffer[1]); |
322 | p.header[2] = le32_to_cpu(buffer[2]); | 326 | p.header[2] = cond_le32_to_cpu(buffer[2]); |
323 | 327 | ||
324 | tcode = (p.header[0] >> 4) & 0x0f; | 328 | tcode = (p.header[0] >> 4) & 0x0f; |
325 | switch (tcode) { | 329 | switch (tcode) { |
@@ -331,7 +335,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
331 | break; | 335 | break; |
332 | 336 | ||
333 | case TCODE_READ_BLOCK_REQUEST : | 337 | case TCODE_READ_BLOCK_REQUEST : |
334 | p.header[3] = le32_to_cpu(buffer[3]); | 338 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
335 | p.header_length = 16; | 339 | p.header_length = 16; |
336 | p.payload_length = 0; | 340 | p.payload_length = 0; |
337 | break; | 341 | break; |
@@ -340,7 +344,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
340 | case TCODE_READ_BLOCK_RESPONSE: | 344 | case TCODE_READ_BLOCK_RESPONSE: |
341 | case TCODE_LOCK_REQUEST: | 345 | case TCODE_LOCK_REQUEST: |
342 | case TCODE_LOCK_RESPONSE: | 346 | case TCODE_LOCK_RESPONSE: |
343 | p.header[3] = le32_to_cpu(buffer[3]); | 347 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
344 | p.header_length = 16; | 348 | p.header_length = 16; |
345 | p.payload_length = p.header[3] >> 16; | 349 | p.payload_length = p.header[3] >> 16; |
346 | break; | 350 | break; |
@@ -357,7 +361,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
357 | 361 | ||
358 | /* FIXME: What to do about evt_* errors? */ | 362 | /* FIXME: What to do about evt_* errors? */ |
359 | length = (p.header_length + p.payload_length + 3) / 4; | 363 | length = (p.header_length + p.payload_length + 3) / 4; |
360 | status = le32_to_cpu(buffer[length]); | 364 | status = cond_le32_to_cpu(buffer[length]); |
361 | 365 | ||
362 | p.ack = ((status >> 16) & 0x1f) - 16; | 366 | p.ack = ((status >> 16) & 0x1f) - 16; |
363 | p.speed = (status >> 21) & 0x7; | 367 | p.speed = (status >> 21) & 0x7; |
@@ -375,7 +379,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
375 | */ | 379 | */ |
376 | 380 | ||
377 | if (p.ack + 16 == 0x09) | 381 | if (p.ack + 16 == 0x09) |
378 | ohci->request_generation = (buffer[2] >> 16) & 0xff; | 382 | ohci->request_generation = (p.header[2] >> 16) & 0xff; |
379 | else if (ctx == &ohci->ar_request_ctx) | 383 | else if (ctx == &ohci->ar_request_ctx) |
380 | fw_core_handle_request(&ohci->card, &p); | 384 | fw_core_handle_request(&ohci->card, &p); |
381 | else | 385 | else |
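
Two things happen in the handle_ar_packet() hunks: quadlets read from the AR buffer now go through cond_le32_to_cpu(), which skips the byte swap on old Apple UniNorth controllers (on big-endian ppc32 they already deliver host-order data), and request_generation is taken from the already-converted p.header[2] instead of the raw buffer. A small standalone model of the conditional swap, with made-up values:

	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Plain 32-bit byte swap. */
	static uint32_t swab32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
		       ((v << 8) & 0x00ff0000u) | (v << 24);
	}

	/*
	 * Model of cond_le32_to_cpu() as seen from a big-endian host, where
	 * le32_to_cpu() is a byte swap: old UniNorth controllers already hand
	 * over quadlets in host order, so they are passed through untouched.
	 */
	static uint32_t cond_le32_to_cpu_model(uint32_t quadlet, bool old_uninorth)
	{
		return old_uninorth ? quadlet : swab32(quadlet);
	}

	int main(void)
	{
		uint32_t raw = 0x12345678;	/* made-up quadlet from the AR buffer */

		printf("normal controller: %08" PRIx32 "\n",
		       cond_le32_to_cpu_model(raw, false));
		printf("old UniNorth:      %08" PRIx32 "\n",
		       cond_le32_to_cpu_model(raw, true));
		return 0;
	}
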
@@ -397,6 +401,7 @@ static void ar_context_tasklet(unsigned long data) | |||
397 | 401 | ||
398 | if (d->res_count == 0) { | 402 | if (d->res_count == 0) { |
399 | size_t size, rest, offset; | 403 | size_t size, rest, offset; |
404 | dma_addr_t buffer_bus; | ||
400 | 405 | ||
401 | /* | 406 | /* |
402 | * This descriptor is finished and we may have a | 407 | * This descriptor is finished and we may have a |
@@ -405,9 +410,7 @@ static void ar_context_tasklet(unsigned long data) | |||
405 | */ | 410 | */ |
406 | 411 | ||
407 | offset = offsetof(struct ar_buffer, data); | 412 | offset = offsetof(struct ar_buffer, data); |
408 | dma_unmap_single(ohci->card.device, | 413 | buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset; |
409 | le32_to_cpu(ab->descriptor.data_address) - offset, | ||
410 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
411 | 414 | ||
412 | buffer = ab; | 415 | buffer = ab; |
413 | ab = ab->next; | 416 | ab = ab->next; |
@@ -423,7 +426,8 @@ static void ar_context_tasklet(unsigned long data) | |||
423 | while (buffer < end) | 426 | while (buffer < end) |
424 | buffer = handle_ar_packet(ctx, buffer); | 427 | buffer = handle_ar_packet(ctx, buffer); |
425 | 428 | ||
426 | free_page((unsigned long)buffer); | 429 | dma_free_coherent(ohci->card.device, PAGE_SIZE, |
430 | buffer, buffer_bus); | ||
427 | ar_context_add_page(ctx); | 431 | ar_context_add_page(ctx); |
428 | } else { | 432 | } else { |
429 | buffer = ctx->pointer; | 433 | buffer = ctx->pointer; |
@@ -532,7 +536,7 @@ static int | |||
532 | context_add_buffer(struct context *ctx) | 536 | context_add_buffer(struct context *ctx) |
533 | { | 537 | { |
534 | struct descriptor_buffer *desc; | 538 | struct descriptor_buffer *desc; |
535 | dma_addr_t bus_addr; | 539 | dma_addr_t uninitialized_var(bus_addr); |
536 | int offset; | 540 | int offset; |
537 | 541 | ||
538 | /* | 542 | /* |
@@ -1022,13 +1026,14 @@ static void bus_reset_tasklet(unsigned long data) | |||
1022 | */ | 1026 | */ |
1023 | 1027 | ||
1024 | self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; | 1028 | self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; |
1025 | generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; | 1029 | generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; |
1026 | rmb(); | 1030 | rmb(); |
1027 | 1031 | ||
1028 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { | 1032 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { |
1029 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) | 1033 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) |
1030 | fw_error("inconsistent self IDs\n"); | 1034 | fw_error("inconsistent self IDs\n"); |
1031 | ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); | 1035 | ohci->self_id_buffer[j] = |
1036 | cond_le32_to_cpu(ohci->self_id_cpu[i]); | ||
1032 | } | 1037 | } |
1033 | rmb(); | 1038 | rmb(); |
1034 | 1039 | ||
@@ -1316,7 +1321,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | |||
1316 | unsigned long flags; | 1321 | unsigned long flags; |
1317 | int retval = -EBUSY; | 1322 | int retval = -EBUSY; |
1318 | __be32 *next_config_rom; | 1323 | __be32 *next_config_rom; |
1319 | dma_addr_t next_config_rom_bus; | 1324 | dma_addr_t uninitialized_var(next_config_rom_bus); |
1320 | 1325 | ||
1321 | ohci = fw_ohci(card); | 1326 | ohci = fw_ohci(card); |
1322 | 1327 | ||
@@ -1487,7 +1492,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1487 | void *p, *end; | 1492 | void *p, *end; |
1488 | int i; | 1493 | int i; |
1489 | 1494 | ||
1490 | if (db->first_res_count > 0 && db->second_res_count > 0) { | 1495 | if (db->first_res_count != 0 && db->second_res_count != 0) { |
1491 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { | 1496 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { |
1492 | /* This descriptor isn't done yet, stop iteration. */ | 1497 | /* This descriptor isn't done yet, stop iteration. */ |
1493 | return 0; | 1498 | return 0; |
@@ -1513,7 +1518,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1513 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); | 1518 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); |
1514 | i += ctx->base.header_size; | 1519 | i += ctx->base.header_size; |
1515 | ctx->excess_bytes += | 1520 | ctx->excess_bytes += |
1516 | (le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff; | 1521 | (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; |
1517 | p += ctx->base.header_size + 4; | 1522 | p += ctx->base.header_size + 4; |
1518 | } | 1523 | } |
1519 | ctx->header_length = i; | 1524 | ctx->header_length = i; |
@@ -2048,6 +2053,18 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2048 | int err; | 2053 | int err; |
2049 | size_t size; | 2054 | size_t size; |
2050 | 2055 | ||
2056 | #ifdef CONFIG_PPC_PMAC | ||
2057 | /* Necessary on some machines if fw-ohci was loaded/ unloaded before */ | ||
2058 | if (machine_is(powermac)) { | ||
2059 | struct device_node *ofn = pci_device_to_OF_node(dev); | ||
2060 | |||
2061 | if (ofn) { | ||
2062 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); | ||
2063 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); | ||
2064 | } | ||
2065 | } | ||
2066 | #endif /* CONFIG_PPC_PMAC */ | ||
2067 | |||
2051 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); | 2068 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); |
2052 | if (ohci == NULL) { | 2069 | if (ohci == NULL) { |
2053 | fw_error("Could not malloc fw_ohci data.\n"); | 2070 | fw_error("Could not malloc fw_ohci data.\n"); |
@@ -2066,6 +2083,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2066 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); | 2083 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); |
2067 | pci_set_drvdata(dev, ohci); | 2084 | pci_set_drvdata(dev, ohci); |
2068 | 2085 | ||
2086 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) | ||
2087 | ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE && | ||
2088 | dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW; | ||
2089 | #endif | ||
2069 | spin_lock_init(&ohci->lock); | 2090 | spin_lock_init(&ohci->lock); |
2070 | 2091 | ||
2071 | tasklet_init(&ohci->bus_reset_tasklet, | 2092 | tasklet_init(&ohci->bus_reset_tasklet, |
@@ -2182,6 +2203,19 @@ static void pci_remove(struct pci_dev *dev) | |||
2182 | pci_disable_device(dev); | 2203 | pci_disable_device(dev); |
2183 | fw_card_put(&ohci->card); | 2204 | fw_card_put(&ohci->card); |
2184 | 2205 | ||
2206 | #ifdef CONFIG_PPC_PMAC | ||
2207 | /* On UniNorth, power down the cable and turn off the chip clock | ||
2208 | * to save power on laptops */ | ||
2209 | if (machine_is(powermac)) { | ||
2210 | struct device_node *ofn = pci_device_to_OF_node(dev); | ||
2211 | |||
2212 | if (ofn) { | ||
2213 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); | ||
2214 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); | ||
2215 | } | ||
2216 | } | ||
2217 | #endif /* CONFIG_PPC_PMAC */ | ||
2218 | |||
2185 | fw_notify("Removed fw-ohci device.\n"); | 2219 | fw_notify("Removed fw-ohci device.\n"); |
2186 | } | 2220 | } |
2187 | 2221 | ||
@@ -2202,6 +2236,16 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2202 | if (err) | 2236 | if (err) |
2203 | fw_error("pci_set_power_state failed with %d\n", err); | 2237 | fw_error("pci_set_power_state failed with %d\n", err); |
2204 | 2238 | ||
2239 | /* PowerMac suspend code comes last */ | ||
2240 | #ifdef CONFIG_PPC_PMAC | ||
2241 | if (machine_is(powermac)) { | ||
2242 | struct device_node *ofn = pci_device_to_OF_node(pdev); | ||
2243 | |||
2244 | if (ofn) | ||
2245 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); | ||
2246 | } | ||
2247 | #endif /* CONFIG_PPC_PMAC */ | ||
2248 | |||
2205 | return 0; | 2249 | return 0; |
2206 | } | 2250 | } |
2207 | 2251 | ||
@@ -2210,6 +2254,16 @@ static int pci_resume(struct pci_dev *pdev) | |||
2210 | struct fw_ohci *ohci = pci_get_drvdata(pdev); | 2254 | struct fw_ohci *ohci = pci_get_drvdata(pdev); |
2211 | int err; | 2255 | int err; |
2212 | 2256 | ||
2257 | /* PowerMac resume code comes first */ | ||
2258 | #ifdef CONFIG_PPC_PMAC | ||
2259 | if (machine_is(powermac)) { | ||
2260 | struct device_node *ofn = pci_device_to_OF_node(pdev); | ||
2261 | |||
2262 | if (ofn) | ||
2263 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); | ||
2264 | } | ||
2265 | #endif /* CONFIG_PPC_PMAC */ | ||
2266 | |||
2213 | pci_set_power_state(pdev, PCI_D0); | 2267 | pci_set_power_state(pdev, PCI_D0); |
2214 | pci_restore_state(pdev); | 2268 | pci_restore_state(pdev); |
2215 | err = pci_enable_device(pdev); | 2269 | err = pci_enable_device(pdev); |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 19ece9b6d742..62b4e47d0cc0 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -28,14 +28,15 @@ | |||
28 | * and many others. | 28 | * and many others. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/mod_devicetable.h> | ||
32 | #include <linux/module.h> | 37 | #include <linux/module.h> |
33 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
34 | #include <linux/mod_devicetable.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
40 | #include <linux/stringify.h> | 41 | #include <linux/stringify.h> |
41 | #include <linux/timer.h> | 42 | #include <linux/timer.h> |
@@ -47,9 +48,9 @@ | |||
47 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 49 | #include <scsi/scsi_host.h> |
49 | 50 | ||
50 | #include "fw-transaction.h" | ||
51 | #include "fw-topology.h" | ||
52 | #include "fw-device.h" | 51 | #include "fw-device.h" |
52 | #include "fw-topology.h" | ||
53 | #include "fw-transaction.h" | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * So far only bridges from Oxford Semiconductor are known to support | 56 | * So far only bridges from Oxford Semiconductor are known to support |
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
82 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 83 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
83 | * Don't use this with devices which don't have this bug. | 84 | * Don't use this with devices which don't have this bug. |
84 | * | 85 | * |
86 | * - delay inquiry | ||
87 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
88 | * | ||
85 | * - override internal blacklist | 89 | * - override internal blacklist |
86 | * Instead of adding to the built-in blacklist, use only the workarounds | 90 | * Instead of adding to the built-in blacklist, use only the workarounds |
87 | * specified in the module load parameter. | 91 | * specified in the module load parameter. |
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
91 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 95 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
92 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 96 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
93 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 97 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
98 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
99 | #define SBP2_INQUIRY_DELAY 12 | ||
94 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 100 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
95 | 101 | ||
96 | static int sbp2_param_workarounds; | 102 | static int sbp2_param_workarounds; |
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
100 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 106 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
101 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 107 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
102 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 108 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
109 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
103 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 110 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
104 | ", or a combination)"); | 111 | ", or a combination)"); |
105 | 112 | ||
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; | |||
115 | struct sbp2_logical_unit { | 122 | struct sbp2_logical_unit { |
116 | struct sbp2_target *tgt; | 123 | struct sbp2_target *tgt; |
117 | struct list_head link; | 124 | struct list_head link; |
118 | struct scsi_device *sdev; | ||
119 | struct fw_address_handler address_handler; | 125 | struct fw_address_handler address_handler; |
120 | struct list_head orb_list; | 126 | struct list_head orb_list; |
121 | 127 | ||
@@ -132,6 +138,8 @@ struct sbp2_logical_unit { | |||
132 | int generation; | 138 | int generation; |
133 | int retries; | 139 | int retries; |
134 | struct delayed_work work; | 140 | struct delayed_work work; |
141 | bool has_sdev; | ||
142 | bool blocked; | ||
135 | }; | 143 | }; |
136 | 144 | ||
137 | /* | 145 | /* |
@@ -141,16 +149,18 @@ struct sbp2_logical_unit { | |||
141 | struct sbp2_target { | 149 | struct sbp2_target { |
142 | struct kref kref; | 150 | struct kref kref; |
143 | struct fw_unit *unit; | 151 | struct fw_unit *unit; |
152 | const char *bus_id; | ||
153 | struct list_head lu_list; | ||
144 | 154 | ||
145 | u64 management_agent_address; | 155 | u64 management_agent_address; |
146 | int directory_id; | 156 | int directory_id; |
147 | int node_id; | 157 | int node_id; |
148 | int address_high; | 158 | int address_high; |
149 | 159 | unsigned int workarounds; | |
150 | unsigned workarounds; | ||
151 | struct list_head lu_list; | ||
152 | |||
153 | unsigned int mgt_orb_timeout; | 160 | unsigned int mgt_orb_timeout; |
161 | |||
162 | int dont_block; /* counter for each logical unit */ | ||
163 | int blocked; /* ditto */ | ||
154 | }; | 164 | }; |
155 | 165 | ||
156 | /* | 166 | /* |
@@ -160,9 +170,10 @@ struct sbp2_target { | |||
160 | */ | 170 | */ |
161 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | 171 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ |
162 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | 172 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ |
163 | #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ | 173 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ |
164 | #define SBP2_ORB_NULL 0x80000000 | 174 | #define SBP2_ORB_NULL 0x80000000 |
165 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 | 175 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 |
176 | #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ | ||
166 | 177 | ||
167 | #define SBP2_DIRECTION_TO_MEDIA 0x0 | 178 | #define SBP2_DIRECTION_TO_MEDIA 0x0 |
168 | #define SBP2_DIRECTION_FROM_MEDIA 0x1 | 179 | #define SBP2_DIRECTION_FROM_MEDIA 0x1 |
@@ -297,7 +308,7 @@ struct sbp2_command_orb { | |||
297 | static const struct { | 308 | static const struct { |
298 | u32 firmware_revision; | 309 | u32 firmware_revision; |
299 | u32 model; | 310 | u32 model; |
300 | unsigned workarounds; | 311 | unsigned int workarounds; |
301 | } sbp2_workarounds_table[] = { | 312 | } sbp2_workarounds_table[] = { |
302 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | 313 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { |
303 | .firmware_revision = 0x002800, | 314 | .firmware_revision = 0x002800, |
@@ -305,6 +316,11 @@ static const struct { | |||
305 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 316 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
306 | SBP2_WORKAROUND_MODE_SENSE_8, | 317 | SBP2_WORKAROUND_MODE_SENSE_8, |
307 | }, | 318 | }, |
319 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
320 | .firmware_revision = 0x002800, | ||
321 | .model = 0x000000, | ||
322 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
323 | }, | ||
308 | /* Initio bridges, actually only needed for some older ones */ { | 324 | /* Initio bridges, actually only needed for some older ones */ { |
309 | .firmware_revision = 0x000200, | 325 | .firmware_revision = 0x000200, |
310 | .model = ~0, | 326 | .model = ~0, |
@@ -315,6 +331,11 @@ static const struct { | |||
315 | .model = ~0, | 331 | .model = ~0, |
316 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | 332 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, |
317 | }, | 333 | }, |
334 | /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { | ||
335 | .firmware_revision = 0x002600, | ||
336 | .model = ~0, | ||
337 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
338 | }, | ||
318 | 339 | ||
319 | /* | 340 | /* |
320 | * There are iPods (2nd gen, 3rd gen) with model_id == 0, but | 341 | * There are iPods (2nd gen, 3rd gen) with model_id == 0, but |
@@ -501,6 +522,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
501 | unsigned int timeout; | 522 | unsigned int timeout; |
502 | int retval = -ENOMEM; | 523 | int retval = -ENOMEM; |
503 | 524 | ||
525 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
526 | return 0; | ||
527 | |||
504 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 528 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
505 | if (orb == NULL) | 529 | if (orb == NULL) |
506 | return -ENOMEM; | 530 | return -ENOMEM; |
@@ -553,20 +577,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
553 | 577 | ||
554 | retval = -EIO; | 578 | retval = -EIO; |
555 | if (sbp2_cancel_orbs(lu) == 0) { | 579 | if (sbp2_cancel_orbs(lu) == 0) { |
556 | fw_error("orb reply timed out, rcode=0x%02x\n", | 580 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", |
557 | orb->base.rcode); | 581 | lu->tgt->bus_id, orb->base.rcode); |
558 | goto out; | 582 | goto out; |
559 | } | 583 | } |
560 | 584 | ||
561 | if (orb->base.rcode != RCODE_COMPLETE) { | 585 | if (orb->base.rcode != RCODE_COMPLETE) { |
562 | fw_error("management write failed, rcode 0x%02x\n", | 586 | fw_error("%s: management write failed, rcode 0x%02x\n", |
563 | orb->base.rcode); | 587 | lu->tgt->bus_id, orb->base.rcode); |
564 | goto out; | 588 | goto out; |
565 | } | 589 | } |
566 | 590 | ||
567 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 591 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
568 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 592 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
569 | fw_error("error status: %d:%d\n", | 593 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, |
570 | STATUS_GET_RESPONSE(orb->status), | 594 | STATUS_GET_RESPONSE(orb->status), |
571 | STATUS_GET_SBP_STATUS(orb->status)); | 595 | STATUS_GET_SBP_STATUS(orb->status)); |
572 | goto out; | 596 | goto out; |
@@ -590,29 +614,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
590 | 614 | ||
591 | static void | 615 | static void |
592 | complete_agent_reset_write(struct fw_card *card, int rcode, | 616 | complete_agent_reset_write(struct fw_card *card, int rcode, |
593 | void *payload, size_t length, void *data) | 617 | void *payload, size_t length, void *done) |
594 | { | 618 | { |
595 | struct fw_transaction *t = data; | 619 | complete(done); |
620 | } | ||
596 | 621 | ||
597 | kfree(t); | 622 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
623 | { | ||
624 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
625 | DECLARE_COMPLETION_ONSTACK(done); | ||
626 | struct fw_transaction t; | ||
627 | static u32 z; | ||
628 | |||
629 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
630 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
631 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
632 | &z, sizeof(z), complete_agent_reset_write, &done); | ||
633 | wait_for_completion(&done); | ||
598 | } | 634 | } |
599 | 635 | ||
600 | static int sbp2_agent_reset(struct sbp2_logical_unit *lu) | 636 | static void |
637 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | ||
638 | void *payload, size_t length, void *data) | ||
639 | { | ||
640 | kfree(data); | ||
641 | } | ||
642 | |||
643 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | ||
601 | { | 644 | { |
602 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 645 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
603 | struct fw_transaction *t; | 646 | struct fw_transaction *t; |
604 | static u32 zero; | 647 | static u32 z; |
605 | 648 | ||
606 | t = kzalloc(sizeof(*t), GFP_ATOMIC); | 649 | t = kmalloc(sizeof(*t), GFP_ATOMIC); |
607 | if (t == NULL) | 650 | if (t == NULL) |
608 | return -ENOMEM; | 651 | return; |
609 | 652 | ||
610 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 653 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
611 | lu->tgt->node_id, lu->generation, device->max_speed, | 654 | lu->tgt->node_id, lu->generation, device->max_speed, |
612 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 655 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
613 | &zero, sizeof(zero), complete_agent_reset_write, t); | 656 | &z, sizeof(z), complete_agent_reset_write_no_wait, t); |
657 | } | ||
614 | 658 | ||
615 | return 0; | 659 | static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) |
660 | { | ||
661 | struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; | ||
662 | unsigned long flags; | ||
663 | |||
664 | /* serialize with comparisons of lu->generation and card->generation */ | ||
665 | spin_lock_irqsave(&card->lock, flags); | ||
666 | lu->generation = generation; | ||
667 | spin_unlock_irqrestore(&card->lock, flags); | ||
668 | } | ||
669 | |||
670 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
671 | { | ||
672 | /* | ||
673 | * We may access dont_block without taking card->lock here: | ||
674 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
675 | * are currently serialized against each other. | ||
676 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
677 | * dont_block is rather harmless, it simply misses its first chance. | ||
678 | */ | ||
679 | --lu->tgt->dont_block; | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Blocks lu->tgt if all of the following conditions are met: | ||
684 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
685 | * logical units have been finished (indicated by dont_block == 0). | ||
686 | * - lu->generation is stale. | ||
687 | * | ||
688 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
689 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
690 | * unblock the target. | ||
691 | */ | ||
692 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
693 | { | ||
694 | struct sbp2_target *tgt = lu->tgt; | ||
695 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
696 | struct Scsi_Host *shost = | ||
697 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
698 | unsigned long flags; | ||
699 | |||
700 | spin_lock_irqsave(&card->lock, flags); | ||
701 | if (!tgt->dont_block && !lu->blocked && | ||
702 | lu->generation != card->generation) { | ||
703 | lu->blocked = true; | ||
704 | if (++tgt->blocked == 1) { | ||
705 | scsi_block_requests(shost); | ||
706 | fw_notify("blocked %s\n", lu->tgt->bus_id); | ||
707 | } | ||
708 | } | ||
709 | spin_unlock_irqrestore(&card->lock, flags); | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
714 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
715 | * card->lock protected section. On the other hand, running it inside | ||
716 | * the section might clash with shost->host_lock. | ||
717 | */ | ||
718 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
719 | { | ||
720 | struct sbp2_target *tgt = lu->tgt; | ||
721 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
722 | struct Scsi_Host *shost = | ||
723 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
724 | unsigned long flags; | ||
725 | bool unblock = false; | ||
726 | |||
727 | spin_lock_irqsave(&card->lock, flags); | ||
728 | if (lu->blocked && lu->generation == card->generation) { | ||
729 | lu->blocked = false; | ||
730 | unblock = --tgt->blocked == 0; | ||
731 | } | ||
732 | spin_unlock_irqrestore(&card->lock, flags); | ||
733 | |||
734 | if (unblock) { | ||
735 | scsi_unblock_requests(shost); | ||
736 | fw_notify("unblocked %s\n", lu->tgt->bus_id); | ||
737 | } | ||
738 | } | ||
739 | |||
740 | /* | ||
741 | * Prevents future blocking of tgt and unblocks it. | ||
742 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
743 | * card->lock protected section. On the other hand, running it inside | ||
744 | * the section might clash with shost->host_lock. | ||
745 | */ | ||
746 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
747 | { | ||
748 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
749 | struct Scsi_Host *shost = | ||
750 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
751 | unsigned long flags; | ||
752 | |||
753 | spin_lock_irqsave(&card->lock, flags); | ||
754 | ++tgt->dont_block; | ||
755 | spin_unlock_irqrestore(&card->lock, flags); | ||
756 | |||
757 | scsi_unblock_requests(shost); | ||
758 | } | ||
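
The interplay of tgt->dont_block, tgt->blocked, and lu->blocked above is easiest to see in isolation. Below is a minimal userspace model of that counting scheme, based only on the comments and code shown here; the model_* names and the two helper functions are made up for illustration, and the card->lock serialization is omitted because a single thread drives the model.

#include <stdbool.h>
#include <stdio.h>

struct model_tgt { int dont_block; int blocked; };
struct model_lu  { struct model_tgt *tgt; bool blocked; bool stale_generation; };

static void conditionally_block(struct model_lu *lu)
{
	if (!lu->tgt->dont_block && !lu->blocked && lu->stale_generation) {
		lu->blocked = true;
		if (++lu->tgt->blocked == 1)
			printf("scsi_block_requests()\n");   /* first LU blocks the host */
	}
}

static void conditionally_unblock(struct model_lu *lu)
{
	if (lu->blocked && !lu->stale_generation) {
		lu->blocked = false;
		if (--lu->tgt->blocked == 0)
			printf("scsi_unblock_requests()\n"); /* last LU unblocks it */
	}
}

int main(void)
{
	struct model_tgt tgt = { .dont_block = 0 };      /* login/INQUIRY finished */
	struct model_lu lu0 = { .tgt = &tgt }, lu1 = { .tgt = &tgt };

	lu0.stale_generation = lu1.stale_generation = true;   /* bus reset */
	conditionally_block(&lu0);                       /* blocks the host */
	conditionally_block(&lu1);                       /* host already blocked */

	lu0.stale_generation = false;                    /* LU0 reconnected */
	conditionally_unblock(&lu0);
	lu1.stale_generation = false;                    /* LU1 reconnected */
	conditionally_unblock(&lu1);                     /* unblocks the host */
	return 0;
}

In the driver itself, sbp2_conditionally_block() additionally holds card->lock so that the generation comparison and scsi_block_requests() happen atomically with respect to sbp2_conditionally_unblock().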
759 | |||
760 | static int sbp2_lun2int(u16 lun) | ||
761 | { | ||
762 | struct scsi_lun eight_bytes_lun; | ||
763 | |||
764 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
765 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
766 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
767 | |||
768 | return scsilun_to_int(&eight_bytes_lun); | ||
616 | } | 769 | } |
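
sbp2_lun2int() detours a 16-bit SBP-2 LUN through the 8-byte SCSI LUN representation before handing it to the midlayer. The following standalone sketch of that round trip assumes scsilun_to_int() packs byte pairs the way the SCSI midlayer does; scsilun_to_int_model() and lun2int_model() are reimplementations for illustration only, not driver code.

#include <stdio.h>
#include <string.h>

/* illustrative stand-in for the midlayer's scsilun_to_int() */
static unsigned int scsilun_to_int_model(const unsigned char lun8[8])
{
	unsigned int lun = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lun); i += 2)
		lun |= ((lun8[i] << 8) | lun8[i + 1]) << (i * 8);
	return lun;
}

static unsigned int lun2int_model(unsigned short lun)
{
	unsigned char eight_bytes_lun[8];

	memset(eight_bytes_lun, 0, sizeof(eight_bytes_lun));
	eight_bytes_lun[0] = (lun >> 8) & 0xff;
	eight_bytes_lun[1] = lun & 0xff;
	return scsilun_to_int_model(eight_bytes_lun);
}

int main(void)
{
	/* for the flat 16-bit LUNs found in SBP-2 unit directories the
	 * conversion is effectively an identity */
	printf("%u %u\n", lun2int_model(0), lun2int_model(5));   /* "0 5" */
	return 0;
}

Routing the value through struct scsi_lun keeps the driver in step with whatever LUN encoding the midlayer expects, which matters for scsi_device_lookup() in sbp2_release_target() and __scsi_add_device() in sbp2_login().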
617 | 770 | ||
618 | static void sbp2_release_target(struct kref *kref) | 771 | static void sbp2_release_target(struct kref *kref) |
@@ -621,26 +774,31 @@ static void sbp2_release_target(struct kref *kref) | |||
621 | struct sbp2_logical_unit *lu, *next; | 774 | struct sbp2_logical_unit *lu, *next; |
622 | struct Scsi_Host *shost = | 775 | struct Scsi_Host *shost = |
623 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 776 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
777 | struct scsi_device *sdev; | ||
624 | struct fw_device *device = fw_device(tgt->unit->device.parent); | 778 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
625 | 779 | ||
626 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | 780 | /* prevent deadlocks */ |
627 | if (lu->sdev) | 781 | sbp2_unblock(tgt); |
628 | scsi_remove_device(lu->sdev); | ||
629 | 782 | ||
630 | if (!fw_device_is_shutdown(device)) | 783 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { |
631 | sbp2_send_management_orb(lu, tgt->node_id, | 784 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); |
632 | lu->generation, SBP2_LOGOUT_REQUEST, | 785 | if (sdev) { |
633 | lu->login_id, NULL); | 786 | scsi_remove_device(sdev); |
787 | scsi_device_put(sdev); | ||
788 | } | ||
789 | sbp2_send_management_orb(lu, tgt->node_id, lu->generation, | ||
790 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
634 | 791 | ||
635 | fw_core_remove_address_handler(&lu->address_handler); | 792 | fw_core_remove_address_handler(&lu->address_handler); |
636 | list_del(&lu->link); | 793 | list_del(&lu->link); |
637 | kfree(lu); | 794 | kfree(lu); |
638 | } | 795 | } |
639 | scsi_remove_host(shost); | 796 | scsi_remove_host(shost); |
640 | fw_notify("released %s\n", tgt->unit->device.bus_id); | 797 | fw_notify("released %s\n", tgt->bus_id); |
641 | 798 | ||
642 | put_device(&tgt->unit->device); | 799 | put_device(&tgt->unit->device); |
643 | scsi_host_put(shost); | 800 | scsi_host_put(shost); |
801 | fw_device_put(device); | ||
644 | } | 802 | } |
645 | 803 | ||
646 | static struct workqueue_struct *sbp2_wq; | 804 | static struct workqueue_struct *sbp2_wq; |
@@ -660,39 +818,72 @@ static void sbp2_target_put(struct sbp2_target *tgt) | |||
660 | kref_put(&tgt->kref, sbp2_release_target); | 818 | kref_put(&tgt->kref, sbp2_release_target); |
661 | } | 819 | } |
662 | 820 | ||
821 | static void | ||
822 | complete_set_busy_timeout(struct fw_card *card, int rcode, | ||
823 | void *payload, size_t length, void *done) | ||
824 | { | ||
825 | complete(done); | ||
826 | } | ||
827 | |||
828 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | ||
829 | { | ||
830 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
831 | DECLARE_COMPLETION_ONSTACK(done); | ||
832 | struct fw_transaction t; | ||
833 | static __be32 busy_timeout; | ||
834 | |||
835 | /* FIXME: we should try to set dual-phase cycle_limit too */ | ||
836 | busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT); | ||
837 | |||
838 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
839 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
840 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &busy_timeout, | ||
841 | sizeof(busy_timeout), complete_set_busy_timeout, &done); | ||
842 | wait_for_completion(&done); | ||
843 | } | ||
844 | |||
663 | static void sbp2_reconnect(struct work_struct *work); | 845 | static void sbp2_reconnect(struct work_struct *work); |
664 | 846 | ||
665 | static void sbp2_login(struct work_struct *work) | 847 | static void sbp2_login(struct work_struct *work) |
666 | { | 848 | { |
667 | struct sbp2_logical_unit *lu = | 849 | struct sbp2_logical_unit *lu = |
668 | container_of(work, struct sbp2_logical_unit, work.work); | 850 | container_of(work, struct sbp2_logical_unit, work.work); |
669 | struct Scsi_Host *shost = | 851 | struct sbp2_target *tgt = lu->tgt; |
670 | container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); | 852 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
853 | struct Scsi_Host *shost; | ||
671 | struct scsi_device *sdev; | 854 | struct scsi_device *sdev; |
672 | struct scsi_lun eight_bytes_lun; | ||
673 | struct fw_unit *unit = lu->tgt->unit; | ||
674 | struct fw_device *device = fw_device(unit->device.parent); | ||
675 | struct sbp2_login_response response; | 855 | struct sbp2_login_response response; |
676 | int generation, node_id, local_node_id; | 856 | int generation, node_id, local_node_id; |
677 | 857 | ||
858 | if (fw_device_is_shutdown(device)) | ||
859 | goto out; | ||
860 | |||
678 | generation = device->generation; | 861 | generation = device->generation; |
679 | smp_rmb(); /* node_id must not be older than generation */ | 862 | smp_rmb(); /* node_id must not be older than generation */ |
680 | node_id = device->node_id; | 863 | node_id = device->node_id; |
681 | local_node_id = device->card->node_id; | 864 | local_node_id = device->card->node_id; |
682 | 865 | ||
866 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
867 | if (lu->has_sdev) | ||
868 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
869 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
870 | |||
683 | if (sbp2_send_management_orb(lu, node_id, generation, | 871 | if (sbp2_send_management_orb(lu, node_id, generation, |
684 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | 872 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { |
685 | if (lu->retries++ < 5) | 873 | if (lu->retries++ < 5) { |
686 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 874 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
687 | else | 875 | } else { |
688 | fw_error("failed to login to %s LUN %04x\n", | 876 | fw_error("%s: failed to login to LUN %04x\n", |
689 | unit->device.bus_id, lu->lun); | 877 | tgt->bus_id, lu->lun); |
878 | /* Let any waiting I/O fail from now on. */ | ||
879 | sbp2_unblock(lu->tgt); | ||
880 | } | ||
690 | goto out; | 881 | goto out; |
691 | } | 882 | } |
692 | 883 | ||
693 | lu->generation = generation; | 884 | tgt->node_id = node_id; |
694 | lu->tgt->node_id = node_id; | 885 | tgt->address_high = local_node_id << 16; |
695 | lu->tgt->address_high = local_node_id << 16; | 886 | sbp2_set_generation(lu, generation); |
696 | 887 | ||
697 | /* Get command block agent offset and login id. */ | 888 | /* Get command block agent offset and login id. */ |
698 | lu->command_block_agent_address = | 889 | lu->command_block_agent_address = |
@@ -700,37 +891,67 @@ static void sbp2_login(struct work_struct *work) | |||
700 | response.command_block_agent.low; | 891 | response.command_block_agent.low; |
701 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); | 892 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); |
702 | 893 | ||
703 | fw_notify("logged in to %s LUN %04x (%d retries)\n", | 894 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", |
704 | unit->device.bus_id, lu->lun, lu->retries); | 895 | tgt->bus_id, lu->lun, lu->retries); |
705 | 896 | ||
706 | #if 0 | 897 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ |
707 | /* FIXME: The linux1394 sbp2 does this last step. */ | 898 | sbp2_set_busy_timeout(lu); |
708 | sbp2_set_busy_timeout(scsi_id); | ||
709 | #endif | ||
710 | 899 | ||
711 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 900 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); |
712 | sbp2_agent_reset(lu); | 901 | sbp2_agent_reset(lu); |
713 | 902 | ||
714 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | 903 | /* This was a re-login. */ |
715 | eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; | 904 | if (lu->has_sdev) { |
716 | eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; | 905 | sbp2_cancel_orbs(lu); |
906 | sbp2_conditionally_unblock(lu); | ||
907 | goto out; | ||
908 | } | ||
717 | 909 | ||
718 | sdev = __scsi_add_device(shost, 0, 0, | 910 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) |
719 | scsilun_to_int(&eight_bytes_lun), lu); | 911 | ssleep(SBP2_INQUIRY_DELAY); |
720 | if (IS_ERR(sdev)) { | 912 | |
721 | sbp2_send_management_orb(lu, node_id, generation, | 913 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
722 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | 914 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); |
723 | /* | 915 | /* |
724 | * Set this back to sbp2_login so we fall back and | 916 | * FIXME: We are unable to perform reconnects while in sbp2_login(). |
725 | * retry login on bus reset. | 917 | * Therefore __scsi_add_device() will get into trouble if a bus reset |
726 | */ | 918 | * happens in parallel. It will either fail or leave us with an |
727 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 919 | * unusable sdev. As a workaround we check for this and retry the |
728 | } else { | 920 | * whole login and SCSI probing. |
729 | lu->sdev = sdev; | 921 | */ |
922 | |||
923 | /* Reported error during __scsi_add_device() */ | ||
924 | if (IS_ERR(sdev)) | ||
925 | goto out_logout_login; | ||
926 | |||
927 | /* Unreported error during __scsi_add_device() */ | ||
928 | smp_rmb(); /* get current card generation */ | ||
929 | if (generation != device->card->generation) { | ||
930 | scsi_remove_device(sdev); | ||
730 | scsi_device_put(sdev); | 931 | scsi_device_put(sdev); |
932 | goto out_logout_login; | ||
731 | } | 933 | } |
934 | |||
935 | /* No error during __scsi_add_device() */ | ||
936 | lu->has_sdev = true; | ||
937 | scsi_device_put(sdev); | ||
938 | sbp2_allow_block(lu); | ||
939 | goto out; | ||
940 | |||
941 | out_logout_login: | ||
942 | smp_rmb(); /* generation may have changed */ | ||
943 | generation = device->generation; | ||
944 | smp_rmb(); /* node_id must not be older than generation */ | ||
945 | |||
946 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
947 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
948 | /* | ||
949 | * If a bus reset happened, sbp2_update will have requeued | ||
950 | * lu->work already. Reset the work from reconnect to login. | ||
951 | */ | ||
952 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
732 | out: | 953 | out: |
733 | sbp2_target_put(lu->tgt); | 954 | sbp2_target_put(tgt); |
734 | } | 955 | } |
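
The workaround described in the FIXME above boils down to: sample the generation, do the slow work, re-check the generation, and redo everything if a bus reset raced with it. A toy userspace model of that retry loop follows; all names (card_generation, slow_scsi_probe) are hypothetical, and the memory barriers the driver needs are irrelevant in this single-threaded sketch.

#include <stdio.h>
#include <stdbool.h>

static int card_generation = 1;

/* pretend the first probe attempt races with a bus reset */
static void slow_scsi_probe(void)
{
	static bool raced;

	if (!raced) {
		raced = true;
		card_generation++;
	}
}

int main(void)
{
	int generation, attempts = 0;

	do {
		generation = card_generation;   /* sampled before the work */
		slow_scsi_probe();              /* __scsi_add_device() stand-in */
		attempts++;
	} while (generation != card_generation && attempts < 5);

	printf("settled after %d attempt(s)\n", attempts);   /* prints 2 */
	return 0;
}

In the driver the failure path additionally logs out and re-queues lu->work as sbp2_login, since sbp2_update() may already have re-queued it as sbp2_reconnect.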
735 | 956 | ||
736 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 957 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
@@ -751,10 +972,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
751 | return -ENOMEM; | 972 | return -ENOMEM; |
752 | } | 973 | } |
753 | 974 | ||
754 | lu->tgt = tgt; | 975 | lu->tgt = tgt; |
755 | lu->sdev = NULL; | 976 | lu->lun = lun_entry & 0xffff; |
756 | lu->lun = lun_entry & 0xffff; | 977 | lu->retries = 0; |
757 | lu->retries = 0; | 978 | lu->has_sdev = false; |
979 | lu->blocked = false; | ||
980 | ++tgt->dont_block; | ||
758 | INIT_LIST_HEAD(&lu->orb_list); | 981 | INIT_LIST_HEAD(&lu->orb_list); |
759 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 982 | INIT_DELAYED_WORK(&lu->work, sbp2_login); |
760 | 983 | ||
@@ -813,7 +1036,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | |||
813 | if (timeout > tgt->mgt_orb_timeout) | 1036 | if (timeout > tgt->mgt_orb_timeout) |
814 | fw_notify("%s: config rom contains %ds " | 1037 | fw_notify("%s: config rom contains %ds " |
815 | "management ORB timeout, limiting " | 1038 | "management ORB timeout, limiting " |
816 | "to %ds\n", tgt->unit->device.bus_id, | 1039 | "to %ds\n", tgt->bus_id, |
817 | timeout / 1000, | 1040 | timeout / 1000, |
818 | tgt->mgt_orb_timeout / 1000); | 1041 | tgt->mgt_orb_timeout / 1000); |
819 | break; | 1042 | break; |
@@ -836,12 +1059,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
836 | u32 firmware_revision) | 1059 | u32 firmware_revision) |
837 | { | 1060 | { |
838 | int i; | 1061 | int i; |
839 | unsigned w = sbp2_param_workarounds; | 1062 | unsigned int w = sbp2_param_workarounds; |
840 | 1063 | ||
841 | if (w) | 1064 | if (w) |
842 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1065 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " |
843 | "if you need the workarounds parameter for %s\n", | 1066 | "if you need the workarounds parameter for %s\n", |
844 | tgt->unit->device.bus_id); | 1067 | tgt->bus_id); |
845 | 1068 | ||
846 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1069 | if (w & SBP2_WORKAROUND_OVERRIDE) |
847 | goto out; | 1070 | goto out; |
@@ -863,8 +1086,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
863 | if (w) | 1086 | if (w) |
864 | fw_notify("Workarounds for %s: 0x%x " | 1087 | fw_notify("Workarounds for %s: 0x%x " |
865 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1088 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
866 | tgt->unit->device.bus_id, | 1089 | tgt->bus_id, w, firmware_revision, model); |
867 | w, firmware_revision, model); | ||
868 | tgt->workarounds = w; | 1090 | tgt->workarounds = w; |
869 | } | 1091 | } |
870 | 1092 | ||
@@ -888,6 +1110,7 @@ static int sbp2_probe(struct device *dev) | |||
888 | tgt->unit = unit; | 1110 | tgt->unit = unit; |
889 | kref_init(&tgt->kref); | 1111 | kref_init(&tgt->kref); |
890 | INIT_LIST_HEAD(&tgt->lu_list); | 1112 | INIT_LIST_HEAD(&tgt->lu_list); |
1113 | tgt->bus_id = unit->device.bus_id; | ||
891 | 1114 | ||
892 | if (fw_device_enable_phys_dma(device) < 0) | 1115 | if (fw_device_enable_phys_dma(device) < 0) |
893 | goto fail_shost_put; | 1116 | goto fail_shost_put; |
@@ -895,6 +1118,8 @@ static int sbp2_probe(struct device *dev) | |||
895 | if (scsi_add_host(shost, &unit->device) < 0) | 1118 | if (scsi_add_host(shost, &unit->device) < 0) |
896 | goto fail_shost_put; | 1119 | goto fail_shost_put; |
897 | 1120 | ||
1121 | fw_device_get(device); | ||
1122 | |||
898 | /* Initialize to values that won't match anything in our table. */ | 1123 | /* Initialize to values that won't match anything in our table. */ |
899 | firmware_revision = 0xff000000; | 1124 | firmware_revision = 0xff000000; |
900 | model = 0xff000000; | 1125 | model = 0xff000000; |
@@ -938,10 +1163,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
938 | { | 1163 | { |
939 | struct sbp2_logical_unit *lu = | 1164 | struct sbp2_logical_unit *lu = |
940 | container_of(work, struct sbp2_logical_unit, work.work); | 1165 | container_of(work, struct sbp2_logical_unit, work.work); |
941 | struct fw_unit *unit = lu->tgt->unit; | 1166 | struct sbp2_target *tgt = lu->tgt; |
942 | struct fw_device *device = fw_device(unit->device.parent); | 1167 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
943 | int generation, node_id, local_node_id; | 1168 | int generation, node_id, local_node_id; |
944 | 1169 | ||
1170 | if (fw_device_is_shutdown(device)) | ||
1171 | goto out; | ||
1172 | |||
945 | generation = device->generation; | 1173 | generation = device->generation; |
946 | smp_rmb(); /* node_id must not be older than generation */ | 1174 | smp_rmb(); /* node_id must not be older than generation */ |
947 | node_id = device->node_id; | 1175 | node_id = device->node_id; |
@@ -950,10 +1178,17 @@ static void sbp2_reconnect(struct work_struct *work) | |||
950 | if (sbp2_send_management_orb(lu, node_id, generation, | 1178 | if (sbp2_send_management_orb(lu, node_id, generation, |
951 | SBP2_RECONNECT_REQUEST, | 1179 | SBP2_RECONNECT_REQUEST, |
952 | lu->login_id, NULL) < 0) { | 1180 | lu->login_id, NULL) < 0) { |
953 | if (lu->retries++ >= 5) { | 1181 | /* |
954 | fw_error("failed to reconnect to %s\n", | 1182 | * If reconnect was impossible even though we are in the |
955 | unit->device.bus_id); | 1183 | * current generation, fall back and try to log in again. |
956 | /* Fall back and try to log in again. */ | 1184 | * |
1185 | * We could check for "Function rejected" status, but | ||
1186 | * looking at the bus generation is simpler and more general. | ||
1187 | */ | ||
1188 | smp_rmb(); /* get current card generation */ | ||
1189 | if (generation == device->card->generation || | ||
1190 | lu->retries++ >= 5) { | ||
1191 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
957 | lu->retries = 0; | 1192 | lu->retries = 0; |
958 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 1193 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
959 | } | 1194 | } |
@@ -961,17 +1196,18 @@ static void sbp2_reconnect(struct work_struct *work) | |||
961 | goto out; | 1196 | goto out; |
962 | } | 1197 | } |
963 | 1198 | ||
964 | lu->generation = generation; | 1199 | tgt->node_id = node_id; |
965 | lu->tgt->node_id = node_id; | 1200 | tgt->address_high = local_node_id << 16; |
966 | lu->tgt->address_high = local_node_id << 16; | 1201 | sbp2_set_generation(lu, generation); |
967 | 1202 | ||
968 | fw_notify("reconnected to %s LUN %04x (%d retries)\n", | 1203 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", |
969 | unit->device.bus_id, lu->lun, lu->retries); | 1204 | tgt->bus_id, lu->lun, lu->retries); |
970 | 1205 | ||
971 | sbp2_agent_reset(lu); | 1206 | sbp2_agent_reset(lu); |
972 | sbp2_cancel_orbs(lu); | 1207 | sbp2_cancel_orbs(lu); |
1208 | sbp2_conditionally_unblock(lu); | ||
973 | out: | 1209 | out: |
974 | sbp2_target_put(lu->tgt); | 1210 | sbp2_target_put(tgt); |
975 | } | 1211 | } |
976 | 1212 | ||
977 | static void sbp2_update(struct fw_unit *unit) | 1213 | static void sbp2_update(struct fw_unit *unit) |
@@ -986,6 +1222,7 @@ static void sbp2_update(struct fw_unit *unit) | |||
986 | * Iteration over tgt->lu_list is therefore safe here. | 1222 | * Iteration over tgt->lu_list is therefore safe here. |
987 | */ | 1223 | */ |
988 | list_for_each_entry(lu, &tgt->lu_list, link) { | 1224 | list_for_each_entry(lu, &tgt->lu_list, link) { |
1225 | sbp2_conditionally_block(lu); | ||
989 | lu->retries = 0; | 1226 | lu->retries = 0; |
990 | sbp2_queue_work(lu, 0); | 1227 | sbp2_queue_work(lu, 0); |
991 | } | 1228 | } |
@@ -1063,7 +1300,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1063 | 1300 | ||
1064 | if (status != NULL) { | 1301 | if (status != NULL) { |
1065 | if (STATUS_GET_DEAD(*status)) | 1302 | if (STATUS_GET_DEAD(*status)) |
1066 | sbp2_agent_reset(orb->lu); | 1303 | sbp2_agent_reset_no_wait(orb->lu); |
1067 | 1304 | ||
1068 | switch (STATUS_GET_RESPONSE(*status)) { | 1305 | switch (STATUS_GET_RESPONSE(*status)) { |
1069 | case SBP2_STATUS_REQUEST_COMPLETE: | 1306 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1089,6 +1326,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1089 | * or when sending the write (less likely). | 1326 | * or when sending the write (less likely). |
1090 | */ | 1327 | */ |
1091 | result = DID_BUS_BUSY << 16; | 1328 | result = DID_BUS_BUSY << 16; |
1329 | sbp2_conditionally_block(orb->lu); | ||
1092 | } | 1330 | } |
1093 | 1331 | ||
1094 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1332 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1197,7 +1435,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1197 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1435 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1198 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 1436 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
1199 | struct sbp2_command_orb *orb; | 1437 | struct sbp2_command_orb *orb; |
1200 | unsigned max_payload; | 1438 | unsigned int max_payload; |
1201 | int retval = SCSI_MLQUEUE_HOST_BUSY; | 1439 | int retval = SCSI_MLQUEUE_HOST_BUSY; |
1202 | 1440 | ||
1203 | /* | 1441 | /* |
@@ -1275,6 +1513,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | |||
1275 | { | 1513 | { |
1276 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1514 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1277 | 1515 | ||
1516 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1517 | if (!lu) | ||
1518 | return -ENOSYS; | ||
1519 | |||
1278 | sdev->allow_restart = 1; | 1520 | sdev->allow_restart = 1; |
1279 | 1521 | ||
1280 | /* | 1522 | /* |
@@ -1319,7 +1561,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1319 | { | 1561 | { |
1320 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1562 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1321 | 1563 | ||
1322 | fw_notify("sbp2_scsi_abort\n"); | 1564 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); |
1323 | sbp2_agent_reset(lu); | 1565 | sbp2_agent_reset(lu); |
1324 | sbp2_cancel_orbs(lu); | 1566 | sbp2_cancel_orbs(lu); |
1325 | 1567 | ||
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9aa..d2c7a3d7e1cb 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <asm/bug.h> | ||
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
25 | #include "fw-transaction.h" | 26 | #include "fw-transaction.h" |
26 | #include "fw-topology.h" | 27 | #include "fw-topology.h" |
@@ -383,6 +384,7 @@ void fw_destroy_nodes(struct fw_card *card) | |||
383 | card->color++; | 384 | card->color++; |
384 | if (card->local_node != NULL) | 385 | if (card->local_node != NULL) |
385 | for_each_fw_node(card, card->local_node, report_lost_node); | 386 | for_each_fw_node(card, card->local_node, report_lost_node); |
387 | card->local_node = NULL; | ||
386 | spin_unlock_irqrestore(&card->lock, flags); | 388 | spin_unlock_irqrestore(&card->lock, flags); |
387 | } | 389 | } |
388 | 390 | ||
@@ -423,8 +425,8 @@ update_tree(struct fw_card *card, struct fw_node *root) | |||
423 | node1 = fw_node(list1.next); | 425 | node1 = fw_node(list1.next); |
424 | 426 | ||
425 | while (&node0->link != &list0) { | 427 | while (&node0->link != &list0) { |
428 | WARN_ON(node0->port_count != node1->port_count); | ||
426 | 429 | ||
427 | /* assert(node0->port_count == node1->port_count); */ | ||
428 | if (node0->link_on && !node1->link_on) | 430 | if (node0->link_on && !node1->link_on) |
429 | event = FW_NODE_LINK_OFF; | 431 | event = FW_NODE_LINK_OFF; |
430 | else if (!node0->link_on && node1->link_on) | 432 | else if (!node0->link_on && node1->link_on) |
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 7fcc59dedf08..99529e59a0b1 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c | |||
@@ -751,7 +751,7 @@ handle_topology_map(struct fw_card *card, struct fw_request *request, | |||
751 | void *payload, size_t length, void *callback_data) | 751 | void *payload, size_t length, void *callback_data) |
752 | { | 752 | { |
753 | int i, start, end; | 753 | int i, start, end; |
754 | u32 *map; | 754 | __be32 *map; |
755 | 755 | ||
756 | if (!TCODE_IS_READ_REQUEST(tcode)) { | 756 | if (!TCODE_IS_READ_REQUEST(tcode)) { |
757 | fw_send_response(card, request, RCODE_TYPE_ERROR); | 757 | fw_send_response(card, request, RCODE_TYPE_ERROR); |
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b57408..a43bb22912f9 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 31 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
31 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 32 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
@@ -85,12 +86,12 @@ | |||
85 | static inline void | 86 | static inline void |
86 | fw_memcpy_from_be32(void *_dst, void *_src, size_t size) | 87 | fw_memcpy_from_be32(void *_dst, void *_src, size_t size) |
87 | { | 88 | { |
88 | u32 *dst = _dst; | 89 | u32 *dst = _dst; |
89 | u32 *src = _src; | 90 | __be32 *src = _src; |
90 | int i; | 91 | int i; |
91 | 92 | ||
92 | for (i = 0; i < size / 4; i++) | 93 | for (i = 0; i < size / 4; i++) |
93 | dst[i] = cpu_to_be32(src[i]); | 94 | dst[i] = be32_to_cpu(src[i]); |
94 | } | 95 | } |
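
The fw_memcpy_from_be32() change is about conversion direction and sparse annotations rather than generated code: on little-endian machines cpu_to_be32() and be32_to_cpu() expand to the same byte swap (and both are no-ops on big-endian ones), but copying bus quadlets into host order calls for be32_to_cpu() on a __be32 source. A tiny userspace illustration, using ntohl() as a stand-in for be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* a quadlet as it arrives from the bus: big-endian byte order */
	uint32_t wire = htonl(0x0014abcd);

	/* copying it into host order is a big-endian-to-CPU conversion */
	printf("0x%08x\n", (unsigned int)ntohl(wire));   /* prints 0x0014abcd */
	return 0;
}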
95 | 96 | ||
96 | static inline void | 97 | static inline void |
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; | |||
219 | struct fw_card { | 220 | struct fw_card { |
220 | const struct fw_card_driver *driver; | 221 | const struct fw_card_driver *driver; |
221 | struct device *device; | 222 | struct device *device; |
223 | atomic_t device_count; | ||
222 | struct kref kref; | 224 | struct kref kref; |
223 | 225 | ||
224 | int node_id; | 226 | int node_id; |