author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 14:24:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 14:24:29 -0400
commit    eddeb0e2d863e3941d8768e70cb50c6120e61fa0 (patch)
tree      e30f2b6654e6eb397f9d7fb23a08d717cc19b12d
parent    855d854a33fca71cf68bad258bd5e66e5b265d10 (diff)
parent    db8be076cad4b843aa743ef462c75022cddd9c63 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (43 commits)
  firewire: cleanups
  firewire: fix synchronization of gap counts
  firewire: wait until PHY configuration packet was transmitted (fix bus reset loop)
  firewire: remove unused struct member
  firewire: use bitwise and to get reg in handle_registers
  firewire: replace more hex values with defined csr constants
  firewire: reread config ROM when device reset the bus
  firewire: replace static ROM cache by allocated cache
  firewire: fw-ohci: work around generation bug in TI controllers (fix AV/C and more)
  firewire: fw-ohci: extend logging of bus generations and node ID
  firewire: fw-ohci: conditionally log busReset interrupts
  firewire: fw-ohci: don't append to AT context when it's not active
  firewire: fw-ohci: log regAccessFail events
  firewire: fw-ohci: make sure HCControl register LPS bit is set
  firewire: fw-ohci: missing PPC PMac feature calls in failure path
  firewire: fw-ohci: untangle a mixed unsigned/signed expression
  firewire: debug interrupt events
  firewire: fw-ohci: catch self_id_count == 0
  firewire: fw-ohci: add self ID error check
  firewire: fw-ohci: refactor probe, remove, suspend, resume
  ...
-rw-r--r--  Documentation/debugging-via-ohci1394.txt   16
-rw-r--r--  drivers/firewire/Kconfig                    5
-rw-r--r--  drivers/firewire/fw-card.c                  50
-rw-r--r--  drivers/firewire/fw-cdev.c                  13
-rw-r--r--  drivers/firewire/fw-device.c                263
-rw-r--r--  drivers/firewire/fw-device.h                38
-rw-r--r--  drivers/firewire/fw-iso.c                   5
-rw-r--r--  drivers/firewire/fw-ohci.c                  402
-rw-r--r--  drivers/firewire/fw-ohci.h                  2
-rw-r--r--  drivers/firewire/fw-sbp2.c                  150
-rw-r--r--  drivers/firewire/fw-topology.c              10
-rw-r--r--  drivers/firewire/fw-topology.h              11
-rw-r--r--  drivers/firewire/fw-transaction.c           75
-rw-r--r--  drivers/firewire/fw-transaction.h           10
-rw-r--r--  drivers/ieee1394/csr.c                      6
-rw-r--r--  drivers/ieee1394/dv1394.c                   4
-rw-r--r--  drivers/ieee1394/highlevel.c                6
-rw-r--r--  drivers/ieee1394/ieee1394_core.c            4
-rw-r--r--  drivers/ieee1394/nodemgr.c                  6
-rw-r--r--  drivers/ieee1394/ohci1394.c                 229
-rw-r--r--  drivers/ieee1394/pcilynx.c                  15
-rw-r--r--  drivers/ieee1394/raw1394.c                  2
-rw-r--r--  drivers/ieee1394/sbp2.c                     11
-rw-r--r--  drivers/ieee1394/video1394.c                4
-rw-r--r--  lib/Kconfig.debug                           13
25 files changed, 877 insertions, 473 deletions
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index c360d4e91b48..59a91e5c6909 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -41,15 +41,19 @@ to a working state and enables physical DMA by default for all remote nodes.
41This can be turned off by ohci1394's module parameter phys_dma=0. 41This can be turned off by ohci1394's module parameter phys_dma=0.
42 42
43The alternative firewire-ohci driver in drivers/firewire uses filtered physical 43The alternative firewire-ohci driver in drivers/firewire uses filtered physical
44DMA, hence is not yet suitable for remote debugging. 44DMA by default, which is more secure but not suitable for remote debugging.
45Compile the driver with CONFIG_FIREWIRE_OHCI_REMOTE_DMA (Kernel hacking menu:
46Remote debugging over FireWire with firewire-ohci) to get unfiltered physical
47DMA.
45 48
46Because ohci1394 depends on the PCI enumeration to be completed, an 49Because ohci1394 and firewire-ohci depend on the PCI enumeration to be
47initialization routine which runs pretty early (long before console_init() 50completed, an initialization routine which runs pretty early has been
48which makes the printk buffer appear on the console can be called) was written. 51implemented for x86. This routine runs long before console_init() can be
52called, i.e. before the printk buffer appears on the console.
49 53
50To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu: 54To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu:
51Provide code for enabling DMA over FireWire early on boot) and pass the 55Remote debugging over FireWire early on boot) and pass the parameter
52parameter "ohci1394_dma=early" to the recompiled kernel on boot. 56"ohci1394_dma=early" to the recompiled kernel on boot.
53 57
54Tools 58Tools
55----- 59-----
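
[Editor's note: an early boot parameter such as "ohci1394_dma=early" is normally wired up with the kernel's __setup() mechanism, which is parsed long before ordinary module parameters and before console_init(). The following is only a minimal sketch of that wiring; the function and variable names are illustrative, not the actual implementation in this merge.]

    /* Illustrative sketch of parsing a parameter like "ohci1394_dma=early". */
    #include <linux/init.h>
    #include <linux/string.h>

    static int ohci1394_dma_early __initdata;

    static int __init setup_ohci1394_dma(char *opt)
    {
            if (!strcmp(opt, "early"))
                    ohci1394_dma_early = 1;     /* request early physical DMA setup */
            return 1;                           /* option handled */
    }

    /* Runs during early command-line parsing, well before console_init(). */
    __setup("ohci1394_dma=", setup_ohci1394_dma);
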
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 25bdc2dd9ce1..fb4d391810b6 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -54,6 +54,11 @@ config FIREWIRE_OHCI
54 directive, use "install modulename /bin/true" for the modules to be 54 directive, use "install modulename /bin/true" for the modules to be
55 blacklisted. 55 blacklisted.
56 56
57config FIREWIRE_OHCI_DEBUG
58 bool
59 depends on FIREWIRE_OHCI
60 default y
61
57config FIREWIRE_SBP2 62config FIREWIRE_SBP2
58 tristate "Support for storage devices (SBP-2 protocol driver)" 63 tristate "Support for storage devices (SBP-2 protocol driver)"
59 depends on FIREWIRE && SCSI 64 depends on FIREWIRE && SCSI
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a03462750b95..5b4c0d9f5173 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -167,7 +167,6 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
167 167
168 return 0; 168 return 0;
169} 169}
170EXPORT_SYMBOL(fw_core_add_descriptor);
171 170
172void 171void
173fw_core_remove_descriptor(struct fw_descriptor *desc) 172fw_core_remove_descriptor(struct fw_descriptor *desc)
@@ -182,7 +181,6 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
182 181
183 mutex_unlock(&card_mutex); 182 mutex_unlock(&card_mutex);
184} 183}
185EXPORT_SYMBOL(fw_core_remove_descriptor);
186 184
187static const char gap_count_table[] = { 185static const char gap_count_table[] = {
188 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 186 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
@@ -220,7 +218,7 @@ fw_card_bm_work(struct work_struct *work)
220 struct bm_data bmd; 218 struct bm_data bmd;
221 unsigned long flags; 219 unsigned long flags;
222 int root_id, new_root_id, irm_id, gap_count, generation, grace; 220 int root_id, new_root_id, irm_id, gap_count, generation, grace;
223 int do_reset = 0; 221 bool do_reset = false;
224 222
225 spin_lock_irqsave(&card->lock, flags); 223 spin_lock_irqsave(&card->lock, flags);
226 local_node = card->local_node; 224 local_node = card->local_node;
@@ -331,7 +329,7 @@ fw_card_bm_work(struct work_struct *work)
331 */ 329 */
332 spin_unlock_irqrestore(&card->lock, flags); 330 spin_unlock_irqrestore(&card->lock, flags);
333 goto out; 331 goto out;
334 } else if (root_device->config_rom[2] & BIB_CMC) { 332 } else if (root_device->cmc) {
335 /* 333 /*
336 * FIXME: I suppose we should set the cmstr bit in the 334 * FIXME: I suppose we should set the cmstr bit in the
337 * STATE_CLEAR register of this node, as described in 335 * STATE_CLEAR register of this node, as described in
@@ -360,14 +358,14 @@ fw_card_bm_work(struct work_struct *work)
360 gap_count = 63; 358 gap_count = 63;
361 359
362 /* 360 /*
363 * Finally, figure out if we should do a reset or not. If we've 361 * Finally, figure out if we should do a reset or not. If we have
364 * done less that 5 resets with the same physical topology and we 362 * done less than 5 resets with the same physical topology and we
365 * have either a new root or a new gap count setting, let's do it. 363 * have either a new root or a new gap count setting, let's do it.
366 */ 364 */
367 365
368 if (card->bm_retries++ < 5 && 366 if (card->bm_retries++ < 5 &&
369 (card->gap_count != gap_count || new_root_id != root_id)) 367 (card->gap_count != gap_count || new_root_id != root_id))
370 do_reset = 1; 368 do_reset = true;
371 369
372 spin_unlock_irqrestore(&card->lock, flags); 370 spin_unlock_irqrestore(&card->lock, flags);
373 371
@@ -398,7 +396,6 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
398{ 396{
399 static atomic_t index = ATOMIC_INIT(-1); 397 static atomic_t index = ATOMIC_INIT(-1);
400 398
401 kref_init(&card->kref);
402 atomic_set(&card->device_count, 0); 399 atomic_set(&card->device_count, 0);
403 card->index = atomic_inc_return(&index); 400 card->index = atomic_inc_return(&index);
404 card->driver = driver; 401 card->driver = driver;
@@ -429,12 +426,6 @@ fw_card_add(struct fw_card *card,
429 card->link_speed = link_speed; 426 card->link_speed = link_speed;
430 card->guid = guid; 427 card->guid = guid;
431 428
432 /*
433 * The subsystem grabs a reference when the card is added and
434 * drops it when the driver calls fw_core_remove_card.
435 */
436 fw_card_get(card);
437
438 mutex_lock(&card_mutex); 429 mutex_lock(&card_mutex);
439 config_rom = generate_config_rom(card, &length); 430 config_rom = generate_config_rom(card, &length);
440 list_add_tail(&card->link, &card_list); 431 list_add_tail(&card->link, &card_list);
@@ -540,40 +531,9 @@ fw_core_remove_card(struct fw_card *card)
540 cancel_delayed_work_sync(&card->work); 531 cancel_delayed_work_sync(&card->work);
541 fw_flush_transactions(card); 532 fw_flush_transactions(card);
542 del_timer_sync(&card->flush_timer); 533 del_timer_sync(&card->flush_timer);
543
544 fw_card_put(card);
545} 534}
546EXPORT_SYMBOL(fw_core_remove_card); 535EXPORT_SYMBOL(fw_core_remove_card);
547 536
548struct fw_card *
549fw_card_get(struct fw_card *card)
550{
551 kref_get(&card->kref);
552
553 return card;
554}
555EXPORT_SYMBOL(fw_card_get);
556
557static void
558release_card(struct kref *kref)
559{
560 struct fw_card *card = container_of(kref, struct fw_card, kref);
561
562 kfree(card);
563}
564
565/*
566 * An assumption for fw_card_put() is that the card driver allocates
567 * the fw_card struct with kalloc and that it has been shut down
568 * before the last ref is dropped.
569 */
570void
571fw_card_put(struct fw_card *card)
572{
573 kref_put(&card->kref, release_card);
574}
575EXPORT_SYMBOL(fw_card_put);
576
577int 537int
578fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) 538fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
579{ 539{
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 46bc197a047f..4a541921a14a 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -269,21 +269,28 @@ static int ioctl_get_info(struct client *client, void *buffer)
269{ 269{
270 struct fw_cdev_get_info *get_info = buffer; 270 struct fw_cdev_get_info *get_info = buffer;
271 struct fw_cdev_event_bus_reset bus_reset; 271 struct fw_cdev_event_bus_reset bus_reset;
272 unsigned long ret = 0;
272 273
273 client->version = get_info->version; 274 client->version = get_info->version;
274 get_info->version = FW_CDEV_VERSION; 275 get_info->version = FW_CDEV_VERSION;
275 276
277 down_read(&fw_device_rwsem);
278
276 if (get_info->rom != 0) { 279 if (get_info->rom != 0) {
277 void __user *uptr = u64_to_uptr(get_info->rom); 280 void __user *uptr = u64_to_uptr(get_info->rom);
278 size_t want = get_info->rom_length; 281 size_t want = get_info->rom_length;
279 size_t have = client->device->config_rom_length * 4; 282 size_t have = client->device->config_rom_length * 4;
280 283
281 if (copy_to_user(uptr, client->device->config_rom, 284 ret = copy_to_user(uptr, client->device->config_rom,
282 min(want, have))) 285 min(want, have));
283 return -EFAULT;
284 } 286 }
285 get_info->rom_length = client->device->config_rom_length * 4; 287 get_info->rom_length = client->device->config_rom_length * 4;
286 288
289 up_read(&fw_device_rwsem);
290
291 if (ret != 0)
292 return -EFAULT;
293
287 client->bus_reset_closure = get_info->bus_reset_closure; 294 client->bus_reset_closure = get_info->bus_reset_closure;
288 if (get_info->bus_reset != 0) { 295 if (get_info->bus_reset != 0) {
289 void __user *uptr = u64_to_uptr(get_info->bus_reset); 296 void __user *uptr = u64_to_uptr(get_info->bus_reset);
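
[Editor's note: the ioctl_get_info() change above uses a lock/copy/report pattern: the user copy happens while fw_device_rwsem pins the config ROM, the copy_to_user() result is only recorded, and it is turned into -EFAULT after the lock has been dropped. A condensed sketch of that pattern follows, with hypothetical buffer names (kernel_buf, want, have) standing in for the real fields.]

    /* Hypothetical names; the lock/copy/report order is the point. */
    unsigned long ret = 0;

    down_read(&fw_device_rwsem);
    if (uptr != NULL)
            ret = copy_to_user(uptr, kernel_buf,        /* buffer pinned by rwsem */
                               min(want, have));
    up_read(&fw_device_rwsem);

    if (ret != 0)
            return -EFAULT;                             /* reported only after unlock */
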
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 870125a3638e..2d01bc1b9752 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -25,7 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/rwsem.h> 28#include <linux/string.h>
29#include <asm/semaphore.h> 29#include <asm/semaphore.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <linux/ctype.h> 31#include <linux/ctype.h>
@@ -160,9 +160,9 @@ static void fw_device_release(struct device *dev)
160 * Take the card lock so we don't set this to NULL while a 160 * Take the card lock so we don't set this to NULL while a
161 * FW_NODE_UPDATED callback is being handled. 161 * FW_NODE_UPDATED callback is being handled.
162 */ 162 */
163 spin_lock_irqsave(&device->card->lock, flags); 163 spin_lock_irqsave(&card->lock, flags);
164 device->node->data = NULL; 164 device->node->data = NULL;
165 spin_unlock_irqrestore(&device->card->lock, flags); 165 spin_unlock_irqrestore(&card->lock, flags);
166 166
167 fw_node_put(device->node); 167 fw_node_put(device->node);
168 kfree(device->config_rom); 168 kfree(device->config_rom);
@@ -195,7 +195,9 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
195 container_of(dattr, struct config_rom_attribute, attr); 195 container_of(dattr, struct config_rom_attribute, attr);
196 struct fw_csr_iterator ci; 196 struct fw_csr_iterator ci;
197 u32 *dir; 197 u32 *dir;
198 int key, value; 198 int key, value, ret = -ENOENT;
199
200 down_read(&fw_device_rwsem);
199 201
200 if (is_fw_unit(dev)) 202 if (is_fw_unit(dev))
201 dir = fw_unit(dev)->directory; 203 dir = fw_unit(dev)->directory;
@@ -204,11 +206,15 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
204 206
205 fw_csr_iterator_init(&ci, dir); 207 fw_csr_iterator_init(&ci, dir);
206 while (fw_csr_iterator_next(&ci, &key, &value)) 208 while (fw_csr_iterator_next(&ci, &key, &value))
207 if (attr->key == key) 209 if (attr->key == key) {
208 return snprintf(buf, buf ? PAGE_SIZE : 0, 210 ret = snprintf(buf, buf ? PAGE_SIZE : 0,
209 "0x%06x\n", value); 211 "0x%06x\n", value);
212 break;
213 }
214
215 up_read(&fw_device_rwsem);
210 216
211 return -ENOENT; 217 return ret;
212} 218}
213 219
214#define IMMEDIATE_ATTR(name, key) \ 220#define IMMEDIATE_ATTR(name, key) \
@@ -221,9 +227,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
221 container_of(dattr, struct config_rom_attribute, attr); 227 container_of(dattr, struct config_rom_attribute, attr);
222 struct fw_csr_iterator ci; 228 struct fw_csr_iterator ci;
223 u32 *dir, *block = NULL, *p, *end; 229 u32 *dir, *block = NULL, *p, *end;
224 int length, key, value, last_key = 0; 230 int length, key, value, last_key = 0, ret = -ENOENT;
225 char *b; 231 char *b;
226 232
233 down_read(&fw_device_rwsem);
234
227 if (is_fw_unit(dev)) 235 if (is_fw_unit(dev))
228 dir = fw_unit(dev)->directory; 236 dir = fw_unit(dev)->directory;
229 else 237 else
@@ -238,18 +246,20 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
238 } 246 }
239 247
240 if (block == NULL) 248 if (block == NULL)
241 return -ENOENT; 249 goto out;
242 250
243 length = min(block[0] >> 16, 256U); 251 length = min(block[0] >> 16, 256U);
244 if (length < 3) 252 if (length < 3)
245 return -ENOENT; 253 goto out;
246 254
247 if (block[1] != 0 || block[2] != 0) 255 if (block[1] != 0 || block[2] != 0)
248 /* Unknown encoding. */ 256 /* Unknown encoding. */
249 return -ENOENT; 257 goto out;
250 258
251 if (buf == NULL) 259 if (buf == NULL) {
252 return length * 4; 260 ret = length * 4;
261 goto out;
262 }
253 263
254 b = buf; 264 b = buf;
255 end = &block[length + 1]; 265 end = &block[length + 1];
@@ -259,8 +269,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
259 /* Strip trailing whitespace and add newline. */ 269 /* Strip trailing whitespace and add newline. */
260 while (b--, (isspace(*b) || *b == '\0') && b > buf); 270 while (b--, (isspace(*b) || *b == '\0') && b > buf);
261 strcpy(b + 1, "\n"); 271 strcpy(b + 1, "\n");
272 ret = b + 2 - buf;
273 out:
274 up_read(&fw_device_rwsem);
262 275
263 return b + 2 - buf; 276 return ret;
264} 277}
265 278
266#define TEXT_LEAF_ATTR(name, key) \ 279#define TEXT_LEAF_ATTR(name, key) \
@@ -337,19 +350,28 @@ static ssize_t
337config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) 350config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
338{ 351{
339 struct fw_device *device = fw_device(dev); 352 struct fw_device *device = fw_device(dev);
353 size_t length;
340 354
341 memcpy(buf, device->config_rom, device->config_rom_length * 4); 355 down_read(&fw_device_rwsem);
356 length = device->config_rom_length * 4;
357 memcpy(buf, device->config_rom, length);
358 up_read(&fw_device_rwsem);
342 359
343 return device->config_rom_length * 4; 360 return length;
344} 361}
345 362
346static ssize_t 363static ssize_t
347guid_show(struct device *dev, struct device_attribute *attr, char *buf) 364guid_show(struct device *dev, struct device_attribute *attr, char *buf)
348{ 365{
349 struct fw_device *device = fw_device(dev); 366 struct fw_device *device = fw_device(dev);
367 int ret;
368
369 down_read(&fw_device_rwsem);
370 ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
371 device->config_rom[3], device->config_rom[4]);
372 up_read(&fw_device_rwsem);
350 373
351 return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", 374 return ret;
352 device->config_rom[3], device->config_rom[4]);
353} 375}
354 376
355static struct device_attribute fw_device_attributes[] = { 377static struct device_attribute fw_device_attributes[] = {
@@ -388,7 +410,7 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
388 410
389 init_completion(&callback_data.done); 411 init_completion(&callback_data.done);
390 412
391 offset = 0xfffff0000400ULL + index * 4; 413 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
392 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST, 414 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
393 device->node_id, generation, device->max_speed, 415 device->node_id, generation, device->max_speed,
394 offset, NULL, 4, complete_transaction, &callback_data); 416 offset, NULL, 4, complete_transaction, &callback_data);
@@ -400,6 +422,9 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
400 return callback_data.rcode; 422 return callback_data.rcode;
401} 423}
402 424
425#define READ_BIB_ROM_SIZE 256
426#define READ_BIB_STACK_SIZE 16
427
403/* 428/*
404 * Read the bus info block, perform a speed probe, and read all of the rest of 429 * Read the bus info block, perform a speed probe, and read all of the rest of
405 * the config ROM. We do all this with a cached bus generation. If the bus 430 * the config ROM. We do all this with a cached bus generation. If the bus
@@ -409,16 +434,23 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
409 */ 434 */
410static int read_bus_info_block(struct fw_device *device, int generation) 435static int read_bus_info_block(struct fw_device *device, int generation)
411{ 436{
412 static u32 rom[256]; 437 u32 *rom, *stack, *old_rom, *new_rom;
413 u32 stack[16], sp, key; 438 u32 sp, key;
414 int i, end, length; 439 int i, end, length, ret = -1;
440
441 rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
442 sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
443 if (rom == NULL)
444 return -ENOMEM;
445
446 stack = &rom[READ_BIB_ROM_SIZE];
415 447
416 device->max_speed = SCODE_100; 448 device->max_speed = SCODE_100;
417 449
418 /* First read the bus info block. */ 450 /* First read the bus info block. */
419 for (i = 0; i < 5; i++) { 451 for (i = 0; i < 5; i++) {
420 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) 452 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
421 return -1; 453 goto out;
422 /* 454 /*
423 * As per IEEE1212 7.2, during power-up, devices can 455 * As per IEEE1212 7.2, during power-up, devices can
424 * reply with a 0 for the first quadlet of the config 456 * reply with a 0 for the first quadlet of the config
@@ -428,7 +460,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
428 * retry mechanism will try again later. 460 * retry mechanism will try again later.
429 */ 461 */
430 if (i == 0 && rom[i] == 0) 462 if (i == 0 && rom[i] == 0)
431 return -1; 463 goto out;
432 } 464 }
433 465
434 device->max_speed = device->node->max_speed; 466 device->max_speed = device->node->max_speed;
@@ -478,26 +510,26 @@ static int read_bus_info_block(struct fw_device *device, int generation)
478 */ 510 */
479 key = stack[--sp]; 511 key = stack[--sp];
480 i = key & 0xffffff; 512 i = key & 0xffffff;
481 if (i >= ARRAY_SIZE(rom)) 513 if (i >= READ_BIB_ROM_SIZE)
482 /* 514 /*
483 * The reference points outside the standard 515 * The reference points outside the standard
484 * config rom area, something's fishy. 516 * config rom area, something's fishy.
485 */ 517 */
486 return -1; 518 goto out;
487 519
488 /* Read header quadlet for the block to get the length. */ 520 /* Read header quadlet for the block to get the length. */
489 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) 521 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
490 return -1; 522 goto out;
491 end = i + (rom[i] >> 16) + 1; 523 end = i + (rom[i] >> 16) + 1;
492 i++; 524 i++;
493 if (end > ARRAY_SIZE(rom)) 525 if (end > READ_BIB_ROM_SIZE)
494 /* 526 /*
495 * This block extends outside standard config 527 * This block extends outside standard config
496 * area (and the array we're reading it 528 * area (and the array we're reading it
497 * into). That's broken, so ignore this 529 * into). That's broken, so ignore this
498 * device. 530 * device.
499 */ 531 */
500 return -1; 532 goto out;
501 533
502 /* 534 /*
503 * Now read in the block. If this is a directory 535 * Now read in the block. If this is a directory
@@ -507,9 +539,9 @@ static int read_bus_info_block(struct fw_device *device, int generation)
507 while (i < end) { 539 while (i < end) {
508 if (read_rom(device, generation, i, &rom[i]) != 540 if (read_rom(device, generation, i, &rom[i]) !=
509 RCODE_COMPLETE) 541 RCODE_COMPLETE)
510 return -1; 542 goto out;
511 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 && 543 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
512 sp < ARRAY_SIZE(stack)) 544 sp < READ_BIB_STACK_SIZE)
513 stack[sp++] = i + rom[i]; 545 stack[sp++] = i + rom[i];
514 i++; 546 i++;
515 } 547 }
@@ -517,13 +549,23 @@ static int read_bus_info_block(struct fw_device *device, int generation)
517 length = i; 549 length = i;
518 } 550 }
519 551
520 device->config_rom = kmalloc(length * 4, GFP_KERNEL); 552 old_rom = device->config_rom;
521 if (device->config_rom == NULL) 553 new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
522 return -1; 554 if (new_rom == NULL)
523 memcpy(device->config_rom, rom, length * 4); 555 goto out;
556
557 down_write(&fw_device_rwsem);
558 device->config_rom = new_rom;
524 device->config_rom_length = length; 559 device->config_rom_length = length;
560 up_write(&fw_device_rwsem);
525 561
526 return 0; 562 kfree(old_rom);
563 ret = 0;
564 device->cmc = rom[2] & 1 << 30;
565 out:
566 kfree(rom);
567
568 return ret;
527} 569}
528 570
529static void fw_unit_release(struct device *dev) 571static void fw_unit_release(struct device *dev)
@@ -592,7 +634,14 @@ static int shutdown_unit(struct device *device, void *data)
592 return 0; 634 return 0;
593} 635}
594 636
595static DECLARE_RWSEM(idr_rwsem); 637/*
638 * fw_device_rwsem acts as dual purpose mutex:
639 * - serializes accesses to fw_device_idr,
640 * - serializes accesses to fw_device.config_rom/.config_rom_length and
641 * fw_unit.directory, unless those accesses happen at safe occasions
642 */
643DECLARE_RWSEM(fw_device_rwsem);
644
596static DEFINE_IDR(fw_device_idr); 645static DEFINE_IDR(fw_device_idr);
597int fw_cdev_major; 646int fw_cdev_major;
598 647
@@ -600,11 +649,11 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
600{ 649{
601 struct fw_device *device; 650 struct fw_device *device;
602 651
603 down_read(&idr_rwsem); 652 down_read(&fw_device_rwsem);
604 device = idr_find(&fw_device_idr, MINOR(devt)); 653 device = idr_find(&fw_device_idr, MINOR(devt));
605 if (device) 654 if (device)
606 fw_device_get(device); 655 fw_device_get(device);
607 up_read(&idr_rwsem); 656 up_read(&fw_device_rwsem);
608 657
609 return device; 658 return device;
610} 659}
@@ -619,9 +668,9 @@ static void fw_device_shutdown(struct work_struct *work)
619 device_for_each_child(&device->device, NULL, shutdown_unit); 668 device_for_each_child(&device->device, NULL, shutdown_unit);
620 device_unregister(&device->device); 669 device_unregister(&device->device);
621 670
622 down_write(&idr_rwsem); 671 down_write(&fw_device_rwsem);
623 idr_remove(&fw_device_idr, minor); 672 idr_remove(&fw_device_idr, minor);
624 up_write(&idr_rwsem); 673 up_write(&fw_device_rwsem);
625 fw_device_put(device); 674 fw_device_put(device);
626} 675}
627 676
@@ -674,10 +723,10 @@ static void fw_device_init(struct work_struct *work)
674 err = -ENOMEM; 723 err = -ENOMEM;
675 724
676 fw_device_get(device); 725 fw_device_get(device);
677 down_write(&idr_rwsem); 726 down_write(&fw_device_rwsem);
678 if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) 727 if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
679 err = idr_get_new(&fw_device_idr, device, &minor); 728 err = idr_get_new(&fw_device_idr, device, &minor);
680 up_write(&idr_rwsem); 729 up_write(&fw_device_rwsem);
681 730
682 if (err < 0) 731 if (err < 0)
683 goto error; 732 goto error;
@@ -711,7 +760,7 @@ static void fw_device_init(struct work_struct *work)
711 if (atomic_cmpxchg(&device->state, 760 if (atomic_cmpxchg(&device->state,
712 FW_DEVICE_INITIALIZING, 761 FW_DEVICE_INITIALIZING,
713 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { 762 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
714 fw_device_shutdown(&device->work.work); 763 fw_device_shutdown(work);
715 } else { 764 } else {
716 if (device->config_rom_retries) 765 if (device->config_rom_retries)
717 fw_notify("created device %s: GUID %08x%08x, S%d00, " 766 fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -725,6 +774,7 @@ static void fw_device_init(struct work_struct *work)
725 device->device.bus_id, 774 device->device.bus_id,
726 device->config_rom[3], device->config_rom[4], 775 device->config_rom[3], device->config_rom[4],
727 1 << device->max_speed); 776 1 << device->max_speed);
777 device->config_rom_retries = 0;
728 } 778 }
729 779
730 /* 780 /*
@@ -739,9 +789,9 @@ static void fw_device_init(struct work_struct *work)
739 return; 789 return;
740 790
741 error_with_cdev: 791 error_with_cdev:
742 down_write(&idr_rwsem); 792 down_write(&fw_device_rwsem);
743 idr_remove(&fw_device_idr, minor); 793 idr_remove(&fw_device_idr, minor);
744 up_write(&idr_rwsem); 794 up_write(&fw_device_rwsem);
745 error: 795 error:
746 fw_device_put(device); /* fw_device_idr's reference */ 796 fw_device_put(device); /* fw_device_idr's reference */
747 797
@@ -771,6 +821,106 @@ static void fw_device_update(struct work_struct *work)
771 device_for_each_child(&device->device, NULL, update_unit); 821 device_for_each_child(&device->device, NULL, update_unit);
772} 822}
773 823
824enum {
825 REREAD_BIB_ERROR,
826 REREAD_BIB_GONE,
827 REREAD_BIB_UNCHANGED,
828 REREAD_BIB_CHANGED,
829};
830
831/* Reread and compare bus info block and header of root directory */
832static int reread_bus_info_block(struct fw_device *device, int generation)
833{
834 u32 q;
835 int i;
836
837 for (i = 0; i < 6; i++) {
838 if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
839 return REREAD_BIB_ERROR;
840
841 if (i == 0 && q == 0)
842 return REREAD_BIB_GONE;
843
844 if (i > device->config_rom_length || q != device->config_rom[i])
845 return REREAD_BIB_CHANGED;
846 }
847
848 return REREAD_BIB_UNCHANGED;
849}
850
851static void fw_device_refresh(struct work_struct *work)
852{
853 struct fw_device *device =
854 container_of(work, struct fw_device, work.work);
855 struct fw_card *card = device->card;
856 int node_id = device->node_id;
857
858 switch (reread_bus_info_block(device, device->generation)) {
859 case REREAD_BIB_ERROR:
860 if (device->config_rom_retries < MAX_RETRIES / 2 &&
861 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
862 device->config_rom_retries++;
863 schedule_delayed_work(&device->work, RETRY_DELAY / 2);
864
865 return;
866 }
867 goto give_up;
868
869 case REREAD_BIB_GONE:
870 goto gone;
871
872 case REREAD_BIB_UNCHANGED:
873 if (atomic_cmpxchg(&device->state,
874 FW_DEVICE_INITIALIZING,
875 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
876 goto gone;
877
878 fw_device_update(work);
879 device->config_rom_retries = 0;
880 goto out;
881
882 case REREAD_BIB_CHANGED:
883 break;
884 }
885
886 /*
887 * Something changed. We keep things simple and don't investigate
888 * further. We just destroy all previous units and create new ones.
889 */
890 device_for_each_child(&device->device, NULL, shutdown_unit);
891
892 if (read_bus_info_block(device, device->generation) < 0) {
893 if (device->config_rom_retries < MAX_RETRIES &&
894 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
895 device->config_rom_retries++;
896 schedule_delayed_work(&device->work, RETRY_DELAY);
897
898 return;
899 }
900 goto give_up;
901 }
902
903 create_units(device);
904
905 if (atomic_cmpxchg(&device->state,
906 FW_DEVICE_INITIALIZING,
907 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
908 goto gone;
909
910 fw_notify("refreshed device %s\n", device->device.bus_id);
911 device->config_rom_retries = 0;
912 goto out;
913
914 give_up:
915 fw_notify("giving up on refresh of device %s\n", device->device.bus_id);
916 gone:
917 atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
918 fw_device_shutdown(work);
919 out:
920 if (node_id == card->root_node->node_id)
921 schedule_delayed_work(&card->work, 0);
922}
923
774void fw_node_event(struct fw_card *card, struct fw_node *node, int event) 924void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
775{ 925{
776 struct fw_device *device; 926 struct fw_device *device;
@@ -780,7 +930,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
780 case FW_NODE_LINK_ON: 930 case FW_NODE_LINK_ON:
781 if (!node->link_on) 931 if (!node->link_on)
782 break; 932 break;
783 933 create:
784 device = kzalloc(sizeof(*device), GFP_ATOMIC); 934 device = kzalloc(sizeof(*device), GFP_ATOMIC);
785 if (device == NULL) 935 if (device == NULL)
786 break; 936 break;
@@ -819,6 +969,23 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
819 schedule_delayed_work(&device->work, INITIAL_DELAY); 969 schedule_delayed_work(&device->work, INITIAL_DELAY);
820 break; 970 break;
821 971
972 case FW_NODE_INITIATED_RESET:
973 device = node->data;
974 if (device == NULL)
975 goto create;
976
977 device->node_id = node->node_id;
978 smp_wmb(); /* update node_id before generation */
979 device->generation = card->generation;
980 if (atomic_cmpxchg(&device->state,
981 FW_DEVICE_RUNNING,
982 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
983 PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
984 schedule_delayed_work(&device->work,
985 node == card->local_node ? 0 : INITIAL_DELAY);
986 }
987 break;
988
822 case FW_NODE_UPDATED: 989 case FW_NODE_UPDATED:
823 if (!node->link_on || node->data == NULL) 990 if (!node->link_on || node->data == NULL)
824 break; 991 break;
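
[Editor's note: two details of the fw-device.c rework above are worth spelling out. read_bus_info_block() now carves its scratch ROM and scan stack out of a single allocation, and it publishes a freshly kmemdup()'d config ROM under the write side of fw_device_rwsem before freeing the old copy, so readers holding the read side never see a stale or half-updated pointer. Condensed from the hunk above, with the scan loop and error paths trimmed:]

    u32 *rom, *stack, *old_rom, *new_rom;

    rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
                  sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
    if (rom == NULL)
            return -ENOMEM;
    stack = &rom[READ_BIB_ROM_SIZE];            /* second region of the same buffer */

    /* ... scan the config ROM into rom[], pushing directory refs onto stack[] ... */

    old_rom = device->config_rom;
    new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
    if (new_rom != NULL) {
            down_write(&fw_device_rwsem);
            device->config_rom = new_rom;
            device->config_rom_length = length;
            up_write(&fw_device_rwsem);
            kfree(old_rom);                     /* no reader can reach it anymore */
    }
    kfree(rom);
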
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 78ecd3991b7f..5f131f5129da 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -21,6 +21,7 @@
21 21
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/cdev.h> 23#include <linux/cdev.h>
24#include <linux/rwsem.h>
24#include <asm/atomic.h> 25#include <asm/atomic.h>
25 26
26enum fw_device_state { 27enum fw_device_state {
@@ -46,6 +47,11 @@ struct fw_attribute_group {
46 * fw_device.node_id is guaranteed to be current too. 47 * fw_device.node_id is guaranteed to be current too.
47 * 48 *
48 * The same applies to fw_device.card->node_id vs. fw_device.generation. 49 * The same applies to fw_device.card->node_id vs. fw_device.generation.
50 *
51 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
52 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
53 * was called on the last fw_unit. Alternatively, they may be accessed while
54 * holding fw_device_rwsem.
49 */ 55 */
50struct fw_device { 56struct fw_device {
51 atomic_t state; 57 atomic_t state;
@@ -53,6 +59,7 @@ struct fw_device {
53 int node_id; 59 int node_id;
54 int generation; 60 int generation;
55 unsigned max_speed; 61 unsigned max_speed;
62 bool cmc;
56 struct fw_card *card; 63 struct fw_card *card;
57 struct device device; 64 struct device device;
58 struct list_head link; 65 struct list_head link;
@@ -64,28 +71,24 @@ struct fw_device {
64 struct fw_attribute_group attribute_group; 71 struct fw_attribute_group attribute_group;
65}; 72};
66 73
67static inline struct fw_device * 74static inline struct fw_device *fw_device(struct device *dev)
68fw_device(struct device *dev)
69{ 75{
70 return container_of(dev, struct fw_device, device); 76 return container_of(dev, struct fw_device, device);
71} 77}
72 78
73static inline int 79static inline int fw_device_is_shutdown(struct fw_device *device)
74fw_device_is_shutdown(struct fw_device *device)
75{ 80{
76 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; 81 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
77} 82}
78 83
79static inline struct fw_device * 84static inline struct fw_device *fw_device_get(struct fw_device *device)
80fw_device_get(struct fw_device *device)
81{ 85{
82 get_device(&device->device); 86 get_device(&device->device);
83 87
84 return device; 88 return device;
85} 89}
86 90
87static inline void 91static inline void fw_device_put(struct fw_device *device)
88fw_device_put(struct fw_device *device)
89{ 92{
90 put_device(&device->device); 93 put_device(&device->device);
91} 94}
@@ -96,20 +99,35 @@ int fw_device_enable_phys_dma(struct fw_device *device);
96void fw_device_cdev_update(struct fw_device *device); 99void fw_device_cdev_update(struct fw_device *device);
97void fw_device_cdev_remove(struct fw_device *device); 100void fw_device_cdev_remove(struct fw_device *device);
98 101
102extern struct rw_semaphore fw_device_rwsem;
99extern int fw_cdev_major; 103extern int fw_cdev_major;
100 104
105/*
106 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
107 */
101struct fw_unit { 108struct fw_unit {
102 struct device device; 109 struct device device;
103 u32 *directory; 110 u32 *directory;
104 struct fw_attribute_group attribute_group; 111 struct fw_attribute_group attribute_group;
105}; 112};
106 113
107static inline struct fw_unit * 114static inline struct fw_unit *fw_unit(struct device *dev)
108fw_unit(struct device *dev)
109{ 115{
110 return container_of(dev, struct fw_unit, device); 116 return container_of(dev, struct fw_unit, device);
111} 117}
112 118
119static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
120{
121 get_device(&unit->device);
122
123 return unit;
124}
125
126static inline void fw_unit_put(struct fw_unit *unit)
127{
128 put_device(&unit->device);
129}
130
113#define CSR_OFFSET 0x40 131#define CSR_OFFSET 0x40
114#define CSR_LEAF 0x80 132#define CSR_LEAF 0x80
115#define CSR_DIRECTORY 0xc0 133#define CSR_DIRECTORY 0xc0
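
[Editor's note: the new fw_unit_get()/fw_unit_put() helpers follow the usual get_device()/put_device() convention; a protocol driver that defers work against a unit would pin it for the duration of that work. Illustrative use only, not code from this merge:]

    static void queue_unit_work(struct fw_unit *unit)
    {
            fw_unit_get(unit);      /* keep the unit alive while work is pending */
            /* ... schedule work that dereferences unit ... */
    }

    static void unit_work_done(struct fw_unit *unit)
    {
            /* ... */
            fw_unit_put(unit);      /* drop the reference taken above */
    }
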
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 2b640e9be6de..bcbe794a3ea5 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -126,7 +126,6 @@ fw_iso_context_create(struct fw_card *card, int type,
126 126
127 return ctx; 127 return ctx;
128} 128}
129EXPORT_SYMBOL(fw_iso_context_create);
130 129
131void fw_iso_context_destroy(struct fw_iso_context *ctx) 130void fw_iso_context_destroy(struct fw_iso_context *ctx)
132{ 131{
@@ -134,14 +133,12 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
134 133
135 card->driver->free_iso_context(ctx); 134 card->driver->free_iso_context(ctx);
136} 135}
137EXPORT_SYMBOL(fw_iso_context_destroy);
138 136
139int 137int
140fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) 138fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
141{ 139{
142 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); 140 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
143} 141}
144EXPORT_SYMBOL(fw_iso_context_start);
145 142
146int 143int
147fw_iso_context_queue(struct fw_iso_context *ctx, 144fw_iso_context_queue(struct fw_iso_context *ctx,
@@ -153,11 +150,9 @@ fw_iso_context_queue(struct fw_iso_context *ctx,
153 150
154 return card->driver->queue_iso(ctx, packet, buffer, payload); 151 return card->driver->queue_iso(ctx, packet, buffer, payload);
155} 152}
156EXPORT_SYMBOL(fw_iso_context_queue);
157 153
158int 154int
159fw_iso_context_stop(struct fw_iso_context *ctx) 155fw_iso_context_stop(struct fw_iso_context *ctx)
160{ 156{
161 return ctx->card->driver->stop_iso(ctx); 157 return ctx->card->driver->stop_iso(ctx);
162} 158}
163EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index ca6d51efd8bb..4f02c55f13e1 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/moduleparam.h>
30#include <linux/pci.h> 31#include <linux/pci.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
32 33
@@ -177,9 +178,10 @@ struct fw_ohci {
177 struct tasklet_struct bus_reset_tasklet; 178 struct tasklet_struct bus_reset_tasklet;
178 int node_id; 179 int node_id;
179 int generation; 180 int generation;
180 int request_generation; 181 int request_generation; /* for timestamping incoming requests */
181 u32 bus_seconds; 182 u32 bus_seconds;
182 bool old_uninorth; 183 bool old_uninorth;
184 bool bus_reset_packet_quirk;
183 185
184 /* 186 /*
185 * Spinlock for accessing fw_ohci data. Never call out of 187 * Spinlock for accessing fw_ohci data. Never call out of
@@ -237,6 +239,196 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
237 239
238static char ohci_driver_name[] = KBUILD_MODNAME; 240static char ohci_driver_name[] = KBUILD_MODNAME;
239 241
242#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
243
244#define OHCI_PARAM_DEBUG_AT_AR 1
245#define OHCI_PARAM_DEBUG_SELFIDS 2
246#define OHCI_PARAM_DEBUG_IRQS 4
247#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
248
249static int param_debug;
250module_param_named(debug, param_debug, int, 0644);
251MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
252 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
253 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
254 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
255 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
256 ", or a combination, or all = -1)");
257
258static void log_irqs(u32 evt)
259{
260 if (likely(!(param_debug &
261 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
262 return;
263
264 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
265 !(evt & OHCI1394_busReset))
266 return;
267
268 printk(KERN_DEBUG KBUILD_MODNAME ": IRQ "
269 "%08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
270 evt,
271 evt & OHCI1394_selfIDComplete ? " selfID" : "",
272 evt & OHCI1394_RQPkt ? " AR_req" : "",
273 evt & OHCI1394_RSPkt ? " AR_resp" : "",
274 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
275 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
276 evt & OHCI1394_isochRx ? " IR" : "",
277 evt & OHCI1394_isochTx ? " IT" : "",
278 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
279 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
280 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
281 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
282 evt & OHCI1394_busReset ? " busReset" : "",
283 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
284 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
285 OHCI1394_respTxComplete | OHCI1394_isochRx |
286 OHCI1394_isochTx | OHCI1394_postedWriteErr |
287 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
288 OHCI1394_regAccessFail | OHCI1394_busReset)
289 ? " ?" : "");
290}
291
292static const char *speed[] = {
293 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
294};
295static const char *power[] = {
296 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
297 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
298};
299static const char port[] = { '.', '-', 'p', 'c', };
300
301static char _p(u32 *s, int shift)
302{
303 return port[*s >> shift & 3];
304}
305
306static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
307{
308 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
309 return;
310
311 printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d, "
312 "local node ID %04x\n", self_id_count, generation, node_id);
313
314 for (; self_id_count--; ++s)
315 if ((*s & 1 << 23) == 0)
316 printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
317 "%s gc=%d %s %s%s%s\n",
318 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
319 speed[*s >> 14 & 3], *s >> 16 & 63,
320 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
321 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
322 else
323 printk(KERN_DEBUG "selfID n: %08x, phy %d "
324 "[%c%c%c%c%c%c%c%c]\n",
325 *s, *s >> 24 & 63,
326 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
327 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
328}
329
330static const char *evts[] = {
331 [0x00] = "evt_no_status", [0x01] = "-reserved-",
332 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
333 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
334 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
335 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
336 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
337 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
338 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
339 [0x10] = "-reserved-", [0x11] = "ack_complete",
340 [0x12] = "ack_pending ", [0x13] = "-reserved-",
341 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
342 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
343 [0x18] = "-reserved-", [0x19] = "-reserved-",
344 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
345 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
346 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
347 [0x20] = "pending/cancelled",
348};
349static const char *tcodes[] = {
350 [0x0] = "QW req", [0x1] = "BW req",
351 [0x2] = "W resp", [0x3] = "-reserved-",
352 [0x4] = "QR req", [0x5] = "BR req",
353 [0x6] = "QR resp", [0x7] = "BR resp",
354 [0x8] = "cycle start", [0x9] = "Lk req",
355 [0xa] = "async stream packet", [0xb] = "Lk resp",
356 [0xc] = "-reserved-", [0xd] = "-reserved-",
357 [0xe] = "link internal", [0xf] = "-reserved-",
358};
359static const char *phys[] = {
360 [0x0] = "phy config packet", [0x1] = "link-on packet",
361 [0x2] = "self-id packet", [0x3] = "-reserved-",
362};
363
364static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
365{
366 int tcode = header[0] >> 4 & 0xf;
367 char specific[12];
368
369 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
370 return;
371
372 if (unlikely(evt >= ARRAY_SIZE(evts)))
373 evt = 0x1f;
374
375 if (evt == OHCI1394_evt_bus_reset) {
376 printk(KERN_DEBUG "A%c evt_bus_reset, generation %d\n",
377 dir, (header[2] >> 16) & 0xff);
378 return;
379 }
380
381 if (header[0] == ~header[1]) {
382 printk(KERN_DEBUG "A%c %s, %s, %08x\n",
383 dir, evts[evt], phys[header[0] >> 30 & 0x3],
384 header[0]);
385 return;
386 }
387
388 switch (tcode) {
389 case 0x0: case 0x6: case 0x8:
390 snprintf(specific, sizeof(specific), " = %08x",
391 be32_to_cpu((__force __be32)header[3]));
392 break;
393 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
394 snprintf(specific, sizeof(specific), " %x,%x",
395 header[3] >> 16, header[3] & 0xffff);
396 break;
397 default:
398 specific[0] = '\0';
399 }
400
401 switch (tcode) {
402 case 0xe: case 0xa:
403 printk(KERN_DEBUG "A%c %s, %s\n",
404 dir, evts[evt], tcodes[tcode]);
405 break;
406 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
407 printk(KERN_DEBUG "A%c spd %x tl %02x, "
408 "%04x -> %04x, %s, "
409 "%s, %04x%08x%s\n",
410 dir, speed, header[0] >> 10 & 0x3f,
411 header[1] >> 16, header[0] >> 16, evts[evt],
412 tcodes[tcode], header[1] & 0xffff, header[2], specific);
413 break;
414 default:
415 printk(KERN_DEBUG "A%c spd %x tl %02x, "
416 "%04x -> %04x, %s, "
417 "%s%s\n",
418 dir, speed, header[0] >> 10 & 0x3f,
419 header[1] >> 16, header[0] >> 16, evts[evt],
420 tcodes[tcode], specific);
421 }
422}
423
424#else
425
426#define log_irqs(evt)
427#define log_selfids(node_id, generation, self_id_count, sid)
428#define log_ar_at_event(dir, speed, header, evt)
429
430#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
431
240static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 432static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
241{ 433{
242 writel(data, ohci->registers + offset); 434 writel(data, ohci->registers + offset);
@@ -320,6 +512,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
320 struct fw_ohci *ohci = ctx->ohci; 512 struct fw_ohci *ohci = ctx->ohci;
321 struct fw_packet p; 513 struct fw_packet p;
322 u32 status, length, tcode; 514 u32 status, length, tcode;
515 int evt;
323 516
324 p.header[0] = cond_le32_to_cpu(buffer[0]); 517 p.header[0] = cond_le32_to_cpu(buffer[0]);
325 p.header[1] = cond_le32_to_cpu(buffer[1]); 518 p.header[1] = cond_le32_to_cpu(buffer[1]);
@@ -362,12 +555,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
362 /* FIXME: What to do about evt_* errors? */ 555 /* FIXME: What to do about evt_* errors? */
363 length = (p.header_length + p.payload_length + 3) / 4; 556 length = (p.header_length + p.payload_length + 3) / 4;
364 status = cond_le32_to_cpu(buffer[length]); 557 status = cond_le32_to_cpu(buffer[length]);
558 evt = (status >> 16) & 0x1f;
365 559
366 p.ack = ((status >> 16) & 0x1f) - 16; 560 p.ack = evt - 16;
367 p.speed = (status >> 21) & 0x7; 561 p.speed = (status >> 21) & 0x7;
368 p.timestamp = status & 0xffff; 562 p.timestamp = status & 0xffff;
369 p.generation = ohci->request_generation; 563 p.generation = ohci->request_generation;
370 564
565 log_ar_at_event('R', p.speed, p.header, evt);
566
371 /* 567 /*
372 * The OHCI bus reset handler synthesizes a phy packet with 568 * The OHCI bus reset handler synthesizes a phy packet with
373 * the new generation number when a bus reset happens (see 569 * the new generation number when a bus reset happens (see
@@ -376,14 +572,19 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
376 * generation. We only need this for requests; for responses 572 * generation. We only need this for requests; for responses
377 * we use the unique tlabel for finding the matching 573 * we use the unique tlabel for finding the matching
378 * request. 574 * request.
575 *
576 * Alas some chips sometimes emit bus reset packets with a
577 * wrong generation. We set the correct generation for these
578 * at a slightly incorrect time (in bus_reset_tasklet).
379 */ 579 */
380 580 if (evt == OHCI1394_evt_bus_reset) {
381 if (p.ack + 16 == 0x09) 581 if (!ohci->bus_reset_packet_quirk)
382 ohci->request_generation = (p.header[2] >> 16) & 0xff; 582 ohci->request_generation = (p.header[2] >> 16) & 0xff;
383 else if (ctx == &ohci->ar_request_ctx) 583 } else if (ctx == &ohci->ar_request_ctx) {
384 fw_core_handle_request(&ohci->card, &p); 584 fw_core_handle_request(&ohci->card, &p);
385 else 585 } else {
386 fw_core_handle_response(&ohci->card, &p); 586 fw_core_handle_response(&ohci->card, &p);
587 }
387 588
388 return buffer + length + 1; 589 return buffer + length + 1;
389} 590}
@@ -770,8 +971,19 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
770 DESCRIPTOR_IRQ_ALWAYS | 971 DESCRIPTOR_IRQ_ALWAYS |
771 DESCRIPTOR_BRANCH_ALWAYS); 972 DESCRIPTOR_BRANCH_ALWAYS);
772 973
773 /* FIXME: Document how the locking works. */ 974 /*
774 if (ohci->generation != packet->generation) { 975 * If the controller and packet generations don't match, we need to
976 * bail out and try again. If IntEvent.busReset is set, the AT context
977 * is halted, so appending to the context and trying to run it is
978 * futile. Most controllers do the right thing and just flush the AT
979 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
980 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
981 * up stalling out. So we just bail out in software and try again
982 * later, and everyone is happy.
983 * FIXME: Document how the locking works.
984 */
985 if (ohci->generation != packet->generation ||
986 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
775 if (packet->payload_length > 0) 987 if (packet->payload_length > 0)
776 dma_unmap_single(ohci->card.device, payload_bus, 988 dma_unmap_single(ohci->card.device, payload_bus,
777 packet->payload_length, DMA_TO_DEVICE); 989 packet->payload_length, DMA_TO_DEVICE);
@@ -817,6 +1029,8 @@ static int handle_at_packet(struct context *context,
817 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1029 evt = le16_to_cpu(last->transfer_status) & 0x1f;
818 packet->timestamp = le16_to_cpu(last->res_count); 1030 packet->timestamp = le16_to_cpu(last->res_count);
819 1031
1032 log_ar_at_event('T', packet->speed, packet->header, evt);
1033
820 switch (evt) { 1034 switch (evt) {
821 case OHCI1394_evt_timeout: 1035 case OHCI1394_evt_timeout:
822 /* Async response transmit timed out. */ 1036 /* Async response transmit timed out. */
@@ -1019,20 +1233,30 @@ static void bus_reset_tasklet(unsigned long data)
1019 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1233 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1020 OHCI1394_NodeID_nodeNumber); 1234 OHCI1394_NodeID_nodeNumber);
1021 1235
1236 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1237 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1238 fw_notify("inconsistent self IDs\n");
1239 return;
1240 }
1022 /* 1241 /*
1023 * The count in the SelfIDCount register is the number of 1242 * The count in the SelfIDCount register is the number of
1024 * bytes in the self ID receive buffer. Since we also receive 1243 * bytes in the self ID receive buffer. Since we also receive
1025 * the inverted quadlets and a header quadlet, we shift one 1244 * the inverted quadlets and a header quadlet, we shift one
1026 * bit extra to get the actual number of self IDs. 1245 * bit extra to get the actual number of self IDs.
1027 */ 1246 */
1028 1247 self_id_count = (reg >> 3) & 0x3ff;
1029 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; 1248 if (self_id_count == 0) {
1249 fw_notify("inconsistent self IDs\n");
1250 return;
1251 }
1030 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 1252 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1031 rmb(); 1253 rmb();
1032 1254
1033 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 1255 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1034 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) 1256 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1035 fw_error("inconsistent self IDs\n"); 1257 fw_notify("inconsistent self IDs\n");
1258 return;
1259 }
1036 ohci->self_id_buffer[j] = 1260 ohci->self_id_buffer[j] =
1037 cond_le32_to_cpu(ohci->self_id_cpu[i]); 1261 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1038 } 1262 }
@@ -1067,6 +1291,9 @@ static void bus_reset_tasklet(unsigned long data)
1067 context_stop(&ohci->at_response_ctx); 1291 context_stop(&ohci->at_response_ctx);
1068 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1292 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1069 1293
1294 if (ohci->bus_reset_packet_quirk)
1295 ohci->request_generation = generation;
1296
1070 /* 1297 /*
1071 * This next bit is unrelated to the AT context stuff but we 1298 * This next bit is unrelated to the AT context stuff but we
1072 * have to do it under the spinlock also. If a new config rom 1299 * have to do it under the spinlock also. If a new config rom
@@ -1097,12 +1324,20 @@ static void bus_reset_tasklet(unsigned long data)
1097 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header); 1324 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
1098 } 1325 }
1099 1326
1327#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1328 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1329 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1330#endif
1331
1100 spin_unlock_irqrestore(&ohci->lock, flags); 1332 spin_unlock_irqrestore(&ohci->lock, flags);
1101 1333
1102 if (free_rom) 1334 if (free_rom)
1103 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1335 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1104 free_rom, free_rom_bus); 1336 free_rom, free_rom_bus);
1105 1337
1338 log_selfids(ohci->node_id, generation,
1339 self_id_count, ohci->self_id_buffer);
1340
1106 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1341 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1107 self_id_count, ohci->self_id_buffer); 1342 self_id_count, ohci->self_id_buffer);
1108} 1343}
@@ -1118,7 +1353,9 @@ static irqreturn_t irq_handler(int irq, void *data)
1118 if (!event || !~event) 1353 if (!event || !~event)
1119 return IRQ_NONE; 1354 return IRQ_NONE;
1120 1355
1121 reg_write(ohci, OHCI1394_IntEventClear, event); 1356 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
1357 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1358 log_irqs(event);
1122 1359
1123 if (event & OHCI1394_selfIDComplete) 1360 if (event & OHCI1394_selfIDComplete)
1124 tasklet_schedule(&ohci->bus_reset_tasklet); 1361 tasklet_schedule(&ohci->bus_reset_tasklet);
@@ -1153,6 +1390,10 @@ static irqreturn_t irq_handler(int irq, void *data)
1153 iso_event &= ~(1 << i); 1390 iso_event &= ~(1 << i);
1154 } 1391 }
1155 1392
1393 if (unlikely(event & OHCI1394_regAccessFail))
1394 fw_error("Register access failure - "
1395 "please notify linux1394-devel@lists.sf.net\n");
1396
1156 if (unlikely(event & OHCI1394_postedWriteErr)) 1397 if (unlikely(event & OHCI1394_postedWriteErr))
1157 fw_error("PCI posted write error\n"); 1398 fw_error("PCI posted write error\n");
1158 1399
@@ -1192,6 +1433,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1192{ 1433{
1193 struct fw_ohci *ohci = fw_ohci(card); 1434 struct fw_ohci *ohci = fw_ohci(card);
1194 struct pci_dev *dev = to_pci_dev(card->device); 1435 struct pci_dev *dev = to_pci_dev(card->device);
1436 u32 lps;
1437 int i;
1195 1438
1196 if (software_reset(ohci)) { 1439 if (software_reset(ohci)) {
1197 fw_error("Failed to reset ohci card.\n"); 1440 fw_error("Failed to reset ohci card.\n");
@@ -1203,13 +1446,24 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1203 * most of the registers. In fact, on some cards (ALI M5251), 1446 * most of the registers. In fact, on some cards (ALI M5251),
1204 * accessing registers in the SClk domain without LPS enabled 1447 * accessing registers in the SClk domain without LPS enabled
1205 * will lock up the machine. Wait 50msec to make sure we have 1448 * will lock up the machine. Wait 50msec to make sure we have
1206 * full link enabled. 1449 * full link enabled. However, with some cards (well, at least
1450 * a JMicron PCIe card), we have to try again sometimes.
1207 */ 1451 */
1208 reg_write(ohci, OHCI1394_HCControlSet, 1452 reg_write(ohci, OHCI1394_HCControlSet,
1209 OHCI1394_HCControl_LPS | 1453 OHCI1394_HCControl_LPS |
1210 OHCI1394_HCControl_postedWriteEnable); 1454 OHCI1394_HCControl_postedWriteEnable);
1211 flush_writes(ohci); 1455 flush_writes(ohci);
1212 msleep(50); 1456
1457 for (lps = 0, i = 0; !lps && i < 3; i++) {
1458 msleep(50);
1459 lps = reg_read(ohci, OHCI1394_HCControlSet) &
1460 OHCI1394_HCControl_LPS;
1461 }
1462
1463 if (!lps) {
1464 fw_error("Failed to set Link Power Status\n");
1465 return -EIO;
1466 }
1213 1467
1214 reg_write(ohci, OHCI1394_HCControlClear, 1468 reg_write(ohci, OHCI1394_HCControlClear,
1215 OHCI1394_HCControl_noByteSwapData); 1469 OHCI1394_HCControl_noByteSwapData);
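
With the retry loop above, ohci_enable() gives the link up to three 50 ms windows to raise LPS and otherwise fails with -EIO instead of silently continuing. Roughly (an assumption -- the caller is not part of these hunks), the error then propagates through the core's ->enable() hook during card registration:

        /* sketch of the presumed call site in fw-card.c, not shown here */
        ret = card->driver->enable(card, config_rom, length);
        if (ret < 0)
                return ret;     /* e.g. -EIO when LPS never came up */
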
@@ -1237,7 +1491,10 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1237 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1491 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1238 OHCI1394_isochRx | OHCI1394_isochTx | 1492 OHCI1394_isochRx | OHCI1394_isochTx |
1239 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1493 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1240 OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable); 1494 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1495 OHCI1394_masterIntEnable);
1496 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1497 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1241 1498
1242 /* Activate link_on bit and contender bit in our self ID packets.*/ 1499 /* Activate link_on bit and contender bit in our self ID packets.*/
1243 if (ohci_update_phy_reg(card, 4, 0, 1500 if (ohci_update_phy_reg(card, 4, 0,
@@ -1421,6 +1678,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1421 if (packet->ack != 0) 1678 if (packet->ack != 0)
1422 goto out; 1679 goto out;
1423 1680
1681 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1424 driver_data->packet = NULL; 1682 driver_data->packet = NULL;
1425 packet->ack = RCODE_CANCELLED; 1683 packet->ack = RCODE_CANCELLED;
1426 packet->callback(packet, &ohci->card, packet->ack); 1684 packet->callback(packet, &ohci->card, packet->ack);
@@ -1435,6 +1693,9 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1435static int 1693static int
1436ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) 1694ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1437{ 1695{
1696#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1697 return 0;
1698#else
1438 struct fw_ohci *ohci = fw_ohci(card); 1699 struct fw_ohci *ohci = fw_ohci(card);
1439 unsigned long flags; 1700 unsigned long flags;
1440 int n, retval = 0; 1701 int n, retval = 0;
@@ -1466,6 +1727,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1466 out: 1727 out:
1467 spin_unlock_irqrestore(&ohci->lock, flags); 1728 spin_unlock_irqrestore(&ohci->lock, flags);
1468 return retval; 1729 return retval;
1730#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1469} 1731}
1470 1732
1471static u64 1733static u64
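
The new CONFIG_FIREWIRE_OHCI_REMOTE_DMA stub turns ohci_enable_phys_dma() into an unconditional success, presumably because with that option the physical request filters are already left open during controller setup. For comparison, a rough sketch of what the filtered (default) path does with the per-node filter registers -- the register names exist in fw-ohci.h, the exact body is an assumption here:

        /* sketch: open the physical request filter for one node only */
        int n = node_id & 0x3f;         /* low six bits: the PHY ID */

        if (n < 32)
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
        else
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
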
@@ -2045,17 +2307,9 @@ static const struct fw_card_driver ohci_driver = {
2045 .stop_iso = ohci_stop_iso, 2307 .stop_iso = ohci_stop_iso,
2046}; 2308};
2047 2309
2048static int __devinit
2049pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2050{
2051 struct fw_ohci *ohci;
2052 u32 bus_options, max_receive, link_speed;
2053 u64 guid;
2054 int err;
2055 size_t size;
2056
2057#ifdef CONFIG_PPC_PMAC 2310#ifdef CONFIG_PPC_PMAC
2058 /* Necessary on some machines if fw-ohci was loaded/ unloaded before */ 2311static void ohci_pmac_on(struct pci_dev *dev)
2312{
2059 if (machine_is(powermac)) { 2313 if (machine_is(powermac)) {
2060 struct device_node *ofn = pci_device_to_OF_node(dev); 2314 struct device_node *ofn = pci_device_to_OF_node(dev);
2061 2315
@@ -2064,8 +2318,33 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2064 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 2318 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2065 } 2319 }
2066 } 2320 }
2321}
2322
2323static void ohci_pmac_off(struct pci_dev *dev)
2324{
2325 if (machine_is(powermac)) {
2326 struct device_node *ofn = pci_device_to_OF_node(dev);
2327
2328 if (ofn) {
2329 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2330 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
2331 }
2332 }
2333}
2334#else
2335#define ohci_pmac_on(dev)
2336#define ohci_pmac_off(dev)
2067#endif /* CONFIG_PPC_PMAC */ 2337#endif /* CONFIG_PPC_PMAC */
2068 2338
2339static int __devinit
2340pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2341{
2342 struct fw_ohci *ohci;
2343 u32 bus_options, max_receive, link_speed;
2344 u64 guid;
2345 int err;
2346 size_t size;
2347
2069 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2348 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
2070 if (ohci == NULL) { 2349 if (ohci == NULL) {
2071 fw_error("Could not malloc fw_ohci data.\n"); 2350 fw_error("Could not malloc fw_ohci data.\n");
@@ -2074,10 +2353,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2074 2353
2075 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2354 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2076 2355
2356 ohci_pmac_on(dev);
2357
2077 err = pci_enable_device(dev); 2358 err = pci_enable_device(dev);
2078 if (err) { 2359 if (err) {
2079 fw_error("Failed to enable OHCI hardware.\n"); 2360 fw_error("Failed to enable OHCI hardware.\n");
2080 goto fail_put_card; 2361 goto fail_free;
2081 } 2362 }
2082 2363
2083 pci_set_master(dev); 2364 pci_set_master(dev);
@@ -2088,6 +2369,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2088 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE && 2369 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2089 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW; 2370 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2090#endif 2371#endif
2372 ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2373
2091 spin_lock_init(&ohci->lock); 2374 spin_lock_init(&ohci->lock);
2092 2375
2093 tasklet_init(&ohci->bus_reset_tasklet, 2376 tasklet_init(&ohci->bus_reset_tasklet,
@@ -2173,8 +2456,9 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2173 pci_release_region(dev, 0); 2456 pci_release_region(dev, 0);
2174 fail_disable: 2457 fail_disable:
2175 pci_disable_device(dev); 2458 pci_disable_device(dev);
2176 fail_put_card: 2459 fail_free:
2177 fw_card_put(&ohci->card); 2460 kfree(&ohci->card);
2461 ohci_pmac_off(dev);
2178 2462
2179 return err; 2463 return err;
2180} 2464}
@@ -2202,72 +2486,42 @@ static void pci_remove(struct pci_dev *dev)
2202 pci_iounmap(dev, ohci->registers); 2486 pci_iounmap(dev, ohci->registers);
2203 pci_release_region(dev, 0); 2487 pci_release_region(dev, 0);
2204 pci_disable_device(dev); 2488 pci_disable_device(dev);
2205 fw_card_put(&ohci->card); 2489 kfree(&ohci->card);
2206 2490 ohci_pmac_off(dev);
2207#ifdef CONFIG_PPC_PMAC
2208 /* On UniNorth, power down the cable and turn off the chip clock
2209 * to save power on laptops */
2210 if (machine_is(powermac)) {
2211 struct device_node *ofn = pci_device_to_OF_node(dev);
2212
2213 if (ofn) {
2214 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2215 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
2216 }
2217 }
2218#endif /* CONFIG_PPC_PMAC */
2219 2491
2220 fw_notify("Removed fw-ohci device.\n"); 2492 fw_notify("Removed fw-ohci device.\n");
2221} 2493}
2222 2494
2223#ifdef CONFIG_PM 2495#ifdef CONFIG_PM
2224static int pci_suspend(struct pci_dev *pdev, pm_message_t state) 2496static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2225{ 2497{
2226 struct fw_ohci *ohci = pci_get_drvdata(pdev); 2498 struct fw_ohci *ohci = pci_get_drvdata(dev);
2227 int err; 2499 int err;
2228 2500
2229 software_reset(ohci); 2501 software_reset(ohci);
2230 free_irq(pdev->irq, ohci); 2502 free_irq(dev->irq, ohci);
2231 err = pci_save_state(pdev); 2503 err = pci_save_state(dev);
2232 if (err) { 2504 if (err) {
2233 fw_error("pci_save_state failed\n"); 2505 fw_error("pci_save_state failed\n");
2234 return err; 2506 return err;
2235 } 2507 }
2236 err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2508 err = pci_set_power_state(dev, pci_choose_state(dev, state));
2237 if (err) 2509 if (err)
2238 fw_error("pci_set_power_state failed with %d\n", err); 2510 fw_error("pci_set_power_state failed with %d\n", err);
2239 2511 ohci_pmac_off(dev);
2240/* PowerMac suspend code comes last */
2241#ifdef CONFIG_PPC_PMAC
2242 if (machine_is(powermac)) {
2243 struct device_node *ofn = pci_device_to_OF_node(pdev);
2244
2245 if (ofn)
2246 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2247 }
2248#endif /* CONFIG_PPC_PMAC */
2249 2512
2250 return 0; 2513 return 0;
2251} 2514}
2252 2515
2253static int pci_resume(struct pci_dev *pdev) 2516static int pci_resume(struct pci_dev *dev)
2254{ 2517{
2255 struct fw_ohci *ohci = pci_get_drvdata(pdev); 2518 struct fw_ohci *ohci = pci_get_drvdata(dev);
2256 int err; 2519 int err;
2257 2520
2258/* PowerMac resume code comes first */ 2521 ohci_pmac_on(dev);
2259#ifdef CONFIG_PPC_PMAC 2522 pci_set_power_state(dev, PCI_D0);
2260 if (machine_is(powermac)) { 2523 pci_restore_state(dev);
2261 struct device_node *ofn = pci_device_to_OF_node(pdev); 2524 err = pci_enable_device(dev);
2262
2263 if (ofn)
2264 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2265 }
2266#endif /* CONFIG_PPC_PMAC */
2267
2268 pci_set_power_state(pdev, PCI_D0);
2269 pci_restore_state(pdev);
2270 err = pci_enable_device(pdev);
2271 if (err) { 2525 if (err) {
2272 fw_error("pci_enable_device failed\n"); 2526 fw_error("pci_enable_device failed\n");
2273 return err; 2527 return err;
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
index dec4f04e6b24..a2fbb6240ca7 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/fw-ohci.h
@@ -30,6 +30,7 @@
30#define OHCI1394_HCControl_softReset 0x00010000 30#define OHCI1394_HCControl_softReset 0x00010000
31#define OHCI1394_SelfIDBuffer 0x064 31#define OHCI1394_SelfIDBuffer 0x064
32#define OHCI1394_SelfIDCount 0x068 32#define OHCI1394_SelfIDCount 0x068
33#define OHCI1394_SelfIDCount_selfIDError 0x80000000
33#define OHCI1394_IRMultiChanMaskHiSet 0x070 34#define OHCI1394_IRMultiChanMaskHiSet 0x070
34#define OHCI1394_IRMultiChanMaskHiClear 0x074 35#define OHCI1394_IRMultiChanMaskHiClear 0x074
35#define OHCI1394_IRMultiChanMaskLoSet 0x078 36#define OHCI1394_IRMultiChanMaskLoSet 0x078
@@ -124,6 +125,7 @@
124#define OHCI1394_lockRespErr 0x00000200 125#define OHCI1394_lockRespErr 0x00000200
125#define OHCI1394_selfIDComplete 0x00010000 126#define OHCI1394_selfIDComplete 0x00010000
126#define OHCI1394_busReset 0x00020000 127#define OHCI1394_busReset 0x00020000
128#define OHCI1394_regAccessFail 0x00040000
127#define OHCI1394_phy 0x00080000 129#define OHCI1394_phy 0x00080000
128#define OHCI1394_cycleSynch 0x00100000 130#define OHCI1394_cycleSynch 0x00100000
129#define OHCI1394_cycle64Seconds 0x00200000 131#define OHCI1394_cycle64Seconds 0x00200000
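
Both new bits correspond to changes elsewhere in this series: OHCI1394_regAccessFail is handled in the interrupt hunk further up, and OHCI1394_SelfIDCount_selfIDError presumably guards self-ID parsing in the bus-reset tasklet. A hedged sketch of such a check (the actual code is not among the hunks shown):

        u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);

        if (reg & OHCI1394_SelfIDCount_selfIDError) {
                fw_error("inconsistent self IDs\n");
                return;
        }
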
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 62b4e47d0cc0..2a999373863e 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -153,6 +153,7 @@ struct sbp2_target {
153 struct list_head lu_list; 153 struct list_head lu_list;
154 154
155 u64 management_agent_address; 155 u64 management_agent_address;
156 u64 guid;
156 int directory_id; 157 int directory_id;
157 int node_id; 158 int node_id;
158 int address_high; 159 int address_high;
@@ -173,10 +174,8 @@ struct sbp2_target {
173#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ 174#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
174#define SBP2_ORB_NULL 0x80000000 175#define SBP2_ORB_NULL 0x80000000
175#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 176#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
176#define SBP2_RETRY_LIMIT 0xf /* 15 retries */ 177#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
177 178#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
178#define SBP2_DIRECTION_TO_MEDIA 0x0
179#define SBP2_DIRECTION_FROM_MEDIA 0x1
180 179
181/* Unit directory keys */ 180/* Unit directory keys */
182#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a 181#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
@@ -224,8 +223,8 @@ struct sbp2_status {
224}; 223};
225 224
226struct sbp2_pointer { 225struct sbp2_pointer {
227 u32 high; 226 __be32 high;
228 u32 low; 227 __be32 low;
229}; 228};
230 229
231struct sbp2_orb { 230struct sbp2_orb {
@@ -253,8 +252,8 @@ struct sbp2_management_orb {
253 struct { 252 struct {
254 struct sbp2_pointer password; 253 struct sbp2_pointer password;
255 struct sbp2_pointer response; 254 struct sbp2_pointer response;
256 u32 misc; 255 __be32 misc;
257 u32 length; 256 __be32 length;
258 struct sbp2_pointer status_fifo; 257 struct sbp2_pointer status_fifo;
259 } request; 258 } request;
260 __be32 response[4]; 259 __be32 response[4];
@@ -263,20 +262,17 @@ struct sbp2_management_orb {
263 struct sbp2_status status; 262 struct sbp2_status status;
264}; 263};
265 264
266#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
267#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
268
269struct sbp2_login_response { 265struct sbp2_login_response {
270 u32 misc; 266 __be32 misc;
271 struct sbp2_pointer command_block_agent; 267 struct sbp2_pointer command_block_agent;
272 u32 reconnect_hold; 268 __be32 reconnect_hold;
273}; 269};
274#define COMMAND_ORB_DATA_SIZE(v) ((v)) 270#define COMMAND_ORB_DATA_SIZE(v) ((v))
275#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16) 271#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
276#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19) 272#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
277#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20) 273#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
278#define COMMAND_ORB_SPEED(v) ((v) << 24) 274#define COMMAND_ORB_SPEED(v) ((v) << 24)
279#define COMMAND_ORB_DIRECTION(v) ((v) << 27) 275#define COMMAND_ORB_DIRECTION ((1) << 27)
280#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29) 276#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
281#define COMMAND_ORB_NOTIFY ((1) << 31) 277#define COMMAND_ORB_NOTIFY ((1) << 31)
282 278
@@ -285,7 +281,7 @@ struct sbp2_command_orb {
285 struct { 281 struct {
286 struct sbp2_pointer next; 282 struct sbp2_pointer next;
287 struct sbp2_pointer data_descriptor; 283 struct sbp2_pointer data_descriptor;
288 u32 misc; 284 __be32 misc;
289 u8 command_block[12]; 285 u8 command_block[12];
290 } request; 286 } request;
291 struct scsi_cmnd *cmd; 287 struct scsi_cmnd *cmd;
@@ -459,8 +455,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
459 unsigned long flags; 455 unsigned long flags;
460 456
461 orb->pointer.high = 0; 457 orb->pointer.high = 0;
462 orb->pointer.low = orb->request_bus; 458 orb->pointer.low = cpu_to_be32(orb->request_bus);
463 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
464 459
465 spin_lock_irqsave(&device->card->lock, flags); 460 spin_lock_irqsave(&device->card->lock, flags);
466 list_add_tail(&orb->link, &lu->orb_list); 461 list_add_tail(&orb->link, &lu->orb_list);
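
The sbp2_pointer and ORB request fields change from u32 to __be32 throughout this file, so each field is converted with cpu_to_be32() at assignment time and the bulk fw_memcpy_to_be32() passes disappear (which presumably also lets sparse check the endianness annotations). The pattern, taken from the hunk above:

        /* before: plain u32 fields, byte-swapped in bulk afterwards */
        orb->pointer.low = orb->request_bus;
        fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));

        /* after: __be32 fields, swapped once at assignment */
        orb->pointer.low = cpu_to_be32(orb->request_bus);
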
@@ -536,31 +531,31 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
536 if (dma_mapping_error(orb->response_bus)) 531 if (dma_mapping_error(orb->response_bus))
537 goto fail_mapping_response; 532 goto fail_mapping_response;
538 533
539 orb->request.response.high = 0; 534 orb->request.response.high = 0;
540 orb->request.response.low = orb->response_bus; 535 orb->request.response.low = cpu_to_be32(orb->response_bus);
541 536
542 orb->request.misc = 537 orb->request.misc = cpu_to_be32(
543 MANAGEMENT_ORB_NOTIFY | 538 MANAGEMENT_ORB_NOTIFY |
544 MANAGEMENT_ORB_FUNCTION(function) | 539 MANAGEMENT_ORB_FUNCTION(function) |
545 MANAGEMENT_ORB_LUN(lun_or_login_id); 540 MANAGEMENT_ORB_LUN(lun_or_login_id));
546 orb->request.length = 541 orb->request.length = cpu_to_be32(
547 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)); 542 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
548 543
549 orb->request.status_fifo.high = lu->address_handler.offset >> 32; 544 orb->request.status_fifo.high =
550 orb->request.status_fifo.low = lu->address_handler.offset; 545 cpu_to_be32(lu->address_handler.offset >> 32);
546 orb->request.status_fifo.low =
547 cpu_to_be32(lu->address_handler.offset);
551 548
552 if (function == SBP2_LOGIN_REQUEST) { 549 if (function == SBP2_LOGIN_REQUEST) {
553 /* Ask for 2^2 == 4 seconds reconnect grace period */ 550 /* Ask for 2^2 == 4 seconds reconnect grace period */
554 orb->request.misc |= 551 orb->request.misc |= cpu_to_be32(
555 MANAGEMENT_ORB_RECONNECT(2) | 552 MANAGEMENT_ORB_RECONNECT(2) |
556 MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login); 553 MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
557 timeout = lu->tgt->mgt_orb_timeout; 554 timeout = lu->tgt->mgt_orb_timeout;
558 } else { 555 } else {
559 timeout = SBP2_ORB_TIMEOUT; 556 timeout = SBP2_ORB_TIMEOUT;
560 } 557 }
561 558
562 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
563
564 init_completion(&orb->done); 559 init_completion(&orb->done);
565 orb->base.callback = complete_management_orb; 560 orb->base.callback = complete_management_orb;
566 561
@@ -605,8 +600,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
605 sizeof(orb->response), DMA_FROM_DEVICE); 600 sizeof(orb->response), DMA_FROM_DEVICE);
606 fail_mapping_response: 601 fail_mapping_response:
607 if (response) 602 if (response)
608 fw_memcpy_from_be32(response, 603 memcpy(response, orb->response, sizeof(orb->response));
609 orb->response, sizeof(orb->response));
610 kref_put(&orb->base.kref, free_orb); 604 kref_put(&orb->base.kref, free_orb);
611 605
612 return retval; 606 return retval;
@@ -701,10 +695,8 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
701 if (!tgt->dont_block && !lu->blocked && 695 if (!tgt->dont_block && !lu->blocked &&
702 lu->generation != card->generation) { 696 lu->generation != card->generation) {
703 lu->blocked = true; 697 lu->blocked = true;
704 if (++tgt->blocked == 1) { 698 if (++tgt->blocked == 1)
705 scsi_block_requests(shost); 699 scsi_block_requests(shost);
706 fw_notify("blocked %s\n", lu->tgt->bus_id);
707 }
708 } 700 }
709 spin_unlock_irqrestore(&card->lock, flags); 701 spin_unlock_irqrestore(&card->lock, flags);
710} 702}
@@ -731,10 +723,8 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
731 } 723 }
732 spin_unlock_irqrestore(&card->lock, flags); 724 spin_unlock_irqrestore(&card->lock, flags);
733 725
734 if (unblock) { 726 if (unblock)
735 scsi_unblock_requests(shost); 727 scsi_unblock_requests(shost);
736 fw_notify("unblocked %s\n", lu->tgt->bus_id);
737 }
738} 728}
739 729
740/* 730/*
@@ -796,7 +786,7 @@ static void sbp2_release_target(struct kref *kref)
796 scsi_remove_host(shost); 786 scsi_remove_host(shost);
797 fw_notify("released %s\n", tgt->bus_id); 787 fw_notify("released %s\n", tgt->bus_id);
798 788
799 put_device(&tgt->unit->device); 789 fw_unit_put(tgt->unit);
800 scsi_host_put(shost); 790 scsi_host_put(shost);
801 fw_device_put(device); 791 fw_device_put(device);
802} 792}
@@ -825,6 +815,22 @@ complete_set_busy_timeout(struct fw_card *card, int rcode,
825 complete(done); 815 complete(done);
826} 816}
827 817
818/*
819 * Write retransmit retry values into the BUSY_TIMEOUT register.
820 * - The single-phase retry protocol is supported by all SBP-2 devices, but the
821 * default retry_limit value is 0 (i.e. never retry transmission). We write a
822 * saner value after logging into the device.
823 * - The dual-phase retry protocol is optional to implement, and if not
824 * supported, writes to the dual-phase portion of the register will be
825 * ignored. We try to write the original 1394-1995 default here.
826 * - In the case of devices that are also SBP-3-compliant, all writes are
827 * ignored, as the register is read-only, but contains single-phase retry of
 828 15, which is what we're trying to set for all SBP-2 devices anyway, so this
829 * write attempt is safe and yields more consistent behavior for all devices.
830 *
831 * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
832 * and section 6.4 of the SBP-3 spec for further details.
833 */
828static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) 834static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
829{ 835{
830 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 836 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
@@ -832,8 +838,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
832 struct fw_transaction t; 838 struct fw_transaction t;
833 static __be32 busy_timeout; 839 static __be32 busy_timeout;
834 840
835 /* FIXME: we should try to set dual-phase cycle_limit too */ 841 busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
836 busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT);
837 842
838 fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, 843 fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
839 lu->tgt->node_id, lu->generation, device->max_speed, 844 lu->tgt->node_id, lu->generation, device->max_speed,
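
Working out the quadlet written above from the constants defined earlier in this file -- SBP2_CYCLE_LIMIT is (0xc8 << 12) and SBP2_RETRY_LIMIT is 0xf:

        /* (0xc8 << 12) | 0xf == 0x000c800f
         *   dual-phase cycle_limit  = 0xc8 = 200 cycles of 125 us = 25 ms
         *   single-phase retry_limit = 0xf = 15 retries
         */
        busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
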
@@ -885,11 +890,10 @@ static void sbp2_login(struct work_struct *work)
885 tgt->address_high = local_node_id << 16; 890 tgt->address_high = local_node_id << 16;
886 sbp2_set_generation(lu, generation); 891 sbp2_set_generation(lu, generation);
887 892
888 /* Get command block agent offset and login id. */
889 lu->command_block_agent_address = 893 lu->command_block_agent_address =
890 ((u64) (response.command_block_agent.high & 0xffff) << 32) | 894 ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
891 response.command_block_agent.low; 895 << 32) | be32_to_cpu(response.command_block_agent.low);
892 lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); 896 lu->login_id = be32_to_cpu(response.misc) & 0xffff;
893 897
894 fw_notify("%s: logged in to LUN %04x (%d retries)\n", 898 fw_notify("%s: logged in to LUN %04x (%d retries)\n",
895 tgt->bus_id, lu->lun, lu->retries); 899 tgt->bus_id, lu->lun, lu->retries);
@@ -1111,6 +1115,7 @@ static int sbp2_probe(struct device *dev)
1111 kref_init(&tgt->kref); 1115 kref_init(&tgt->kref);
1112 INIT_LIST_HEAD(&tgt->lu_list); 1116 INIT_LIST_HEAD(&tgt->lu_list);
1113 tgt->bus_id = unit->device.bus_id; 1117 tgt->bus_id = unit->device.bus_id;
1118 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1114 1119
1115 if (fw_device_enable_phys_dma(device) < 0) 1120 if (fw_device_enable_phys_dma(device) < 0)
1116 goto fail_shost_put; 1121 goto fail_shost_put;
@@ -1119,6 +1124,7 @@ static int sbp2_probe(struct device *dev)
1119 goto fail_shost_put; 1124 goto fail_shost_put;
1120 1125
1121 fw_device_get(device); 1126 fw_device_get(device);
1127 fw_unit_get(unit);
1122 1128
1123 /* Initialize to values that won't match anything in our table. */ 1129 /* Initialize to values that won't match anything in our table. */
1124 firmware_revision = 0xff000000; 1130 firmware_revision = 0xff000000;
@@ -1134,8 +1140,6 @@ static int sbp2_probe(struct device *dev)
1134 1140
1135 sbp2_init_workarounds(tgt, model, firmware_revision); 1141 sbp2_init_workarounds(tgt, model, firmware_revision);
1136 1142
1137 get_device(&unit->device);
1138
1139 /* Do the login in a workqueue so we can easily reschedule retries. */ 1143 /* Do the login in a workqueue so we can easily reschedule retries. */
1140 list_for_each_entry(lu, &tgt->lu_list, link) 1144 list_for_each_entry(lu, &tgt->lu_list, link)
1141 sbp2_queue_work(lu, 0); 1145 sbp2_queue_work(lu, 0);
@@ -1367,9 +1371,12 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1367 * tables. 1371 * tables.
1368 */ 1372 */
1369 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { 1373 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
1370 orb->request.data_descriptor.high = lu->tgt->address_high; 1374 orb->request.data_descriptor.high =
1371 orb->request.data_descriptor.low = sg_dma_address(sg); 1375 cpu_to_be32(lu->tgt->address_high);
1372 orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)); 1376 orb->request.data_descriptor.low =
1377 cpu_to_be32(sg_dma_address(sg));
1378 orb->request.misc |=
1379 cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
1373 return 0; 1380 return 0;
1374 } 1381 }
1375 1382
@@ -1390,16 +1397,14 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1390 goto fail_page_table; 1397 goto fail_page_table;
1391 } 1398 }
1392 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH); 1399 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
1393 orb->page_table[j].low = sg_addr; 1400 orb->page_table[j].low = cpu_to_be32(sg_addr);
1394 orb->page_table[j].high = (l << 16); 1401 orb->page_table[j].high = cpu_to_be32(l << 16);
1395 sg_addr += l; 1402 sg_addr += l;
1396 sg_len -= l; 1403 sg_len -= l;
1397 j++; 1404 j++;
1398 } 1405 }
1399 } 1406 }
1400 1407
1401 fw_memcpy_to_be32(orb->page_table, orb->page_table,
1402 sizeof(orb->page_table[0]) * j);
1403 orb->page_table_bus = 1408 orb->page_table_bus =
1404 dma_map_single(device->card->device, orb->page_table, 1409 dma_map_single(device->card->device, orb->page_table,
1405 sizeof(orb->page_table), DMA_TO_DEVICE); 1410 sizeof(orb->page_table), DMA_TO_DEVICE);
@@ -1413,11 +1418,10 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1413 * initiator (i.e. us), but data_descriptor can refer to data 1418 * initiator (i.e. us), but data_descriptor can refer to data
1414 * on other nodes so we need to put our ID in descriptor.high. 1419 * on other nodes so we need to put our ID in descriptor.high.
1415 */ 1420 */
1416 orb->request.data_descriptor.high = lu->tgt->address_high; 1421 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
1417 orb->request.data_descriptor.low = orb->page_table_bus; 1422 orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
1418 orb->request.misc |= 1423 orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
1419 COMMAND_ORB_PAGE_TABLE_PRESENT | 1424 COMMAND_ORB_DATA_SIZE(j));
1420 COMMAND_ORB_DATA_SIZE(j);
1421 1425
1422 return 0; 1426 return 0;
1423 1427
@@ -1463,8 +1467,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1463 orb->done = done; 1467 orb->done = done;
1464 orb->cmd = cmd; 1468 orb->cmd = cmd;
1465 1469
1466 orb->request.next.high = SBP2_ORB_NULL; 1470 orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
1467 orb->request.next.low = 0x0;
1468 /* 1471 /*
1469 * At speed 100 we can do 512 bytes per packet, at speed 200, 1472 * At speed 100 we can do 512 bytes per packet, at speed 200,
1470 * 1024 bytes per packet etc. The SBP-2 max_payload field 1473 * 1024 bytes per packet etc. The SBP-2 max_payload field
@@ -1473,25 +1476,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1473 */ 1476 */
1474 max_payload = min(device->max_speed + 7, 1477 max_payload = min(device->max_speed + 7,
1475 device->card->max_receive - 1); 1478 device->card->max_receive - 1);
1476 orb->request.misc = 1479 orb->request.misc = cpu_to_be32(
1477 COMMAND_ORB_MAX_PAYLOAD(max_payload) | 1480 COMMAND_ORB_MAX_PAYLOAD(max_payload) |
1478 COMMAND_ORB_SPEED(device->max_speed) | 1481 COMMAND_ORB_SPEED(device->max_speed) |
1479 COMMAND_ORB_NOTIFY; 1482 COMMAND_ORB_NOTIFY);
1480 1483
1481 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1484 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1482 orb->request.misc |= 1485 orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
1483 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
1484 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
1485 orb->request.misc |=
1486 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
1487 1486
1488 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) 1487 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
1489 goto out; 1488 goto out;
1490 1489
1491 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
1492
1493 memset(orb->request.command_block,
1494 0, sizeof(orb->request.command_block));
1495 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 1490 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
1496 1491
1497 orb->base.callback = complete_command_orb; 1492 orb->base.callback = complete_command_orb;
@@ -1519,11 +1514,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1519 1514
1520 sdev->allow_restart = 1; 1515 sdev->allow_restart = 1;
1521 1516
1522 /* 1517 /* SBP-2 requires quadlet alignment of the data buffers. */
1523 * Update the dma alignment (minimum alignment requirements for 1518 blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
1524 * start and end of DMA transfers) to be a sector
1525 */
1526 blk_queue_update_dma_alignment(sdev->request_queue, 511);
1527 1519
1528 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) 1520 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1529 sdev->inquiry_len = 36; 1521 sdev->inquiry_len = 36;
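
blk_queue_update_dma_alignment() takes a mask, i.e. alignment minus one, so the change above relaxes the requirement from 512-byte (mask 511) to quadlet (mask 3) alignment, which is what SBP-2 actually requires:

        /* the second argument is a mask: alignment - 1 */
        blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);   /* quadlet */
        /* previously: blk_queue_update_dma_alignment(sdev->request_queue, 511); */
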
@@ -1581,16 +1573,14 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
1581{ 1573{
1582 struct scsi_device *sdev = to_scsi_device(dev); 1574 struct scsi_device *sdev = to_scsi_device(dev);
1583 struct sbp2_logical_unit *lu; 1575 struct sbp2_logical_unit *lu;
1584 struct fw_device *device;
1585 1576
1586 if (!sdev) 1577 if (!sdev)
1587 return 0; 1578 return 0;
1588 1579
1589 lu = sdev->hostdata; 1580 lu = sdev->hostdata;
1590 device = fw_device(lu->tgt->unit->device.parent);
1591 1581
1592 return sprintf(buf, "%08x%08x:%06x:%04x\n", 1582 return sprintf(buf, "%016llx:%06x:%04x\n",
1593 device->config_rom[3], device->config_rom[4], 1583 (unsigned long long)lu->tgt->guid,
1594 lu->tgt->directory_id, lu->lun); 1584 lu->tgt->directory_id, lu->lun);
1595} 1585}
1596 1586
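
The EUI-64 is now captured once at probe time into tgt->guid (config ROM quadlets 3 and 4 carry it), so the sysfs show routine above no longer dereferences the fw_device's config ROM, which other patches in this series may reread and reallocate after a bus reset. From the probe hunk earlier in this file:

        tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
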
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index d2c7a3d7e1cb..213b0ff8f3d6 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -108,6 +108,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
108 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); 108 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
109 node->link_on = SELF_ID_LINK_ON(sid); 109 node->link_on = SELF_ID_LINK_ON(sid);
110 node->phy_speed = SELF_ID_PHY_SPEED(sid); 110 node->phy_speed = SELF_ID_PHY_SPEED(sid);
111 node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
111 node->port_count = port_count; 112 node->port_count = port_count;
112 113
113 atomic_set(&node->ref_count, 1); 114 atomic_set(&node->ref_count, 1);
@@ -289,12 +290,11 @@ static struct fw_node *build_tree(struct fw_card *card,
289 beta_repeaters_present = true; 290 beta_repeaters_present = true;
290 291
291 /* 292 /*
292 * If all PHYs does not report the same gap count 293 * If PHYs report different gap counts, set an invalid count
293 * setting, we fall back to 63 which will force a gap 294 * which will force a gap count reconfiguration and a reset.
294 * count reconfiguration and a reset.
295 */ 295 */
296 if (SELF_ID_GAP_COUNT(q) != gap_count) 296 if (SELF_ID_GAP_COUNT(q) != gap_count)
297 gap_count = 63; 297 gap_count = 0;
298 298
299 update_hop_count(node); 299 update_hop_count(node);
300 300
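
A gap count of 0 is not something the driver would ever configure, so (as an assumption about the consumer, which is not among these hunks) the bus management code can treat it as "PHYs disagreed" and reconfigure, whereas the old fallback of 63 is itself a legal value and could go unnoticed. Roughly:

        /* hedged sketch of the presumed check in the bus manager */
        if (card->gap_count == 0) {
                /* inconsistent gap counts: send a PHY configuration packet
                 * with a proper gap count and initiate another bus reset */
        }
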
@@ -431,6 +431,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
431 event = FW_NODE_LINK_OFF; 431 event = FW_NODE_LINK_OFF;
432 else if (!node0->link_on && node1->link_on) 432 else if (!node0->link_on && node1->link_on)
433 event = FW_NODE_LINK_ON; 433 event = FW_NODE_LINK_ON;
434 else if (node1->initiated_reset && node1->link_on)
435 event = FW_NODE_INITIATED_RESET;
434 else 436 else
435 event = FW_NODE_UPDATED; 437 event = FW_NODE_UPDATED;
436 438
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index cedc1ec906e9..addb9f8ea776 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -20,11 +20,12 @@
20#define __fw_topology_h 20#define __fw_topology_h
21 21
22enum { 22enum {
23 FW_NODE_CREATED = 0x00, 23 FW_NODE_CREATED,
24 FW_NODE_UPDATED = 0x01, 24 FW_NODE_UPDATED,
25 FW_NODE_DESTROYED = 0x02, 25 FW_NODE_DESTROYED,
26 FW_NODE_LINK_ON = 0x03, 26 FW_NODE_LINK_ON,
27 FW_NODE_LINK_OFF = 0x04, 27 FW_NODE_LINK_OFF,
28 FW_NODE_INITIATED_RESET,
28}; 29};
29 30
30struct fw_node { 31struct fw_node {
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index e6f1bda38940..3a59e9b783b0 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -18,6 +18,7 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/completion.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/init.h> 24#include <linux/init.h>
@@ -294,42 +295,40 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
294} 295}
295EXPORT_SYMBOL(fw_send_request); 296EXPORT_SYMBOL(fw_send_request);
296 297
298struct fw_phy_packet {
299 struct fw_packet packet;
300 struct completion done;
301};
302
297static void 303static void
298transmit_phy_packet_callback(struct fw_packet *packet, 304transmit_phy_packet_callback(struct fw_packet *packet,
299 struct fw_card *card, int status) 305 struct fw_card *card, int status)
300{ 306{
301 kfree(packet); 307 struct fw_phy_packet *p =
302} 308 container_of(packet, struct fw_phy_packet, packet);
303
304static void send_phy_packet(struct fw_card *card, u32 data, int generation)
305{
306 struct fw_packet *packet;
307
308 packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
309 if (packet == NULL)
310 return;
311
312 packet->header[0] = data;
313 packet->header[1] = ~data;
314 packet->header_length = 8;
315 packet->payload_length = 0;
316 packet->speed = SCODE_100;
317 packet->generation = generation;
318 packet->callback = transmit_phy_packet_callback;
319 309
320 card->driver->send_request(card, packet); 310 complete(&p->done);
321} 311}
322 312
323void fw_send_phy_config(struct fw_card *card, 313void fw_send_phy_config(struct fw_card *card,
324 int node_id, int generation, int gap_count) 314 int node_id, int generation, int gap_count)
325{ 315{
326 u32 q; 316 struct fw_phy_packet p;
327 317 u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
328 q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | 318 PHY_CONFIG_ROOT_ID(node_id) |
329 PHY_CONFIG_ROOT_ID(node_id) | 319 PHY_CONFIG_GAP_COUNT(gap_count);
330 PHY_CONFIG_GAP_COUNT(gap_count); 320
331 321 p.packet.header[0] = data;
332 send_phy_packet(card, q, generation); 322 p.packet.header[1] = ~data;
323 p.packet.header_length = 8;
324 p.packet.payload_length = 0;
325 p.packet.speed = SCODE_100;
326 p.packet.generation = generation;
327 p.packet.callback = transmit_phy_packet_callback;
328 init_completion(&p.done);
329
330 card->driver->send_request(card, &p.packet);
331 wait_for_completion(&p.done);
333} 332}
334 333
335void fw_flush_transactions(struct fw_card *card) 334void fw_flush_transactions(struct fw_card *card)
@@ -389,21 +388,21 @@ lookup_enclosing_address_handler(struct list_head *list,
389static DEFINE_SPINLOCK(address_handler_lock); 388static DEFINE_SPINLOCK(address_handler_lock);
390static LIST_HEAD(address_handler_list); 389static LIST_HEAD(address_handler_list);
391 390
392const struct fw_address_region fw_low_memory_region =
393 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
394const struct fw_address_region fw_high_memory_region = 391const struct fw_address_region fw_high_memory_region =
395 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; 392 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
393EXPORT_SYMBOL(fw_high_memory_region);
394
395#if 0
396const struct fw_address_region fw_low_memory_region =
397 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
396const struct fw_address_region fw_private_region = 398const struct fw_address_region fw_private_region =
397 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; 399 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
398const struct fw_address_region fw_csr_region = 400const struct fw_address_region fw_csr_region =
399 { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, }; 401 { .start = CSR_REGISTER_BASE,
402 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
400const struct fw_address_region fw_unit_space_region = 403const struct fw_address_region fw_unit_space_region =
401 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; 404 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
402EXPORT_SYMBOL(fw_low_memory_region); 405#endif /* 0 */
403EXPORT_SYMBOL(fw_high_memory_region);
404EXPORT_SYMBOL(fw_private_region);
405EXPORT_SYMBOL(fw_csr_region);
406EXPORT_SYMBOL(fw_unit_space_region);
407 406
408/** 407/**
409 * Allocate a range of addresses in the node space of the OHCI 408 * Allocate a range of addresses in the node space of the OHCI
@@ -747,7 +746,8 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
747EXPORT_SYMBOL(fw_core_handle_response); 746EXPORT_SYMBOL(fw_core_handle_response);
748 747
749static const struct fw_address_region topology_map_region = 748static const struct fw_address_region topology_map_region =
750 { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, }; 749 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
750 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
751 751
752static void 752static void
753handle_topology_map(struct fw_card *card, struct fw_request *request, 753handle_topology_map(struct fw_card *card, struct fw_request *request,
@@ -785,7 +785,8 @@ static struct fw_address_handler topology_map = {
785}; 785};
786 786
787static const struct fw_address_region registers_region = 787static const struct fw_address_region registers_region =
788 { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, }; 788 { .start = CSR_REGISTER_BASE,
789 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
789 790
790static void 791static void
791handle_registers(struct fw_card *card, struct fw_request *request, 792handle_registers(struct fw_card *card, struct fw_request *request,
@@ -794,7 +795,7 @@ handle_registers(struct fw_card *card, struct fw_request *request,
794 unsigned long long offset, 795 unsigned long long offset,
795 void *payload, size_t length, void *callback_data) 796 void *payload, size_t length, void *callback_data)
796{ 797{
797 int reg = offset - CSR_REGISTER_BASE; 798 int reg = offset & ~CSR_REGISTER_BASE;
798 unsigned long long bus_time; 799 unsigned long long bus_time;
799 __be32 *data = payload; 800 __be32 *data = payload;
800 801
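
With CSR_REGISTER_BASE == 0xfffff0000000 and offsets confined to the (now constant-based) registers_region above, masking and subtraction yield the same small register index; the bitwise form is simply the cheaper way to strip the base. For a hypothetical offset:

        unsigned long long offset = 0xfffff0000200ULL;  /* hypothetical value */

        int reg_sub  = offset - CSR_REGISTER_BASE;      /* 0x200 */
        int reg_mask = offset & ~CSR_REGISTER_BASE;     /* 0x200, same result */
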
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index a43bb22912f9..04d3854f6560 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -201,11 +201,7 @@ struct fw_address_region {
201 u64 end; 201 u64 end;
202}; 202};
203 203
204extern const struct fw_address_region fw_low_memory_region;
205extern const struct fw_address_region fw_high_memory_region; 204extern const struct fw_address_region fw_high_memory_region;
206extern const struct fw_address_region fw_private_region;
207extern const struct fw_address_region fw_csr_region;
208extern const struct fw_address_region fw_unit_space_region;
209 205
210int fw_core_add_address_handler(struct fw_address_handler *handler, 206int fw_core_add_address_handler(struct fw_address_handler *handler,
211 const struct fw_address_region *region); 207 const struct fw_address_region *region);
@@ -221,12 +217,9 @@ struct fw_card {
221 const struct fw_card_driver *driver; 217 const struct fw_card_driver *driver;
222 struct device *device; 218 struct device *device;
223 atomic_t device_count; 219 atomic_t device_count;
224 struct kref kref;
225 220
226 int node_id; 221 int node_id;
227 int generation; 222 int generation;
228 /* This is the generation used for timestamping incoming requests. */
229 int request_generation;
230 int current_tlabel, tlabel_mask; 223 int current_tlabel, tlabel_mask;
231 struct list_head transaction_list; 224 struct list_head transaction_list;
232 struct timer_list flush_timer; 225 struct timer_list flush_timer;
@@ -263,9 +256,6 @@ struct fw_card {
263 int bm_generation; 256 int bm_generation;
264}; 257};
265 258
266struct fw_card *fw_card_get(struct fw_card *card);
267void fw_card_put(struct fw_card *card);
268
269/* 259/*
270 * The iso packet format allows for an immediate header/payload part 260 * The iso packet format allows for an immediate header/payload part
271 * stored in 'header' immediately after the packet info plus an 261 * stored in 'header' immediately after the packet info plus an
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 52ac83e0ebee..c90be4070e40 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -133,8 +133,7 @@ static void host_reset(struct hpsb_host *host)
133 host->csr.state &= ~0x100; 133 host->csr.state &= ~0x100;
134 } 134 }
135 135
136 host->csr.topology_map[1] = 136 be32_add_cpu(&host->csr.topology_map[1], 1);
137 cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
138 host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16 137 host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
139 | host->selfid_count); 138 | host->selfid_count);
140 host->csr.topology_map[0] = 139 host->csr.topology_map[0] =
@@ -142,8 +141,7 @@ static void host_reset(struct hpsb_host *host)
142 | csr_crc16(host->csr.topology_map + 1, 141 | csr_crc16(host->csr.topology_map + 1,
143 host->selfid_count + 2)); 142 host->selfid_count + 2));
144 143
145 host->csr.speed_map[1] = 144 be32_add_cpu(&host->csr.speed_map[1], 1);
146 cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
147 host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16 145 host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
148 | csr_crc16(host->csr.speed_map+1, 146 | csr_crc16(host->csr.speed_map+1,
149 0x3f1)); 147 0x3f1));
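
be32_add_cpu() is shorthand for the open-coded round trip it replaces, so the hunk above is behaviorally identical:

        be32_add_cpu(&host->csr.topology_map[1], 1);
        /* equivalent to:
         * host->csr.topology_map[1] =
         *      cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
         */
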
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 65722117ab6e..6228fadacd38 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2179,8 +2179,7 @@ static struct ieee1394_device_id dv1394_id_table[] = {
2179MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table); 2179MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
2180 2180
2181static struct hpsb_protocol_driver dv1394_driver = { 2181static struct hpsb_protocol_driver dv1394_driver = {
2182 .name = "dv1394", 2182 .name = "dv1394",
2183 .id_table = dv1394_id_table,
2184}; 2183};
2185 2184
2186 2185
@@ -2568,7 +2567,6 @@ static int __init dv1394_init_module(void)
2568 2567
2569 cdev_init(&dv1394_cdev, &dv1394_fops); 2568 cdev_init(&dv1394_cdev, &dv1394_fops);
2570 dv1394_cdev.owner = THIS_MODULE; 2569 dv1394_cdev.owner = THIS_MODULE;
2571 kobject_set_name(&dv1394_cdev.kobj, "dv1394");
2572 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16); 2570 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
2573 if (ret) { 2571 if (ret) {
2574 printk(KERN_ERR "dv1394: unable to register character device\n"); 2572 printk(KERN_ERR "dv1394: unable to register character device\n");
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index b6425469b6ee..fa2bfec0fca2 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -339,7 +339,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
339 if ((alignment & 3) || (alignment > 0x800000000000ULL) || 339 if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
340 (hweight64(alignment) != 1)) { 340 (hweight64(alignment) != 1)) {
341 HPSB_ERR("%s called with invalid alignment: 0x%048llx", 341 HPSB_ERR("%s called with invalid alignment: 0x%048llx",
342 __FUNCTION__, (unsigned long long)alignment); 342 __func__, (unsigned long long)alignment);
343 return retval; 343 return retval;
344 } 344 }
345 345
@@ -354,7 +354,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
354 if (((start|end) & ~align_mask) || (start >= end) || 354 if (((start|end) & ~align_mask) || (start >= end) ||
355 (end > CSR1212_ALL_SPACE_END)) { 355 (end > CSR1212_ALL_SPACE_END)) {
356 HPSB_ERR("%s called with invalid addresses " 356 HPSB_ERR("%s called with invalid addresses "
357 "(start = %012Lx end = %012Lx)", __FUNCTION__, 357 "(start = %012Lx end = %012Lx)", __func__,
358 (unsigned long long)start,(unsigned long long)end); 358 (unsigned long long)start,(unsigned long long)end);
359 return retval; 359 return retval;
360 } 360 }
@@ -422,7 +422,7 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
422 422
423 if (((start|end) & 3) || (start >= end) || 423 if (((start|end) & 3) || (start >= end) ||
424 (end > CSR1212_ALL_SPACE_END)) { 424 (end > CSR1212_ALL_SPACE_END)) {
425 HPSB_ERR("%s called with invalid addresses", __FUNCTION__); 425 HPSB_ERR("%s called with invalid addresses", __func__);
426 return 0; 426 return 0;
427 } 427 }
428 428
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 36c747b277d0..dcdb71a7718d 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -242,7 +242,7 @@ int hpsb_bus_reset(struct hpsb_host *host)
242{ 242{
243 if (host->in_bus_reset) { 243 if (host->in_bus_reset) {
244 HPSB_NOTICE("%s called while bus reset already in progress", 244 HPSB_NOTICE("%s called while bus reset already in progress",
245 __FUNCTION__); 245 __func__);
246 return 1; 246 return 1;
247 } 247 }
248 248
@@ -373,6 +373,8 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
373 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++; 373 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
374 374
375 speedcap[n] = sid->speed; 375 speedcap[n] = sid->speed;
376 if (speedcap[n] > host->csr.lnk_spd)
377 speedcap[n] = host->csr.lnk_spd;
376 n--; 378 n--;
377 } 379 }
378 } 380 }
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 511e4321c6b6..70afa3786f3f 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -701,7 +701,11 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
701 return 0; 701 return 0;
702 702
703 driver = container_of(drv, struct hpsb_protocol_driver, driver); 703 driver = container_of(drv, struct hpsb_protocol_driver, driver);
704 for (id = driver->id_table; id->match_flags != 0; id++) { 704 id = driver->id_table;
705 if (!id)
706 return 0;
707
708 for (; id->match_flags != 0; id++) {
705 if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) && 709 if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
706 id->vendor_id != ud->vendor_id) 710 id->vendor_id != ud->vendor_id)
707 continue; 711 continue;
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 969de2a2d633..0690469fcecf 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -149,7 +149,7 @@ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id ,
149/* Module Parameters */ 149/* Module Parameters */
150static int phys_dma = 1; 150static int phys_dma = 1;
151module_param(phys_dma, int, 0444); 151module_param(phys_dma, int, 0444);
152MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1)."); 152MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
153 153
154static void dma_trm_tasklet(unsigned long data); 154static void dma_trm_tasklet(unsigned long data);
155static void dma_trm_reset(struct dma_trm_ctx *d); 155static void dma_trm_reset(struct dma_trm_ctx *d);
@@ -708,7 +708,7 @@ static void insert_packet(struct ti_ohci *ohci,
708 /* FIXME: do something about it */ 708 /* FIXME: do something about it */
709 PRINT(KERN_ERR, 709 PRINT(KERN_ERR,
710 "%s: packet data addr: %p size %Zd bytes " 710 "%s: packet data addr: %p size %Zd bytes "
711 "cross page boundary", __FUNCTION__, 711 "cross page boundary", __func__,
712 packet->data, packet->data_size); 712 packet->data, packet->data_size);
713 } 713 }
714#endif 714#endif
@@ -2089,10 +2089,8 @@ static void dma_trm_reset(struct dma_trm_ctx *d)
2089 2089
2090 spin_lock_irqsave(&d->lock, flags); 2090 spin_lock_irqsave(&d->lock, flags);
2091 2091
2092 list_splice(&d->fifo_list, &packet_list); 2092 list_splice_init(&d->fifo_list, &packet_list);
2093 list_splice(&d->pending_list, &packet_list); 2093 list_splice_init(&d->pending_list, &packet_list);
2094 INIT_LIST_HEAD(&d->fifo_list);
2095 INIT_LIST_HEAD(&d->pending_list);
2096 2094
2097 d->branchAddrPtr = NULL; 2095 d->branchAddrPtr = NULL;
2098 d->sent_ind = d->prg_ind; 2096 d->sent_ind = d->prg_ind;
@@ -2787,7 +2785,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2787 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC); 2785 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2788 2786
2789 if (d->buf_cpu == NULL || d->buf_bus == NULL) { 2787 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2790 PRINT(KERN_ERR, "Failed to allocate dma buffer"); 2788 PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
2791 free_dma_rcv_ctx(d); 2789 free_dma_rcv_ctx(d);
2792 return -ENOMEM; 2790 return -ENOMEM;
2793 } 2791 }
@@ -2796,7 +2794,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2796 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC); 2794 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2797 2795
2798 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 2796 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2799 PRINT(KERN_ERR, "Failed to allocate dma prg"); 2797 PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
2800 free_dma_rcv_ctx(d); 2798 free_dma_rcv_ctx(d);
2801 return -ENOMEM; 2799 return -ENOMEM;
2802 } 2800 }
@@ -2804,7 +2802,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2804 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC); 2802 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2805 2803
2806 if (d->spb == NULL) { 2804 if (d->spb == NULL) {
2807 PRINT(KERN_ERR, "Failed to allocate split buffer"); 2805 PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
2808 free_dma_rcv_ctx(d); 2806 free_dma_rcv_ctx(d);
2809 return -ENOMEM; 2807 return -ENOMEM;
2810 } 2808 }
@@ -2830,7 +2828,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2830 memset(d->buf_cpu[i], 0, d->buf_size); 2828 memset(d->buf_cpu[i], 0, d->buf_size);
2831 } else { 2829 } else {
2832 PRINT(KERN_ERR, 2830 PRINT(KERN_ERR,
2833 "Failed to allocate dma buffer"); 2831 "Failed to allocate %s", "DMA buffer");
2834 free_dma_rcv_ctx(d); 2832 free_dma_rcv_ctx(d);
2835 return -ENOMEM; 2833 return -ENOMEM;
2836 } 2834 }
@@ -2841,7 +2839,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2841 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd)); 2839 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2842 } else { 2840 } else {
2843 PRINT(KERN_ERR, 2841 PRINT(KERN_ERR,
2844 "Failed to allocate dma prg"); 2842 "Failed to allocate %s", "DMA prg");
2845 free_dma_rcv_ctx(d); 2843 free_dma_rcv_ctx(d);
2846 return -ENOMEM; 2844 return -ENOMEM;
2847 } 2845 }
@@ -2902,7 +2900,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2902 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL); 2900 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2903 2901
2904 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 2902 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2905 PRINT(KERN_ERR, "Failed to allocate at dma prg"); 2903 PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
2906 free_dma_trm_ctx(d); 2904 free_dma_trm_ctx(d);
2907 return -ENOMEM; 2905 return -ENOMEM;
2908 } 2906 }
@@ -2925,7 +2923,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2925 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg)); 2923 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2926 } else { 2924 } else {
2927 PRINT(KERN_ERR, 2925 PRINT(KERN_ERR,
2928 "Failed to allocate at dma prg"); 2926 "Failed to allocate %s", "AT DMA prg");
2929 free_dma_trm_ctx(d); 2927 free_dma_trm_ctx(d);
2930 return -ENOMEM; 2928 return -ENOMEM;
2931 } 2929 }
@@ -2986,22 +2984,9 @@ static struct hpsb_host_driver ohci1394_driver = {
2986 * PCI Driver Interface functions * 2984 * PCI Driver Interface functions *
2987 ***********************************/ 2985 ***********************************/
2988 2986
2989#define FAIL(err, fmt, args...) \
2990do { \
2991 PRINT_G(KERN_ERR, fmt , ## args); \
2992 ohci1394_pci_remove(dev); \
2993 return err; \
2994} while (0)
2995
2996static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
2997 const struct pci_device_id *ent)
2998{
2999 struct hpsb_host *host;
3000 struct ti_ohci *ohci; /* shortcut to currently handled device */
3001 resource_size_t ohci_base;
3002
3003#ifdef CONFIG_PPC_PMAC 2987#ifdef CONFIG_PPC_PMAC
3004 /* Necessary on some machines if ohci1394 was loaded/ unloaded before */ 2988static void ohci1394_pmac_on(struct pci_dev *dev)
2989{
3005 if (machine_is(powermac)) { 2990 if (machine_is(powermac)) {
3006 struct device_node *ofn = pci_device_to_OF_node(dev); 2991 struct device_node *ofn = pci_device_to_OF_node(dev);
3007 2992
@@ -3010,15 +2995,45 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3010 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 2995 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3011 } 2996 }
3012 } 2997 }
2998}
2999
3000static void ohci1394_pmac_off(struct pci_dev *dev)
3001{
3002 if (machine_is(powermac)) {
3003 struct device_node *ofn = pci_device_to_OF_node(dev);
3004
3005 if (ofn) {
3006 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3007 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3008 }
3009 }
3010}
3011#else
3012#define ohci1394_pmac_on(dev)
3013#define ohci1394_pmac_off(dev)
3013#endif /* CONFIG_PPC_PMAC */ 3014#endif /* CONFIG_PPC_PMAC */
3014 3015
3015 if (pci_enable_device(dev)) 3016static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3016 FAIL(-ENXIO, "Failed to enable OHCI hardware"); 3017 const struct pci_device_id *ent)
3018{
3019 struct hpsb_host *host;
3020 struct ti_ohci *ohci; /* shortcut to currently handled device */
3021 resource_size_t ohci_base;
3022 int err = -ENOMEM;
3023
3024 ohci1394_pmac_on(dev);
3025 if (pci_enable_device(dev)) {
3026 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3027 err = -ENXIO;
3028 goto err;
3029 }
3017 pci_set_master(dev); 3030 pci_set_master(dev);
3018 3031
3019 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev); 3032 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3020 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure"); 3033 if (!host) {
3021 3034 PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
3035 goto err;
3036 }
3022 ohci = host->hostdata; 3037 ohci = host->hostdata;
3023 ohci->dev = dev; 3038 ohci->dev = dev;
3024 ohci->host = host; 3039 ohci->host = host;
@@ -3067,15 +3082,20 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3067 (unsigned long long)pci_resource_len(dev, 0)); 3082 (unsigned long long)pci_resource_len(dev, 0));
3068 3083
3069 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE, 3084 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3070 OHCI1394_DRIVER_NAME)) 3085 OHCI1394_DRIVER_NAME)) {
3071 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable", 3086 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3072 (unsigned long long)ohci_base, 3087 (unsigned long long)ohci_base,
3073 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE); 3088 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3089 goto err;
3090 }
3074 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION; 3091 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3075 3092
3076 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE); 3093 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3077 if (ohci->registers == NULL) 3094 if (ohci->registers == NULL) {
3078 FAIL(-ENXIO, "Failed to remap registers - card not accessible"); 3095 PRINT_G(KERN_ERR, "Failed to remap registers");
3096 err = -ENXIO;
3097 goto err;
3098 }
3079 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING; 3099 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3080 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers); 3100 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3081 3101
@@ -3083,16 +3103,20 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3083 ohci->csr_config_rom_cpu = 3103 ohci->csr_config_rom_cpu =
3084 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, 3104 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3085 &ohci->csr_config_rom_bus); 3105 &ohci->csr_config_rom_bus);
3086 if (ohci->csr_config_rom_cpu == NULL) 3106 if (ohci->csr_config_rom_cpu == NULL) {
3087 FAIL(-ENOMEM, "Failed to allocate buffer config rom"); 3107 PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
3108 goto err;
3109 }
3088 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER; 3110 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3089 3111
3090 /* self-id dma buffer allocation */ 3112 /* self-id dma buffer allocation */
3091 ohci->selfid_buf_cpu = 3113 ohci->selfid_buf_cpu =
3092 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, 3114 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3093 &ohci->selfid_buf_bus); 3115 &ohci->selfid_buf_bus);
3094 if (ohci->selfid_buf_cpu == NULL) 3116 if (ohci->selfid_buf_cpu == NULL) {
3095 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets"); 3117 PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
3118 goto err;
3119 }
3096 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER; 3120 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3097 3121
3098 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff) 3122 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
@@ -3108,28 +3132,32 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3108 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context, 3132 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3109 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC, 3133 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3110 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE, 3134 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3111 OHCI1394_AsReqRcvContextBase) < 0) 3135 OHCI1394_AsReqRcvContextBase) < 0) {
3112 FAIL(-ENOMEM, "Failed to allocate AR Req context"); 3136 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
3113 3137 goto err;
3138 }
3114 /* AR DMA response context allocation */ 3139 /* AR DMA response context allocation */
3115 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context, 3140 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3116 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC, 3141 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3117 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE, 3142 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3118 OHCI1394_AsRspRcvContextBase) < 0) 3143 OHCI1394_AsRspRcvContextBase) < 0) {
3119 FAIL(-ENOMEM, "Failed to allocate AR Resp context"); 3144 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
3120 3145 goto err;
3146 }
3121 /* AT DMA request context */ 3147 /* AT DMA request context */
3122 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context, 3148 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3123 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC, 3149 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3124 OHCI1394_AsReqTrContextBase) < 0) 3150 OHCI1394_AsReqTrContextBase) < 0) {
3125 FAIL(-ENOMEM, "Failed to allocate AT Req context"); 3151 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
3126 3152 goto err;
3153 }
3127 /* AT DMA response context */ 3154 /* AT DMA response context */
3128 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context, 3155 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3129 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC, 3156 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3130 OHCI1394_AsRspTrContextBase) < 0) 3157 OHCI1394_AsRspTrContextBase) < 0) {
3131 FAIL(-ENOMEM, "Failed to allocate AT Resp context"); 3158 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
3132 3159 goto err;
3160 }
3133 /* Start off with a soft reset, to clear everything to a sane 3161 /* Start off with a soft reset, to clear everything to a sane
3134 * state. */ 3162 * state. */
3135 ohci_soft_reset(ohci); 3163 ohci_soft_reset(ohci);
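
The probe hunks above replace the old FAIL() macro, which printed an error and returned from deep inside ohci1394_pci_probe(), with PRINT_G() plus a goto to a single err: label (shown in a later hunk); that label hands cleanup to ohci1394_pci_remove(), which unwinds only as far as ohci->init_state says initialization actually got. A minimal, self-contained sketch of that staged-init pattern follows; the demo_* names are invented for illustration and this is not the driver code.

/* Sketch of the staged-init / single-error-label pattern used above.
 * Each successful step advances init_state, every failure jumps to one
 * label, and one teardown routine unwinds via switch fall-through. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

enum demo_init_state { DEMO_NOTHING, DEMO_HAVE_BUFFER, DEMO_HAVE_CONTEXT, DEMO_DONE };

struct demo_dev {
	enum demo_init_state init_state;
	void *buffer;
	void *context;
};

static void demo_remove(struct demo_dev *d)
{
	switch (d->init_state) {
	case DEMO_DONE:
	case DEMO_HAVE_CONTEXT:
		free(d->context);	/* fall through */
	case DEMO_HAVE_BUFFER:
		free(d->buffer);	/* fall through */
	case DEMO_NOTHING:
		break;
	}
	d->init_state = DEMO_NOTHING;
}

static int demo_probe(struct demo_dev *d)
{
	int err = -ENOMEM;

	d->buffer = malloc(64);
	if (!d->buffer) {
		fprintf(stderr, "failed to allocate buffer\n");
		goto err;
	}
	d->init_state = DEMO_HAVE_BUFFER;

	d->context = malloc(64);
	if (!d->context) {
		fprintf(stderr, "failed to allocate context\n");
		goto err;
	}
	d->init_state = DEMO_HAVE_CONTEXT;

	d->init_state = DEMO_DONE;
	return 0;
err:
	demo_remove(d);		/* undoes only what init_state records */
	return err;
}

int main(void)
{
	struct demo_dev d = { .init_state = DEMO_NOTHING };

	if (demo_probe(&d) == 0)
		demo_remove(&d);
	return 0;
}

The deliberate switch fall-through in demo_remove() mirrors the init_state case cascade in ohci1394_pci_remove() further down in this diff.
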
@@ -3172,9 +3200,10 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3172 * by that point. 3200 * by that point.
3173 */ 3201 */
3174 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED, 3202 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3175 OHCI1394_DRIVER_NAME, ohci)) 3203 OHCI1394_DRIVER_NAME, ohci)) {
3176 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq); 3204 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3177 3205 goto err;
3206 }
3178 ohci->init_state = OHCI_INIT_HAVE_IRQ; 3207 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3179 ohci_initialize(ohci); 3208 ohci_initialize(ohci);
3180 3209
@@ -3194,25 +3223,28 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3194 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE; 3223 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3195 3224
3196 /* Tell the highlevel this host is ready */ 3225 /* Tell the highlevel this host is ready */
3197 if (hpsb_add_host(host)) 3226 if (hpsb_add_host(host)) {
3198 FAIL(-ENOMEM, "Failed to register host with highlevel"); 3227 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3199 3228 goto err;
3229 }
3200 ohci->init_state = OHCI_INIT_DONE; 3230 ohci->init_state = OHCI_INIT_DONE;
3201 3231
3202 return 0; 3232 return 0;
3203#undef FAIL 3233err:
3234 ohci1394_pci_remove(dev);
3235 return err;
3204} 3236}
3205 3237
3206static void ohci1394_pci_remove(struct pci_dev *pdev) 3238static void ohci1394_pci_remove(struct pci_dev *dev)
3207{ 3239{
3208 struct ti_ohci *ohci; 3240 struct ti_ohci *ohci;
3209 struct device *dev; 3241 struct device *device;
3210 3242
3211 ohci = pci_get_drvdata(pdev); 3243 ohci = pci_get_drvdata(dev);
3212 if (!ohci) 3244 if (!ohci)
3213 return; 3245 goto out;
3214 3246
3215 dev = get_device(&ohci->host->device); 3247 device = get_device(&ohci->host->device);
3216 3248
3217 switch (ohci->init_state) { 3249 switch (ohci->init_state) {
3218 case OHCI_INIT_DONE: 3250 case OHCI_INIT_DONE:
@@ -3246,7 +3278,7 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3246 /* Soft reset before we start - this disables 3278 /* Soft reset before we start - this disables
3247 * interrupts and clears linkEnable and LPS. */ 3279 * interrupts and clears linkEnable and LPS. */
3248 ohci_soft_reset(ohci); 3280 ohci_soft_reset(ohci);
3249 free_irq(ohci->dev->irq, ohci); 3281 free_irq(dev->irq, ohci);
3250 3282
3251 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE: 3283 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3252 /* The ohci_soft_reset() stops all DMA contexts, so we 3284 /* The ohci_soft_reset() stops all DMA contexts, so we
@@ -3257,12 +3289,12 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3257 free_dma_trm_ctx(&ohci->at_resp_context); 3289 free_dma_trm_ctx(&ohci->at_resp_context);
3258 3290
3259 case OHCI_INIT_HAVE_SELFID_BUFFER: 3291 case OHCI_INIT_HAVE_SELFID_BUFFER:
3260 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, 3292 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3261 ohci->selfid_buf_cpu, 3293 ohci->selfid_buf_cpu,
3262 ohci->selfid_buf_bus); 3294 ohci->selfid_buf_bus);
3263 3295
3264 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER: 3296 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3265 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, 3297 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3266 ohci->csr_config_rom_cpu, 3298 ohci->csr_config_rom_cpu,
3267 ohci->csr_config_rom_bus); 3299 ohci->csr_config_rom_bus);
3268 3300
@@ -3270,35 +3302,24 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3270 iounmap(ohci->registers); 3302 iounmap(ohci->registers);
3271 3303
3272 case OHCI_INIT_HAVE_MEM_REGION: 3304 case OHCI_INIT_HAVE_MEM_REGION:
3273 release_mem_region(pci_resource_start(ohci->dev, 0), 3305 release_mem_region(pci_resource_start(dev, 0),
3274 OHCI1394_REGISTER_SIZE); 3306 OHCI1394_REGISTER_SIZE);
3275 3307
3276#ifdef CONFIG_PPC_PMAC
3277 /* On UniNorth, power down the cable and turn off the chip clock
3278 * to save power on laptops */
3279 if (machine_is(powermac)) {
3280 struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3281
3282 if (ofn) {
3283 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3284 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3285 }
3286 }
3287#endif /* CONFIG_PPC_PMAC */
3288
3289 case OHCI_INIT_ALLOC_HOST: 3308 case OHCI_INIT_ALLOC_HOST:
3290 pci_set_drvdata(ohci->dev, NULL); 3309 pci_set_drvdata(dev, NULL);
3291 } 3310 }
3292 3311
3293 if (dev) 3312 if (device)
3294 put_device(dev); 3313 put_device(device);
3314out:
3315 ohci1394_pmac_off(dev);
3295} 3316}
3296 3317
3297#ifdef CONFIG_PM 3318#ifdef CONFIG_PM
3298static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3319static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
3299{ 3320{
3300 int err; 3321 int err;
3301 struct ti_ohci *ohci = pci_get_drvdata(pdev); 3322 struct ti_ohci *ohci = pci_get_drvdata(dev);
3302 3323
3303 if (!ohci) { 3324 if (!ohci) {
3304 printk(KERN_ERR "%s: tried to suspend nonexisting host\n", 3325 printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
@@ -3326,32 +3347,23 @@ static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3326 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT); 3347 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3327 ohci_soft_reset(ohci); 3348 ohci_soft_reset(ohci);
3328 3349
3329 err = pci_save_state(pdev); 3350 err = pci_save_state(dev);
3330 if (err) { 3351 if (err) {
3331 PRINT(KERN_ERR, "pci_save_state failed with %d", err); 3352 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3332 return err; 3353 return err;
3333 } 3354 }
3334 err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3355 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3335 if (err) 3356 if (err)
3336 DBGMSG("pci_set_power_state failed with %d", err); 3357 DBGMSG("pci_set_power_state failed with %d", err);
3337 3358 ohci1394_pmac_off(dev);
3338/* PowerMac suspend code comes last */
3339#ifdef CONFIG_PPC_PMAC
3340 if (machine_is(powermac)) {
3341 struct device_node *ofn = pci_device_to_OF_node(pdev);
3342
3343 if (ofn)
3344 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3345 }
3346#endif /* CONFIG_PPC_PMAC */
3347 3359
3348 return 0; 3360 return 0;
3349} 3361}
3350 3362
3351static int ohci1394_pci_resume(struct pci_dev *pdev) 3363static int ohci1394_pci_resume(struct pci_dev *dev)
3352{ 3364{
3353 int err; 3365 int err;
3354 struct ti_ohci *ohci = pci_get_drvdata(pdev); 3366 struct ti_ohci *ohci = pci_get_drvdata(dev);
3355 3367
3356 if (!ohci) { 3368 if (!ohci) {
3357 printk(KERN_ERR "%s: tried to resume nonexisting host\n", 3369 printk(KERN_ERR "%s: tried to resume nonexisting host\n",
@@ -3360,19 +3372,10 @@ static int ohci1394_pci_resume(struct pci_dev *pdev)
3360 } 3372 }
3361 DBGMSG("resume called"); 3373 DBGMSG("resume called");
3362 3374
3363/* PowerMac resume code comes first */ 3375 ohci1394_pmac_on(dev);
3364#ifdef CONFIG_PPC_PMAC 3376 pci_set_power_state(dev, PCI_D0);
3365 if (machine_is(powermac)) { 3377 pci_restore_state(dev);
3366 struct device_node *ofn = pci_device_to_OF_node(pdev); 3378 err = pci_enable_device(dev);
3367
3368 if (ofn)
3369 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3370 }
3371#endif /* CONFIG_PPC_PMAC */
3372
3373 pci_set_power_state(pdev, PCI_D0);
3374 pci_restore_state(pdev);
3375 err = pci_enable_device(pdev);
3376 if (err) { 3379 if (err) {
3377 PRINT(KERN_ERR, "pci_enable_device failed with %d", err); 3380 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3378 return err; 3381 return err;
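
The remove, suspend, and resume hunks above drop the open-coded CONFIG_PPC_PMAC blocks and call ohci1394_pmac_on()/ohci1394_pmac_off() instead. Reconstructed from the deleted lines, the helpers presumably look roughly like the sketch below; the exact bodies, the "on" values (1), and the empty !CONFIG_PPC_PMAC fallbacks are assumptions, and the PMac headers (asm/machdep.h, asm/pmac_feature.h) are taken to be included by ohci1394.c already.

#ifdef CONFIG_PPC_PMAC
static void ohci1394_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci1394_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci1394_pmac_on(dev)
#define ohci1394_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

Calling ohci1394_pmac_off() at the out: label means the cable power-down also runs when probing failed early; the old inline block sat inside the OHCI_INIT_HAVE_MEM_REGION arm of the switch and was skipped in that case.
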
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 8af01ab30cc9..7aee1ac97c80 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -226,7 +226,7 @@ static int get_phy_reg(struct ti_lynx *lynx, int addr)
226 if (addr > 15) { 226 if (addr > 15) {
227 PRINT(KERN_ERR, lynx->id, 227 PRINT(KERN_ERR, lynx->id,
228 "%s: PHY register address %d out of range", 228 "%s: PHY register address %d out of range",
229 __FUNCTION__, addr); 229 __func__, addr);
230 return -1; 230 return -1;
231 } 231 }
232 232
@@ -238,7 +238,7 @@ static int get_phy_reg(struct ti_lynx *lynx, int addr)
238 238
239 if (i > 10000) { 239 if (i > 10000) {
240 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting", 240 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
241 __FUNCTION__); 241 __func__);
242 retval = -1; 242 retval = -1;
243 break; 243 break;
244 } 244 }
@@ -261,13 +261,13 @@ static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
261 261
262 if (addr > 15) { 262 if (addr > 15) {
263 PRINT(KERN_ERR, lynx->id, 263 PRINT(KERN_ERR, lynx->id,
264 "%s: PHY register address %d out of range", __FUNCTION__, addr); 264 "%s: PHY register address %d out of range", __func__, addr);
265 return -1; 265 return -1;
266 } 266 }
267 267
268 if (val > 0xff) { 268 if (val > 0xff) {
269 PRINT(KERN_ERR, lynx->id, 269 PRINT(KERN_ERR, lynx->id,
270 "%s: PHY register value %d out of range", __FUNCTION__, val); 270 "%s: PHY register value %d out of range", __func__, val);
271 return -1; 271 return -1;
272 } 272 }
273 273
@@ -287,7 +287,7 @@ static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
287 287
288 if (page > 7) { 288 if (page > 7) {
289 PRINT(KERN_ERR, lynx->id, 289 PRINT(KERN_ERR, lynx->id,
290 "%s: PHY page %d out of range", __FUNCTION__, page); 290 "%s: PHY page %d out of range", __func__, page);
291 return -1; 291 return -1;
292 } 292 }
293 293
@@ -309,7 +309,7 @@ static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
309 309
310 if (port > 15) { 310 if (port > 15) {
311 PRINT(KERN_ERR, lynx->id, 311 PRINT(KERN_ERR, lynx->id,
312 "%s: PHY port %d out of range", __FUNCTION__, port); 312 "%s: PHY port %d out of range", __func__, port);
313 return -1; 313 return -1;
314 } 314 }
315 315
@@ -738,8 +738,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
738 spin_lock_irqsave(&lynx->async.queue_lock, flags); 738 spin_lock_irqsave(&lynx->async.queue_lock, flags);
739 739
740 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0); 740 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
741 list_splice(&lynx->async.queue, &packet_list); 741 list_splice_init(&lynx->async.queue, &packet_list);
742 INIT_LIST_HEAD(&lynx->async.queue);
743 742
744 if (list_empty(&lynx->async.pcl_queue)) { 743 if (list_empty(&lynx->async.pcl_queue)) {
745 spin_unlock_irqrestore(&lynx->async.queue_lock, flags); 744 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 37e7e109af38..04e96ba56e09 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2959,7 +2959,6 @@ MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
2959 2959
2960static struct hpsb_protocol_driver raw1394_driver = { 2960static struct hpsb_protocol_driver raw1394_driver = {
2961 .name = "raw1394", 2961 .name = "raw1394",
2962 .id_table = raw1394_id_table,
2963}; 2962};
2964 2963
2965/******************************************************************************/ 2964/******************************************************************************/
@@ -3004,7 +3003,6 @@ static int __init init_raw1394(void)
3004 3003
3005 cdev_init(&raw1394_cdev, &raw1394_fops); 3004 cdev_init(&raw1394_cdev, &raw1394_fops);
3006 raw1394_cdev.owner = THIS_MODULE; 3005 raw1394_cdev.owner = THIS_MODULE;
3007 kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
3008 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1); 3006 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
3009 if (ret) { 3007 if (ret) {
3010 HPSB_ERR("raw1394 failed to register minor device block"); 3008 HPSB_ERR("raw1394 failed to register minor device block");
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f53f72daae34..16b9d0ad154e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -615,7 +615,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
615 cmd->Current_SCpnt = Current_SCpnt; 615 cmd->Current_SCpnt = Current_SCpnt;
616 list_add_tail(&cmd->list, &lu->cmd_orb_inuse); 616 list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
617 } else 617 } else
618 SBP2_ERR("%s: no orbs available", __FUNCTION__); 618 SBP2_ERR("%s: no orbs available", __func__);
619 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags); 619 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
620 return cmd; 620 return cmd;
621} 621}
@@ -1294,7 +1294,7 @@ static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
1294 1294
1295 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE); 1295 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1296 if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) 1296 if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
1297 SBP2_ERR("%s error", __FUNCTION__); 1297 SBP2_ERR("%s error", __func__);
1298 return 0; 1298 return 0;
1299} 1299}
1300 1300
@@ -1985,11 +1985,8 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
1985 lu->sdev = sdev; 1985 lu->sdev = sdev;
1986 sdev->allow_restart = 1; 1986 sdev->allow_restart = 1;
1987 1987
1988 /* 1988 /* SBP-2 requires quadlet alignment of the data buffers. */
1989 * Update the dma alignment (minimum alignment requirements for 1989 blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
1990 * start and end of DMA transfers) to be a sector
1991 */
1992 blk_queue_update_dma_alignment(sdev->request_queue, 511);
1993 1990
1994 if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36) 1991 if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1995 sdev->inquiry_len = 36; 1992 sdev->inquiry_len = 36;
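
blk_queue_update_dma_alignment() takes an alignment mask, i.e. the required alignment minus one: the old 511 imposed 512-byte (sector) alignment on the data buffers, while the new 4 - 1 requests only the 4-byte (quadlet) alignment SBP-2 actually needs. A standalone illustration of how such a mask is tested (ordinary arithmetic, not block-layer code):

#include <stdint.h>
#include <stdio.h>

/* Alignment requests are passed as masks, i.e. alignment - 1; these two
 * values mirror the old and new sbp2 settings shown above. */
#define SECTOR_MASK	(512 - 1)	/* old: force sector alignment */
#define QUADLET_MASK	(4 - 1)		/* new: SBP-2 quadlet alignment */

static int is_aligned(uintptr_t addr, uintptr_t mask)
{
	return (addr & mask) == 0;
}

int main(void)
{
	uintptr_t addr = 0x1004;	/* quadlet-aligned, not sector-aligned */

	printf("quadlet aligned: %d\n", is_aligned(addr, QUADLET_MASK));	/* 1 */
	printf("sector aligned:  %d\n", is_aligned(addr, SECTOR_MASK));	/* 0 */
	return 0;
}
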
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index bd28adfd7afc..e03024eeeac1 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1315,8 +1315,7 @@ static struct ieee1394_device_id video1394_id_table[] = {
1315MODULE_DEVICE_TABLE(ieee1394, video1394_id_table); 1315MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
1316 1316
1317static struct hpsb_protocol_driver video1394_driver = { 1317static struct hpsb_protocol_driver video1394_driver = {
1318 .name = VIDEO1394_DRIVER_NAME, 1318 .name = VIDEO1394_DRIVER_NAME,
1319 .id_table = video1394_id_table,
1320}; 1319};
1321 1320
1322 1321
@@ -1504,7 +1503,6 @@ static int __init video1394_init_module (void)
1504 1503
1505 cdev_init(&video1394_cdev, &video1394_fops); 1504 cdev_init(&video1394_cdev, &video1394_fops);
1506 video1394_cdev.owner = THIS_MODULE; 1505 video1394_cdev.owner = THIS_MODULE;
1507 kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
1508 ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16); 1506 ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
1509 if (ret) { 1507 if (ret) {
1510 PRINT_G(KERN_ERR, "video1394: unable to get minor device block"); 1508 PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index aaba784332e0..95de3102bc87 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -583,7 +583,7 @@ config LATENCYTOP
583 to find out which userspace is blocking on what kernel operations. 583 to find out which userspace is blocking on what kernel operations.
584 584
585config PROVIDE_OHCI1394_DMA_INIT 585config PROVIDE_OHCI1394_DMA_INIT
586 bool "Provide code for enabling DMA over FireWire early on boot" 586 bool "Remote debugging over FireWire early on boot"
587 depends on PCI && X86 587 depends on PCI && X86
588 help 588 help
589 If you want to debug problems which hang or crash the kernel early 589 If you want to debug problems which hang or crash the kernel early
@@ -611,6 +611,17 @@ config PROVIDE_OHCI1394_DMA_INIT
611 611
612 See Documentation/debugging-via-ohci1394.txt for more information. 612 See Documentation/debugging-via-ohci1394.txt for more information.
613 613
614config FIREWIRE_OHCI_REMOTE_DMA
615 bool "Remote debugging over FireWire with firewire-ohci"
616 depends on FIREWIRE_OHCI
617 help
618 This option lets you use the FireWire bus for remote debugging
619	  with the help of the firewire-ohci driver. It enables unfiltered
620 remote DMA in firewire-ohci.
621 See Documentation/debugging-via-ohci1394.txt for more information.
622
623 If unsure, say N.
624
614source "samples/Kconfig" 625source "samples/Kconfig"
615 626
616source "lib/Kconfig.kgdb" 627source "lib/Kconfig.kgdb"
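
The new FIREWIRE_OHCI_REMOTE_DMA bool reaches C code as a CONFIG_FIREWIRE_OHCI_REMOTE_DMA preprocessor symbol that firewire-ohci can test at build time to switch from its default filtered physical DMA to the unfiltered mode needed for remote debugging. A hypothetical, self-contained illustration of that kind of compile-time switch; only the CONFIG_ symbol name comes from the hunk above, everything else is invented.

#include <stdio.h>

/* Hypothetical example: a bool Kconfig symbol becomes a CONFIG_ macro. */
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
#define DEMO_PHYS_DMA_MODE "unfiltered (remote debugging enabled)"
#else
#define DEMO_PHYS_DMA_MODE "filtered (default, more secure)"
#endif

int main(void)
{
	printf("physical DMA: %s\n", DEMO_PHYS_DMA_MODE);
	return 0;
}

Building this with -DCONFIG_FIREWIRE_OHCI_REMOTE_DMA selects the first branch; in a kernel build the macro would come from the generated configuration header instead.
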