author		Kristian Høgsberg <krh@redhat.com>		2007-05-07 20:33:32 -0400
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2007-05-10 12:24:13 -0400
commit		c781c06d119d04601727f2fbc30151e6760d536d (patch)
tree		1faf19acc6bc2a2a3b3bdae8368e395e75cd7518
parent		e175569c4639872b5cf242c9d4a71cc40c5f3c29 (diff)
firewire: Clean up comment style.
Drop filenames from file preamble, drop editor annotations and use
standard indent style for block comments.

Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de> (fixed typo)
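For reference, the comment style conversion applied throughout the patch
looks like this (example taken from the fw-card.c hunk below):

	/* Check descriptor is valid; the length of all blocks in the
	 * descriptor has to add up to exactly the length of the
	 * block. */

becomes

	/*
	 * Check descriptor is valid; the length of all blocks in the
	 * descriptor has to add up to exactly the length of the
	 * block.
	 */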
-rw-r--r--	drivers/firewire/fw-card.c		105
-rw-r--r--	drivers/firewire/fw-cdev.c		 33
-rw-r--r--	drivers/firewire/fw-device.c		101
-rw-r--r--	drivers/firewire/fw-device.h		  5
-rw-r--r--	drivers/firewire/fw-iso.c		  4
-rw-r--r--	drivers/firewire/fw-ohci.c		144
-rw-r--r--	drivers/firewire/fw-sbp2.c		101
-rw-r--r--	drivers/firewire/fw-topology.c		 65
-rw-r--r--	drivers/firewire/fw-topology.h		  5
-rw-r--r--	drivers/firewire/fw-transaction.c	 71
-rw-r--r--	drivers/firewire/fw-transaction.h	 58
11 files changed, 439 insertions(+), 253 deletions(-)
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 216965615ee8..b2aa85555a7a 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -1,8 +1,5 @@
-/* -*- c-basic-offset: 8 -*-
- *
- * fw-card.c - card level functions
- *
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+/*
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -69,12 +66,14 @@ generate_config_rom (struct fw_card *card, size_t *config_rom_length)
 	static u32 config_rom[256];
 	int i, j, length;
 
-	/* Initialize contents of config rom buffer. On the OHCI
+	/*
+	 * Initialize contents of config rom buffer. On the OHCI
 	 * controller, block reads to the config rom accesses the host
 	 * memory, but quadlet read access the hardware bus info block
 	 * registers. That's just crack, but it means we should make
 	 * sure the contents of bus info block in host memory mathces
-	 * the version stored in the OHCI registers. */
+	 * the version stored in the OHCI registers.
+	 */
 
 	memset(config_rom, 0, sizeof config_rom);
 	config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0);
@@ -143,9 +142,11 @@ fw_core_add_descriptor (struct fw_descriptor *desc)
 {
 	size_t i;
 
-	/* Check descriptor is valid; the length of all blocks in the
+	/*
+	 * Check descriptor is valid; the length of all blocks in the
 	 * descriptor has to add up to exactly the length of the
-	 * block. */
+	 * block.
+	 */
 	i = 0;
 	while (i < desc->length)
 		i += (desc->data[i] >> 16) + 1;
@@ -228,7 +229,8 @@ fw_card_bm_work(struct work_struct *work)
 
 	if (card->bm_generation + 1 == generation ||
 	    (card->bm_generation != generation && grace)) {
-		/* This first step is to figure out who is IRM and
+		/*
+		 * This first step is to figure out who is IRM and
 		 * then try to become bus manager. If the IRM is not
 		 * well defined (e.g. does not have an active link
 		 * layer or does not responds to our lock request, we
@@ -236,7 +238,8 @@ fw_card_bm_work(struct work_struct *work)
 		 * In that case, we do a goto into the gap count logic
 		 * so that when we do the reset, we still optimize the
 		 * gap count. That could well save a reset in the
-		 * next generation. */
+		 * next generation.
+		 */
 
 		irm_id = card->irm_node->node_id;
 		if (!card->irm_node->link_on) {
@@ -260,8 +263,10 @@ fw_card_bm_work(struct work_struct *work)
 		wait_for_completion(&bmd.done);
 
 		if (bmd.rcode == RCODE_GENERATION) {
-			/* Another bus reset happened. Just return,
-			 * the BM work has been rescheduled. */
+			/*
+			 * Another bus reset happened. Just return,
+			 * the BM work has been rescheduled.
+			 */
 			return;
 		}
 
@@ -271,48 +276,62 @@ fw_card_bm_work(struct work_struct *work)
 
 		spin_lock_irqsave(&card->lock, flags);
 		if (bmd.rcode != RCODE_COMPLETE) {
-			/* The lock request failed, maybe the IRM
+			/*
+			 * The lock request failed, maybe the IRM
 			 * isn't really IRM capable after all. Let's
 			 * do a bus reset and pick the local node as
-			 * root, and thus, IRM. */
+			 * root, and thus, IRM.
+			 */
 			new_root_id = card->local_node->node_id;
 			fw_notify("BM lock failed, making local node (%02x) root.\n",
 			          new_root_id);
 			goto pick_me;
 		}
 	} else if (card->bm_generation != generation) {
-		/* OK, we weren't BM in the last generation, and it's
+		/*
+		 * OK, we weren't BM in the last generation, and it's
 		 * less than 100ms since last bus reset. Reschedule
-		 * this task 100ms from now. */
+		 * this task 100ms from now.
+		 */
 		spin_unlock_irqrestore(&card->lock, flags);
 		schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
 		return;
 	}
 
-	/* We're bus manager for this generation, so next step is to
+	/*
+	 * We're bus manager for this generation, so next step is to
 	 * make sure we have an active cycle master and do gap count
-	 * optimization. */
+	 * optimization.
+	 */
 	card->bm_generation = generation;
 
 	if (root == NULL) {
-		/* Either link_on is false, or we failed to read the
-		 * config rom. In either case, pick another root. */
+		/*
+		 * Either link_on is false, or we failed to read the
+		 * config rom. In either case, pick another root.
+		 */
 		new_root_id = card->local_node->node_id;
 	} else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
-		/* If we haven't probed this device yet, bail out now
-		 * and let's try again once that's done. */
+		/*
+		 * If we haven't probed this device yet, bail out now
+		 * and let's try again once that's done.
+		 */
 		spin_unlock_irqrestore(&card->lock, flags);
 		return;
 	} else if (root->config_rom[2] & bib_cmc) {
-		/* FIXME: I suppose we should set the cmstr bit in the
+		/*
+		 * FIXME: I suppose we should set the cmstr bit in the
 		 * STATE_CLEAR register of this node, as described in
 		 * 1394-1995, 8.4.2.6. Also, send out a force root
-		 * packet for this node. */
+		 * packet for this node.
+		 */
 		new_root_id = root_id;
 	} else {
-		/* Current root has an active link layer and we
+		/*
+		 * Current root has an active link layer and we
 		 * successfully read the config rom, but it's not
-		 * cycle master capable. */
+		 * cycle master capable.
+		 */
 		new_root_id = card->local_node->node_id;
 	}
 
@@ -324,9 +343,11 @@ fw_card_bm_work(struct work_struct *work)
 	else
 		gap_count = 63;
 
-	/* Finally, figure out if we should do a reset or not. If we've
+	/*
+	 * Finally, figure out if we should do a reset or not. If we've
 	 * done less that 5 resets with the same physical topology and we
-	 * have either a new root or a new gap count setting, let's do it. */
+	 * have either a new root or a new gap count setting, let's do it.
+	 */
 
 	if (card->bm_retries++ < 5 &&
 	    (card->gap_count != gap_count || new_root_id != root_id))
@@ -391,8 +412,10 @@ fw_card_add(struct fw_card *card,
 			PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
 		return -EIO;
 
-	/* The subsystem grabs a reference when the card is added and
-	 * drops it when the driver calls fw_core_remove_card. */
+	/*
+	 * The subsystem grabs a reference when the card is added and
+	 * drops it when the driver calls fw_core_remove_card.
+	 */
 	fw_card_get(card);
 
 	down_write(&card_rwsem);
@@ -405,11 +428,13 @@ fw_card_add(struct fw_card *card,
 EXPORT_SYMBOL(fw_card_add);
 
 
-/* The next few functions implements a dummy driver that use once a
+/*
+ * The next few functions implements a dummy driver that use once a
  * card driver shuts down an fw_card. This allows the driver to
  * cleanly unload, as all IO to the card will be handled by the dummy
  * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO. */
+ * dummy driver just fails all IO.
+ */
 
 static int
 dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -429,8 +454,10 @@ static int
 dummy_set_config_rom(struct fw_card *card,
 		     u32 *config_rom, size_t length)
 {
-	/* We take the card out of card_list before setting the dummy
-	 * driver, so this should never get called. */
+	/*
+	 * We take the card out of card_list before setting the dummy
+	 * driver, so this should never get called.
+	 */
 	BUG();
 	return -1;
 }
@@ -510,9 +537,11 @@ release_card(struct kref *kref)
 	kfree(card);
 }
 
-/* An assumption for fw_card_put() is that the card driver allocates
+/*
+ * An assumption for fw_card_put() is that the card driver allocates
  * the fw_card struct with kalloc and that it has been shut down
- * before the last ref is dropped. */
+ * before the last ref is dropped.
+ */
 void
 fw_card_put(struct fw_card *card)
 {
@@ -524,8 +553,6 @@ int
 fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
 	int reg = short_reset ? 5 : 1;
-	/* The following values happen to be the same bit. However be
-	 * explicit for clarity. */
 	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
 
 	return card->driver->update_phy_reg(card, reg, 0, bit);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 88b8fd917f54..c876ac3c50e0 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -1,8 +1,7 @@
-/* -*- c-basic-offset: 8 -*-
+/*
+ * Char device for device raw access
  *
- * fw-device-cdev.c - Char device for device raw access
- *
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -36,9 +35,6 @@
 #include "fw-topology.h"
 #include "fw-device.h"
 
-/* dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct. */
-
 struct client;
 struct client_resource {
 	struct list_head link;
@@ -46,6 +42,11 @@ struct client_resource {
 	u32 handle;
 };
 
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in the struct.
+ */
+
 struct event {
 	struct { void *data; size_t size; } v[2];
 	struct list_head link;
@@ -691,13 +692,15 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
 	if (ctx == NULL || request->handle != 0)
 		return -EINVAL;
 
-	/* If the user passes a non-NULL data pointer, has mmap()'ed
+	/*
+	 * If the user passes a non-NULL data pointer, has mmap()'ed
 	 * the iso buffer, and the pointer points inside the buffer,
 	 * we setup the payload pointers accordingly. Otherwise we
 	 * set them both to 0, which will still let packets with
 	 * payload_length == 0 through. In other words, if no packets
 	 * use the indirect payload, the iso buffer need not be mapped
-	 * and the request->data pointer is ignored.*/
+	 * and the request->data pointer is ignored.
+	 */
 
 	payload = (unsigned long)request->data - client->vm_start;
 	buffer_end = client->buffer.page_count << PAGE_SHIFT;
@@ -720,8 +723,10 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
 	if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
 		header_length = u.packet.header_length;
 	} else {
-		/* We require that header_length is a multiple of
-		 * the fixed header size, ctx->header_size */
+		/*
+		 * We require that header_length is a multiple of
+		 * the fixed header size, ctx->header_size.
+		 */
 		if (ctx->header_size == 0) {
 			if (u.packet.header_length > 0)
 				return -EINVAL;
@@ -908,8 +913,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
 		r->release(client, r);
 
-	/* FIXME: We should wait for the async tasklets to stop
-	 * running before freeing the memory. */
+	/*
+	 * FIXME: We should wait for the async tasklets to stop
+	 * running before freeing the memory.
+	 */
 
 	list_for_each_entry_safe(e, next_e, &client->event_list, link)
 		kfree(e);
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 99d1c418d2b0..8e5f17f5e98a 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -1,6 +1,5 @@
-/* -*- c-basic-offset: 8 -*-
- *
- * fw-device.c - Device probing and sysfs code.
+/*
+ * Device probing and sysfs code.
  *
  * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -174,8 +173,10 @@ static void fw_device_release(struct device *dev)
 	struct fw_device *device = fw_device(dev);
 	unsigned long flags;
 
-	/* Take the card lock so we don't set this to NULL while a
-	 * FW_NODE_UPDATED callback is being handled. */
+	/*
+	 * Take the card lock so we don't set this to NULL while a
+	 * FW_NODE_UPDATED callback is being handled.
+	 */
 	spin_lock_irqsave(&device->card->lock, flags);
 	device->node->data = NULL;
 	spin_unlock_irqrestore(&device->card->lock, flags);
@@ -421,34 +422,42 @@ static int read_bus_info_block(struct fw_device *device)
 	for (i = 0; i < 5; i++) {
 		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
 			return -1;
-		/* As per IEEE1212 7.2, during power-up, devices can
+		/*
+		 * As per IEEE1212 7.2, during power-up, devices can
 		 * reply with a 0 for the first quadlet of the config
 		 * rom to indicate that they are booting (for example,
 		 * if the firmware is on the disk of a external
 		 * harddisk). In that case we just fail, and the
-		 * retry mechanism will try again later. */
+		 * retry mechanism will try again later.
+		 */
 		if (i == 0 && rom[i] == 0)
 			return -1;
 	}
 
-	/* Now parse the config rom. The config rom is a recursive
+	/*
+	 * Now parse the config rom. The config rom is a recursive
 	 * directory structure so we parse it using a stack of
 	 * references to the blocks that make up the structure. We
-	 * push a reference to the root directory on the stack to
-	 * start things off. */
+	 * push a reference to the root directory on the stack to
+	 * start things off.
+	 */
 	length = i;
 	sp = 0;
 	stack[sp++] = 0xc0000005;
 	while (sp > 0) {
-		/* Pop the next block reference of the stack. The
+		/*
+		 * Pop the next block reference of the stack. The
 		 * lower 24 bits is the offset into the config rom,
 		 * the upper 8 bits are the type of the reference the
-		 * block. */
+		 * block.
+		 */
 		key = stack[--sp];
 		i = key & 0xffffff;
 		if (i >= ARRAY_SIZE(rom))
-			/* The reference points outside the standard
-			 * config rom area, something's fishy. */
+			/*
+			 * The reference points outside the standard
+			 * config rom area, something's fishy.
+			 */
 			return -1;
 
 		/* Read header quadlet for the block to get the length. */
@@ -457,15 +466,19 @@ static int read_bus_info_block(struct fw_device *device)
 		end = i + (rom[i] >> 16) + 1;
 		i++;
 		if (end > ARRAY_SIZE(rom))
-			/* This block extends outside standard config
+			/*
+			 * This block extends outside standard config
 			 * area (and the array we're reading it
 			 * into). That's broken, so ignore this
-			 * device. */
+			 * device.
+			 */
 			return -1;
 
-		/* Now read in the block. If this is a directory
+		/*
+		 * Now read in the block. If this is a directory
 		 * block, check the entries as we read them to see if
-		 * it references another block, and push it in that case. */
+		 * it references another block, and push it in that case.
+		 */
 		while (i < end) {
 			if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
 				return -1;
@@ -516,8 +529,10 @@ static void create_units(struct fw_device *device)
 		if (key != (CSR_UNIT | CSR_DIRECTORY))
 			continue;
 
-		/* Get the address of the unit directory and try to
-		 * match the drivers id_tables against it. */
+		/*
+		 * Get the address of the unit directory and try to
+		 * match the drivers id_tables against it.
+		 */
 		unit = kzalloc(sizeof *unit, GFP_KERNEL);
 		if (unit == NULL) {
 			fw_error("failed to allocate memory for unit\n");
@@ -585,14 +600,16 @@ static struct device_type fw_device_type = {
 	.release = fw_device_release,
 };
 
-/* These defines control the retry behavior for reading the config
+/*
+ * These defines control the retry behavior for reading the config
  * rom. It shouldn't be necessary to tweak these; if the device
  * doesn't respond to a config rom read within 10 seconds, it's not
  * going to respond at all. As for the initial delay, a lot of
  * devices will be able to respond within half a second after bus
  * reset. On the other hand, it's not really worth being more
  * aggressive than that, since it scales pretty well; if 10 devices
- * are plugged in, they're all getting read within one second. */
+ * are plugged in, they're all getting read within one second.
+ */
 
 #define MAX_RETRIES	10
 #define RETRY_DELAY	(3 * HZ)
@@ -604,9 +621,11 @@ static void fw_device_init(struct work_struct *work)
 		container_of(work, struct fw_device, work.work);
 	int minor, err;
 
-	/* All failure paths here set node->data to NULL, so that we
+	/*
+	 * All failure paths here set node->data to NULL, so that we
 	 * don't try to do device_for_each_child() on a kfree()'d
-	 * device. */
+	 * device.
+	 */
 
 	if (read_bus_info_block(device) < 0) {
 		if (device->config_rom_retries < MAX_RETRIES) {
@@ -647,13 +666,15 @@ static void fw_device_init(struct work_struct *work)
 
 	create_units(device);
 
-	/* Transition the device to running state. If it got pulled
+	/*
+	 * Transition the device to running state. If it got pulled
 	 * out from under us while we did the intialization work, we
 	 * have to shut down the device again here. Normally, though,
 	 * fw_node_event will be responsible for shutting it down when
 	 * necessary. We have to use the atomic cmpxchg here to avoid
 	 * racing with the FW_NODE_DESTROYED case in
-	 * fw_node_event(). */
+	 * fw_node_event().
+	 */
 	if (atomic_cmpxchg(&device->state,
 		    FW_DEVICE_INITIALIZING,
 		    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
@@ -662,10 +683,12 @@ static void fw_device_init(struct work_struct *work)
 	fw_notify("created new fw device %s (%d config rom retries)\n",
 		  device->device.bus_id, device->config_rom_retries);
 
-	/* Reschedule the IRM work if we just finished reading the
+	/*
+	 * Reschedule the IRM work if we just finished reading the
 	 * root node config rom. If this races with a bus reset we
 	 * just end up running the IRM work a couple of extra times -
-	 * pretty harmless. */
+	 * pretty harmless.
+	 */
 	if (device->node == device->card->root_node)
 		schedule_delayed_work(&device->card->work, 0);
 
@@ -716,12 +739,14 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (device == NULL)
 			break;
 
-		/* Do minimal intialization of the device here, the
+		/*
+		 * Do minimal intialization of the device here, the
 		 * rest will happen in fw_device_init(). We need the
 		 * card and node so we can read the config rom and we
 		 * need to do device_initialize() now so
 		 * device_for_each_child() in FW_NODE_UPDATED is
-		 * doesn't freak out. */
+		 * doesn't freak out.
+		 */
 		device_initialize(&device->device);
 		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
 		device->card = fw_card_get(card);
@@ -730,15 +755,19 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		device->generation = card->generation;
 		INIT_LIST_HEAD(&device->client_list);
 
-		/* Set the node data to point back to this device so
+		/*
+		 * Set the node data to point back to this device so
 		 * FW_NODE_UPDATED callbacks can update the node_id
-		 * and generation for the device. */
+		 * and generation for the device.
+		 */
 		node->data = device;
 
-		/* Many devices are slow to respond after bus resets,
+		/*
+		 * Many devices are slow to respond after bus resets,
 		 * especially if they are bus powered and go through
 		 * power-up after getting plugged in. We schedule the
-		 * first config rom scan half a second after bus reset. */
+		 * first config rom scan half a second after bus reset.
+		 */
 		INIT_DELAYED_WORK(&device->work, fw_device_init);
 		schedule_delayed_work(&device->work, INITIAL_DELAY);
 		break;
@@ -761,7 +790,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (!node->data)
 			break;
 
-		/* Destroy the device associated with the node. There
+		/*
+		 * Destroy the device associated with the node. There
 		 * are two cases here: either the device is fully
 		 * initialized (FW_DEVICE_RUNNING) or we're in the
 		 * process of reading its config rom
@@ -770,7 +800,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * full fw_device_shutdown(). If not, there's work
 		 * scheduled to read it's config rom, and we just put
 		 * the device in shutdown state to have that code fail
-		 * to create the device. */
+		 * to create the device.
+		 */
 		device = node->data;
 		if (atomic_xchg(&device->state,
 				FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index c167d59da68a..0ba9d64ccf4c 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -1,7 +1,4 @@
-/* -*- c-basic-offset: 8 -*-
- *
- * fw-device.h - Device probing and sysfs code.
- *
+/*
  * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 2ce26db656e0..2b640e9be6de 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,6 +1,6 @@
-/* -*- c-basic-offset: 8 -*-
+/*
+ * Isochronous IO functionality
  *
- * fw-iso.c - Isochronous IO
  * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 34eb4681e272..beb924403dab 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1,6 +1,6 @@
-/* -*- c-basic-offset: 8 -*-
+/*
+ * Driver for OHCI 1394 controllers
  *
- * fw-ohci.c - Driver for OHCI 1394 boards
  * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -141,8 +141,10 @@ struct fw_ohci {
 	int request_generation;
 	u32 bus_seconds;
 
-	/* Spinlock for accessing fw_ohci data. Never call out of
-	 * this driver with this lock held. */
+	/*
+	 * Spinlock for accessing fw_ohci data. Never call out of
+	 * this driver with this lock held.
+	 */
 	spinlock_t lock;
 	u32 self_id_buffer[512];
 
@@ -328,13 +330,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	p.timestamp = status & 0xffff;
 	p.generation = ohci->request_generation;
 
-	/* The OHCI bus reset handler synthesizes a phy packet with
+	/*
+	 * The OHCI bus reset handler synthesizes a phy packet with
 	 * the new generation number when a bus reset happens (see
 	 * section 8.4.2.3). This helps us determine when a request
 	 * was received and make sure we send the response in the same
 	 * generation. We only need this for requests; for responses
 	 * we use the unique tlabel for finding the matching
-	 * request. */
+	 * request.
+	 */
 
 	if (p.ack + 16 == 0x09)
 		ohci->request_generation = (buffer[2] >> 16) & 0xff;
@@ -360,9 +364,11 @@ static void ar_context_tasklet(unsigned long data)
 	if (d->res_count == 0) {
 		size_t size, rest, offset;
 
-		/* This descriptor is finished and we may have a
+		/*
+		 * This descriptor is finished and we may have a
 		 * packet split across this and the next buffer. We
-		 * reuse the page for reassembling the split packet. */
+		 * reuse the page for reassembling the split packet.
+		 */
 
 		offset = offsetof(struct ar_buffer, data);
 		dma_unmap_single(ohci->card.device,
@@ -473,11 +479,13 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
 	ctx->tail_descriptor = ctx->buffer;
 	ctx->tail_descriptor_last = ctx->buffer;
 
-	/* We put a dummy descriptor in the buffer that has a NULL
+	/*
+	 * We put a dummy descriptor in the buffer that has a NULL
 	 * branch address and looks like it's been sent. That way we
 	 * have a descriptor to append DMA programs to. Also, the
 	 * ring buffer invariant is that it always has at least one
-	 * element so that head == tail means buffer full. */
+	 * element so that head == tail means buffer full.
+	 */
 
 	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
 	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
@@ -575,9 +583,11 @@ struct driver_data {
 	struct fw_packet *packet;
 };
 
-/* This function apppends a packet to the DMA queue for transmission.
+/*
+ * This function apppends a packet to the DMA queue for transmission.
  * Must always be called with the ochi->lock held to ensure proper
- * generation handling and locking around packet queue manipulation. */
+ * generation handling and locking around packet queue manipulation.
+ */
 static int
 at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 {
@@ -598,10 +608,12 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 	d[0].control = cpu_to_le16(descriptor_key_immediate);
 	d[0].res_count = cpu_to_le16(packet->timestamp);
 
-	/* The DMA format for asyncronous link packets is different
+	/*
+	 * The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
 	 * accordingly. If header_length is 8, it's a PHY packet, to
-	 * which we need to prepend an extra quadlet. */
+	 * which we need to prepend an extra quadlet.
+	 */
 
 	header = (__le32 *) &d[1];
 	if (packet->header_length > 8) {
@@ -703,14 +715,18 @@ static int handle_at_packet(struct context *context,
 		break;
 
 	case OHCI1394_evt_flushed:
-		/* The packet was flushed should give same error as
-		 * when we try to use a stale generation count. */
+		/*
+		 * The packet was flushed should give same error as
+		 * when we try to use a stale generation count.
+		 */
 		packet->ack = RCODE_GENERATION;
 		break;
 
 	case OHCI1394_evt_missing_ack:
-		/* Using a valid (current) generation count, but the
-		 * node is not on the bus or not sending acks. */
+		/*
+		 * Using a valid (current) generation count, but the
+		 * node is not on the bus or not sending acks.
+		 */
 		packet->ack = RCODE_NO_ACK;
 		break;
 
@@ -887,10 +903,12 @@ static void bus_reset_tasklet(unsigned long data)
 	}
 	ohci->node_id = reg & 0xffff;
 
-	/* The count in the SelfIDCount register is the number of
+	/*
+	 * The count in the SelfIDCount register is the number of
 	 * bytes in the self ID receive buffer. Since we also receive
 	 * the inverted quadlets and a header quadlet, we shift one
-	 * bit extra to get the actual number of self IDs. */
+	 * bit extra to get the actual number of self IDs.
+	 */
 
 	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
 	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
@@ -901,7 +919,8 @@ static void bus_reset_tasklet(unsigned long data)
 		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
 	}
 
-	/* Check the consistency of the self IDs we just read. The
+	/*
+	 * Check the consistency of the self IDs we just read. The
 	 * problem we face is that a new bus reset can start while we
 	 * read out the self IDs from the DMA buffer. If this happens,
 	 * the DMA buffer will be overwritten with new self IDs and we
@@ -911,7 +930,8 @@ static void bus_reset_tasklet(unsigned long data)
 	 * self IDs in the buffer before reading them out and compare
 	 * it to the current generation after reading them out. If
 	 * the two generations match we know we have a consistent set
-	 * of self IDs. */
+	 * of self IDs.
+	 */
 
 	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
 	if (new_generation != generation) {
@@ -928,12 +948,14 @@ static void bus_reset_tasklet(unsigned long data)
 	context_stop(&ohci->at_response_ctx);
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
-	/* This next bit is unrelated to the AT context stuff but we
+	/*
+	 * This next bit is unrelated to the AT context stuff but we
 	 * have to do it under the spinlock also. If a new config rom
 	 * was set up before this reset, the old one is now no longer
 	 * in use and we can free it. Update the config rom pointers
 	 * to point to the current config rom and clear the
-	 * next_config_rom pointer so a new udpate can take place. */
+	 * next_config_rom pointer so a new udpate can take place.
+	 */
 
 	if (ohci->next_config_rom != NULL) {
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -942,10 +964,12 @@ static void bus_reset_tasklet(unsigned long data)
 		ohci->config_rom_bus = ohci->next_config_rom_bus;
 		ohci->next_config_rom = NULL;
 
-		/* Restore config_rom image and manually update
+		/*
+		 * Restore config_rom image and manually update
 		 * config_rom registers. Writing the header quadlet
 		 * will indicate that the config rom is ready, so we
-		 * do that last. */
+		 * do that last.
+		 */
 		reg_write(ohci, OHCI1394_BusOptions,
 			  be32_to_cpu(ohci->config_rom[2]));
 		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
@@ -1018,7 +1042,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct pci_dev *dev = to_pci_dev(card->device);
 
-	/* When the link is not yet enabled, the atomic config rom
+	/*
+	 * When the link is not yet enabled, the atomic config rom
 	 * update mechanism described below in ohci_set_config_rom()
 	 * is not active. We have to update ConfigRomHeader and
 	 * BusOptions manually, and the write to ConfigROMmap takes
@@ -1067,8 +1092,10 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 		  OHCI1394_HCControl_BIBimageValid);
 	flush_writes(ohci);
 
-	/* We are ready to go, initiate bus reset to finish the
-	 * initialization. */
+	/*
+	 * We are ready to go, initiate bus reset to finish the
+	 * initialization.
+	 */
 
 	fw_core_initiate_bus_reset(&ohci->card, 1);
 
@@ -1086,7 +1113,8 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 
 	ohci = fw_ohci(card);
 
-	/* When the OHCI controller is enabled, the config rom update
+	/*
+	 * When the OHCI controller is enabled, the config rom update
 	 * mechanism is a bit tricky, but easy enough to use. See
 	 * section 5.5.6 in the OHCI specification.
 	 *
@@ -1141,11 +1169,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
-	/* Now initiate a bus reset to have the changes take
+	/*
+	 * Now initiate a bus reset to have the changes take
 	 * effect. We clean up the old config rom memory and DMA
 	 * mappings in the bus reset tasklet, since the OHCI
 	 * controller could need to access it before the bus reset
-	 * takes effect. */
+	 * takes effect.
+	 */
 	if (retval == 0)
 		fw_core_initiate_bus_reset(&ohci->card, 1);
 
@@ -1196,8 +1226,10 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
 	unsigned long flags;
 	int n, retval = 0;
 
-	/* FIXME: Make sure this bitmask is cleared when we clear the busReset
-	 * interrupt bit. Clear physReqResourceAllBuses on bus reset. */
+	/*
+	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
+	 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
+	 */
 
 	spin_lock_irqsave(&ohci->lock, flags);
 
@@ -1206,8 +1238,10 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
 		goto out;
 	}
 
-	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
-	 * enabled for _all_ nodes on remote buses. */
+	/*
+	 * Note, if the node ID contains a non-local bus ID, physical DMA is
+	 * enabled for _all_ nodes on remote buses.
+	 */
 
 	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
 	if (n < 32)
@@ -1257,11 +1291,13 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 	p = db + 1;
 	end = p + header_length;
 	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
-		/* The iso header is byteswapped to little endian by
+		/*
+		 * The iso header is byteswapped to little endian by
 		 * the controller, but the remaining header quadlets
 		 * are big endian. We want to present all the headers
 		 * as big endian, so we have to swap the first
-		 * quadlet. */
+		 * quadlet.
+		 */
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
 		i += ctx->base.header_size;
@@ -1457,8 +1493,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 	u32 payload_index, payload_end_index, next_page_index;
 	int page, end_page, i, length, offset;
 
-	/* FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate.. */
+	/*
+	 * FIXME: Cycle lost behavior should be configurable: lose
+	 * packet, retransmit or terminate..
+	 */
 
 	p = packet;
 	payload_index = payload;
@@ -1553,8 +1591,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 	u32 z, header_z, length, rest;
 	int page, offset, packet_count, header_size;
 
-	/* FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate.. */
+	/*
+	 * FIXME: Cycle lost behavior should be configurable: lose
+	 * packet, retransmit or terminate..
+	 */
 
 	if (packet->skip) {
 		d = context_get_descriptors(&ctx->context, 2, &d_bus);
@@ -1572,8 +1612,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 	p = packet;
 	z = 2;
 
-	/* The OHCI controller puts the status word in the header
-	 * buffer too, so we need 4 extra bytes per packet. */
+	/*
+	 * The OHCI controller puts the status word in the header
+	 * buffer too, so we need 4 extra bytes per packet.
+	 */
 	packet_count = p->header_length / ctx->base.header_size;
 	header_size = packet_count * (ctx->base.header_size + 4);
 
@@ -1673,8 +1715,6 @@ static int software_reset(struct fw_ohci *ohci)
 	return -EBUSY;
 }
 
-/* ---------- pci subsystem interface ---------- */
-
 enum {
 	CLEANUP_SELF_ID,
 	CLEANUP_REGISTERS,
@@ -1753,11 +1793,13 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 		return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
 	}
 
-	/* Now enable LPS, which we need in order to start accessing
+	/*
+	 * Now enable LPS, which we need in order to start accessing
 	 * most of the registers. In fact, on some cards (ALI M5251),
 	 * accessing registers in the SClk domain without LPS enabled
 	 * will lock up the machine. Wait 50msec to make sure we have
-	 * full link enabled. */
+	 * full link enabled.
+	 */
 	reg_write(ohci, OHCI1394_HCControlSet,
 		  OHCI1394_HCControl_LPS |
 		  OHCI1394_HCControl_postedWriteEnable);
@@ -1854,8 +1896,10 @@ static void pci_remove(struct pci_dev *dev)
 	flush_writes(ohci);
 	fw_core_remove_card(&ohci->card);
 
-	/* FIXME: Fail all pending packets here, now that the upper
-	 * layers can't queue any more. */
+	/*
+	 * FIXME: Fail all pending packets here, now that the upper
+	 * layers can't queue any more.
+	 */
 
 	software_reset(ohci);
 	free_irq(dev->irq, ohci);
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 7ce9b811431a..eb3bddb162e4 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1,5 +1,5 @@
-/* -*- c-basic-offset: 8 -*-
- * fw-spb2.c -- SBP2 driver (SCSI over IEEE1394)
+/*
+ * SBP2 driver (SCSI over IEEE1394)
  *
  * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -18,7 +18,8 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-/* The basic structure of this driver is based the old storage driver,
+/*
+ * The basic structure of this driver is based on the old storage driver,
  * drivers/ieee1394/sbp2.c, originally written by
  * James Goodwin <jamesg@filanet.com>
  * with later contributions and ongoing maintenance from
@@ -60,11 +61,13 @@ struct sbp2_device {
 	u32 workarounds;
 	int login_id;
 
-	/* We cache these addresses and only update them once we've
+	/*
+	 * We cache these addresses and only update them once we've
 	 * logged in or reconnected to the sbp2 device. That way, any
 	 * IO to the device will automatically fail and get retried if
 	 * it happens in a window where the device is not ready to
-	 * handle it (e.g. after a bus reset but before we reconnect). */
+	 * handle it (e.g. after a bus reset but before we reconnect).
+	 */
 	int node_id;
 	int address_high;
 	int generation;
@@ -239,10 +242,14 @@ static const struct {
 	.model = ~0,
 	.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
 	},
-	/* There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+
+	/*
+	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
 	 * these iPods do not feature the read_capacity bug according
 	 * to one report. Read_capacity behaviour as well as model_id
-	 * could change due to Apple-supplied firmware updates though. */
+	 * could change due to Apple-supplied firmware updates though.
+	 */
+
 	/* iPod 4th generation. */ {
 	.firmware_revision = 0x0a2700,
 	.model = 0x000021,
@@ -398,9 +405,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	if (orb == NULL)
 		return -ENOMEM;
 
-	/* The sbp2 device is going to send a block read request to
-	 * read out the request from host memory, so map it for
-	 * dma. */
+	/*
+	 * The sbp2 device is going to send a block read request to
+	 * read out the request from host memory, so map it for dma.
+	 */
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
@@ -426,10 +434,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
 	orb->request.status_fifo.low = sd->address_handler.offset;
 
-	/* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
+	/*
+	 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
 	 * login and 1 second reconnect time. The reconnect setting
-	 * is probably fine, but the exclusive login should be an
-	 * option. */
+	 * is probably fine, but the exclusive login should be an option.
+	 */
 	if (function == SBP2_LOGIN_REQUEST) {
 		orb->request.misc |=
 			management_orb_exclusive |
@@ -592,8 +601,10 @@ static void sbp2_login(struct work_struct *work)
592 sbp2_send_management_orb(unit, sd->node_id, sd->generation, 601 sbp2_send_management_orb(unit, sd->node_id, sd->generation,
593 SBP2_LOGOUT_REQUEST, sd->login_id, 602 SBP2_LOGOUT_REQUEST, sd->login_id,
594 NULL); 603 NULL);
595 /* Set this back to sbp2_login so we fall back and 604 /*
596 * retry login on bus reset. */ 605 * Set this back to sbp2_login so we fall back and
606 * retry login on bus reset.
607 */
597 PREPARE_DELAYED_WORK(&sd->work, sbp2_login); 608 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
598 } 609 }
599 kref_put(&sd->kref, release_sbp2_device); 610 kref_put(&sd->kref, release_sbp2_device);
@@ -633,9 +644,11 @@ static int sbp2_probe(struct device *dev)
633 return -EBUSY; 644 return -EBUSY;
634 } 645 }
635 646
636 /* Scan unit directory to get management agent address, 647 /*
648 * Scan unit directory to get management agent address,
637 * firmware revision and model. Initialize firmware_revision 649 * firmware revision and model. Initialize firmware_revision
638 * and model to values that won't match anything in our table. */ 650 * and model to values that won't match anything in our table.
651 */
639 firmware_revision = 0xff000000; 652 firmware_revision = 0xff000000;
640 model = 0xff000000; 653 model = 0xff000000;
641 fw_csr_iterator_init(&ci, unit->directory); 654 fw_csr_iterator_init(&ci, unit->directory);
@@ -673,9 +686,11 @@ static int sbp2_probe(struct device *dev)
673 686
674 get_device(&unit->device); 687 get_device(&unit->device);
675 688
676 /* We schedule work to do the login so we can easily 689 /*
690 * We schedule work to do the login so we can easily
677 * reschedule retries. Always get the ref before scheduling 691 * reschedule retries. Always get the ref before scheduling
678 * work.*/ 692 * work.
693 */
679 INIT_DELAYED_WORK(&sd->work, sbp2_login); 694 INIT_DELAYED_WORK(&sd->work, sbp2_login);
680 if (schedule_delayed_work(&sd->work, 0)) 695 if (schedule_delayed_work(&sd->work, 0))
681 kref_get(&sd->kref); 696 kref_get(&sd->kref);
@@ -834,9 +849,11 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
834 result = sbp2_status_to_sense_data(status_get_data(*status), 849 result = sbp2_status_to_sense_data(status_get_data(*status),
835 orb->cmd->sense_buffer); 850 orb->cmd->sense_buffer);
836 } else { 851 } else {
837 /* If the orb completes with status == NULL, something 852 /*
853 * If the orb completes with status == NULL, something
838 * went wrong, typically a bus reset happened mid-orb 854 * went wrong, typically a bus reset happened mid-orb
839 * or when sending the write (less likely). */ 855 * or when sending the write (less likely).
856 */
840 result = DID_BUS_BUSY << 16; 857 result = DID_BUS_BUSY << 16;
841 } 858 }
842 859
@@ -878,11 +895,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
878 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg, 895 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
879 orb->cmd->sc_data_direction); 896 orb->cmd->sc_data_direction);
880 897
881 /* Handle the special case where there is only one element in 898 /*
899 * Handle the special case where there is only one element in
882 * the scatter list by converting it to an immediate block 900 * the scatter list by converting it to an immediate block
883 * request. This is also a workaround for broken devices such 901 * request. This is also a workaround for broken devices such
884 * as the second generation iPod which doesn't support page 902 * as the second generation iPod which doesn't support page
885 * tables. */ 903 * tables.
904 */
886 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { 905 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
887 orb->request.data_descriptor.high = sd->address_high; 906 orb->request.data_descriptor.high = sd->address_high;
888 orb->request.data_descriptor.low = sg_dma_address(sg); 907 orb->request.data_descriptor.low = sg_dma_address(sg);
@@ -891,8 +910,10 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
891 return; 910 return;
892 } 911 }
893 912
894 /* Convert the scatterlist to an sbp2 page table. If any 913 /*
895 * scatterlist entries are too big for sbp2 we split the as we go. */ 914 * Convert the scatterlist to an sbp2 page table. If any
895 * scatterlist entries are too big for sbp2 we split them as we go. */ 914 * Convert the scatterlist to an sbp2 page table. If any
915 * scatterlist entries are too big for sbp2 we split them as we go.
896 for (i = 0, j = 0; i < count; i++) { 917 for (i = 0, j = 0; i < count; i++) {
897 sg_len = sg_dma_len(sg + i); 918 sg_len = sg_dma_len(sg + i);
898 sg_addr = sg_dma_address(sg + i); 919 sg_addr = sg_dma_address(sg + i);
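
The loop beginning above walks the mapped scatterlist and, as the comment says, splits any segment that is too large for a single sbp2 page-table entry. A self-contained sketch of that splitting follows; the entry layout, the 0xf000 limit and all names are assumptions for illustration, not the driver's actual page-table format.

#include <stdio.h>

#define MAX_ELEMENT_LENGTH 0xf000u	/* assumed per-entry limit for this sketch */

struct pt_entry {
	unsigned int addr;
	unsigned int length;
};

/* Split one DMA segment into entries no larger than the limit. */
static int split_segment(unsigned int addr, unsigned int len,
			 struct pt_entry *pt, int j)
{
	while (len > 0) {
		unsigned int chunk = len < MAX_ELEMENT_LENGTH ?
				     len : MAX_ELEMENT_LENGTH;

		pt[j].addr = addr;
		pt[j].length = chunk;
		addr += chunk;
		len -= chunk;
		j++;
	}
	return j;	/* index of the next free entry */
}

int main(void)
{
	struct pt_entry pt[8];
	/* 0x22000 bytes end up split across three entries of at most 0xf000. */
	int i, n = split_segment(0x10000000u, 0x22000u, pt, 0);

	for (i = 0; i < n; i++)
		printf("entry %d: addr=0x%08x len=0x%x\n",
		       i, pt[i].addr, pt[i].length);
	return 0;
}
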
@@ -908,11 +929,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
908 929
909 size = sizeof orb->page_table[0] * j; 930 size = sizeof orb->page_table[0] * j;
910 931
911 /* The data_descriptor pointer is the one case where we need 932 /*
933 * The data_descriptor pointer is the one case where we need
912 * to fill in the node ID part of the address. All other 934 * to fill in the node ID part of the address. All other
913 * pointers assume that the data referenced reside on the 935 * pointers assume that the data referenced reside on the
914 * initiator (i.e. us), but data_descriptor can refer to data 936 * initiator (i.e. us), but data_descriptor can refer to data
915 * on other nodes so we need to put our ID in descriptor.high. */ 937 * on other nodes so we need to put our ID in descriptor.high.
938 */
916 939
917 orb->page_table_bus = 940 orb->page_table_bus =
918 dma_map_single(device->card->device, orb->page_table, 941 dma_map_single(device->card->device, orb->page_table,
@@ -933,8 +956,10 @@ static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
933 struct fw_device *device = fw_device(unit->device.parent); 956 struct fw_device *device = fw_device(unit->device.parent);
934 struct sbp2_device *sd = unit->device.driver_data; 957 struct sbp2_device *sd = unit->device.driver_data;
935 958
936 /* As for map_scatterlist, we need to fill in the high bits of 959 /*
937 * the data_descriptor pointer. */ 960 * As for map_scatterlist, we need to fill in the high bits of
961 * the data_descriptor pointer.
962 */
938 963
939 orb->request_buffer_bus = 964 orb->request_buffer_bus =
940 dma_map_single(device->card->device, 965 dma_map_single(device->card->device,
@@ -956,8 +981,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
956 struct sbp2_device *sd = unit->device.driver_data; 981 struct sbp2_device *sd = unit->device.driver_data;
957 struct sbp2_command_orb *orb; 982 struct sbp2_command_orb *orb;
958 983
959 /* Bidirectional commands are not yet implemented, and unknown 984 /*
960 * transfer direction not handled. */ 985 * Bidirectional commands are not yet implemented, and unknown
986 * transfer direction not handled.
987 */
961 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { 988 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
962 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); 989 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
963 goto fail_alloc; 990 goto fail_alloc;
@@ -983,10 +1010,12 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
983 1010
984 orb->request.next.high = SBP2_ORB_NULL; 1011 orb->request.next.high = SBP2_ORB_NULL;
985 orb->request.next.low = 0x0; 1012 orb->request.next.low = 0x0;
986 /* At speed 100 we can do 512 bytes per packet, at speed 200, 1013 /*
1014 * At speed 100 we can do 512 bytes per packet, at speed 200,
987 * 1024 bytes per packet etc. The SBP-2 max_payload field 1015 * 1024 bytes per packet etc. The SBP-2 max_payload field
988 * specifies the max payload size as 2 ^ (max_payload + 2), so 1016 * specifies the max payload size as 2 ^ (max_payload + 2), so
989 * if we set this to max_speed + 7, we get the right value. */ 1017 * if we set this to max_speed + 7, we get the right value.
1018 */
990 orb->request.misc = 1019 orb->request.misc =
991 command_orb_max_payload(device->node->max_speed + 7) | 1020 command_orb_max_payload(device->node->max_speed + 7) |
992 command_orb_speed(device->node->max_speed) | 1021 command_orb_speed(device->node->max_speed) |
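
The arithmetic in the comment above checks out: S100 allows 512-byte payloads and each higher speed code doubles that, i.e. 2^(speed + 9) bytes, while SBP-2 encodes the limit as 2^(max_payload + 2), so max_payload = speed + 7 yields exactly the right size. A quick standalone check:

#include <stdio.h>

int main(void)
{
	int speed;

	/* speed code 0 = S100, 1 = S200, 2 = S400, 3 = S800 */
	for (speed = 0; speed <= 3; speed++) {
		int max_payload = speed + 7;

		printf("S%d: max_payload=%d -> %d bytes\n",
		       100 << speed, max_payload, 1 << (max_payload + 2));
	}
	return 0;
}
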
@@ -1002,9 +1031,11 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1002 if (cmd->use_sg) { 1031 if (cmd->use_sg) {
1003 sbp2_command_orb_map_scatterlist(orb); 1032 sbp2_command_orb_map_scatterlist(orb);
1004 } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) { 1033 } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) {
1005 /* FIXME: Need to split this into a sg list... but 1034 /*
1035 * FIXME: Need to split this into a sg list... but
1006 * could we get the scsi or blk layer to do that by 1036 * could we get the scsi or blk layer to do that by
1007 * reporting our max supported block size? */ 1037 * reporting our max supported block size?
1038 */
1008 fw_error("command > 64k\n"); 1039 fw_error("command > 64k\n");
1009 goto fail_bufflen; 1040 goto fail_bufflen;
1010 } else if (cmd->request_bufflen > 0) { 1041 } else if (cmd->request_bufflen > 0) {
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 018c6b8afba6..c26d5d5e8d53 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -1,6 +1,5 @@
1/* -*- c-basic-offset: 8 -*- 1/*
2 * 2 * Incremental bus scan, based on bus topology
3 * fw-topology.c - Incremental bus scan, based on bus topology
4 * 3 *
5 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> 4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * 5 *
@@ -69,10 +68,12 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
69 sid++; 68 sid++;
70 q = *sid; 69 q = *sid;
71 70
72 /* Check that the extra packets actually are 71 /*
72 * Check that the extra packets actually are
73 * extended self ID packets and that the 73 * extended self ID packets and that the
74 * sequence numbers in the extended self ID 74 * sequence numbers in the extended self ID
75 * packets increase as expected. */ 75 * packets increase as expected.
76 */
76 77
77 if (!self_id_extended(q) || 78 if (!self_id_extended(q) ||
78 seq != self_id_ext_sequence(q)) 79 seq != self_id_ext_sequence(q))
@@ -113,7 +114,8 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
113 return node; 114 return node;
114} 115}
115 116
116/* Compute the maximum hop count for this node and its children. The 117/*
118 * Compute the maximum hop count for this node and its children. The
117 * maximum hop count is the maximum number of connections between any 119 * maximum hop count is the maximum number of connections between any
118 * two nodes in the subtree rooted at this node. We need this for 120 * two nodes in the subtree rooted at this node. We need this for
119 * setting the gap count. As we build the tree bottom up in 121 * setting the gap count. As we build the tree bottom up in
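
The hop count described above can be computed bottom up, children before their parent: a node's maximum hop count is the larger of (a) the biggest hop count found in any single child subtree and (b) the depths of its two deepest child subtrees plus the two links joining them through this node. A generic, self-contained illustration follows; the types and names are invented and this is not the driver's fw_node code.

#include <stdio.h>

struct node {
	int child_count;
	struct node **children;
	int depth;	/* longest downward path from this node, in hops */
	int max_hops;	/* longest path between any two nodes in this subtree */
};

static void update_hop_count(struct node *n)
{
	int d0 = 0, d1 = 0;	/* two deepest child subtrees, in hops */
	int max_child_hops = 0;

	for (int i = 0; i < n->child_count; i++) {
		struct node *c = n->children[i];

		if (c->depth + 1 > d0) {
			d1 = d0;
			d0 = c->depth + 1;
		} else if (c->depth + 1 > d1) {
			d1 = c->depth + 1;
		}
		if (c->max_hops > max_child_hops)
			max_child_hops = c->max_hops;
	}
	n->depth = d0;
	n->max_hops = d0 + d1 > max_child_hops ? d0 + d1 : max_child_hops;
}

int main(void)
{
	struct node leaf1 = { 0 }, leaf2 = { 0 }, leaf3 = { 0 };
	struct node *mid_children[] = { &leaf1, &leaf2 };
	struct node mid = { 2, mid_children };
	struct node *root_children[] = { &mid, &leaf3 };
	struct node root = { 2, root_children };

	/* Bottom up: children are finished before their parent. */
	update_hop_count(&leaf1);
	update_hop_count(&leaf2);
	update_hop_count(&leaf3);
	update_hop_count(&mid);
	update_hop_count(&root);

	printf("max hops at root: %d\n", root.max_hops);	/* 3: leaf1-mid-root-leaf3 */
	return 0;
}
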
@@ -202,8 +204,10 @@ static struct fw_node *build_tree(struct fw_card *card,
202 return NULL; 204 return NULL;
203 } 205 }
204 206
205 /* Seek back from the top of our stack to find the 207 /*
206 * start of the child nodes for this node. */ 208 * Seek back from the top of our stack to find the
209 * start of the child nodes for this node.
210 */
207 for (i = 0, h = &stack; i < child_port_count; i++) 211 for (i = 0, h = &stack; i < child_port_count; i++)
208 h = h->prev; 212 h = h->prev;
209 child = fw_node(h); 213 child = fw_node(h);
@@ -230,7 +234,8 @@ static struct fw_node *build_tree(struct fw_card *card,
230 for (i = 0; i < port_count; i++) { 234 for (i = 0; i < port_count; i++) {
231 switch (get_port_type(sid, i)) { 235 switch (get_port_type(sid, i)) {
232 case SELFID_PORT_PARENT: 236 case SELFID_PORT_PARENT:
233 /* Who's your daddy? We don't know the 237 /*
238 * Who's your daddy? We don't know the
234 * parent node at this time, so we 239 * parent node at this time, so we
235 * temporarily abuse node->color for 240 * temporarily abuse node->color for
236 * remembering the entry in the 241 * remembering the entry in the
@@ -245,8 +250,10 @@ static struct fw_node *build_tree(struct fw_card *card,
245 250
246 case SELFID_PORT_CHILD: 251 case SELFID_PORT_CHILD:
247 node->ports[i].node = child; 252 node->ports[i].node = child;
248 /* Fix up parent reference for this 253 /*
249 * child node. */ 254 * Fix up parent reference for this
255 * child node.
256 */
250 child->ports[child->color].node = node; 257 child->ports[child->color].node = node;
251 child->color = card->color; 258 child->color = card->color;
252 child = fw_node(child->link.next); 259 child = fw_node(child->link.next);
@@ -254,9 +261,11 @@ static struct fw_node *build_tree(struct fw_card *card,
254 } 261 }
255 } 262 }
256 263
257 /* Check that the node reports exactly one parent 264 /*
265 * Check that the node reports exactly one parent
258 * port, except for the root, which of course should 266 * port, except for the root, which of course should
259 * have no parents. */ 267 * have no parents.
268 */
260 if ((next_sid == end && parent_count != 0) || 269 if ((next_sid == end && parent_count != 0) ||
261 (next_sid < end && parent_count != 1)) { 270 (next_sid < end && parent_count != 1)) {
262 fw_error("Parent port inconsistency for node %d: " 271 fw_error("Parent port inconsistency for node %d: "
@@ -269,9 +278,11 @@ static struct fw_node *build_tree(struct fw_card *card,
269 list_add_tail(&node->link, &stack); 278 list_add_tail(&node->link, &stack);
270 stack_depth += 1 - child_port_count; 279 stack_depth += 1 - child_port_count;
271 280
272 /* If not all PHYs report the same gap count 281 /*
282 * If not all PHYs report the same gap count
273 * setting, we fall back to 63 which will force a gap 283 * setting, we fall back to 63 which will force a gap
274 * count reconfiguration and a reset. */ 284 * count reconfiguration and a reset.
285 */
275 if (self_id_gap_count(q) != gap_count) 286 if (self_id_gap_count(q) != gap_count)
276 gap_count = 63; 287 gap_count = 63;
277 288
@@ -427,9 +438,11 @@ update_tree(struct fw_card *card, struct fw_node *root)
427 438
428 for (i = 0; i < node0->port_count; i++) { 439 for (i = 0; i < node0->port_count; i++) {
429 if (node0->ports[i].node && node1->ports[i].node) { 440 if (node0->ports[i].node && node1->ports[i].node) {
430 /* This port didn't change, queue the 441 /*
442 * This port didn't change, queue the
431 * connected node for further 443 * connected node for further
432 * investigation. */ 444 * investigation.
445 */
433 if (node0->ports[i].node->color == card->color) 446 if (node0->ports[i].node->color == card->color)
434 continue; 447 continue;
435 list_add_tail(&node0->ports[i].node->link, 448 list_add_tail(&node0->ports[i].node->link,
@@ -437,19 +450,23 @@ update_tree(struct fw_card *card, struct fw_node *root)
437 list_add_tail(&node1->ports[i].node->link, 450 list_add_tail(&node1->ports[i].node->link,
438 &list1); 451 &list1);
439 } else if (node0->ports[i].node) { 452 } else if (node0->ports[i].node) {
440 /* The nodes connected here were 453 /*
454 * The nodes connected here were
441 * unplugged; unref the lost nodes and 455 * unplugged; unref the lost nodes and
442 * queue FW_NODE_LOST callbacks for 456 * queue FW_NODE_LOST callbacks for
443 * them. */ 457 * them.
458 */
444 459
445 for_each_fw_node(card, node0->ports[i].node, 460 for_each_fw_node(card, node0->ports[i].node,
446 report_lost_node); 461 report_lost_node);
447 node0->ports[i].node = NULL; 462 node0->ports[i].node = NULL;
448 } else if (node1->ports[i].node) { 463 } else if (node1->ports[i].node) {
449 /* One or more nodes were connected to 464 /*
465 * One or more nodes were connected to
450 * this port. Move the new nodes into 466 * this port. Move the new nodes into
451 * the tree and queue FW_NODE_CREATED 467 * the tree and queue FW_NODE_CREATED
452 * callbacks for them. */ 468 * callbacks for them.
469 */
453 move_tree(node0, node1, i); 470 move_tree(node0, node1, i);
454 for_each_fw_node(card, node0->ports[i].node, 471 for_each_fw_node(card, node0->ports[i].node,
455 report_found_node); 472 report_found_node);
@@ -486,9 +503,11 @@ fw_core_handle_bus_reset(struct fw_card *card,
486 503
487 spin_lock_irqsave(&card->lock, flags); 504 spin_lock_irqsave(&card->lock, flags);
488 505
489 /* If the new topology has a different self_id_count the topology 506 /*
507 * If the new topology has a different self_id_count the topology
490 * changed, either nodes were added or removed. In that case we 508 * changed, either nodes were added or removed. In that case we
491 * reset the IRM reset counter. */ 509 * reset the IRM reset counter.
510 */
492 if (card->self_id_count != self_id_count) 511 if (card->self_id_count != self_id_count)
493 card->bm_retries = 0; 512 card->bm_retries = 0;
494 513
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index 0778077e9d80..be2a6bed3847 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -1,7 +1,4 @@
1/* -*- c-basic-offset: 8 -*- 1/*
2 *
3 * fw-topology.h -- Incremental bus scan, based on bus topology
4 *
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * 3 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index d36dd512a59d..e4355de710fa 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -1,6 +1,5 @@
1/* -*- c-basic-offset: 8 -*- 1/*
2 * 2 * Core IEEE1394 transaction logic
3 * fw-transaction.c - core IEEE1394 transaction logic
4 * 3 *
5 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> 4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * 5 *
@@ -85,21 +84,27 @@ close_transaction(struct fw_transaction *transaction,
85 return -ENOENT; 84 return -ENOENT;
86} 85}
87 86
88/* Only valid for transactions that are potentially pending (i.e. have 87/*
89 * been sent). */ 88 * Only valid for transactions that are potentially pending (i.e. have
89 * been sent).
90 */
90int 91int
91fw_cancel_transaction(struct fw_card *card, 92fw_cancel_transaction(struct fw_card *card,
92 struct fw_transaction *transaction) 93 struct fw_transaction *transaction)
93{ 94{
94 /* Cancel the packet transmission if it's still queued. That 95 /*
96 * Cancel the packet transmission if it's still queued. That
95 * will call the packet transmission callback which cancels 97 * will call the packet transmission callback which cancels
96 * the transaction. */ 98 * the transaction.
99 */
97 100
98 if (card->driver->cancel_packet(card, &transaction->packet) == 0) 101 if (card->driver->cancel_packet(card, &transaction->packet) == 0)
99 return 0; 102 return 0;
100 103
101 /* If the request packet has already been sent, we need to see 104 /*
102 * if the transaction is still pending and remove it in that case. */ 105 * If the request packet has already been sent, we need to see
106 * if the transaction is still pending and remove it in that case.
107 */
103 108
104 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); 109 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
105} 110}
@@ -131,8 +136,10 @@ transmit_complete_callback(struct fw_packet *packet,
131 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); 136 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
132 break; 137 break;
133 default: 138 default:
134 /* In this case the ack is really a juju specific 139 /*
135 * rcode, so just forward that to the callback. */ 140 * In this case the ack is really a juju specific
141 * rcode, so just forward that to the callback.
142 */
136 close_transaction(t, card, status, NULL, 0); 143 close_transaction(t, card, status, NULL, 0);
137 break; 144 break;
138 } 145 }
@@ -243,13 +250,17 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
243 unsigned long flags; 250 unsigned long flags;
244 int tlabel, source; 251 int tlabel, source;
245 252
246 /* Bump the flush timer up 100ms first of all so we 253 /*
247 * don't race with a flush timer callback. */ 254 * Bump the flush timer up 100ms first of all so we
255 * don't race with a flush timer callback.
256 */
248 257
249 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); 258 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
250 259
251 /* Allocate tlabel from the bitmap and put the transaction on 260 /*
252 * the list while holding the card spinlock. */ 261 * Allocate tlabel from the bitmap and put the transaction on
262 * the list while holding the card spinlock.
263 */
253 264
254 spin_lock_irqsave(&card->lock, flags); 265 spin_lock_irqsave(&card->lock, flags);
255 266
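
A transaction label is a 6-bit field in the request header, so only 64 of them can be outstanding at a time, and a bitmap is a cheap way to track which labels are free. A minimal sketch of such a bitmap allocator follows; it omits the locking and uses invented names rather than the core's actual bookkeeping.

#include <stdio.h>
#include <stdint.h>

static uint64_t tlabel_mask;	/* bit n set => label n is in use */

static int alloc_tlabel(void)
{
	int tlabel;

	for (tlabel = 0; tlabel < 64; tlabel++) {
		if (!(tlabel_mask & (UINT64_C(1) << tlabel))) {
			tlabel_mask |= UINT64_C(1) << tlabel;
			return tlabel;
		}
	}
	return -1;	/* all 64 labels are outstanding */
}

static void free_tlabel(int tlabel)
{
	tlabel_mask &= ~(UINT64_C(1) << tlabel);
}

int main(void)
{
	int a = alloc_tlabel();
	int b = alloc_tlabel();

	printf("allocated %d and %d\n", a, b);			/* 0 and 1 */
	free_tlabel(a);
	printf("after freeing %d, next is %d\n", a, alloc_tlabel());	/* 0 again */
	return 0;
}
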
@@ -336,9 +347,11 @@ void fw_flush_transactions(struct fw_card *card)
336 list_for_each_entry_safe(t, next, &list, link) { 347 list_for_each_entry_safe(t, next, &list, link) {
337 card->driver->cancel_packet(card, &t->packet); 348 card->driver->cancel_packet(card, &t->packet);
338 349
339 /* At this point cancel_packet will never call the 350 /*
351 * At this point cancel_packet will never call the
340 * transaction callback, since we just took all the 352 * transaction callback, since we just took all the
341 * transactions out of the list. So do it here.*/ 353 * transactions out of the list. So do it here.
354 */
342 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); 355 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
343 } 356 }
344} 357}
@@ -587,9 +600,11 @@ allocate_request(struct fw_packet *p)
587void 600void
588fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) 601fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
589{ 602{
590 /* Broadcast packets are reported as ACK_COMPLETE, so this 603 /*
604 * Broadcast packets are reported as ACK_COMPLETE, so this
591 * check is sufficient to ensure we don't send a response to 605 * check is sufficient to ensure we don't send a response to
592 * broadcast packets or posted writes. */ 606 * broadcast packets or posted writes.
607 */
593 if (request->ack != ACK_PENDING) 608 if (request->ack != ACK_PENDING)
594 return; 609 return;
595 610
@@ -639,11 +654,13 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
639 offset, request->length); 654 offset, request->length);
640 spin_unlock_irqrestore(&address_handler_lock, flags); 655 spin_unlock_irqrestore(&address_handler_lock, flags);
641 656
642 /* FIXME: lookup the fw_node corresponding to the sender of 657 /*
642 /* FIXME: look up the fw_node corresponding to the sender of 657 /*
658 * FIXME: look up the fw_node corresponding to the sender of
644 * of the node ID. We may also want to move the address 660 * of the node ID. We may also want to move the address
645 * allocations to fw_node so we only do this callback if the 661 * allocations to fw_node so we only do this callback if the
646 * upper layers registered it for this node. */ 662 * upper layers registered it for this node.
663 */
647 664
648 if (handler == NULL) 665 if (handler == NULL)
649 fw_send_response(card, request, RCODE_ADDRESS_ERROR); 666 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
@@ -687,8 +704,10 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
687 return; 704 return;
688 } 705 }
689 706
690 /* FIXME: sanity check packet, is length correct, do tcodes 707 /*
708 * FIXME: sanity check packet, is length correct, do tcodes
709 * and addresses match.
710 */
692 711
693 switch (tcode) { 712 switch (tcode) {
694 case TCODE_READ_QUADLET_RESPONSE: 713 case TCODE_READ_QUADLET_RESPONSE:
@@ -790,11 +809,13 @@ handle_registers(struct fw_card *card, struct fw_request *request,
790 case CSR_BANDWIDTH_AVAILABLE: 809 case CSR_BANDWIDTH_AVAILABLE:
791 case CSR_CHANNELS_AVAILABLE_HI: 810 case CSR_CHANNELS_AVAILABLE_HI:
792 case CSR_CHANNELS_AVAILABLE_LO: 811 case CSR_CHANNELS_AVAILABLE_LO:
793 /* FIXME: these are handled by the OHCI hardware and 812 /*
813 * FIXME: these are handled by the OHCI hardware and
794 * the stack never sees these requests. If we add 814 * the stack never sees these requests. If we add
795 * support for a new type of controller that doesn't 815 * support for a new type of controller that doesn't
796 * handle this in hardware we need to deal with these 816 * handle this in hardware we need to deal with these
797 * transactions. */ 817 * transactions.
818 */
798 BUG(); 819 BUG();
799 break; 820 break;
800 821
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 63527340152b..1b7e4dc6c2c1 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -1,7 +1,4 @@
1/* -*- c-basic-offset: 8 -*- 1/*
2 *
3 * fw-transaction.h - Header for IEEE1394 transaction logic
4 *
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * 3 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -209,7 +206,8 @@ struct fw_packet {
209 size_t payload_length; 206 size_t payload_length;
210 u32 timestamp; 207 u32 timestamp;
211 208
212 /* This callback is called when the packet transmission has 209 /*
210 * This callback is called when the packet transmission has
213 * completed; for successful transmission, the status code is 211 * completed; for successful transmission, the status code is
214 * the ack received from the destination, otherwise it's a 212 * the ack received from the destination, otherwise it's a
215 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. 213 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
@@ -230,8 +228,10 @@ struct fw_transaction {
230 228
231 struct fw_packet packet; 229 struct fw_packet packet;
232 230
233 /* The data passed to the callback is valid only during the 231 /*
234 * callback. */ 232 * The data passed to the callback is valid only during the
233 * callback.
234 */
235 fw_transaction_callback_t callback; 235 fw_transaction_callback_t callback;
236 void *callback_data; 236 void *callback_data;
237}; 237};
@@ -291,8 +291,10 @@ struct fw_card {
291 int link_speed; 291 int link_speed;
292 int config_rom_generation; 292 int config_rom_generation;
293 293
294 /* We need to store up to 4 self IDs for each of a maximum of 63 294 /*
295 * devices, plus 3 words for the topology map header. */ 295 * We need to store up to 4 self IDs for each of a maximum of 63
296 * devices, plus 3 words for the topology map header.
297 */
296 int self_id_count; 298 int self_id_count;
297 u32 topology_map[252 + 3]; 299 u32 topology_map[252 + 3];
298 300
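
For reference, the array size follows directly from that comment: 63 devices times up to 4 self-ID quadlets each gives 252 quadlets, and the 3-quadlet topology map header accounts for the remaining "+ 3" in topology_map[252 + 3].
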
@@ -318,12 +320,14 @@ struct fw_card {
318struct fw_card *fw_card_get(struct fw_card *card); 320struct fw_card *fw_card_get(struct fw_card *card);
319void fw_card_put(struct fw_card *card); 321void fw_card_put(struct fw_card *card);
320 322
321/* The iso packet format allows for an immediate header/payload part 323/*
324 * The iso packet format allows for an immediate header/payload part
322 * stored in 'header' immediately after the packet info plus an 325 * stored in 'header' immediately after the packet info plus an
323 * indirect payload part that is pointed to by the 'payload' field. 326 * indirect payload part that is pointed to by the 'payload' field.
324 * Applications can use one or the other or both to implement simple 327 * Applications can use one or the other or both to implement simple
325 * low-bandwidth streaming (e.g. audio) or more advanced 328 * low-bandwidth streaming (e.g. audio) or more advanced
326 * scatter-gather streaming (e.g. assembling video frames automatically). */ 329 * scatter-gather streaming (e.g. assembling video frames automatically).
330 */
327 331
328struct fw_iso_packet { 332struct fw_iso_packet {
329 u16 payload_length; /* Length of indirect payload. */ 333 u16 payload_length; /* Length of indirect payload. */
@@ -352,11 +356,13 @@ typedef void (*fw_iso_callback_t) (struct fw_iso_context *context,
352 void *header, 356 void *header,
353 void *data); 357 void *data);
354 358
355/* An iso buffer is just a set of pages mapped for DMA in the 359/*
360 * An iso buffer is just a set of pages mapped for DMA in the
356 * specified direction. Since the pages are to be used for DMA, they 361 * specified direction. Since the pages are to be used for DMA, they
357 * are not mapped into the kernel virtual address space. We store the 362 * are not mapped into the kernel virtual address space. We store the
358 * DMA address in the page private. The helper function 363 * DMA address in the page private. The helper function
359 * fw_iso_buffer_map() will map the pages into a given vma. */ 364 * fw_iso_buffer_map() will map the pages into a given vma.
365 */
360 366
361struct fw_iso_buffer { 367struct fw_iso_buffer {
362 enum dma_data_direction direction; 368 enum dma_data_direction direction;
@@ -408,18 +414,22 @@ fw_iso_context_stop(struct fw_iso_context *ctx);
408struct fw_card_driver { 414struct fw_card_driver {
409 const char *name; 415 const char *name;
410 416
411 /* Enable the given card with the given initial config rom. 417 /*
418 * Enable the given card with the given initial config rom.
412 * This function is expected to activate the card, and either 419 * This function is expected to activate the card, and either
413 * enable the PHY or set the link_on bit and initiate a bus 420 * enable the PHY or set the link_on bit and initiate a bus
414 * reset. */ 421 * reset.
422 */
415 int (*enable) (struct fw_card *card, u32 *config_rom, size_t length); 423 int (*enable) (struct fw_card *card, u32 *config_rom, size_t length);
416 424
417 int (*update_phy_reg) (struct fw_card *card, int address, 425 int (*update_phy_reg) (struct fw_card *card, int address,
418 int clear_bits, int set_bits); 426 int clear_bits, int set_bits);
419 427
420 /* Update the config rom for an enabled card. This function 428 /*
429 * Update the config rom for an enabled card. This function
421 * should change the config rom that is presented on the bus 430 * should change the config rom that is presented on the bus
422 * and initiate a bus reset. */ 431 * and initiate a bus reset.
432 */
423 int (*set_config_rom) (struct fw_card *card, 433 int (*set_config_rom) (struct fw_card *card,
424 u32 *config_rom, size_t length); 434 u32 *config_rom, size_t length);
425 435
@@ -428,12 +438,14 @@ struct fw_card_driver {
428 /* Calling cancel is valid once a packet has been submitted. */ 438 /* Calling cancel is valid once a packet has been submitted. */
429 int (*cancel_packet) (struct fw_card *card, struct fw_packet *packet); 439 int (*cancel_packet) (struct fw_card *card, struct fw_packet *packet);
430 440
431 /* Allow the specified node ID to do direct DMA out and in of 441 /*
442 * Allow the specified node ID to do direct DMA out and in of
432 * host memory. The card will disable this for all nodes when 443 * host memory. The card will disable this for all nodes when
433 * a bus reset happens, so the driver needs to reenable this after 444 * a bus reset happens, so the driver needs to reenable this after
434 * a bus reset. Returns 0 on success, -ENODEV if the card 445 * a bus reset. Returns 0 on success, -ENODEV if the card
435 * doesn't support this, -ESTALE if the generation doesn't 446 * doesn't support this, -ESTALE if the generation doesn't
436 * match. */ 447 * match.
448 */
437 int (*enable_phys_dma) (struct fw_card *card, 449 int (*enable_phys_dma) (struct fw_card *card,
438 int node_id, int generation); 450 int node_id, int generation);
439 451
@@ -473,15 +485,15 @@ void fw_flush_transactions(struct fw_card *card);
473void fw_send_phy_config(struct fw_card *card, 485void fw_send_phy_config(struct fw_card *card,
474 int node_id, int generation, int gap_count); 486 int node_id, int generation, int gap_count);
475 487
476/* Called by the topology code to inform the device code of node 488/*
477 * activity; found, lost, or updated nodes */ 489 * Called by the topology code to inform the device code of node
490 * activity; found, lost, or updated nodes.
491 */
478void 492void
479fw_node_event(struct fw_card *card, struct fw_node *node, int event); 493fw_node_event(struct fw_card *card, struct fw_node *node, int event);
480 494
481/* API used by card level drivers */ 495/* API used by card level drivers */
482 496
483/* Do we need phy speed here also? If we add more args, maybe we
484 should go back to struct fw_card_info. */
485void 497void
486fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 498fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
487 struct device *device); 499 struct device *device);