path: root/drivers/firewire/fw-sbp2.c
author		Kristian Høgsberg <krh@redhat.com>	2007-05-07 20:33:32 -0400
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2007-05-10 12:24:13 -0400
commit		c781c06d119d04601727f2fbc30151e6760d536d (patch)
tree		1faf19acc6bc2a2a3b3bdae8368e395e75cd7518 /drivers/firewire/fw-sbp2.c
parent		e175569c4639872b5cf242c9d4a71cc40c5f3c29 (diff)
firewire: Clean up comment style.
Drop filenames from file preamble, drop editor annotations and use
standard indent style for block comments.

Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de> (fixed typo)
Diffstat (limited to 'drivers/firewire/fw-sbp2.c')
-rw-r--r--	drivers/firewire/fw-sbp2.c	101
1 file changed, 66 insertions(+), 35 deletions(-)
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 7ce9b811431a..eb3bddb162e4 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1,5 +1,5 @@
-/* -*- c-basic-offset: 8 -*-
- * fw-spb2.c -- SBP2 driver (SCSI over IEEE1394)
+/*
+ * SBP2 driver (SCSI over IEEE1394)
  *
  * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -18,7 +18,8 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-/* The basic structure of this driver is based the old storage driver,
+/*
+ * The basic structure of this driver is based on the old storage driver,
  * drivers/ieee1394/sbp2.c, originally written by
  * James Goodwin <jamesg@filanet.com>
  * with later contributions and ongoing maintenance from
@@ -60,11 +61,13 @@ struct sbp2_device {
 	u32 workarounds;
 	int login_id;
 
-	/* We cache these addresses and only update them once we've
+	/*
+	 * We cache these addresses and only update them once we've
 	 * logged in or reconnected to the sbp2 device. That way, any
 	 * IO to the device will automatically fail and get retried if
 	 * it happens in a window where the device is not ready to
-	 * handle it (e.g. after a bus reset but before we reconnect). */
+	 * handle it (e.g. after a bus reset but before we reconnect).
+	 */
 	int node_id;
 	int address_high;
 	int generation;
@@ -239,10 +242,14 @@ static const struct {
 		.model = ~0,
 		.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
 	},
-	/* There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+
+	/*
+	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
 	 * these iPods do not feature the read_capacity bug according
 	 * to one report. Read_capacity behaviour as well as model_id
-	 * could change due to Apple-supplied firmware updates though. */
+	 * could change due to Apple-supplied firmware updates though.
+	 */
+
 	/* iPod 4th generation. */ {
 		.firmware_revision = 0x0a2700,
 		.model = 0x000021,
@@ -398,9 +405,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	if (orb == NULL)
 		return -ENOMEM;
 
-	/* The sbp2 device is going to send a block read request to
-	 * read out the request from host memory, so map it for
-	 * dma. */
+	/*
+	 * The sbp2 device is going to send a block read request to
+	 * read out the request from host memory, so map it for dma.
+	 */
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
@@ -426,10 +434,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
 	orb->request.status_fifo.low = sd->address_handler.offset;
 
-	/* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
+	/*
+	 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
 	 * login and 1 second reconnect time. The reconnect setting
-	 * is probably fine, but the exclusive login should be an
-	 * option. */
+	 * is probably fine, but the exclusive login should be an option.
+	 */
 	if (function == SBP2_LOGIN_REQUEST) {
 		orb->request.misc |=
 			management_orb_exclusive |
@@ -592,8 +601,10 @@ static void sbp2_login(struct work_struct *work)
 		sbp2_send_management_orb(unit, sd->node_id, sd->generation,
 					 SBP2_LOGOUT_REQUEST, sd->login_id,
 					 NULL);
-		/* Set this back to sbp2_login so we fall back and
-		 * retry login on bus reset. */
+		/*
+		 * Set this back to sbp2_login so we fall back and
+		 * retry login on bus reset.
+		 */
 		PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
 	}
 	kref_put(&sd->kref, release_sbp2_device);
@@ -633,9 +644,11 @@ static int sbp2_probe(struct device *dev)
 		return -EBUSY;
 	}
 
-	/* Scan unit directory to get management agent address,
+	/*
+	 * Scan unit directory to get management agent address,
 	 * firmware revison and model. Initialize firmware_revision
-	 * and model to values that wont match anything in our table. */
+	 * and model to values that wont match anything in our table.
+	 */
 	firmware_revision = 0xff000000;
 	model = 0xff000000;
 	fw_csr_iterator_init(&ci, unit->directory);
@@ -673,9 +686,11 @@ static int sbp2_probe(struct device *dev)
 
 	get_device(&unit->device);
 
-	/* We schedule work to do the login so we can easily
+	/*
+	 * We schedule work to do the login so we can easily
 	 * reschedule retries. Always get the ref before scheduling
-	 * work.*/
+	 * work.
+	 */
 	INIT_DELAYED_WORK(&sd->work, sbp2_login);
 	if (schedule_delayed_work(&sd->work, 0))
 		kref_get(&sd->kref);
@@ -834,9 +849,11 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 		result = sbp2_status_to_sense_data(status_get_data(*status),
 						   orb->cmd->sense_buffer);
 	} else {
-		/* If the orb completes with status == NULL, something
+		/*
+		 * If the orb completes with status == NULL, something
 		 * went wrong, typically a bus reset happened mid-orb
-		 * or when sending the write (less likely). */
+		 * or when sending the write (less likely).
+		 */
 		result = DID_BUS_BUSY << 16;
 	}
 
@@ -878,11 +895,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
 			   orb->cmd->sc_data_direction);
 
-	/* Handle the special case where there is only one element in
+	/*
+	 * Handle the special case where there is only one element in
 	 * the scatter list by converting it to an immediate block
 	 * request. This is also a workaround for broken devices such
 	 * as the second generation iPod which doesn't support page
-	 * tables. */
+	 * tables.
+	 */
 	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
 		orb->request.data_descriptor.high = sd->address_high;
 		orb->request.data_descriptor.low = sg_dma_address(sg);
@@ -891,8 +910,10 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 		return;
 	}
 
-	/* Convert the scatterlist to an sbp2 page table. If any
-	 * scatterlist entries are too big for sbp2 we split the as we go. */
+	/*
+	 * Convert the scatterlist to an sbp2 page table. If any
+	 * scatterlist entries are too big for sbp2 we split the as we go.
+	 */
 	for (i = 0, j = 0; i < count; i++) {
 		sg_len = sg_dma_len(sg + i);
 		sg_addr = sg_dma_address(sg + i);
@@ -908,11 +929,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 
 	size = sizeof orb->page_table[0] * j;
 
-	/* The data_descriptor pointer is the one case where we need
+	/*
+	 * The data_descriptor pointer is the one case where we need
 	 * to fill in the node ID part of the address. All other
 	 * pointers assume that the data referenced reside on the
 	 * initiator (i.e. us), but data_descriptor can refer to data
-	 * on other nodes so we need to put our ID in descriptor.high. */
+	 * on other nodes so we need to put our ID in descriptor.high.
+	 */
 
 	orb->page_table_bus =
 		dma_map_single(device->card->device, orb->page_table,
@@ -933,8 +956,10 @@ static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
 	struct fw_device *device = fw_device(unit->device.parent);
 	struct sbp2_device *sd = unit->device.driver_data;
 
-	/* As for map_scatterlist, we need to fill in the high bits of
-	 * the data_descriptor pointer. */
+	/*
+	 * As for map_scatterlist, we need to fill in the high bits of
+	 * the data_descriptor pointer.
+	 */
 
 	orb->request_buffer_bus =
 		dma_map_single(device->card->device,
@@ -956,8 +981,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	struct sbp2_device *sd = unit->device.driver_data;
 	struct sbp2_command_orb *orb;
 
-	/* Bidirectional commands are not yet implemented, and unknown
-	 * transfer direction not handled. */
+	/*
+	 * Bidirectional commands are not yet implemented, and unknown
+	 * transfer direction not handled.
+	 */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
 		goto fail_alloc;
@@ -983,10 +1010,12 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 
 	orb->request.next.high = SBP2_ORB_NULL;
 	orb->request.next.low = 0x0;
-	/* At speed 100 we can do 512 bytes per packet, at speed 200,
+	/*
+	 * At speed 100 we can do 512 bytes per packet, at speed 200,
 	 * 1024 bytes per packet etc. The SBP-2 max_payload field
 	 * specifies the max payload size as 2 ^ (max_payload + 2), so
-	 * if we set this to max_speed + 7, we get the right value. */
+	 * if we set this to max_speed + 7, we get the right value.
+	 */
 	orb->request.misc =
 		command_orb_max_payload(device->node->max_speed + 7) |
 		command_orb_speed(device->node->max_speed) |
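
Aside on the max_payload arithmetic in the comment above (a minimal standalone sketch, not part of this patch; sbp2_payload_bytes is a hypothetical helper named here only for illustration): per the comment, a packet carries 512 bytes at S100 and doubles with each speed step, i.e. 512 << speed, while SBP-2 encodes the limit as 2 ^ (max_payload + 2); so max_payload = speed + 7 gives 2 ^ (speed + 9) = 512 << speed.

	#include <assert.h>

	/* Hypothetical helper: decode an SBP-2 max_payload field into bytes. */
	static unsigned int sbp2_payload_bytes(unsigned int max_payload)
	{
		return 1u << (max_payload + 2);	/* SBP-2: 2 ^ (max_payload + 2) */
	}

	int main(void)
	{
		unsigned int speed;

		/* speed 0..3 correspond to S100, S200, S400, S800 */
		for (speed = 0; speed <= 3; speed++)
			assert(sbp2_payload_bytes(speed + 7) == 512u << speed);
		return 0;
	}

The asserts should hold for S100 through S800, matching the max_speed + 7 value used below.
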
@@ -1002,9 +1031,11 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	if (cmd->use_sg) {
 		sbp2_command_orb_map_scatterlist(orb);
 	} else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) {
-		/* FIXME: Need to split this into a sg list... but
+		/*
+		 * FIXME: Need to split this into a sg list... but
 		 * could we get the scsi or blk layer to do that by
-		 * reporting our max supported block size? */
+		 * reporting our max supported block size?
+		 */
 		fw_error("command > 64k\n");
 		goto fail_bufflen;
 	} else if (cmd->request_bufflen > 0) {