author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2006-11-02 15:16:08 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2006-12-07 16:59:34 -0500
commit     138c8af8649ceea38aa52323b9326c10068bb597 (patch)
tree       aab41a5b5e54292133f431e83052bd296829ef47 /drivers/ieee1394/sbp2.c
parent     9117c6dc430578748ce732c6ff25eed8ed501a97 (diff)

ieee1394: sbp2: more concise names for types and variables

"struct scsi_id_instance_data" represents a logical unit.  Rename it to
"struct sbp2_lu", and "scsi_id" to "lu".  Rename some other variables too.
Wrap almost all lines at no more than 80 columns.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

Diffstat (limited to 'drivers/ieee1394/sbp2.c')
-rw-r--r--  drivers/ieee1394/sbp2.c | 1158
1 file changed, 567 insertions(+), 591 deletions(-)
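
Before the full diff below, here is a minimal, self-contained C sketch of what
the rename amounts to at a typical declaration and call site.  This is not code
from the driver: the member list is abbreviated, the types are simplified, and
the parameter names and printed values are invented for illustration.  It only
shows the pattern the commit applies throughout, i.e. "struct
scsi_id_instance_data *scsi_id" becoming "struct sbp2_lu *lu", which is what
lets most of the rewrapped lines fit within 80 columns.

/*
 * Illustrative sketch only -- not taken from sbp2.c.
 */
#include <stdio.h>

struct sbp2_lu {			/* was: struct scsi_id_instance_data */
	unsigned long long command_block_agent_addr;
	int state;
};

/* was (roughly): static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) */
static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
{
	/* was: scsi_id->command_block_agent_addr */
	printf("agent at %#llx, wait=%d\n", lu->command_block_agent_addr, wait);
	return 0;
}

int main(void)
{
	struct sbp2_lu lu = {
		.command_block_agent_addr = 0xfffff0010000ULL,	/* made-up value */
		.state = 0,
	};

	return sbp2_agent_reset(&lu, 1);
}
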
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 34280c7eb73d..a7fb588d3ca7 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -189,24 +189,23 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
189/* 189/*
190 * Globals 190 * Globals
191 */ 191 */
192static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *, u32); 192static void sbp2scsi_complete_all_commands(struct sbp2_lu *, u32);
193static void sbp2scsi_complete_command(struct scsi_id_instance_data *, u32, 193static void sbp2scsi_complete_command(struct sbp2_lu *, u32, struct scsi_cmnd *,
194 struct scsi_cmnd *,
195 void (*)(struct scsi_cmnd *)); 194 void (*)(struct scsi_cmnd *));
196static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *); 195static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *);
197static int sbp2_start_device(struct scsi_id_instance_data *); 196static int sbp2_start_device(struct sbp2_lu *);
198static void sbp2_remove_device(struct scsi_id_instance_data *); 197static void sbp2_remove_device(struct sbp2_lu *);
199static int sbp2_login_device(struct scsi_id_instance_data *); 198static int sbp2_login_device(struct sbp2_lu *);
200static int sbp2_reconnect_device(struct scsi_id_instance_data *); 199static int sbp2_reconnect_device(struct sbp2_lu *);
201static int sbp2_logout_device(struct scsi_id_instance_data *); 200static int sbp2_logout_device(struct sbp2_lu *);
202static void sbp2_host_reset(struct hpsb_host *); 201static void sbp2_host_reset(struct hpsb_host *);
203static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *, 202static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *,
204 u64, size_t, u16); 203 u64, size_t, u16);
205static int sbp2_agent_reset(struct scsi_id_instance_data *, int); 204static int sbp2_agent_reset(struct sbp2_lu *, int);
206static void sbp2_parse_unit_directory(struct scsi_id_instance_data *, 205static void sbp2_parse_unit_directory(struct sbp2_lu *,
207 struct unit_directory *); 206 struct unit_directory *);
208static int sbp2_set_busy_timeout(struct scsi_id_instance_data *); 207static int sbp2_set_busy_timeout(struct sbp2_lu *);
209static int sbp2_max_speed_and_size(struct scsi_id_instance_data *); 208static int sbp2_max_speed_and_size(struct sbp2_lu *);
210 209
211 210
212static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; 211static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
@@ -369,8 +368,6 @@ static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
369 368
370 for (length = (length >> 2); length--; ) 369 for (length = (length >> 2); length--; )
371 temp[length] = be32_to_cpu(temp[length]); 370 temp[length] = be32_to_cpu(temp[length]);
372
373 return;
374} 371}
375 372
376/* 373/*
@@ -382,8 +379,6 @@ static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
382 379
383 for (length = (length >> 2); length--; ) 380 for (length = (length >> 2); length--; )
384 temp[length] = cpu_to_be32(temp[length]); 381 temp[length] = cpu_to_be32(temp[length]);
385
386 return;
387} 382}
388#else /* BIG_ENDIAN */ 383#else /* BIG_ENDIAN */
389/* Why waste the cpu cycles? */ 384/* Why waste the cpu cycles? */
@@ -397,18 +392,17 @@ static DECLARE_WAIT_QUEUE_HEAD(sbp2_access_wq);
397 * Waits for completion of an SBP-2 access request. 392 * Waits for completion of an SBP-2 access request.
398 * Returns nonzero if timed out or prematurely interrupted. 393 * Returns nonzero if timed out or prematurely interrupted.
399 */ 394 */
400static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id, 395static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
401 int timeout)
402{ 396{
403 long leftover; 397 long leftover;
404 398
405 leftover = wait_event_interruptible_timeout( 399 leftover = wait_event_interruptible_timeout(
406 sbp2_access_wq, scsi_id->access_complete, timeout); 400 sbp2_access_wq, lu->access_complete, timeout);
407 scsi_id->access_complete = 0; 401 lu->access_complete = 0;
408 return leftover <= 0; 402 return leftover <= 0;
409} 403}
410 404
411static void sbp2_free_packet(struct hpsb_packet *packet) 405static void sbp2_free_packet(void *packet)
412{ 406{
413 hpsb_free_tlabel(packet); 407 hpsb_free_tlabel(packet);
414 hpsb_free_packet(packet); 408 hpsb_free_packet(packet);
@@ -419,121 +413,107 @@ static void sbp2_free_packet(struct hpsb_packet *packet)
419 * subaction and returns immediately. Can be used from atomic context. 413 * subaction and returns immediately. Can be used from atomic context.
420 */ 414 */
421static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, 415static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
422 quadlet_t *buffer, size_t length) 416 quadlet_t *buf, size_t len)
423{ 417{
424 struct hpsb_packet *packet; 418 struct hpsb_packet *packet;
425 419
426 packet = hpsb_make_writepacket(ne->host, ne->nodeid, 420 packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
427 addr, buffer, length);
428 if (!packet) 421 if (!packet)
429 return -ENOMEM; 422 return -ENOMEM;
430 423
431 hpsb_set_packet_complete_task(packet, 424 hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
432 (void (*)(void *))sbp2_free_packet,
433 packet);
434
435 hpsb_node_fill_packet(ne, packet); 425 hpsb_node_fill_packet(ne, packet);
436
437 if (hpsb_send_packet(packet) < 0) { 426 if (hpsb_send_packet(packet) < 0) {
438 sbp2_free_packet(packet); 427 sbp2_free_packet(packet);
439 return -EIO; 428 return -EIO;
440 } 429 }
441
442 return 0; 430 return 0;
443} 431}
444 432
445static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, 433static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
446 u64 offset, quadlet_t *data, size_t len) 434 quadlet_t *data, size_t len)
447{ 435{
448 /* 436 /* There is a small window after a bus reset within which the node
449 * There is a small window after a bus reset within which the node 437 * entry's generation is current but the reconnect wasn't completed. */
450 * entry's generation is current but the reconnect wasn't completed. 438 if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
451 */
452 if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
453 return; 439 return;
454 440
455 if (hpsb_node_write(scsi_id->ne, 441 if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
456 scsi_id->command_block_agent_addr + offset,
457 data, len)) 442 data, len))
458 SBP2_ERR("sbp2util_notify_fetch_agent failed."); 443 SBP2_ERR("sbp2util_notify_fetch_agent failed.");
459 /* 444
460 * Now accept new SCSI commands, unless a bus reset happended during 445 /* Now accept new SCSI commands, unless a bus reset happended during
461 * hpsb_node_write. 446 * hpsb_node_write. */
462 */ 447 if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
463 if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET)) 448 scsi_unblock_requests(lu->shost);
464 scsi_unblock_requests(scsi_id->scsi_host);
465} 449}
466 450
467static void sbp2util_write_orb_pointer(struct work_struct *work) 451static void sbp2util_write_orb_pointer(struct work_struct *work)
468{ 452{
469 quadlet_t data[2]; 453 quadlet_t data[2];
470 454
471 data[0] = ORB_SET_NODE_ID( 455 data[0] = ORB_SET_NODE_ID((container_of(work, struct sbp2_lu, protocol_work))->hi->host->node_id);
472 (container_of(work, struct scsi_id_instance_data, protocol_work))->hi->host->node_id); 456 data[1] = (container_of(work, struct sbp2_lu, protocol_work))->last_orb_dma;
473 data[1] = (container_of(work, struct scsi_id_instance_data, protocol_work))->last_orb_dma;
474 sbp2util_cpu_to_be32_buffer(data, 8); 457 sbp2util_cpu_to_be32_buffer(data, 8);
475 sbp2util_notify_fetch_agent(container_of(work, struct scsi_id_instance_data, protocol_work), SBP2_ORB_POINTER_OFFSET, data, 8); 458 sbp2util_notify_fetch_agent(container_of(work, struct sbp2_lu, protocol_work), SBP2_ORB_POINTER_OFFSET, data, 8);
476} 459}
477 460
478static void sbp2util_write_doorbell(struct work_struct *work) 461static void sbp2util_write_doorbell(struct work_struct *work)
479{ 462{
480 sbp2util_notify_fetch_agent(container_of(work, struct scsi_id_instance_data, protocol_work), SBP2_DOORBELL_OFFSET, NULL, 4); 463 sbp2util_notify_fetch_agent(container_of(work, struct sbp2_lu, protocol_work), SBP2_DOORBELL_OFFSET, NULL, 4);
481} 464}
482 465
483static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id) 466static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
484{ 467{
485 struct sbp2_fwhost_info *hi = scsi_id->hi; 468 struct sbp2_fwhost_info *hi = lu->hi;
486 int i; 469 int i;
487 unsigned long flags, orbs; 470 unsigned long flags, orbs;
488 struct sbp2_command_info *command; 471 struct sbp2_command_info *cmd;
489 472
490 orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS; 473 orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
491 474
492 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 475 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
493 for (i = 0; i < orbs; i++) { 476 for (i = 0; i < orbs; i++) {
494 command = kzalloc(sizeof(*command), GFP_ATOMIC); 477 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
495 if (!command) { 478 if (!cmd) {
496 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, 479 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
497 flags);
498 return -ENOMEM; 480 return -ENOMEM;
499 } 481 }
500 command->command_orb_dma = 482 cmd->command_orb_dma = pci_map_single(hi->host->pdev,
501 pci_map_single(hi->host->pdev, &command->command_orb, 483 &cmd->command_orb,
502 sizeof(struct sbp2_command_orb), 484 sizeof(struct sbp2_command_orb),
503 PCI_DMA_TODEVICE); 485 PCI_DMA_TODEVICE);
504 command->sge_dma = 486 cmd->sge_dma = pci_map_single(hi->host->pdev,
505 pci_map_single(hi->host->pdev, 487 &cmd->scatter_gather_element,
506 &command->scatter_gather_element, 488 sizeof(cmd->scatter_gather_element),
507 sizeof(command->scatter_gather_element), 489 PCI_DMA_BIDIRECTIONAL);
508 PCI_DMA_BIDIRECTIONAL); 490 INIT_LIST_HEAD(&cmd->list);
509 INIT_LIST_HEAD(&command->list); 491 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
510 list_add_tail(&command->list, &scsi_id->cmd_orb_completed);
511 } 492 }
512 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 493 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
513 return 0; 494 return 0;
514} 495}
515 496
516static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id) 497static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
517{ 498{
518 struct hpsb_host *host = scsi_id->hi->host; 499 struct hpsb_host *host = lu->hi->host;
519 struct list_head *lh, *next; 500 struct list_head *lh, *next;
520 struct sbp2_command_info *command; 501 struct sbp2_command_info *cmd;
521 unsigned long flags; 502 unsigned long flags;
522 503
523 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 504 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
524 if (!list_empty(&scsi_id->cmd_orb_completed)) { 505 if (!list_empty(&lu->cmd_orb_completed))
525 list_for_each_safe(lh, next, &scsi_id->cmd_orb_completed) { 506 list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
526 command = list_entry(lh, struct sbp2_command_info, list); 507 cmd = list_entry(lh, struct sbp2_command_info, list);
527 pci_unmap_single(host->pdev, command->command_orb_dma, 508 pci_unmap_single(host->pdev, cmd->command_orb_dma,
528 sizeof(struct sbp2_command_orb), 509 sizeof(struct sbp2_command_orb),
529 PCI_DMA_TODEVICE); 510 PCI_DMA_TODEVICE);
530 pci_unmap_single(host->pdev, command->sge_dma, 511 pci_unmap_single(host->pdev, cmd->sge_dma,
531 sizeof(command->scatter_gather_element), 512 sizeof(cmd->scatter_gather_element),
532 PCI_DMA_BIDIRECTIONAL); 513 PCI_DMA_BIDIRECTIONAL);
533 kfree(command); 514 kfree(cmd);
534 } 515 }
535 } 516 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
536 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags);
537 return; 517 return;
538} 518}
539 519
@@ -542,116 +522,114 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
542 * Only looks at the in-use list. 522 * Only looks at the in-use list.
543 */ 523 */
544static struct sbp2_command_info *sbp2util_find_command_for_orb( 524static struct sbp2_command_info *sbp2util_find_command_for_orb(
545 struct scsi_id_instance_data *scsi_id, dma_addr_t orb) 525 struct sbp2_lu *lu, dma_addr_t orb)
546{ 526{
547 struct sbp2_command_info *command; 527 struct sbp2_command_info *cmd;
548 unsigned long flags; 528 unsigned long flags;
549 529
550 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 530 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
551 if (!list_empty(&scsi_id->cmd_orb_inuse)) { 531 if (!list_empty(&lu->cmd_orb_inuse))
552 list_for_each_entry(command, &scsi_id->cmd_orb_inuse, list) { 532 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
553 if (command->command_orb_dma == orb) { 533 if (cmd->command_orb_dma == orb) {
554 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 534 spin_unlock_irqrestore(
555 return command; 535 &lu->cmd_orb_lock, flags);
536 return cmd;
556 } 537 }
557 } 538 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
558 }
559 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags);
560 return NULL; 539 return NULL;
561} 540}
562 541
563/* 542/*
564 * Finds the sbp2_command for a given outstanding SCpnt. 543 * Finds the sbp2_command for a given outstanding SCpnt.
565 * Only looks at the in-use list. 544 * Only looks at the in-use list.
566 * Must be called with scsi_id->cmd_orb_lock held. 545 * Must be called with lu->cmd_orb_lock held.
567 */ 546 */
568static struct sbp2_command_info *sbp2util_find_command_for_SCpnt( 547static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
569 struct scsi_id_instance_data *scsi_id, void *SCpnt) 548 struct sbp2_lu *lu, void *SCpnt)
570{ 549{
571 struct sbp2_command_info *command; 550 struct sbp2_command_info *cmd;
572 551
573 if (!list_empty(&scsi_id->cmd_orb_inuse)) 552 if (!list_empty(&lu->cmd_orb_inuse))
574 list_for_each_entry(command, &scsi_id->cmd_orb_inuse, list) 553 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
575 if (command->Current_SCpnt == SCpnt) 554 if (cmd->Current_SCpnt == SCpnt)
576 return command; 555 return cmd;
577 return NULL; 556 return NULL;
578} 557}
579 558
580static struct sbp2_command_info *sbp2util_allocate_command_orb( 559static struct sbp2_command_info *sbp2util_allocate_command_orb(
581 struct scsi_id_instance_data *scsi_id, 560 struct sbp2_lu *lu,
582 struct scsi_cmnd *Current_SCpnt, 561 struct scsi_cmnd *Current_SCpnt,
583 void (*Current_done)(struct scsi_cmnd *)) 562 void (*Current_done)(struct scsi_cmnd *))
584{ 563{
585 struct list_head *lh; 564 struct list_head *lh;
586 struct sbp2_command_info *command = NULL; 565 struct sbp2_command_info *cmd = NULL;
587 unsigned long flags; 566 unsigned long flags;
588 567
589 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 568 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
590 if (!list_empty(&scsi_id->cmd_orb_completed)) { 569 if (!list_empty(&lu->cmd_orb_completed)) {
591 lh = scsi_id->cmd_orb_completed.next; 570 lh = lu->cmd_orb_completed.next;
592 list_del(lh); 571 list_del(lh);
593 command = list_entry(lh, struct sbp2_command_info, list); 572 cmd = list_entry(lh, struct sbp2_command_info, list);
594 command->Current_done = Current_done; 573 cmd->Current_done = Current_done;
595 command->Current_SCpnt = Current_SCpnt; 574 cmd->Current_SCpnt = Current_SCpnt;
596 list_add_tail(&command->list, &scsi_id->cmd_orb_inuse); 575 list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
597 } else { 576 } else
598 SBP2_ERR("%s: no orbs available", __FUNCTION__); 577 SBP2_ERR("%s: no orbs available", __FUNCTION__);
599 } 578 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
600 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 579 return cmd;
601 return command;
602} 580}
603 581
604static void sbp2util_free_command_dma(struct sbp2_command_info *command) 582static void sbp2util_free_command_dma(struct sbp2_command_info *cmd)
605{ 583{
606 struct scsi_id_instance_data *scsi_id = 584 struct sbp2_lu *lu = (struct sbp2_lu *)
607 (struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0]; 585 cmd->Current_SCpnt->device->host->hostdata[0];
608 struct hpsb_host *host; 586 struct hpsb_host *host;
609 587
610 if (!scsi_id) { 588 if (!lu) {
611 SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__); 589 SBP2_ERR("%s: lu == NULL", __FUNCTION__);
612 return; 590 return;
613 } 591 }
614 592
615 host = scsi_id->ud->ne->host; 593 host = lu->ud->ne->host;
616 594
617 if (command->cmd_dma) { 595 if (cmd->cmd_dma) {
618 if (command->dma_type == CMD_DMA_SINGLE) 596 if (cmd->dma_type == CMD_DMA_SINGLE)
619 pci_unmap_single(host->pdev, command->cmd_dma, 597 pci_unmap_single(host->pdev, cmd->cmd_dma,
620 command->dma_size, command->dma_dir); 598 cmd->dma_size, cmd->dma_dir);
621 else if (command->dma_type == CMD_DMA_PAGE) 599 else if (cmd->dma_type == CMD_DMA_PAGE)
622 pci_unmap_page(host->pdev, command->cmd_dma, 600 pci_unmap_page(host->pdev, cmd->cmd_dma,
623 command->dma_size, command->dma_dir); 601 cmd->dma_size, cmd->dma_dir);
624 /* XXX: Check for CMD_DMA_NONE bug */ 602 /* XXX: Check for CMD_DMA_NONE bug */
625 command->dma_type = CMD_DMA_NONE; 603 cmd->dma_type = CMD_DMA_NONE;
626 command->cmd_dma = 0; 604 cmd->cmd_dma = 0;
627 } 605 }
628 606
629 if (command->sge_buffer) { 607 if (cmd->sge_buffer) {
630 pci_unmap_sg(host->pdev, command->sge_buffer, 608 pci_unmap_sg(host->pdev, cmd->sge_buffer,
631 command->dma_size, command->dma_dir); 609 cmd->dma_size, cmd->dma_dir);
632 command->sge_buffer = NULL; 610 cmd->sge_buffer = NULL;
633 } 611 }
634} 612}
635 613
636/* 614/*
637 * This function moves a command to the completed orb list. 615 * This function moves a command to the completed orb list.
638 * Must be called with scsi_id->cmd_orb_lock held. 616 * Must be called with lu->cmd_orb_lock held.
639 */ 617 */
640static void sbp2util_mark_command_completed( 618static void sbp2util_mark_command_completed(
641 struct scsi_id_instance_data *scsi_id, 619 struct sbp2_lu *lu,
642 struct sbp2_command_info *command) 620 struct sbp2_command_info *cmd)
643{ 621{
644 list_del(&command->list); 622 list_del(&cmd->list);
645 sbp2util_free_command_dma(command); 623 sbp2util_free_command_dma(cmd);
646 list_add_tail(&command->list, &scsi_id->cmd_orb_completed); 624 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
647} 625}
648 626
649/* 627/*
650 * Is scsi_id valid? Is the 1394 node still present? 628 * Is lu valid? Is the 1394 node still present?
651 */ 629 */
652static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id) 630static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
653{ 631{
654 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; 632 return lu && lu->ne && !lu->ne->in_limbo;
655} 633}
656 634
657/********************************************* 635/*********************************************
@@ -661,7 +639,7 @@ static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_
661static int sbp2_probe(struct device *dev) 639static int sbp2_probe(struct device *dev)
662{ 640{
663 struct unit_directory *ud; 641 struct unit_directory *ud;
664 struct scsi_id_instance_data *scsi_id; 642 struct sbp2_lu *lu;
665 643
666 ud = container_of(dev, struct unit_directory, device); 644 ud = container_of(dev, struct unit_directory, device);
667 645
@@ -670,60 +648,58 @@ static int sbp2_probe(struct device *dev)
670 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY) 648 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
671 return -ENODEV; 649 return -ENODEV;
672 650
673 scsi_id = sbp2_alloc_device(ud); 651 lu = sbp2_alloc_device(ud);
674 652 if (!lu)
675 if (!scsi_id)
676 return -ENOMEM; 653 return -ENOMEM;
677 654
678 sbp2_parse_unit_directory(scsi_id, ud); 655 sbp2_parse_unit_directory(lu, ud);
679 656 return sbp2_start_device(lu);
680 return sbp2_start_device(scsi_id);
681} 657}
682 658
683static int sbp2_remove(struct device *dev) 659static int sbp2_remove(struct device *dev)
684{ 660{
685 struct unit_directory *ud; 661 struct unit_directory *ud;
686 struct scsi_id_instance_data *scsi_id; 662 struct sbp2_lu *lu;
687 struct scsi_device *sdev; 663 struct scsi_device *sdev;
688 664
689 ud = container_of(dev, struct unit_directory, device); 665 ud = container_of(dev, struct unit_directory, device);
690 scsi_id = ud->device.driver_data; 666 lu = ud->device.driver_data;
691 if (!scsi_id) 667 if (!lu)
692 return 0; 668 return 0;
693 669
694 if (scsi_id->scsi_host) { 670 if (lu->shost) {
695 /* Get rid of enqueued commands if there is no chance to 671 /* Get rid of enqueued commands if there is no chance to
696 * send them. */ 672 * send them. */
697 if (!sbp2util_node_is_available(scsi_id)) 673 if (!sbp2util_node_is_available(lu))
698 sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); 674 sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
699 /* scsi_remove_device() may trigger shutdown functions of SCSI 675 /* scsi_remove_device() may trigger shutdown functions of SCSI
700 * highlevel drivers which would deadlock if blocked. */ 676 * highlevel drivers which would deadlock if blocked. */
701 atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN); 677 atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
702 scsi_unblock_requests(scsi_id->scsi_host); 678 scsi_unblock_requests(lu->shost);
703 } 679 }
704 sdev = scsi_id->sdev; 680 sdev = lu->sdev;
705 if (sdev) { 681 if (sdev) {
706 scsi_id->sdev = NULL; 682 lu->sdev = NULL;
707 scsi_remove_device(sdev); 683 scsi_remove_device(sdev);
708 } 684 }
709 685
710 sbp2_logout_device(scsi_id); 686 sbp2_logout_device(lu);
711 sbp2_remove_device(scsi_id); 687 sbp2_remove_device(lu);
712 688
713 return 0; 689 return 0;
714} 690}
715 691
716static int sbp2_update(struct unit_directory *ud) 692static int sbp2_update(struct unit_directory *ud)
717{ 693{
718 struct scsi_id_instance_data *scsi_id = ud->device.driver_data; 694 struct sbp2_lu *lu = ud->device.driver_data;
719 695
720 if (sbp2_reconnect_device(scsi_id)) { 696 if (sbp2_reconnect_device(lu)) {
721 /* Reconnect has failed. Perhaps we didn't reconnect fast 697 /* Reconnect has failed. Perhaps we didn't reconnect fast
722 * enough. Try a regular login, but first log out just in 698 * enough. Try a regular login, but first log out just in
723 * case of any weirdness. */ 699 * case of any weirdness. */
724 sbp2_logout_device(scsi_id); 700 sbp2_logout_device(lu);
725 701
726 if (sbp2_login_device(scsi_id)) { 702 if (sbp2_login_device(lu)) {
727 /* Login failed too, just fail, and the backend 703 /* Login failed too, just fail, and the backend
728 * will call our sbp2_remove for us */ 704 * will call our sbp2_remove for us */
729 SBP2_ERR("Failed to reconnect to sbp2 device!"); 705 SBP2_ERR("Failed to reconnect to sbp2 device!");
@@ -731,58 +707,59 @@ static int sbp2_update(struct unit_directory *ud)
731 } 707 }
732 } 708 }
733 709
734 sbp2_set_busy_timeout(scsi_id); 710 sbp2_set_busy_timeout(lu);
735 sbp2_agent_reset(scsi_id, 1); 711 sbp2_agent_reset(lu, 1);
736 sbp2_max_speed_and_size(scsi_id); 712 sbp2_max_speed_and_size(lu);
737 713
738 /* Complete any pending commands with busy (so they get retried) 714 /* Complete any pending commands with busy (so they get retried)
739 * and remove them from our queue. */ 715 * and remove them from our queue. */
740 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); 716 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
741 717
742 /* Accept new commands unless there was another bus reset in the 718 /* Accept new commands unless there was another bus reset in the
743 * meantime. */ 719 * meantime. */
744 if (hpsb_node_entry_valid(scsi_id->ne)) { 720 if (hpsb_node_entry_valid(lu->ne)) {
745 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 721 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
746 scsi_unblock_requests(scsi_id->scsi_host); 722 scsi_unblock_requests(lu->shost);
747 } 723 }
748 return 0; 724 return 0;
749} 725}
750 726
751static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud) 727static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
752{ 728{
753 struct sbp2_fwhost_info *hi; 729 struct sbp2_fwhost_info *hi;
754 struct Scsi_Host *scsi_host = NULL; 730 struct Scsi_Host *shost = NULL;
755 struct scsi_id_instance_data *scsi_id = NULL; 731 struct sbp2_lu *lu = NULL;
756 732
757 scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL); 733 lu = kzalloc(sizeof(*lu), GFP_KERNEL);
758 if (!scsi_id) { 734 if (!lu) {
759 SBP2_ERR("failed to create scsi_id"); 735 SBP2_ERR("failed to create lu");
760 goto failed_alloc; 736 goto failed_alloc;
761 } 737 }
762 738
763 scsi_id->ne = ud->ne; 739 lu->ne = ud->ne;
764 scsi_id->ud = ud; 740 lu->ud = ud;
765 scsi_id->speed_code = IEEE1394_SPEED_100; 741 lu->speed_code = IEEE1394_SPEED_100;
766 scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100]; 742 lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
767 scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE; 743 lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
768 INIT_LIST_HEAD(&scsi_id->cmd_orb_inuse); 744 INIT_LIST_HEAD(&lu->cmd_orb_inuse);
769 INIT_LIST_HEAD(&scsi_id->cmd_orb_completed); 745 INIT_LIST_HEAD(&lu->cmd_orb_completed);
770 INIT_LIST_HEAD(&scsi_id->scsi_list); 746 INIT_LIST_HEAD(&lu->lu_list);
771 spin_lock_init(&scsi_id->cmd_orb_lock); 747 spin_lock_init(&lu->cmd_orb_lock);
772 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 748 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
773 INIT_WORK(&scsi_id->protocol_work, NULL); 749 INIT_WORK(&lu->protocol_work, NULL);
774 750
775 ud->device.driver_data = scsi_id; 751 ud->device.driver_data = lu;
776 752
777 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host); 753 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
778 if (!hi) { 754 if (!hi) {
779 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi)); 755 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
756 sizeof(*hi));
780 if (!hi) { 757 if (!hi) {
781 SBP2_ERR("failed to allocate hostinfo"); 758 SBP2_ERR("failed to allocate hostinfo");
782 goto failed_alloc; 759 goto failed_alloc;
783 } 760 }
784 hi->host = ud->ne->host; 761 hi->host = ud->ne->host;
785 INIT_LIST_HEAD(&hi->scsi_ids); 762 INIT_LIST_HEAD(&hi->logical_units);
786 763
787#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 764#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
788 /* Handle data movement if physical dma is not 765 /* Handle data movement if physical dma is not
@@ -802,9 +779,9 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
802 goto failed_alloc; 779 goto failed_alloc;
803 } 780 }
804 781
805 scsi_id->hi = hi; 782 lu->hi = hi;
806 783
807 list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids); 784 list_add_tail(&lu->lu_list, &hi->logical_units);
808 785
809 /* Register the status FIFO address range. We could use the same FIFO 786 /* Register the status FIFO address range. We could use the same FIFO
810 * for targets at different nodes. However we need different FIFOs per 787 * for targets at different nodes. However we need different FIFOs per
@@ -814,197 +791,190 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
814 * then be performed as unified transactions. This slightly reduces 791 * then be performed as unified transactions. This slightly reduces
815 * bandwidth usage, and some Prolific based devices seem to require it. 792 * bandwidth usage, and some Prolific based devices seem to require it.
816 */ 793 */
817 scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( 794 lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
818 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 795 &sbp2_highlevel, ud->ne->host, &sbp2_ops,
819 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 796 sizeof(struct sbp2_status_block), sizeof(quadlet_t),
820 ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END); 797 ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
821 if (scsi_id->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) { 798 if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
822 SBP2_ERR("failed to allocate status FIFO address range"); 799 SBP2_ERR("failed to allocate status FIFO address range");
823 goto failed_alloc; 800 goto failed_alloc;
824 } 801 }
825 802
826 scsi_host = scsi_host_alloc(&sbp2_shost_template, 803 shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
827 sizeof(unsigned long)); 804 if (!shost) {
828 if (!scsi_host) {
829 SBP2_ERR("failed to register scsi host"); 805 SBP2_ERR("failed to register scsi host");
830 goto failed_alloc; 806 goto failed_alloc;
831 } 807 }
832 808
833 scsi_host->hostdata[0] = (unsigned long)scsi_id; 809 shost->hostdata[0] = (unsigned long)lu;
834 810
835 if (!scsi_add_host(scsi_host, &ud->device)) { 811 if (!scsi_add_host(shost, &ud->device)) {
836 scsi_id->scsi_host = scsi_host; 812 lu->shost = shost;
837 return scsi_id; 813 return lu;
838 } 814 }
839 815
840 SBP2_ERR("failed to add scsi host"); 816 SBP2_ERR("failed to add scsi host");
841 scsi_host_put(scsi_host); 817 scsi_host_put(shost);
842 818
843failed_alloc: 819failed_alloc:
844 sbp2_remove_device(scsi_id); 820 sbp2_remove_device(lu);
845 return NULL; 821 return NULL;
846} 822}
847 823
848static void sbp2_host_reset(struct hpsb_host *host) 824static void sbp2_host_reset(struct hpsb_host *host)
849{ 825{
850 struct sbp2_fwhost_info *hi; 826 struct sbp2_fwhost_info *hi;
851 struct scsi_id_instance_data *scsi_id; 827 struct sbp2_lu *lu;
852 828
853 hi = hpsb_get_hostinfo(&sbp2_highlevel, host); 829 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
854 if (!hi) 830 if (!hi)
855 return; 831 return;
856 list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list) 832 list_for_each_entry(lu, &hi->logical_units, lu_list)
857 if (likely(atomic_read(&scsi_id->state) != 833 if (likely(atomic_read(&lu->state) !=
858 SBP2LU_STATE_IN_SHUTDOWN)) { 834 SBP2LU_STATE_IN_SHUTDOWN)) {
859 atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET); 835 atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
860 scsi_block_requests(scsi_id->scsi_host); 836 scsi_block_requests(lu->shost);
861 } 837 }
862} 838}
863 839
864static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) 840static int sbp2_start_device(struct sbp2_lu *lu)
865{ 841{
866 struct sbp2_fwhost_info *hi = scsi_id->hi; 842 struct sbp2_fwhost_info *hi = lu->hi;
867 int error; 843 int error;
868 844
869 scsi_id->login_response = 845 lu->login_response = pci_alloc_consistent(hi->host->pdev,
870 pci_alloc_consistent(hi->host->pdev,
871 sizeof(struct sbp2_login_response), 846 sizeof(struct sbp2_login_response),
872 &scsi_id->login_response_dma); 847 &lu->login_response_dma);
873 if (!scsi_id->login_response) 848 if (!lu->login_response)
874 goto alloc_fail; 849 goto alloc_fail;
875 850
876 scsi_id->query_logins_orb = 851 lu->query_logins_orb = pci_alloc_consistent(hi->host->pdev,
877 pci_alloc_consistent(hi->host->pdev,
878 sizeof(struct sbp2_query_logins_orb), 852 sizeof(struct sbp2_query_logins_orb),
879 &scsi_id->query_logins_orb_dma); 853 &lu->query_logins_orb_dma);
880 if (!scsi_id->query_logins_orb) 854 if (!lu->query_logins_orb)
881 goto alloc_fail; 855 goto alloc_fail;
882 856
883 scsi_id->query_logins_response = 857 lu->query_logins_response = pci_alloc_consistent(hi->host->pdev,
884 pci_alloc_consistent(hi->host->pdev,
885 sizeof(struct sbp2_query_logins_response), 858 sizeof(struct sbp2_query_logins_response),
886 &scsi_id->query_logins_response_dma); 859 &lu->query_logins_response_dma);
887 if (!scsi_id->query_logins_response) 860 if (!lu->query_logins_response)
888 goto alloc_fail; 861 goto alloc_fail;
889 862
890 scsi_id->reconnect_orb = 863 lu->reconnect_orb = pci_alloc_consistent(hi->host->pdev,
891 pci_alloc_consistent(hi->host->pdev,
892 sizeof(struct sbp2_reconnect_orb), 864 sizeof(struct sbp2_reconnect_orb),
893 &scsi_id->reconnect_orb_dma); 865 &lu->reconnect_orb_dma);
894 if (!scsi_id->reconnect_orb) 866 if (!lu->reconnect_orb)
895 goto alloc_fail; 867 goto alloc_fail;
896 868
897 scsi_id->logout_orb = 869 lu->logout_orb = pci_alloc_consistent(hi->host->pdev,
898 pci_alloc_consistent(hi->host->pdev,
899 sizeof(struct sbp2_logout_orb), 870 sizeof(struct sbp2_logout_orb),
900 &scsi_id->logout_orb_dma); 871 &lu->logout_orb_dma);
901 if (!scsi_id->logout_orb) 872 if (!lu->logout_orb)
902 goto alloc_fail; 873 goto alloc_fail;
903 874
904 scsi_id->login_orb = 875 lu->login_orb = pci_alloc_consistent(hi->host->pdev,
905 pci_alloc_consistent(hi->host->pdev,
906 sizeof(struct sbp2_login_orb), 876 sizeof(struct sbp2_login_orb),
907 &scsi_id->login_orb_dma); 877 &lu->login_orb_dma);
908 if (!scsi_id->login_orb) 878 if (!lu->login_orb)
909 goto alloc_fail; 879 goto alloc_fail;
910 880
911 if (sbp2util_create_command_orb_pool(scsi_id)) { 881 if (sbp2util_create_command_orb_pool(lu)) {
912 SBP2_ERR("sbp2util_create_command_orb_pool failed!"); 882 SBP2_ERR("sbp2util_create_command_orb_pool failed!");
913 sbp2_remove_device(scsi_id); 883 sbp2_remove_device(lu);
914 return -ENOMEM; 884 return -ENOMEM;
915 } 885 }
916 886
917 /* Wait a second before trying to log in. Previously logged in 887 /* Wait a second before trying to log in. Previously logged in
918 * initiators need a chance to reconnect. */ 888 * initiators need a chance to reconnect. */
919 if (msleep_interruptible(1000)) { 889 if (msleep_interruptible(1000)) {
920 sbp2_remove_device(scsi_id); 890 sbp2_remove_device(lu);
921 return -EINTR; 891 return -EINTR;
922 } 892 }
923 893
924 if (sbp2_login_device(scsi_id)) { 894 if (sbp2_login_device(lu)) {
925 sbp2_remove_device(scsi_id); 895 sbp2_remove_device(lu);
926 return -EBUSY; 896 return -EBUSY;
927 } 897 }
928 898
929 sbp2_set_busy_timeout(scsi_id); 899 sbp2_set_busy_timeout(lu);
930 sbp2_agent_reset(scsi_id, 1); 900 sbp2_agent_reset(lu, 1);
931 sbp2_max_speed_and_size(scsi_id); 901 sbp2_max_speed_and_size(lu);
932 902
933 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0); 903 error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
934 if (error) { 904 if (error) {
935 SBP2_ERR("scsi_add_device failed"); 905 SBP2_ERR("scsi_add_device failed");
936 sbp2_logout_device(scsi_id); 906 sbp2_logout_device(lu);
937 sbp2_remove_device(scsi_id); 907 sbp2_remove_device(lu);
938 return error; 908 return error;
939 } 909 }
940 910
941 return 0; 911 return 0;
942 912
943alloc_fail: 913alloc_fail:
944 SBP2_ERR("Could not allocate memory for scsi_id"); 914 SBP2_ERR("Could not allocate memory for lu");
945 sbp2_remove_device(scsi_id); 915 sbp2_remove_device(lu);
946 return -ENOMEM; 916 return -ENOMEM;
947} 917}
948 918
949static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id) 919static void sbp2_remove_device(struct sbp2_lu *lu)
950{ 920{
951 struct sbp2_fwhost_info *hi; 921 struct sbp2_fwhost_info *hi;
952 922
953 if (!scsi_id) 923 if (!lu)
954 return; 924 return;
955 925
956 hi = scsi_id->hi; 926 hi = lu->hi;
957 927
958 if (scsi_id->scsi_host) { 928 if (lu->shost) {
959 scsi_remove_host(scsi_id->scsi_host); 929 scsi_remove_host(lu->shost);
960 scsi_host_put(scsi_id->scsi_host); 930 scsi_host_put(lu->shost);
961 } 931 }
962 flush_scheduled_work(); 932 flush_scheduled_work();
963 sbp2util_remove_command_orb_pool(scsi_id); 933 sbp2util_remove_command_orb_pool(lu);
964 934
965 list_del(&scsi_id->scsi_list); 935 list_del(&lu->lu_list);
966 936
967 if (scsi_id->login_response) 937 if (lu->login_response)
968 pci_free_consistent(hi->host->pdev, 938 pci_free_consistent(hi->host->pdev,
969 sizeof(struct sbp2_login_response), 939 sizeof(struct sbp2_login_response),
970 scsi_id->login_response, 940 lu->login_response,
971 scsi_id->login_response_dma); 941 lu->login_response_dma);
972 if (scsi_id->login_orb) 942 if (lu->login_orb)
973 pci_free_consistent(hi->host->pdev, 943 pci_free_consistent(hi->host->pdev,
974 sizeof(struct sbp2_login_orb), 944 sizeof(struct sbp2_login_orb),
975 scsi_id->login_orb, 945 lu->login_orb,
976 scsi_id->login_orb_dma); 946 lu->login_orb_dma);
977 if (scsi_id->reconnect_orb) 947 if (lu->reconnect_orb)
978 pci_free_consistent(hi->host->pdev, 948 pci_free_consistent(hi->host->pdev,
979 sizeof(struct sbp2_reconnect_orb), 949 sizeof(struct sbp2_reconnect_orb),
980 scsi_id->reconnect_orb, 950 lu->reconnect_orb,
981 scsi_id->reconnect_orb_dma); 951 lu->reconnect_orb_dma);
982 if (scsi_id->logout_orb) 952 if (lu->logout_orb)
983 pci_free_consistent(hi->host->pdev, 953 pci_free_consistent(hi->host->pdev,
984 sizeof(struct sbp2_logout_orb), 954 sizeof(struct sbp2_logout_orb),
985 scsi_id->logout_orb, 955 lu->logout_orb,
986 scsi_id->logout_orb_dma); 956 lu->logout_orb_dma);
987 if (scsi_id->query_logins_orb) 957 if (lu->query_logins_orb)
988 pci_free_consistent(hi->host->pdev, 958 pci_free_consistent(hi->host->pdev,
989 sizeof(struct sbp2_query_logins_orb), 959 sizeof(struct sbp2_query_logins_orb),
990 scsi_id->query_logins_orb, 960 lu->query_logins_orb,
991 scsi_id->query_logins_orb_dma); 961 lu->query_logins_orb_dma);
992 if (scsi_id->query_logins_response) 962 if (lu->query_logins_response)
993 pci_free_consistent(hi->host->pdev, 963 pci_free_consistent(hi->host->pdev,
994 sizeof(struct sbp2_query_logins_response), 964 sizeof(struct sbp2_query_logins_response),
995 scsi_id->query_logins_response, 965 lu->query_logins_response,
996 scsi_id->query_logins_response_dma); 966 lu->query_logins_response_dma);
997 967
998 if (scsi_id->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE) 968 if (lu->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
999 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host, 969 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
1000 scsi_id->status_fifo_addr); 970 lu->status_fifo_addr);
1001 971
1002 scsi_id->ud->device.driver_data = NULL; 972 lu->ud->device.driver_data = NULL;
1003 973
1004 if (hi) 974 if (hi)
1005 module_put(hi->host->driver->owner); 975 module_put(hi->host->driver->owner);
1006 976
1007 kfree(scsi_id); 977 kfree(lu);
1008} 978}
1009 979
1010#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 980#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
@@ -1037,62 +1007,69 @@ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1037 * SBP-2 protocol related section 1007 * SBP-2 protocol related section
1038 **************************************/ 1008 **************************************/
1039 1009
1040static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) 1010static int sbp2_query_logins(struct sbp2_lu *lu)
1041{ 1011{
1042 struct sbp2_fwhost_info *hi = scsi_id->hi; 1012 struct sbp2_fwhost_info *hi = lu->hi;
1043 quadlet_t data[2]; 1013 quadlet_t data[2];
1044 int max_logins; 1014 int max_logins;
1045 int active_logins; 1015 int active_logins;
1046 1016
1047 scsi_id->query_logins_orb->reserved1 = 0x0; 1017 lu->query_logins_orb->reserved1 = 0x0;
1048 scsi_id->query_logins_orb->reserved2 = 0x0; 1018 lu->query_logins_orb->reserved2 = 0x0;
1049
1050 scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
1051 scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1052 1019
1053 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST); 1020 lu->query_logins_orb->query_response_lo = lu->query_logins_response_dma;
1054 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1); 1021 lu->query_logins_orb->query_response_hi =
1055 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->lun); 1022 ORB_SET_NODE_ID(hi->host->node_id);
1023 lu->query_logins_orb->lun_misc =
1024 ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1025 lu->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1026 lu->query_logins_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1056 1027
1057 scsi_id->query_logins_orb->reserved_resp_length = 1028 lu->query_logins_orb->reserved_resp_length =
1058 ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response)); 1029 ORB_SET_QUERY_LOGINS_RESP_LENGTH(
1030 sizeof(struct sbp2_query_logins_response));
1059 1031
1060 scsi_id->query_logins_orb->status_fifo_hi = 1032 lu->query_logins_orb->status_fifo_hi =
1061 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1033 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1062 scsi_id->query_logins_orb->status_fifo_lo = 1034 lu->query_logins_orb->status_fifo_lo =
1063 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1035 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1064 1036
1065 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb)); 1037 sbp2util_cpu_to_be32_buffer(lu->query_logins_orb,
1038 sizeof(struct sbp2_query_logins_orb));
1066 1039
1067 memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response)); 1040 memset(lu->query_logins_response, 0,
1041 sizeof(struct sbp2_query_logins_response));
1068 1042
1069 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1043 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1070 data[1] = scsi_id->query_logins_orb_dma; 1044 data[1] = lu->query_logins_orb_dma;
1071 sbp2util_cpu_to_be32_buffer(data, 8); 1045 sbp2util_cpu_to_be32_buffer(data, 8);
1072 1046
1073 hpsb_node_write(scsi_id->ne, scsi_id->management_agent_addr, data, 8); 1047 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1074 1048
1075 if (sbp2util_access_timeout(scsi_id, 2*HZ)) { 1049 if (sbp2util_access_timeout(lu, 2*HZ)) {
1076 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1050 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1077 return -EIO; 1051 return -EIO;
1078 } 1052 }
1079 1053
1080 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) { 1054 if (lu->status_block.ORB_offset_lo != lu->query_logins_orb_dma) {
1081 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1055 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1082 return -EIO; 1056 return -EIO;
1083 } 1057 }
1084 1058
1085 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1059 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1086 SBP2_INFO("Error querying logins to SBP-2 device - failed"); 1060 SBP2_INFO("Error querying logins to SBP-2 device - failed");
1087 return -EIO; 1061 return -EIO;
1088 } 1062 }
1089 1063
1090 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response)); 1064 sbp2util_cpu_to_be32_buffer(lu->query_logins_response,
1065 sizeof(struct sbp2_query_logins_response));
1091 1066
1092 max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins); 1067 max_logins = RESPONSE_GET_MAX_LOGINS(
1068 lu->query_logins_response->length_max_logins);
1093 SBP2_INFO("Maximum concurrent logins supported: %d", max_logins); 1069 SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
1094 1070
1095 active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins); 1071 active_logins = RESPONSE_GET_ACTIVE_LOGINS(
1072 lu->query_logins_response->length_max_logins);
1096 SBP2_INFO("Number of active logins: %d", active_logins); 1073 SBP2_INFO("Number of active logins: %d", active_logins);
1097 1074
1098 if (active_logins >= max_logins) { 1075 if (active_logins >= max_logins) {
@@ -1102,164 +1079,169 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1102 return 0; 1079 return 0;
1103} 1080}
1104 1081
1105static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) 1082static int sbp2_login_device(struct sbp2_lu *lu)
1106{ 1083{
1107 struct sbp2_fwhost_info *hi = scsi_id->hi; 1084 struct sbp2_fwhost_info *hi = lu->hi;
1108 quadlet_t data[2]; 1085 quadlet_t data[2];
1109 1086
1110 if (!scsi_id->login_orb) 1087 if (!lu->login_orb)
1111 return -EIO; 1088 return -EIO;
1112 1089
1113 if (!sbp2_exclusive_login && sbp2_query_logins(scsi_id)) { 1090 if (!sbp2_exclusive_login && sbp2_query_logins(lu)) {
1114 SBP2_INFO("Device does not support any more concurrent logins"); 1091 SBP2_INFO("Device does not support any more concurrent logins");
1115 return -EIO; 1092 return -EIO;
1116 } 1093 }
1117 1094
1118 /* assume no password */ 1095 /* assume no password */
1119 scsi_id->login_orb->password_hi = 0; 1096 lu->login_orb->password_hi = 0;
1120 scsi_id->login_orb->password_lo = 0; 1097 lu->login_orb->password_lo = 0;
1121 1098
1122 scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma; 1099 lu->login_orb->login_response_lo = lu->login_response_dma;
1123 scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id); 1100 lu->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1124 scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST); 1101 lu->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
1125 1102
1126 /* one second reconnect time */ 1103 /* one second reconnect time */
1127 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); 1104 lu->login_orb->lun_misc |= ORB_SET_RECONNECT(0);
1128 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login); 1105 lu->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login);
1129 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); 1106 lu->login_orb->lun_misc |= ORB_SET_NOTIFY(1);
1130 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->lun); 1107 lu->login_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1131 1108
1132 scsi_id->login_orb->passwd_resp_lengths = 1109 lu->login_orb->passwd_resp_lengths =
1133 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response)); 1110 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
1134 1111
1135 scsi_id->login_orb->status_fifo_hi = 1112 lu->login_orb->status_fifo_hi =
1136 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1113 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1137 scsi_id->login_orb->status_fifo_lo = 1114 lu->login_orb->status_fifo_lo =
1138 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1115 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1139 1116
1140 sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb)); 1117 sbp2util_cpu_to_be32_buffer(lu->login_orb,
1118 sizeof(struct sbp2_login_orb));
1141 1119
1142 memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response)); 1120 memset(lu->login_response, 0, sizeof(struct sbp2_login_response));
1143 1121
1144 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1122 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1145 data[1] = scsi_id->login_orb_dma; 1123 data[1] = lu->login_orb_dma;
1146 sbp2util_cpu_to_be32_buffer(data, 8); 1124 sbp2util_cpu_to_be32_buffer(data, 8);
1147 1125
1148 hpsb_node_write(scsi_id->ne, scsi_id->management_agent_addr, data, 8); 1126 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1149 1127
1150 /* wait up to 20 seconds for login status */ 1128 /* wait up to 20 seconds for login status */
1151 if (sbp2util_access_timeout(scsi_id, 20*HZ)) { 1129 if (sbp2util_access_timeout(lu, 20*HZ)) {
1152 SBP2_ERR("Error logging into SBP-2 device - timed out"); 1130 SBP2_ERR("Error logging into SBP-2 device - timed out");
1153 return -EIO; 1131 return -EIO;
1154 } 1132 }
1155 1133
1156 /* make sure that the returned status matches the login ORB */ 1134 /* make sure that the returned status matches the login ORB */
1157 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { 1135 if (lu->status_block.ORB_offset_lo != lu->login_orb_dma) {
1158 SBP2_ERR("Error logging into SBP-2 device - timed out"); 1136 SBP2_ERR("Error logging into SBP-2 device - timed out");
1159 return -EIO; 1137 return -EIO;
1160 } 1138 }
1161 1139
1162 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1140 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1163 SBP2_ERR("Error logging into SBP-2 device - failed"); 1141 SBP2_ERR("Error logging into SBP-2 device - failed");
1164 return -EIO; 1142 return -EIO;
1165 } 1143 }
1166 1144
1167 sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response)); 1145 sbp2util_cpu_to_be32_buffer(lu->login_response,
1168 scsi_id->command_block_agent_addr = 1146 sizeof(struct sbp2_login_response));
1169 ((u64)scsi_id->login_response->command_block_agent_hi) << 32; 1147 lu->command_block_agent_addr =
1170 scsi_id->command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo); 1148 ((u64)lu->login_response->command_block_agent_hi) << 32;
1171 scsi_id->command_block_agent_addr &= 0x0000ffffffffffffULL; 1149 lu->command_block_agent_addr |=
1150 ((u64)lu->login_response->command_block_agent_lo);
1151 lu->command_block_agent_addr &= 0x0000ffffffffffffULL;
1172 1152
1173 SBP2_INFO("Logged into SBP-2 device"); 1153 SBP2_INFO("Logged into SBP-2 device");
1174 return 0; 1154 return 0;
1175} 1155}
1176 1156
1177static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id) 1157static int sbp2_logout_device(struct sbp2_lu *lu)
1178{ 1158{
1179 struct sbp2_fwhost_info *hi = scsi_id->hi; 1159 struct sbp2_fwhost_info *hi = lu->hi;
1180 quadlet_t data[2]; 1160 quadlet_t data[2];
1181 int error; 1161 int error;
1182 1162
1183 scsi_id->logout_orb->reserved1 = 0x0; 1163 lu->logout_orb->reserved1 = 0x0;
1184 scsi_id->logout_orb->reserved2 = 0x0; 1164 lu->logout_orb->reserved2 = 0x0;
1185 scsi_id->logout_orb->reserved3 = 0x0; 1165 lu->logout_orb->reserved3 = 0x0;
1186 scsi_id->logout_orb->reserved4 = 0x0; 1166 lu->logout_orb->reserved4 = 0x0;
1187 1167
1188 scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST); 1168 lu->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1189 scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID); 1169 lu->logout_orb->login_ID_misc |=
1190 scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1); 1170 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1171 lu->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1191 1172
1192 scsi_id->logout_orb->reserved5 = 0x0; 1173 lu->logout_orb->reserved5 = 0x0;
1193 scsi_id->logout_orb->status_fifo_hi = 1174 lu->logout_orb->status_fifo_hi =
1194 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1175 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1195 scsi_id->logout_orb->status_fifo_lo = 1176 lu->logout_orb->status_fifo_lo =
1196 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1177 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1197 1178
1198 sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb)); 1179 sbp2util_cpu_to_be32_buffer(lu->logout_orb,
1180 sizeof(struct sbp2_logout_orb));
1199 1181
1200 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1182 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1201 data[1] = scsi_id->logout_orb_dma; 1183 data[1] = lu->logout_orb_dma;
1202 sbp2util_cpu_to_be32_buffer(data, 8); 1184 sbp2util_cpu_to_be32_buffer(data, 8);
1203 1185
1204 error = hpsb_node_write(scsi_id->ne, 1186 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1205 scsi_id->management_agent_addr, data, 8);
1206 if (error) 1187 if (error)
1207 return error; 1188 return error;
1208 1189
1209 /* wait up to 1 second for the device to complete logout */ 1190 /* wait up to 1 second for the device to complete logout */
1210 if (sbp2util_access_timeout(scsi_id, HZ)) 1191 if (sbp2util_access_timeout(lu, HZ))
1211 return -EIO; 1192 return -EIO;
1212 1193
1213 SBP2_INFO("Logged out of SBP-2 device"); 1194 SBP2_INFO("Logged out of SBP-2 device");
1214 return 0; 1195 return 0;
1215} 1196}
1216 1197
1217static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) 1198static int sbp2_reconnect_device(struct sbp2_lu *lu)
1218{ 1199{
1219 struct sbp2_fwhost_info *hi = scsi_id->hi; 1200 struct sbp2_fwhost_info *hi = lu->hi;
1220 quadlet_t data[2]; 1201 quadlet_t data[2];
1221 int error; 1202 int error;
1222 1203
1223 scsi_id->reconnect_orb->reserved1 = 0x0; 1204 lu->reconnect_orb->reserved1 = 0x0;
1224 scsi_id->reconnect_orb->reserved2 = 0x0; 1205 lu->reconnect_orb->reserved2 = 0x0;
1225 scsi_id->reconnect_orb->reserved3 = 0x0; 1206 lu->reconnect_orb->reserved3 = 0x0;
1226 scsi_id->reconnect_orb->reserved4 = 0x0; 1207 lu->reconnect_orb->reserved4 = 0x0;
1227 1208
1228 scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST); 1209 lu->reconnect_orb->login_ID_misc =
1229 scsi_id->reconnect_orb->login_ID_misc |= 1210 ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1230 ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID); 1211 lu->reconnect_orb->login_ID_misc |=
1231 scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1); 1212 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1213 lu->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1232 1214
1233 scsi_id->reconnect_orb->reserved5 = 0x0; 1215 lu->reconnect_orb->reserved5 = 0x0;
1234 scsi_id->reconnect_orb->status_fifo_hi = 1216 lu->reconnect_orb->status_fifo_hi =
1235 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); 1217 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1236 scsi_id->reconnect_orb->status_fifo_lo = 1218 lu->reconnect_orb->status_fifo_lo =
1237 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); 1219 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1238 1220
1239 sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb)); 1221 sbp2util_cpu_to_be32_buffer(lu->reconnect_orb,
1222 sizeof(struct sbp2_reconnect_orb));
1240 1223
1241 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1224 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1242 data[1] = scsi_id->reconnect_orb_dma; 1225 data[1] = lu->reconnect_orb_dma;
1243 sbp2util_cpu_to_be32_buffer(data, 8); 1226 sbp2util_cpu_to_be32_buffer(data, 8);
1244 1227
1245 error = hpsb_node_write(scsi_id->ne, 1228 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1246 scsi_id->management_agent_addr, data, 8);
1247 if (error) 1229 if (error)
1248 return error; 1230 return error;
1249 1231
1250 /* wait up to 1 second for reconnect status */ 1232 /* wait up to 1 second for reconnect status */
1251 if (sbp2util_access_timeout(scsi_id, HZ)) { 1233 if (sbp2util_access_timeout(lu, HZ)) {
1252 SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); 1234 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1253 return -EIO; 1235 return -EIO;
1254 } 1236 }
1255 1237
1256 /* make sure that the returned status matches the reconnect ORB */ 1238 /* make sure that the returned status matches the reconnect ORB */
1257 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { 1239 if (lu->status_block.ORB_offset_lo != lu->reconnect_orb_dma) {
1258 SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); 1240 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1259 return -EIO; 1241 return -EIO;
1260 } 1242 }
1261 1243
1262 if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { 1244 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1263 SBP2_ERR("Error reconnecting to SBP-2 device - failed"); 1245 SBP2_ERR("Error reconnecting to SBP-2 device - failed");
1264 return -EIO; 1246 return -EIO;
1265 } 1247 }
@@ -1272,17 +1254,17 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1272 * Set the target node's Single Phase Retry limit. Affects the target's retry 1254 * Set the target node's Single Phase Retry limit. Affects the target's retry
1273 * behaviour if our node is too busy to accept requests. 1255 * behaviour if our node is too busy to accept requests.
1274 */ 1256 */
1275static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id) 1257static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
1276{ 1258{
1277 quadlet_t data; 1259 quadlet_t data;
1278 1260
1279 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE); 1261 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1280 if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) 1262 if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
1281 SBP2_ERR("%s error", __FUNCTION__); 1263 SBP2_ERR("%s error", __FUNCTION__);
1282 return 0; 1264 return 0;
1283} 1265}
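
sbp2_set_busy_timeout() above simply writes a fixed constant to the node's BUSY_TIMEOUT CSR register. As an illustration only, and assuming the IEEE 1394 register layout in which the single-phase retry limit occupies the low four bits, the quadlet could be assembled and byte-swapped like this; htonl() stands in for cpu_to_be32(), while the driver itself just sends SBP2_BUSY_TIMEOUT_VALUE as defined in its header:

#include <stdint.h>
#include <arpa/inet.h>   /* htonl(), a stand-in for cpu_to_be32() */

/* Assumed layout: retry limit in bits 3:0, everything else preserved. */
uint32_t toy_busy_timeout_quadlet(uint32_t old_value, unsigned int retry_limit)
{
        uint32_t v = (old_value & ~0xfu) | (retry_limit & 0xfu);

        return htonl(v);   /* register contents go out big-endian */
}
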
1284 1266
1285static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, 1267static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1286 struct unit_directory *ud) 1268 struct unit_directory *ud)
1287{ 1269{
1288 struct csr1212_keyval *kv; 1270 struct csr1212_keyval *kv;
@@ -1305,8 +1287,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1305 (kv->value.csr_offset << 2); 1287 (kv->value.csr_offset << 2);
1306 1288
1307 else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) 1289 else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
1308 scsi_id->lun = 1290 lu->lun = ORB_SET_LUN(kv->value.immediate);
1309 ORB_SET_LUN(kv->value.immediate);
1310 break; 1291 break;
1311 1292
1312 case SBP2_UNIT_CHARACTERISTICS_KEY: 1293 case SBP2_UNIT_CHARACTERISTICS_KEY:
@@ -1364,14 +1345,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1364 /* If this is a logical unit directory entry, process the parent 1345 /* If this is a logical unit directory entry, process the parent
1365 * to get the values. */ 1346 * to get the values. */
1366 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) { 1347 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
1367 struct unit_directory *parent_ud = 1348 struct unit_directory *parent_ud = container_of(
1368 container_of(ud->device.parent, struct unit_directory, device); 1349 ud->device.parent, struct unit_directory, device);
1369 sbp2_parse_unit_directory(scsi_id, parent_ud); 1350 sbp2_parse_unit_directory(lu, parent_ud);
1370 } else { 1351 } else {
1371 scsi_id->management_agent_addr = management_agent_addr; 1352 lu->management_agent_addr = management_agent_addr;
1372 scsi_id->workarounds = workarounds; 1353 lu->workarounds = workarounds;
1373 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) 1354 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1374 scsi_id->lun = ORB_SET_LUN(ud->lun); 1355 lu->lun = ORB_SET_LUN(ud->lun);
1375 } 1356 }
1376} 1357}
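
The tail of sbp2_parse_unit_directory() above implements a small inheritance rule: a logical-unit directory only carries per-LU data, so the shared values (management agent address, workarounds) are taken from its parent directory via one level of recursion. A toy model of that rule, with simplified structures that are not the ieee1394 core's unit_directory:

#include <stdint.h>

#define TOY_DIR_IS_LUN_DIRECTORY 0x1   /* stand-in for UNIT_DIRECTORY_LUN_DIRECTORY */

struct toy_unit_directory {
        struct toy_unit_directory *parent;
        unsigned int flags;
        uint64_t management_agent_addr;
        uint32_t lun;
};

struct toy_lu { uint64_t management_agent_addr; uint32_t lun; };

void toy_parse_unit_directory(struct toy_lu *lu,
                              const struct toy_unit_directory *ud)
{
        if (ud->flags & TOY_DIR_IS_LUN_DIRECTORY) {
                /* LU directory: shared values come from the parent directory... */
                toy_parse_unit_directory(lu, ud->parent);
                /* ...while the LUN itself stays the one found in this directory. */
                lu->lun = ud->lun;
        } else {
                lu->management_agent_addr = ud->management_agent_addr;
                lu->lun = ud->lun;
        }
}
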
1377 1358
@@ -1386,59 +1367,58 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1386 * the speed that it needs to use, and the max_rec the host supports, and 1367 * the speed that it needs to use, and the max_rec the host supports, and
1387 * it takes care of the rest. 1368 * it takes care of the rest.
1388 */ 1369 */
1389static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id) 1370static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
1390{ 1371{
1391 struct sbp2_fwhost_info *hi = scsi_id->hi; 1372 struct sbp2_fwhost_info *hi = lu->hi;
1392 u8 payload; 1373 u8 payload;
1393 1374
1394 scsi_id->speed_code = 1375 lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];
1395 hi->host->speed[NODEID_TO_NODE(scsi_id->ne->nodeid)];
1396 1376
1397 if (scsi_id->speed_code > sbp2_max_speed) { 1377 if (lu->speed_code > sbp2_max_speed) {
1398 scsi_id->speed_code = sbp2_max_speed; 1378 lu->speed_code = sbp2_max_speed;
1399 SBP2_INFO("Reducing speed to %s", 1379 SBP2_INFO("Reducing speed to %s",
1400 hpsb_speedto_str[sbp2_max_speed]); 1380 hpsb_speedto_str[sbp2_max_speed]);
1401 } 1381 }
1402 1382
1403 /* Payload size is the lesser of what our speed supports and what 1383 /* Payload size is the lesser of what our speed supports and what
1404 * our host supports. */ 1384 * our host supports. */
1405 payload = min(sbp2_speedto_max_payload[scsi_id->speed_code], 1385 payload = min(sbp2_speedto_max_payload[lu->speed_code],
1406 (u8) (hi->host->csr.max_rec - 1)); 1386 (u8) (hi->host->csr.max_rec - 1));
1407 1387
1408 /* If physical DMA is off, work around limitation in ohci1394: 1388 /* If physical DMA is off, work around limitation in ohci1394:
1409 * packet size must not exceed PAGE_SIZE */ 1389 * packet size must not exceed PAGE_SIZE */
1410 if (scsi_id->ne->host->low_addr_space < (1ULL << 32)) 1390 if (lu->ne->host->low_addr_space < (1ULL << 32))
1411 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE && 1391 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
1412 payload) 1392 payload)
1413 payload--; 1393 payload--;
1414 1394
1415 SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]", 1395 SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1416 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid), 1396 NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
1417 hpsb_speedto_str[scsi_id->speed_code], 1397 hpsb_speedto_str[lu->speed_code],
1418 SBP2_PAYLOAD_TO_BYTES(payload)); 1398 SBP2_PAYLOAD_TO_BYTES(payload));
1419 1399
1420 scsi_id->max_payload_size = payload; 1400 lu->max_payload_size = payload;
1421 return 0; 1401 return 0;
1422} 1402}
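
sbp2_max_speed_and_size() above picks the payload code as the lesser of what the negotiated speed allows and what the host controller's max_rec advertises, then shrinks it further when physical DMA is unavailable so that one packet (payload plus roughly 24 bytes of header) still fits in a page. A self-contained recomputation of that clamping, assuming the usual encoding where payload code p means 2^(p+2) bytes:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAYLOAD_TO_BYTES(p)  (1u << ((p) + 2))

static const uint8_t toy_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };

unsigned int toy_clamp_payload(unsigned int speed_code, unsigned int host_max_rec,
                               int physical_dma_limited, unsigned long page_size)
{
        unsigned int payload = toy_speedto_max_payload[speed_code];

        if (payload > host_max_rec - 1)        /* lesser of speed and max_rec */
                payload = host_max_rec - 1;

        if (physical_dma_limited)              /* keep packets within one page */
                while (payload && TOY_PAYLOAD_TO_BYTES(payload) + 24 > page_size)
                        payload--;

        return payload;
}

int main(void)
{
        /* e.g. an S400 unit (speed code 2) behind a host with max_rec = 10,
         * with the PAGE_SIZE workaround active on a 4 KiB page system */
        printf("payload code: %u\n", toy_clamp_payload(2, 10, 1, 4096));
        return 0;
}
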
1423 1403
1424static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) 1404static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
1425{ 1405{
1426 quadlet_t data; 1406 quadlet_t data;
1427 u64 addr; 1407 u64 addr;
1428 int retval; 1408 int retval;
1429 unsigned long flags; 1409 unsigned long flags;
1430 1410
1431 /* cancel_delayed_work(&scsi_id->protocol_work); */ 1411 /* cancel_delayed_work(&lu->protocol_work); */
1432 if (wait) 1412 if (wait)
1433 flush_scheduled_work(); 1413 flush_scheduled_work();
1434 1414
1435 data = ntohl(SBP2_AGENT_RESET_DATA); 1415 data = ntohl(SBP2_AGENT_RESET_DATA);
1436 addr = scsi_id->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET; 1416 addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
1437 1417
1438 if (wait) 1418 if (wait)
1439 retval = hpsb_node_write(scsi_id->ne, addr, &data, 4); 1419 retval = hpsb_node_write(lu->ne, addr, &data, 4);
1440 else 1420 else
1441 retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4); 1421 retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);
1442 1422
1443 if (retval < 0) { 1423 if (retval < 0) {
1444 SBP2_ERR("hpsb_node_write failed.\n"); 1424 SBP2_ERR("hpsb_node_write failed.\n");
@@ -1446,22 +1426,22 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1446 } 1426 }
1447 1427
1448 /* make sure that the ORB_POINTER is written on next command */ 1428 /* make sure that the ORB_POINTER is written on next command */
1449 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 1429 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1450 scsi_id->last_orb = NULL; 1430 lu->last_orb = NULL;
1451 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 1431 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1452 1432
1453 return 0; 1433 return 0;
1454} 1434}
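
After the AGENT_RESET write above succeeds, the fetch agent has forgotten any ORB chain it was working on, which is why the function clears last_orb under cmd_orb_lock: the next command must be handed over through ORB_POINTER again rather than appended. A trivial, lock-free sketch of that bookkeeping with simplified types:

#include <stdint.h>
#include <stddef.h>

struct toy_lu {
        void     *last_orb;       /* last ORB handed to the fetch agent */
        uint64_t  last_orb_dma;
};

void toy_note_agent_reset(struct toy_lu *lu)
{
        lu->last_orb = NULL;      /* force an ORB_POINTER write next time */
        lu->last_orb_dma = 0;
}
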
1455 1435
1456static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb, 1436static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1457 struct sbp2_fwhost_info *hi, 1437 struct sbp2_fwhost_info *hi,
1458 struct sbp2_command_info *command, 1438 struct sbp2_command_info *cmd,
1459 unsigned int scsi_use_sg, 1439 unsigned int scsi_use_sg,
1460 struct scatterlist *sgpnt, 1440 struct scatterlist *sgpnt,
1461 u32 orb_direction, 1441 u32 orb_direction,
1462 enum dma_data_direction dma_dir) 1442 enum dma_data_direction dma_dir)
1463{ 1443{
1464 command->dma_dir = dma_dir; 1444 cmd->dma_dir = dma_dir;
1465 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); 1445 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1466 orb->misc |= ORB_SET_DIRECTION(orb_direction); 1446 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1467 1447
@@ -1469,31 +1449,29 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1469 if ((scsi_use_sg == 1) && 1449 if ((scsi_use_sg == 1) &&
1470 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) { 1450 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1471 1451
1472 command->dma_size = sgpnt[0].length; 1452 cmd->dma_size = sgpnt[0].length;
1473 command->dma_type = CMD_DMA_PAGE; 1453 cmd->dma_type = CMD_DMA_PAGE;
1474 command->cmd_dma = pci_map_page(hi->host->pdev, 1454 cmd->cmd_dma = pci_map_page(hi->host->pdev,
1475 sgpnt[0].page, 1455 sgpnt[0].page, sgpnt[0].offset,
1476 sgpnt[0].offset, 1456 cmd->dma_size, cmd->dma_dir);
1477 command->dma_size,
1478 command->dma_dir);
1479 1457
1480 orb->data_descriptor_lo = command->cmd_dma; 1458 orb->data_descriptor_lo = cmd->cmd_dma;
1481 orb->misc |= ORB_SET_DATA_SIZE(command->dma_size); 1459 orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
1482 1460
1483 } else { 1461 } else {
1484 struct sbp2_unrestricted_page_table *sg_element = 1462 struct sbp2_unrestricted_page_table *sg_element =
1485 &command->scatter_gather_element[0]; 1463 &cmd->scatter_gather_element[0];
1486 u32 sg_count, sg_len; 1464 u32 sg_count, sg_len;
1487 dma_addr_t sg_addr; 1465 dma_addr_t sg_addr;
1488 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, 1466 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
1489 dma_dir); 1467 dma_dir);
1490 1468
1491 command->dma_size = scsi_use_sg; 1469 cmd->dma_size = scsi_use_sg;
1492 command->sge_buffer = sgpnt; 1470 cmd->sge_buffer = sgpnt;
1493 1471
1494 /* use page tables (s/g) */ 1472 /* use page tables (s/g) */
1495 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); 1473 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1496 orb->data_descriptor_lo = command->sge_dma; 1474 orb->data_descriptor_lo = cmd->sge_dma;
1497 1475
1498 /* loop through and fill out our SBP-2 page tables 1476 /* loop through and fill out our SBP-2 page tables
1499 * (and split up anything too large) */ 1477 * (and split up anything too large) */
@@ -1519,25 +1497,25 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1519 orb->misc |= ORB_SET_DATA_SIZE(sg_count); 1497 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1520 1498
1521 sbp2util_cpu_to_be32_buffer(sg_element, 1499 sbp2util_cpu_to_be32_buffer(sg_element,
1522 (sizeof(struct sbp2_unrestricted_page_table)) * 1500 (sizeof(struct sbp2_unrestricted_page_table)) *
1523 sg_count); 1501 sg_count);
1524 } 1502 }
1525} 1503}
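
The scatter-gather branch above either points the ORB directly at a single small element or builds an unrestricted page table, splitting any element that exceeds the per-segment limit. An illustrative, self-contained version of the fill-and-split loop; the entry layout and the limit (a stand-in for SBP2_MAX_SG_ELEMENT_LENGTH) are simplified assumptions:

#include <stdint.h>

#define TOY_MAX_SEGMENT_LEN  0xf000u   /* assumed limit, for illustration only */

struct toy_pt_entry { uint32_t length_hi; uint32_t base_lo; };

unsigned int toy_fill_page_table(struct toy_pt_entry *pt, unsigned int pt_size,
                                 const uint32_t *addrs, const uint32_t *lens,
                                 unsigned int nelem)
{
        unsigned int i, count = 0;

        for (i = 0; i < nelem; i++) {
                uint32_t addr = addrs[i], len = lens[i];

                while (len && count < pt_size) {
                        uint32_t chunk = len > TOY_MAX_SEGMENT_LEN ?
                                         TOY_MAX_SEGMENT_LEN : len;

                        pt[count].base_lo   = addr;          /* segment base address */
                        pt[count].length_hi = chunk << 16;   /* segment length field */
                        addr += chunk;
                        len  -= chunk;
                        count++;
                }
        }
        return count;   /* becomes the ORB's data_size when page tables are used */
}
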
1526 1504
1527static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb, 1505static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1528 struct sbp2_fwhost_info *hi, 1506 struct sbp2_fwhost_info *hi,
1529 struct sbp2_command_info *command, 1507 struct sbp2_command_info *cmd,
1530 struct scatterlist *sgpnt, 1508 struct scatterlist *sgpnt,
1531 u32 orb_direction, 1509 u32 orb_direction,
1532 unsigned int scsi_request_bufflen, 1510 unsigned int scsi_request_bufflen,
1533 void *scsi_request_buffer, 1511 void *scsi_request_buffer,
1534 enum dma_data_direction dma_dir) 1512 enum dma_data_direction dma_dir)
1535{ 1513{
1536 command->dma_dir = dma_dir; 1514 cmd->dma_dir = dma_dir;
1537 command->dma_size = scsi_request_bufflen; 1515 cmd->dma_size = scsi_request_bufflen;
1538 command->dma_type = CMD_DMA_SINGLE; 1516 cmd->dma_type = CMD_DMA_SINGLE;
1539 command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer, 1517 cmd->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
1540 command->dma_size, command->dma_dir); 1518 cmd->dma_size, cmd->dma_dir);
1541 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); 1519 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1542 orb->misc |= ORB_SET_DIRECTION(orb_direction); 1520 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1543 1521
@@ -1545,24 +1523,24 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1545 * (but check for transfers larger than 64K) */ 1523 * (but check for transfers larger than 64K) */
1546 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) { 1524 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1547 1525
1548 orb->data_descriptor_lo = command->cmd_dma; 1526 orb->data_descriptor_lo = cmd->cmd_dma;
1549 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen); 1527 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1550 1528
1551 } else { 1529 } else {
1552 /* The buffer is too large. Turn this into page tables. */ 1530 /* The buffer is too large. Turn this into page tables. */
1553 1531
1554 struct sbp2_unrestricted_page_table *sg_element = 1532 struct sbp2_unrestricted_page_table *sg_element =
1555 &command->scatter_gather_element[0]; 1533 &cmd->scatter_gather_element[0];
1556 u32 sg_count, sg_len; 1534 u32 sg_count, sg_len;
1557 dma_addr_t sg_addr; 1535 dma_addr_t sg_addr;
1558 1536
1559 orb->data_descriptor_lo = command->sge_dma; 1537 orb->data_descriptor_lo = cmd->sge_dma;
1560 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); 1538 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1561 1539
1562 /* fill out our SBP-2 page tables; split up the large buffer */ 1540 /* fill out our SBP-2 page tables; split up the large buffer */
1563 sg_count = 0; 1541 sg_count = 0;
1564 sg_len = scsi_request_bufflen; 1542 sg_len = scsi_request_bufflen;
1565 sg_addr = command->cmd_dma; 1543 sg_addr = cmd->cmd_dma;
1566 while (sg_len) { 1544 while (sg_len) {
1567 sg_element[sg_count].segment_base_lo = sg_addr; 1545 sg_element[sg_count].segment_base_lo = sg_addr;
1568 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { 1546 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
@@ -1581,22 +1559,22 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1581 orb->misc |= ORB_SET_DATA_SIZE(sg_count); 1559 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1582 1560
1583 sbp2util_cpu_to_be32_buffer(sg_element, 1561 sbp2util_cpu_to_be32_buffer(sg_element,
1584 (sizeof(struct sbp2_unrestricted_page_table)) * 1562 (sizeof(struct sbp2_unrestricted_page_table)) *
1585 sg_count); 1563 sg_count);
1586 } 1564 }
1587} 1565}
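
Both ORB preparation paths above make the same basic choice: a single buffer or single s/g element that fits under the segment limit is described directly in the ORB, anything else goes through a page table. The decision reduces to a few lines (same assumed limit as in the previous sketch):

#include <stdint.h>

#define TOY_MAX_SEGMENT_LEN 0xf000u

enum toy_descriptor { TOY_DESC_DIRECT, TOY_DESC_PAGE_TABLE };

enum toy_descriptor toy_choose_descriptor(unsigned int nelem, uint32_t bytes)
{
        if (nelem <= 1 && bytes <= TOY_MAX_SEGMENT_LEN)
                return TOY_DESC_DIRECT;      /* data_descriptor points at the buffer */
        return TOY_DESC_PAGE_TABLE;          /* data_descriptor points at the table  */
}
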
1588 1566
1589static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, 1567static void sbp2_create_command_orb(struct sbp2_lu *lu,
1590 struct sbp2_command_info *command, 1568 struct sbp2_command_info *cmd,
1591 unchar *scsi_cmd, 1569 unchar *scsi_cmd,
1592 unsigned int scsi_use_sg, 1570 unsigned int scsi_use_sg,
1593 unsigned int scsi_request_bufflen, 1571 unsigned int scsi_request_bufflen,
1594 void *scsi_request_buffer, 1572 void *scsi_request_buffer,
1595 enum dma_data_direction dma_dir) 1573 enum dma_data_direction dma_dir)
1596{ 1574{
1597 struct sbp2_fwhost_info *hi = scsi_id->hi; 1575 struct sbp2_fwhost_info *hi = lu->hi;
1598 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer; 1576 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
1599 struct sbp2_command_orb *command_orb = &command->command_orb; 1577 struct sbp2_command_orb *orb = &cmd->command_orb;
1600 u32 orb_direction; 1578 u32 orb_direction;
1601 1579
1602 /* 1580 /*
@@ -1607,11 +1585,11 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1607 * that data_size becomes the number of s/g elements, and 1585 * that data_size becomes the number of s/g elements, and
1608 * page_size should be zero (for unrestricted). 1586 * page_size should be zero (for unrestricted).
1609 */ 1587 */
1610 command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1); 1588 orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
1611 command_orb->next_ORB_lo = 0x0; 1589 orb->next_ORB_lo = 0x0;
1612 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size); 1590 orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
1613 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); 1591 orb->misc |= ORB_SET_SPEED(lu->speed_code);
1614 command_orb->misc |= ORB_SET_NOTIFY(1); 1592 orb->misc |= ORB_SET_NOTIFY(1);
1615 1593
1616 if (dma_dir == DMA_NONE) 1594 if (dma_dir == DMA_NONE)
1617 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; 1595 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
@@ -1626,46 +1604,45 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1626 1604
1627 /* set up our page table stuff */ 1605 /* set up our page table stuff */
1628 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) { 1606 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1629 command_orb->data_descriptor_hi = 0x0; 1607 orb->data_descriptor_hi = 0x0;
1630 command_orb->data_descriptor_lo = 0x0; 1608 orb->data_descriptor_lo = 0x0;
1631 command_orb->misc |= ORB_SET_DIRECTION(1); 1609 orb->misc |= ORB_SET_DIRECTION(1);
1632 } else if (scsi_use_sg) 1610 } else if (scsi_use_sg)
1633 sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg, 1611 sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
1634 sgpnt, orb_direction, dma_dir); 1612 orb_direction, dma_dir);
1635 else 1613 else
1636 sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt, 1614 sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
1637 orb_direction, scsi_request_bufflen, 1615 scsi_request_bufflen,
1638 scsi_request_buffer, dma_dir); 1616 scsi_request_buffer, dma_dir);
1639 1617
1640 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb)); 1618 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
1641 1619
1642 memset(command_orb->cdb, 0, 12); 1620 memset(orb->cdb, 0, 12);
1643 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd)); 1621 memcpy(orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1644} 1622}
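
sbp2_create_command_orb() above fills the ORB header (null next_ORB pointer, max payload, speed, notify), derives the transfer direction from the DMA data direction, and copies the CDB into the fixed 12-byte field, zero-padding the rest. A sketch of those last two steps with local enum values rather than the driver's ORB_DIRECTION_* constants:

#include <string.h>

enum toy_dma_dir { TOY_DMA_NONE, TOY_DMA_TO_DEVICE, TOY_DMA_FROM_DEVICE };
enum toy_orb_dir { TOY_ORB_WRITE_TO_MEDIA, TOY_ORB_READ_FROM_MEDIA, TOY_ORB_NO_DATA };

enum toy_orb_dir toy_orb_direction(enum toy_dma_dir d)
{
        switch (d) {
        case TOY_DMA_TO_DEVICE:   return TOY_ORB_WRITE_TO_MEDIA;
        case TOY_DMA_FROM_DEVICE: return TOY_ORB_READ_FROM_MEDIA;
        default:                  return TOY_ORB_NO_DATA;
        }
}

void toy_fill_cdb(unsigned char cdb[12], const unsigned char *scsi_cmd,
                  size_t cmd_len)
{
        memset(cdb, 0, 12);                       /* unused CDB bytes are zero */
        memcpy(cdb, scsi_cmd, cmd_len > 12 ? 12 : cmd_len);
}
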
1645 1623
1646static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, 1624static void sbp2_link_orb_command(struct sbp2_lu *lu,
1647 struct sbp2_command_info *command) 1625 struct sbp2_command_info *cmd)
1648{ 1626{
1649 struct sbp2_fwhost_info *hi = scsi_id->hi; 1627 struct sbp2_fwhost_info *hi = lu->hi;
1650 struct sbp2_command_orb *command_orb = &command->command_orb;
1651 struct sbp2_command_orb *last_orb; 1628 struct sbp2_command_orb *last_orb;
1652 dma_addr_t last_orb_dma; 1629 dma_addr_t last_orb_dma;
1653 u64 addr = scsi_id->command_block_agent_addr; 1630 u64 addr = lu->command_block_agent_addr;
1654 quadlet_t data[2]; 1631 quadlet_t data[2];
1655 size_t length; 1632 size_t length;
1656 unsigned long flags; 1633 unsigned long flags;
1657 1634
1658 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, 1635 pci_dma_sync_single_for_device(hi->host->pdev, cmd->command_orb_dma,
1659 sizeof(struct sbp2_command_orb), 1636 sizeof(struct sbp2_command_orb),
1660 PCI_DMA_TODEVICE); 1637 PCI_DMA_TODEVICE);
1661 pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma, 1638 pci_dma_sync_single_for_device(hi->host->pdev, cmd->sge_dma,
1662 sizeof(command->scatter_gather_element), 1639 sizeof(cmd->scatter_gather_element),
1663 PCI_DMA_BIDIRECTIONAL); 1640 PCI_DMA_BIDIRECTIONAL);
1664 1641
1665 /* check to see if there are any previous orbs to use */ 1642 /* check to see if there are any previous orbs to use */
1666 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 1643 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1667 last_orb = scsi_id->last_orb; 1644 last_orb = lu->last_orb;
1668 last_orb_dma = scsi_id->last_orb_dma; 1645 last_orb_dma = lu->last_orb_dma;
1669 if (!last_orb) { 1646 if (!last_orb) {
1670 /* 1647 /*
1671 * last_orb == NULL means: We know that the target's fetch agent 1648 * last_orb == NULL means: We know that the target's fetch agent
@@ -1673,7 +1650,7 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1673 */ 1650 */
1674 addr += SBP2_ORB_POINTER_OFFSET; 1651 addr += SBP2_ORB_POINTER_OFFSET;
1675 data[0] = ORB_SET_NODE_ID(hi->host->node_id); 1652 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1676 data[1] = command->command_orb_dma; 1653 data[1] = cmd->command_orb_dma;
1677 sbp2util_cpu_to_be32_buffer(data, 8); 1654 sbp2util_cpu_to_be32_buffer(data, 8);
1678 length = 8; 1655 length = 8;
1679 } else { 1656 } else {
@@ -1687,7 +1664,7 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1687 pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma, 1664 pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma,
1688 sizeof(struct sbp2_command_orb), 1665 sizeof(struct sbp2_command_orb),
1689 PCI_DMA_TODEVICE); 1666 PCI_DMA_TODEVICE);
1690 last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma); 1667 last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
1691 wmb(); 1668 wmb();
1692 /* Tells hardware that this pointer is valid */ 1669 /* Tells hardware that this pointer is valid */
1693 last_orb->next_ORB_hi = 0; 1670 last_orb->next_ORB_hi = 0;
@@ -1698,11 +1675,11 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1698 data[0] = 0; 1675 data[0] = 0;
1699 length = 4; 1676 length = 4;
1700 } 1677 }
1701 scsi_id->last_orb = command_orb; 1678 lu->last_orb = &cmd->command_orb;
1702 scsi_id->last_orb_dma = command->command_orb_dma; 1679 lu->last_orb_dma = cmd->command_orb_dma;
1703 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 1680 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1704 1681
1705 if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) { 1682 if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
1706 /* 1683 /*
1707 * sbp2util_node_write_no_wait failed. We certainly ran out 1684 * sbp2util_node_write_no_wait failed. We certainly ran out
1708 * of transaction labels, perhaps just because there were no 1685 * of transaction labels, perhaps just because there were no
@@ -1711,31 +1688,30 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1711 * the workqueue job will sleep to guaranteedly get a tlabel. 1688 * the workqueue job will sleep to guaranteedly get a tlabel.
1712 * We do not accept new commands until the job is over. 1689 * We do not accept new commands until the job is over.
1713 */ 1690 */
1714 scsi_block_requests(scsi_id->scsi_host); 1691 scsi_block_requests(lu->shost);
1715 PREPARE_WORK(&scsi_id->protocol_work, 1692 PREPARE_WORK(&lu->protocol_work,
1716 last_orb ? sbp2util_write_doorbell: 1693 last_orb ? sbp2util_write_doorbell:
1717 sbp2util_write_orb_pointer 1694 sbp2util_write_orb_pointer
1718 /* */); 1695 /* */);
1719 schedule_work(&scsi_id->protocol_work); 1696 schedule_work(&lu->protocol_work);
1720 } 1697 }
1721} 1698}
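
sbp2_link_orb_command() above distinguishes two cases: with no outstanding ORB, the new ORB's address is written to the fetch agent's ORB_POINTER register; otherwise the previous ORB is patched to point at the new one (low half first, then a write barrier, then next_ORB_hi cleared to mark the pointer valid) and the agent is merely nudged through its DOORBELL register. A sketch of that decision which returns the register write to perform; the offsets and the node-ID quadlet are stand-ins, and the barrier is only indicated by a comment:

#include <stdint.h>

#define TOY_ORB_POINTER_OFFSET  0x08u   /* illustrative offsets, not the driver's */
#define TOY_DOORBELL_OFFSET     0x10u

struct toy_orb   { uint32_t next_hi; uint32_t next_lo; };
struct toy_write { uint64_t addr; uint32_t data[2]; unsigned int len; };

struct toy_write toy_link_orb(uint64_t agent_base, struct toy_orb *last_orb,
                              uint32_t new_orb_addr, uint32_t node_id_quadlet)
{
        struct toy_write w;

        if (!last_orb) {
                /* Agent is idle (or was reset): hand it the ORB address directly. */
                w.addr = agent_base + TOY_ORB_POINTER_OFFSET;
                w.data[0] = node_id_quadlet;
                w.data[1] = new_orb_addr;
                w.len = 8;
        } else {
                /* Agent may still be running: extend the chain, then ring the bell. */
                last_orb->next_lo = new_orb_addr;
                /* a wmb() would go here: publish next_lo before marking it valid */
                last_orb->next_hi = 0;               /* 0 means the pointer is valid */
                w.addr = agent_base + TOY_DOORBELL_OFFSET;
                w.data[0] = 0;
                w.data[1] = 0;
                w.len = 4;
        }
        return w;
}
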
1722 1699
1723static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, 1700static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
1724 struct scsi_cmnd *SCpnt,
1725 void (*done)(struct scsi_cmnd *)) 1701 void (*done)(struct scsi_cmnd *))
1726{ 1702{
1727 unchar *cmd = (unchar *) SCpnt->cmnd; 1703 unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
1728 unsigned int request_bufflen = SCpnt->request_bufflen; 1704 unsigned int request_bufflen = SCpnt->request_bufflen;
1729 struct sbp2_command_info *command; 1705 struct sbp2_command_info *cmd;
1730 1706
1731 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done); 1707 cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
1732 if (!command) 1708 if (!cmd)
1733 return -EIO; 1709 return -EIO;
1734 1710
1735 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg, 1711 sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
1736 request_bufflen, SCpnt->request_buffer, 1712 request_bufflen, SCpnt->request_buffer,
1737 SCpnt->sc_data_direction); 1713 SCpnt->sc_data_direction);
1738 sbp2_link_orb_command(scsi_id, command); 1714 sbp2_link_orb_command(lu, cmd);
1739 1715
1740 return 0; 1716 return 0;
1741} 1717}
@@ -1743,7 +1719,8 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
1743/* 1719/*
1744 * Translates SBP-2 status into SCSI sense data for check conditions 1720 * Translates SBP-2 status into SCSI sense data for check conditions
1745 */ 1721 */
1746static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data) 1722static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
1723 unchar *sense_data)
1747{ 1724{
1748 /* OK, it's pretty ugly... ;-) */ 1725 /* OK, it's pretty ugly... ;-) */
1749 sense_data[0] = 0x70; 1726 sense_data[0] = 0x70;
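
The sense translation that starts here builds fixed-format SCSI sense data (response code 0x70) for autosense. A hedged sketch of that layout, with byte positions per SPC; how sbp2 actually pulls the key, ASC and ASCQ out of its status block is not reproduced here:

#include <string.h>

void toy_build_sense(unsigned char sense[18],
                     unsigned char key, unsigned char asc, unsigned char ascq)
{
        memset(sense, 0, 18);
        sense[0]  = 0x70;          /* current error, fixed format */
        sense[2]  = key & 0x0f;
        sense[7]  = 10;            /* additional sense length     */
        sense[12] = asc;
        sense[13] = ascq;
}
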
@@ -1771,11 +1748,11 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1771 size_t length, u16 fl) 1748 size_t length, u16 fl)
1772{ 1749{
1773 struct sbp2_fwhost_info *hi; 1750 struct sbp2_fwhost_info *hi;
1774 struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; 1751 struct sbp2_lu *lu = NULL, *lu_tmp;
1775 struct scsi_cmnd *SCpnt = NULL; 1752 struct scsi_cmnd *SCpnt = NULL;
1776 struct sbp2_status_block *sb; 1753 struct sbp2_status_block *sb;
1777 u32 scsi_status = SBP2_SCSI_STATUS_GOOD; 1754 u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
1778 struct sbp2_command_info *command; 1755 struct sbp2_command_info *cmd;
1779 unsigned long flags; 1756 unsigned long flags;
1780 1757
1781 if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) { 1758 if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
@@ -1793,49 +1770,50 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1793 } 1770 }
1794 1771
1795 /* Find the unit which wrote the status. */ 1772 /* Find the unit which wrote the status. */
1796 list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) { 1773 list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
1797 if (scsi_id_tmp->ne->nodeid == nodeid && 1774 if (lu_tmp->ne->nodeid == nodeid &&
1798 scsi_id_tmp->status_fifo_addr == addr) { 1775 lu_tmp->status_fifo_addr == addr) {
1799 scsi_id = scsi_id_tmp; 1776 lu = lu_tmp;
1800 break; 1777 break;
1801 } 1778 }
1802 } 1779 }
1803 if (unlikely(!scsi_id)) { 1780 if (unlikely(!lu)) {
1804 SBP2_ERR("scsi_id is NULL - device is gone?"); 1781 SBP2_ERR("lu is NULL - device is gone?");
1805 return RCODE_ADDRESS_ERROR; 1782 return RCODE_ADDRESS_ERROR;
1806 } 1783 }
1807 1784
1808 /* Put response into scsi_id status fifo buffer. The first two bytes 1785 /* Put response into lu status fifo buffer. The first two bytes
1809 * come in big endian bit order. Often the target writes only a 1786 * come in big endian bit order. Often the target writes only a
1810 * truncated status block, minimally the first two quadlets. The rest 1787 * truncated status block, minimally the first two quadlets. The rest
1811 * is implied to be zeros. */ 1788 * is implied to be zeros. */
1812 sb = &scsi_id->status_block; 1789 sb = &lu->status_block;
1813 memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent)); 1790 memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
1814 memcpy(sb, data, length); 1791 memcpy(sb, data, length);
1815 sbp2util_be32_to_cpu_buffer(sb, 8); 1792 sbp2util_be32_to_cpu_buffer(sb, 8);
1816 1793
1817 /* Ignore unsolicited status. Handle command ORB status. */ 1794 /* Ignore unsolicited status. Handle command ORB status. */
1818 if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2)) 1795 if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
1819 command = NULL; 1796 cmd = NULL;
1820 else 1797 else
1821 command = sbp2util_find_command_for_orb(scsi_id, 1798 cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
1822 sb->ORB_offset_lo); 1799 if (cmd) {
1823 if (command) { 1800 pci_dma_sync_single_for_cpu(hi->host->pdev,
1824 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, 1801 cmd->command_orb_dma,
1825 sizeof(struct sbp2_command_orb), 1802 sizeof(struct sbp2_command_orb),
1826 PCI_DMA_TODEVICE); 1803 PCI_DMA_TODEVICE);
1827 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, 1804 pci_dma_sync_single_for_cpu(hi->host->pdev,
1828 sizeof(command->scatter_gather_element), 1805 cmd->sge_dma,
1829 PCI_DMA_BIDIRECTIONAL); 1806 sizeof(cmd->scatter_gather_element),
1807 PCI_DMA_BIDIRECTIONAL);
1830 /* Grab SCSI command pointers and check status. */ 1808 /* Grab SCSI command pointers and check status. */
1831 /* 1809 /*
1832 * FIXME: If the src field in the status is 1, the ORB DMA must 1810 * FIXME: If the src field in the status is 1, the ORB DMA must
1833 * not be reused until status for a subsequent ORB is received. 1811 * not be reused until status for a subsequent ORB is received.
1834 */ 1812 */
1835 SCpnt = command->Current_SCpnt; 1813 SCpnt = cmd->Current_SCpnt;
1836 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 1814 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1837 sbp2util_mark_command_completed(scsi_id, command); 1815 sbp2util_mark_command_completed(lu, cmd);
1838 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 1816 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1839 1817
1840 if (SCpnt) { 1818 if (SCpnt) {
1841 u32 h = sb->ORB_offset_hi_misc; 1819 u32 h = sb->ORB_offset_hi_misc;
@@ -1855,7 +1833,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1855 (unchar *)sb, SCpnt->sense_buffer); 1833 (unchar *)sb, SCpnt->sense_buffer);
1856 1834
1857 if (STATUS_TEST_DEAD(h)) 1835 if (STATUS_TEST_DEAD(h))
1858 sbp2_agent_reset(scsi_id, 0); 1836 sbp2_agent_reset(lu, 0);
1859 } 1837 }
1860 1838
1861 /* Check here to see if there are no commands in-use. If there 1839 /* Check here to see if there are no commands in-use. If there
@@ -1864,25 +1842,25 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1864 * last_orb so that next time we write directly to the 1842 * last_orb so that next time we write directly to the
1865 * ORB_POINTER register. That way the fetch agent does not need 1843 * ORB_POINTER register. That way the fetch agent does not need
1866 * to refetch the next_ORB. */ 1844 * to refetch the next_ORB. */
1867 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 1845 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1868 if (list_empty(&scsi_id->cmd_orb_inuse)) 1846 if (list_empty(&lu->cmd_orb_inuse))
1869 scsi_id->last_orb = NULL; 1847 lu->last_orb = NULL;
1870 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 1848 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1871 1849
1872 } else { 1850 } else {
1873 /* It's probably status after a management request. */ 1851 /* It's probably status after a management request. */
1874 if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) || 1852 if ((sb->ORB_offset_lo == lu->reconnect_orb_dma) ||
1875 (sb->ORB_offset_lo == scsi_id->login_orb_dma) || 1853 (sb->ORB_offset_lo == lu->login_orb_dma) ||
1876 (sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) || 1854 (sb->ORB_offset_lo == lu->query_logins_orb_dma) ||
1877 (sb->ORB_offset_lo == scsi_id->logout_orb_dma)) { 1855 (sb->ORB_offset_lo == lu->logout_orb_dma)) {
1878 scsi_id->access_complete = 1; 1856 lu->access_complete = 1;
1879 wake_up_interruptible(&sbp2_access_wq); 1857 wake_up_interruptible(&sbp2_access_wq);
1880 } 1858 }
1881 } 1859 }
1882 1860
1883 if (SCpnt) 1861 if (SCpnt)
1884 sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt, 1862 sbp2scsi_complete_command(lu, scsi_status, SCpnt,
1885 command->Current_done); 1863 cmd->Current_done);
1886 return RCODE_COMPLETE; 1864 return RCODE_COMPLETE;
1887} 1865}
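
The status-write handler above first matches the writing node and the status FIFO address to one logical unit, then decides whether the status belongs to a command ORB (complete the SCSI command) or to one of the management ORBs (wake the waiter on sbp2_access_wq). A simplified sketch of those two lookups, using a plain array instead of the driver's list_for_each_entry() walk:

#include <stdint.h>
#include <stddef.h>

struct toy_lu {
        uint16_t nodeid;
        uint64_t status_fifo_addr;
        uint32_t login_orb_dma, logout_orb_dma,
                 reconnect_orb_dma, query_logins_orb_dma;
};

struct toy_lu *toy_find_lu(struct toy_lu *lus, size_t n,
                           uint16_t nodeid, uint64_t addr)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (lus[i].nodeid == nodeid && lus[i].status_fifo_addr == addr)
                        return &lus[i];
        return NULL;    /* the "device is gone?" case in the handler above */
}

int toy_is_management_status(const struct toy_lu *lu, uint32_t orb_offset_lo)
{
        return orb_offset_lo == lu->login_orb_dma ||
               orb_offset_lo == lu->logout_orb_dma ||
               orb_offset_lo == lu->reconnect_orb_dma ||
               orb_offset_lo == lu->query_logins_orb_dma;
}
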
1888 1866
@@ -1893,15 +1871,14 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1893static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, 1871static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
1894 void (*done)(struct scsi_cmnd *)) 1872 void (*done)(struct scsi_cmnd *))
1895{ 1873{
1896 struct scsi_id_instance_data *scsi_id = 1874 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
1897 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
1898 struct sbp2_fwhost_info *hi; 1875 struct sbp2_fwhost_info *hi;
1899 int result = DID_NO_CONNECT << 16; 1876 int result = DID_NO_CONNECT << 16;
1900 1877
1901 if (unlikely(!sbp2util_node_is_available(scsi_id))) 1878 if (unlikely(!sbp2util_node_is_available(lu)))
1902 goto done; 1879 goto done;
1903 1880
1904 hi = scsi_id->hi; 1881 hi = lu->hi;
1905 1882
1906 if (unlikely(!hi)) { 1883 if (unlikely(!hi)) {
1907 SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!"); 1884 SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!");
@@ -1916,13 +1893,15 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
1916 1893
1917 /* handle the request sense command here (auto-request sense) */ 1894 /* handle the request sense command here (auto-request sense) */
1918 if (SCpnt->cmnd[0] == REQUEST_SENSE) { 1895 if (SCpnt->cmnd[0] == REQUEST_SENSE) {
1919 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen); 1896 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer,
1897 SCpnt->request_bufflen);
1920 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 1898 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
1921 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done); 1899 sbp2scsi_complete_command(lu, SBP2_SCSI_STATUS_GOOD, SCpnt,
1900 done);
1922 return 0; 1901 return 0;
1923 } 1902 }
1924 1903
1925 if (unlikely(!hpsb_node_entry_valid(scsi_id->ne))) { 1904 if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
1926 SBP2_ERR("Bus reset in progress - rejecting command"); 1905 SBP2_ERR("Bus reset in progress - rejecting command");
1927 result = DID_BUS_BUSY << 16; 1906 result = DID_BUS_BUSY << 16;
1928 goto done; 1907 goto done;
@@ -1936,9 +1915,10 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
1936 goto done; 1915 goto done;
1937 } 1916 }
1938 1917
1939 if (sbp2_send_command(scsi_id, SCpnt, done)) { 1918 if (sbp2_send_command(lu, SCpnt, done)) {
1940 SBP2_ERR("Error sending SCSI command"); 1919 SBP2_ERR("Error sending SCSI command");
1941 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, 1920 sbp2scsi_complete_command(lu,
1921 SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
1942 SCpnt, done); 1922 SCpnt, done);
1943 } 1923 }
1944 return 0; 1924 return 0;
@@ -1949,31 +1929,31 @@ done:
1949 return 0; 1929 return 0;
1950} 1930}
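
sbp2scsi_queuecommand() above answers REQUEST_SENSE locally from the sense data captured with the previous command instead of sending it to the target. A sketch of that shortcut; plain buffers and an explicit length clamp stand in for the SCpnt->request_buffer and SCpnt->sense_buffer handling:

#include <string.h>

void toy_answer_request_sense(unsigned char *request_buf, size_t request_len,
                              unsigned char *sense_buf, size_t sense_len)
{
        size_t n = request_len < sense_len ? request_len : sense_len;

        memcpy(request_buf, sense_buf, n);     /* hand back the stored sense */
        memset(sense_buf, 0, sense_len);       /* and consume it             */
}
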
1951 1931
1952static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id, 1932static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
1953 u32 status)
1954{ 1933{
1955 struct sbp2_fwhost_info *hi = scsi_id->hi; 1934 struct sbp2_fwhost_info *hi = lu->hi;
1956 struct list_head *lh; 1935 struct list_head *lh;
1957 struct sbp2_command_info *command; 1936 struct sbp2_command_info *cmd;
1958 unsigned long flags; 1937 unsigned long flags;
1959 1938
1960 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 1939 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1961 while (!list_empty(&scsi_id->cmd_orb_inuse)) { 1940 while (!list_empty(&lu->cmd_orb_inuse)) {
1962 lh = scsi_id->cmd_orb_inuse.next; 1941 lh = lu->cmd_orb_inuse.next;
1963 command = list_entry(lh, struct sbp2_command_info, list); 1942 cmd = list_entry(lh, struct sbp2_command_info, list);
1964 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, 1943 pci_dma_sync_single_for_cpu(hi->host->pdev,
1965 sizeof(struct sbp2_command_orb), 1944 cmd->command_orb_dma,
1966 PCI_DMA_TODEVICE); 1945 sizeof(struct sbp2_command_orb),
1967 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, 1946 PCI_DMA_TODEVICE);
1968 sizeof(command->scatter_gather_element), 1947 pci_dma_sync_single_for_cpu(hi->host->pdev, cmd->sge_dma,
1969 PCI_DMA_BIDIRECTIONAL); 1948 sizeof(cmd->scatter_gather_element),
1970 sbp2util_mark_command_completed(scsi_id, command); 1949 PCI_DMA_BIDIRECTIONAL);
1971 if (command->Current_SCpnt) { 1950 sbp2util_mark_command_completed(lu, cmd);
1972 command->Current_SCpnt->result = status << 16; 1951 if (cmd->Current_SCpnt) {
1973 command->Current_done(command->Current_SCpnt); 1952 cmd->Current_SCpnt->result = status << 16;
1953 cmd->Current_done(cmd->Current_SCpnt);
1974 } 1954 }
1975 } 1955 }
1976 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 1956 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1977 1957
1978 return; 1958 return;
1979} 1959}
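
sbp2scsi_complete_all_commands() above drains the in-use ORB list under cmd_orb_lock and completes every pending SCSI command with the same host status (DID_BUS_BUSY after a bus reset, for example). A lock-free sketch of the drain loop over a toy singly linked list:

#include <stddef.h>

struct toy_cmd {
        struct toy_cmd *next;
        int result;
        void (*done)(struct toy_cmd *);
};

void toy_complete_all(struct toy_cmd **inuse, int host_status)
{
        while (*inuse) {
                struct toy_cmd *cmd = *inuse;

                *inuse = cmd->next;                  /* mark_command_completed  */
                cmd->result = host_status << 16;     /* e.g. DID_BUS_BUSY << 16 */
                if (cmd->done)
                        cmd->done(cmd);
        }
}
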
@@ -1981,8 +1961,8 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id
1981/* 1961/*
1982 * Complete a regular SCSI command. Can be called in atomic context. 1962 * Complete a regular SCSI command. Can be called in atomic context.
1983 */ 1963 */
1984static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, 1964static void sbp2scsi_complete_command(struct sbp2_lu *lu, u32 scsi_status,
1985 u32 scsi_status, struct scsi_cmnd *SCpnt, 1965 struct scsi_cmnd *SCpnt,
1986 void (*done)(struct scsi_cmnd *)) 1966 void (*done)(struct scsi_cmnd *))
1987{ 1967{
1988 if (!SCpnt) { 1968 if (!SCpnt) {
@@ -2025,7 +2005,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2025 2005
2026 /* If a bus reset is in progress and there was an error, complete 2006 /* If a bus reset is in progress and there was an error, complete
2027 * the command as busy so that it will get retried. */ 2007 * the command as busy so that it will get retried. */
2028 if (!hpsb_node_entry_valid(scsi_id->ne) 2008 if (!hpsb_node_entry_valid(lu->ne)
2029 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 2009 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2030 SBP2_ERR("Completing command with busy (bus reset)"); 2010 SBP2_ERR("Completing command with busy (bus reset)");
2031 SCpnt->result = DID_BUS_BUSY << 16; 2011 SCpnt->result = DID_BUS_BUSY << 16;
@@ -2037,36 +2017,34 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2037 2017
2038static int sbp2scsi_slave_alloc(struct scsi_device *sdev) 2018static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2039{ 2019{
2040 struct scsi_id_instance_data *scsi_id = 2020 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2041 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2042 2021
2043 scsi_id->sdev = sdev; 2022 lu->sdev = sdev;
2044 sdev->allow_restart = 1; 2023 sdev->allow_restart = 1;
2045 2024
2046 if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) 2025 if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
2047 sdev->inquiry_len = 36; 2026 sdev->inquiry_len = 36;
2048 return 0; 2027 return 0;
2049} 2028}
2050 2029
2051static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2030static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2052{ 2031{
2053 struct scsi_id_instance_data *scsi_id = 2032 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2054 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2055 2033
2056 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2034 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2057 sdev->use_10_for_rw = 1; 2035 sdev->use_10_for_rw = 1;
2058 2036
2059 if (sdev->type == TYPE_DISK && 2037 if (sdev->type == TYPE_DISK &&
2060 scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 2038 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2061 sdev->skip_ms_page_8 = 1; 2039 sdev->skip_ms_page_8 = 1;
2062 if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) 2040 if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
2063 sdev->fix_capacity = 1; 2041 sdev->fix_capacity = 1;
2064 return 0; 2042 return 0;
2065} 2043}
2066 2044
2067static void sbp2scsi_slave_destroy(struct scsi_device *sdev) 2045static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2068{ 2046{
2069 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; 2047 ((struct sbp2_lu *)sdev->host->hostdata[0])->sdev = NULL;
2070 return; 2048 return;
2071} 2049}
2072 2050
@@ -2076,39 +2054,38 @@ static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2076 */ 2054 */
2077static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) 2055static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2078{ 2056{
2079 struct scsi_id_instance_data *scsi_id = 2057 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2080 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2058 struct sbp2_fwhost_info *hi = lu->hi;
2081 struct sbp2_fwhost_info *hi = scsi_id->hi; 2059 struct sbp2_command_info *cmd;
2082 struct sbp2_command_info *command;
2083 unsigned long flags; 2060 unsigned long flags;
2084 2061
2085 SBP2_INFO("aborting sbp2 command"); 2062 SBP2_INFO("aborting sbp2 command");
2086 scsi_print_command(SCpnt); 2063 scsi_print_command(SCpnt);
2087 2064
2088 if (sbp2util_node_is_available(scsi_id)) { 2065 if (sbp2util_node_is_available(lu)) {
2089 sbp2_agent_reset(scsi_id, 1); 2066 sbp2_agent_reset(lu, 1);
2090 2067
2091 /* Return a matching command structure to the free pool. */ 2068 /* Return a matching command structure to the free pool. */
2092 spin_lock_irqsave(&scsi_id->cmd_orb_lock, flags); 2069 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2093 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt); 2070 cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
2094 if (command) { 2071 if (cmd) {
2095 pci_dma_sync_single_for_cpu(hi->host->pdev, 2072 pci_dma_sync_single_for_cpu(hi->host->pdev,
2096 command->command_orb_dma, 2073 cmd->command_orb_dma,
2097 sizeof(struct sbp2_command_orb), 2074 sizeof(struct sbp2_command_orb),
2098 PCI_DMA_TODEVICE); 2075 PCI_DMA_TODEVICE);
2099 pci_dma_sync_single_for_cpu(hi->host->pdev, 2076 pci_dma_sync_single_for_cpu(hi->host->pdev,
2100 command->sge_dma, 2077 cmd->sge_dma,
2101 sizeof(command->scatter_gather_element), 2078 sizeof(cmd->scatter_gather_element),
2102 PCI_DMA_BIDIRECTIONAL); 2079 PCI_DMA_BIDIRECTIONAL);
2103 sbp2util_mark_command_completed(scsi_id, command); 2080 sbp2util_mark_command_completed(lu, cmd);
2104 if (command->Current_SCpnt) { 2081 if (cmd->Current_SCpnt) {
2105 command->Current_SCpnt->result = DID_ABORT << 16; 2082 cmd->Current_SCpnt->result = DID_ABORT << 16;
2106 command->Current_done(command->Current_SCpnt); 2083 cmd->Current_done(cmd->Current_SCpnt);
2107 } 2084 }
2108 } 2085 }
2109 spin_unlock_irqrestore(&scsi_id->cmd_orb_lock, flags); 2086 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2110 2087
2111 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); 2088 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
2112 } 2089 }
2113 2090
2114 return SUCCESS; 2091 return SUCCESS;
@@ -2119,14 +2096,13 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2119 */ 2096 */
2120static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) 2097static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2121{ 2098{
2122 struct scsi_id_instance_data *scsi_id = 2099 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2123 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2124 2100
2125 SBP2_INFO("reset requested"); 2101 SBP2_INFO("reset requested");
2126 2102
2127 if (sbp2util_node_is_available(scsi_id)) { 2103 if (sbp2util_node_is_available(lu)) {
2128 SBP2_INFO("generating sbp2 fetch agent reset"); 2104 SBP2_INFO("generating sbp2 fetch agent reset");
2129 sbp2_agent_reset(scsi_id, 1); 2105 sbp2_agent_reset(lu, 1);
2130 } 2106 }
2131 2107
2132 return SUCCESS; 2108 return SUCCESS;
@@ -2137,16 +2113,16 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2137 char *buf) 2113 char *buf)
2138{ 2114{
2139 struct scsi_device *sdev; 2115 struct scsi_device *sdev;
2140 struct scsi_id_instance_data *scsi_id; 2116 struct sbp2_lu *lu;
2141 2117
2142 if (!(sdev = to_scsi_device(dev))) 2118 if (!(sdev = to_scsi_device(dev)))
2143 return 0; 2119 return 0;
2144 2120
2145 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0])) 2121 if (!(lu = (struct sbp2_lu *)sdev->host->hostdata[0]))
2146 return 0; 2122 return 0;
2147 2123
2148 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid, 2124 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)lu->ne->guid,
2149 scsi_id->ud->id, ORB_SET_LUN(scsi_id->lun)); 2125 lu->ud->id, ORB_SET_LUN(lu->lun));
2150} 2126}
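
The sysfs attribute above exposes the unit as "GUID:unit-directory-id:LUN". A sketch of the formatting, with snprintf() standing in for the sysfs buffer handling:

#include <stdio.h>
#include <stdint.h>

int toy_format_ieee1394_id(char *buf, size_t len,
                           uint64_t guid, int ud_id, unsigned int lun)
{
        return snprintf(buf, len, "%016llx:%d:%d\n",
                        (unsigned long long)guid, ud_id, (int)lun);
}
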
2151 2127
2152MODULE_AUTHOR("Ben Collins <bcollins@debian.org>"); 2128MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");