author     Stefan Richter <stefanr@s5r6.in-berlin.de>  2014-03-03 17:23:51 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>  2014-12-10 14:53:21 -0500
commit     d737d7da8e7e931360282199341f44ac0803c837
tree       379cad73b30b61286933ea84e25b3492b3b4f0dd /drivers/firewire
parent     8e045a31e7c0536e4deb750b37c919fadcb44aa3
firewire: sbp2: replace card lock by target lock
firewire-core uses fw_card.lock to protect topology data and transaction
data. firewire-sbp2 uses fw_card.lock for entirely unrelated purposes.
Introduce an sbp2_target.lock in firewire-sbp2 and replace all
fw_card.lock uses in the driver. fw_card.lock is now entirely private
to firewire-core. This has no immediate advantage apart from making it
clear in the code that firewire-sbp2 does not interact with the core
via the core lock.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/sbp2.c  60
1 file changed, 29 insertions(+), 31 deletions(-)
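
The pattern of the change, pulled out of the diff for orientation: struct sbp2_target gains its own spinlock, sbp2_probe() initializes it, and everything that previously serialized on fw_card.lock (the per-LU ORB lists, the dont_block/blocked counters) now takes tgt->lock. Below is a minimal sketch of that pattern; the *_sketch names are illustrative stand-ins, not the driver's actual types, and the authoritative code is the diff that follows.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Stand-in for struct sbp2_target: the target now owns its lock. */
struct sbp2_target_sketch {
	spinlock_t lock;	/* replaces uses of fw_card.lock */
	int dont_block;		/* counter for each logical unit */
	int blocked;		/* ditto */
	struct list_head lu_list;
};

static void sketch_target_init(struct sbp2_target_sketch *tgt)
{
	spin_lock_init(&tgt->lock);	/* as added in sbp2_probe() */
	INIT_LIST_HEAD(&tgt->lu_list);
	tgt->dont_block = 0;
	tgt->blocked = 0;
}

/* Mirrors the reworked sbp2_allow_block(): per-target lock, no card lock. */
static void sketch_allow_block(struct sbp2_target_sketch *tgt)
{
	spin_lock_irq(&tgt->lock);
	--tgt->dont_block;
	spin_unlock_irq(&tgt->lock);
}
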
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1f3f37a39a60..64ac8f8f5098 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -174,6 +174,7 @@ struct sbp2_target {
 	unsigned int mgt_orb_timeout;
 	unsigned int max_payload;
 
+	spinlock_t lock;
 	int dont_block;	/* counter for each logical unit */
 	int blocked;	/* ditto */
 };
@@ -270,6 +271,7 @@ struct sbp2_orb {
 	dma_addr_t request_bus;
 	int rcode;
 	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+	struct sbp2_logical_unit *lu;
 	struct list_head link;
 };
 
@@ -321,7 +323,6 @@ struct sbp2_command_orb {
 		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
-	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t page_table_bus;
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	}
 
 	/* Lookup the orb corresponding to this status write. */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_for_each_entry(orb, &lu->orb_list, link) {
 		if (STATUS_GET_ORB_HIGH(status) == 0 &&
 		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
 	 */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&orb->lu->tgt->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
 	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 
 		orb->callback(orb, NULL);
 		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 	}
 
 	kref_put(&orb->kref, free_orb); /* transaction callback reference */
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 	orb_pointer.high = 0;
 	orb_pointer.low = cpu_to_be32(orb->request_bus);
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	orb->lu = lu;
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	kref_get(&orb->kref); /* transaction callback reference */
 	kref_get(&orb->kref); /* orb callback reference */
@@ -527,9 +529,9 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 	int retval = -ENOENT;
 
 	INIT_LIST_HEAD(&list);
-	spin_lock_irq(&device->card->lock);
+	spin_lock_irq(&lu->tgt->lock);
 	list_splice_init(&lu->orb_list, &list);
-	spin_unlock_irq(&device->card->lock);
+	spin_unlock_irq(&lu->tgt->lock);
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
@@ -686,14 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
 			   &d, 4, complete_agent_reset_write_no_wait, t);
 }
 
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
 {
-	struct sbp2_target *tgt = lu->tgt;
-	struct fw_card *card = target_parent_device(tgt)->card;
-
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	--tgt->dont_block;
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 }
 
 /*
@@ -702,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
  *   logical units have been finished (indicated by dont_block == 0).
  * - lu->generation is stale.
  *
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
  * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
  * unblock the target.
  */
@@ -714,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&tgt->lock, flags);
 	if (!tgt->dont_block && !lu->blocked &&
 	    lu->generation != card->generation) {
 		lu->blocked = true;
 		if (++tgt->blocked == 1)
 			scsi_block_requests(shost);
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&tgt->lock, flags);
 }
 
 /*
  * Unblocks lu->tgt as soon as all its logical units can be unblocked.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
  * the section might clash with shost->host_lock.
  */
 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
@@ -738,12 +737,12 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	bool unblock = false;
 
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	if (lu->blocked && lu->generation == card->generation) {
 		lu->blocked = false;
 		unblock = --tgt->blocked == 0;
 	}
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 
 	if (unblock)
 		scsi_unblock_requests(shost);
@@ -752,18 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 /*
  * Prevents future blocking of tgt and unblocks it.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
  * the section might clash with shost->host_lock.
  */
 static void sbp2_unblock(struct sbp2_target *tgt)
 {
-	struct fw_card *card = target_parent_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	++tgt->dont_block;
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 
 	scsi_unblock_requests(shost);
 }
@@ -899,7 +897,7 @@ static void sbp2_login(struct work_struct *work)
 	/* No error during __scsi_add_device() */
 	lu->has_sdev = true;
 	scsi_device_put(sdev);
-	sbp2_allow_block(lu);
+	sbp2_allow_block(tgt);
 
 	return;
 
@@ -1158,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 	dev_set_drvdata(&unit->device, tgt);
 	tgt->unit = unit;
 	INIT_LIST_HEAD(&tgt->lu_list);
+	spin_lock_init(&tgt->lock);
 	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 
 	if (fw_device_enable_phys_dma(device) < 0)
@@ -1354,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_device *device = target_parent_device(orb->lu->tgt);
+	struct fw_device *device = target_parent_device(base_orb->lu->tgt);
 	int result;
 
 	if (status != NULL) {
 		if (STATUS_GET_DEAD(*status))
-			sbp2_agent_reset_no_wait(orb->lu);
+			sbp2_agent_reset_no_wait(base_orb->lu);
 
 		switch (STATUS_GET_RESPONSE(*status)) {
 		case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1385,7 +1384,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 		 * or when sending the write (less likely).
 		 */
 		result = DID_BUS_BUSY << 16;
-		sbp2_conditionally_block(orb->lu);
+		sbp2_conditionally_block(base_orb->lu);
 	}
 
 	dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1482,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
-	orb->lu = lu;
 	orb->cmd = cmd;
 	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
 	orb->request.misc = cpu_to_be32(