diff options
author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-15 20:40:28 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-15 20:40:28 -0500
commit    | a68db9cb858d10820add66682ad4d412f9914288 (patch)
tree      | e17537ba2c11e0fd01dca906fb8dccc8576dfaf1
parent    | 7051d8e6308640aaee2f9d45d0ceaad3a4ee8a78 (diff)
parent    | d737d7da8e7e931360282199341f44ac0803c837 (diff)
Merge tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394
Pull firewire updates from Stefan Richter:
"IEEE 1394 subsystem updates:
- clean up firewire-ohci's longlived vm-mapping
- use target instance lock instead of core lock in firewire-sbp2"
* tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394:
firewire: sbp2: replace card lock by target lock
firewire: sbp2: replace some spin_lock_irqsave by spin_lock_irq
firewire: sbp2: protect a reference counter properly
firewire: core: document fw_csr_string's truncation of long strings
firewire: ohci: replace vm_map_ram() with vmap()
-rw-r--r-- | drivers/firewire/core-device.c | 3
-rw-r--r-- | drivers/firewire/ohci.c        | 6
-rw-r--r-- | drivers/firewire/sbp2.c        | 67
3 files changed, 35 insertions(+), 41 deletions(-)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 2c6d5e118ac1..f9e3aee6a211 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -115,6 +115,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) | |||
115 | * | 115 | * |
116 | * The string is taken from a minimal ASCII text descriptor leaf after | 116 | * The string is taken from a minimal ASCII text descriptor leaf after |
117 | * the immediate entry with @key. The string is zero-terminated. | 117 | * the immediate entry with @key. The string is zero-terminated. |
118 | * An overlong string is silently truncated such that it and the | ||
119 | * zero byte fit into @size. | ||
120 | * | ||
118 | * Returns strlen(buf) or a negative error code. | 121 | * Returns strlen(buf) or a negative error code. |
119 | */ | 122 | */ |
120 | int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) | 123 | int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index a66a3217f1d9..aff9018d0658 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -689,8 +689,7 @@ static void ar_context_release(struct ar_context *ctx) | |||
689 | { | 689 | { |
690 | unsigned int i; | 690 | unsigned int i; |
691 | 691 | ||
692 | if (ctx->buffer) | 692 | vunmap(ctx->buffer); |
693 | vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES); | ||
694 | 693 | ||
695 | for (i = 0; i < AR_BUFFERS; i++) | 694 | for (i = 0; i < AR_BUFFERS; i++) |
696 | if (ctx->pages[i]) { | 695 | if (ctx->pages[i]) { |
@@ -1018,8 +1017,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, | |||
1018 | pages[i] = ctx->pages[i]; | 1017 | pages[i] = ctx->pages[i]; |
1019 | for (i = 0; i < AR_WRAPAROUND_PAGES; i++) | 1018 | for (i = 0; i < AR_WRAPAROUND_PAGES; i++) |
1020 | pages[AR_BUFFERS + i] = ctx->pages[i]; | 1019 | pages[AR_BUFFERS + i] = ctx->pages[i]; |
1021 | ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES, | 1020 | ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); |
1022 | -1, PAGE_KERNEL); | ||
1023 | if (!ctx->buffer) | 1021 | if (!ctx->buffer) |
1024 | goto out_of_memory; | 1022 | goto out_of_memory; |
1025 | 1023 | ||
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 7aef911fdc71..64ac8f8f5098 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -174,6 +174,7 @@ struct sbp2_target { | |||
174 | unsigned int mgt_orb_timeout; | 174 | unsigned int mgt_orb_timeout; |
175 | unsigned int max_payload; | 175 | unsigned int max_payload; |
176 | 176 | ||
177 | spinlock_t lock; | ||
177 | int dont_block; /* counter for each logical unit */ | 178 | int dont_block; /* counter for each logical unit */ |
178 | int blocked; /* ditto */ | 179 | int blocked; /* ditto */ |
179 | }; | 180 | }; |
@@ -270,6 +271,7 @@ struct sbp2_orb { | |||
270 | dma_addr_t request_bus; | 271 | dma_addr_t request_bus; |
271 | int rcode; | 272 | int rcode; |
272 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); | 273 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); |
274 | struct sbp2_logical_unit *lu; | ||
273 | struct list_head link; | 275 | struct list_head link; |
274 | }; | 276 | }; |
275 | 277 | ||
@@ -321,7 +323,6 @@ struct sbp2_command_orb { | |||
321 | u8 command_block[SBP2_MAX_CDB_SIZE]; | 323 | u8 command_block[SBP2_MAX_CDB_SIZE]; |
322 | } request; | 324 | } request; |
323 | struct scsi_cmnd *cmd; | 325 | struct scsi_cmnd *cmd; |
324 | struct sbp2_logical_unit *lu; | ||
325 | 326 | ||
326 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); | 327 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); |
327 | dma_addr_t page_table_bus; | 328 | dma_addr_t page_table_bus; |
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
444 | } | 445 | } |
445 | 446 | ||
446 | /* Lookup the orb corresponding to this status write. */ | 447 | /* Lookup the orb corresponding to this status write. */ |
447 | spin_lock_irqsave(&card->lock, flags); | 448 | spin_lock_irqsave(&lu->tgt->lock, flags); |
448 | list_for_each_entry(orb, &lu->orb_list, link) { | 449 | list_for_each_entry(orb, &lu->orb_list, link) { |
449 | if (STATUS_GET_ORB_HIGH(status) == 0 && | 450 | if (STATUS_GET_ORB_HIGH(status) == 0 && |
450 | STATUS_GET_ORB_LOW(status) == orb->request_bus) { | 451 | STATUS_GET_ORB_LOW(status) == orb->request_bus) { |
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
453 | break; | 454 | break; |
454 | } | 455 | } |
455 | } | 456 | } |
456 | spin_unlock_irqrestore(&card->lock, flags); | 457 | spin_unlock_irqrestore(&lu->tgt->lock, flags); |
457 | 458 | ||
458 | if (&orb->link != &lu->orb_list) { | 459 | if (&orb->link != &lu->orb_list) { |
459 | orb->callback(orb, &status); | 460 | orb->callback(orb, &status); |
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode, | |||
480 | * been set and only does the cleanup if the transaction | 481 | * been set and only does the cleanup if the transaction |
481 | * failed and we didn't already get a status write. | 482 | * failed and we didn't already get a status write. |
482 | */ | 483 | */ |
483 | spin_lock_irqsave(&card->lock, flags); | 484 | spin_lock_irqsave(&orb->lu->tgt->lock, flags); |
484 | 485 | ||
485 | if (orb->rcode == -1) | 486 | if (orb->rcode == -1) |
486 | orb->rcode = rcode; | 487 | orb->rcode = rcode; |
487 | if (orb->rcode != RCODE_COMPLETE) { | 488 | if (orb->rcode != RCODE_COMPLETE) { |
488 | list_del(&orb->link); | 489 | list_del(&orb->link); |
489 | spin_unlock_irqrestore(&card->lock, flags); | 490 | spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); |
490 | 491 | ||
491 | orb->callback(orb, NULL); | 492 | orb->callback(orb, NULL); |
492 | kref_put(&orb->kref, free_orb); /* orb callback reference */ | 493 | kref_put(&orb->kref, free_orb); /* orb callback reference */ |
493 | } else { | 494 | } else { |
494 | spin_unlock_irqrestore(&card->lock, flags); | 495 | spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); |
495 | } | 496 | } |
496 | 497 | ||
497 | kref_put(&orb->kref, free_orb); /* transaction callback reference */ | 498 | kref_put(&orb->kref, free_orb); /* transaction callback reference */ |
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
507 | orb_pointer.high = 0; | 508 | orb_pointer.high = 0; |
508 | orb_pointer.low = cpu_to_be32(orb->request_bus); | 509 | orb_pointer.low = cpu_to_be32(orb->request_bus); |
509 | 510 | ||
510 | spin_lock_irqsave(&device->card->lock, flags); | 511 | orb->lu = lu; |
512 | spin_lock_irqsave(&lu->tgt->lock, flags); | ||
511 | list_add_tail(&orb->link, &lu->orb_list); | 513 | list_add_tail(&orb->link, &lu->orb_list); |
512 | spin_unlock_irqrestore(&device->card->lock, flags); | 514 | spin_unlock_irqrestore(&lu->tgt->lock, flags); |
513 | 515 | ||
514 | kref_get(&orb->kref); /* transaction callback reference */ | 516 | kref_get(&orb->kref); /* transaction callback reference */ |
515 | kref_get(&orb->kref); /* orb callback reference */ | 517 | kref_get(&orb->kref); /* orb callback reference */ |
@@ -524,13 +526,12 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | |||
524 | struct fw_device *device = target_parent_device(lu->tgt); | 526 | struct fw_device *device = target_parent_device(lu->tgt); |
525 | struct sbp2_orb *orb, *next; | 527 | struct sbp2_orb *orb, *next; |
526 | struct list_head list; | 528 | struct list_head list; |
527 | unsigned long flags; | ||
528 | int retval = -ENOENT; | 529 | int retval = -ENOENT; |
529 | 530 | ||
530 | INIT_LIST_HEAD(&list); | 531 | INIT_LIST_HEAD(&list); |
531 | spin_lock_irqsave(&device->card->lock, flags); | 532 | spin_lock_irq(&lu->tgt->lock); |
532 | list_splice_init(&lu->orb_list, &list); | 533 | list_splice_init(&lu->orb_list, &list); |
533 | spin_unlock_irqrestore(&device->card->lock, flags); | 534 | spin_unlock_irq(&lu->tgt->lock); |
534 | 535 | ||
535 | list_for_each_entry_safe(orb, next, &list, link) { | 536 | list_for_each_entry_safe(orb, next, &list, link) { |
536 | retval = 0; | 537 | retval = 0; |
@@ -687,16 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | |||
687 | &d, 4, complete_agent_reset_write_no_wait, t); | 688 | &d, 4, complete_agent_reset_write_no_wait, t); |
688 | } | 689 | } |
689 | 690 | ||
690 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | 691 | static inline void sbp2_allow_block(struct sbp2_target *tgt) |
691 | { | 692 | { |
692 | /* | 693 | spin_lock_irq(&tgt->lock); |
693 | * We may access dont_block without taking card->lock here: | 694 | --tgt->dont_block; |
694 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | 695 | spin_unlock_irq(&tgt->lock); |
695 | * are currently serialized against each other. | ||
696 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
697 | * dont_block is rather harmless, it simply misses its first chance. | ||
698 | */ | ||
699 | --lu->tgt->dont_block; | ||
700 | } | 696 | } |
701 | 697 | ||
702 | /* | 698 | /* |
@@ -705,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | |||
705 | * logical units have been finished (indicated by dont_block == 0). | 701 | * logical units have been finished (indicated by dont_block == 0). |
706 | * - lu->generation is stale. | 702 | * - lu->generation is stale. |
707 | * | 703 | * |
708 | * Note, scsi_block_requests() must be called while holding card->lock, | 704 | * Note, scsi_block_requests() must be called while holding tgt->lock, |
709 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | 705 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to |
710 | * unblock the target. | 706 | * unblock the target. |
711 | */ | 707 | */ |
@@ -717,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | |||
717 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 713 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
718 | unsigned long flags; | 714 | unsigned long flags; |
719 | 715 | ||
720 | spin_lock_irqsave(&card->lock, flags); | 716 | spin_lock_irqsave(&tgt->lock, flags); |
721 | if (!tgt->dont_block && !lu->blocked && | 717 | if (!tgt->dont_block && !lu->blocked && |
722 | lu->generation != card->generation) { | 718 | lu->generation != card->generation) { |
723 | lu->blocked = true; | 719 | lu->blocked = true; |
724 | if (++tgt->blocked == 1) | 720 | if (++tgt->blocked == 1) |
725 | scsi_block_requests(shost); | 721 | scsi_block_requests(shost); |
726 | } | 722 | } |
727 | spin_unlock_irqrestore(&card->lock, flags); | 723 | spin_unlock_irqrestore(&tgt->lock, flags); |
728 | } | 724 | } |
729 | 725 | ||
730 | /* | 726 | /* |
731 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | 727 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. |
732 | * Note, it is harmless to run scsi_unblock_requests() outside the | 728 | * Note, it is harmless to run scsi_unblock_requests() outside the |
733 | * card->lock protected section. On the other hand, running it inside | 729 | * tgt->lock protected section. On the other hand, running it inside |
734 | * the section might clash with shost->host_lock. | 730 | * the section might clash with shost->host_lock. |
735 | */ | 731 | */ |
736 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | 732 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) |
@@ -739,15 +735,14 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | |||
739 | struct fw_card *card = target_parent_device(tgt)->card; | 735 | struct fw_card *card = target_parent_device(tgt)->card; |
740 | struct Scsi_Host *shost = | 736 | struct Scsi_Host *shost = |
741 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 737 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
742 | unsigned long flags; | ||
743 | bool unblock = false; | 738 | bool unblock = false; |
744 | 739 | ||
745 | spin_lock_irqsave(&card->lock, flags); | 740 | spin_lock_irq(&tgt->lock); |
746 | if (lu->blocked && lu->generation == card->generation) { | 741 | if (lu->blocked && lu->generation == card->generation) { |
747 | lu->blocked = false; | 742 | lu->blocked = false; |
748 | unblock = --tgt->blocked == 0; | 743 | unblock = --tgt->blocked == 0; |
749 | } | 744 | } |
750 | spin_unlock_irqrestore(&card->lock, flags); | 745 | spin_unlock_irq(&tgt->lock); |
751 | 746 | ||
752 | if (unblock) | 747 | if (unblock) |
753 | scsi_unblock_requests(shost); | 748 | scsi_unblock_requests(shost); |
@@ -756,19 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | |||
756 | /* | 751 | /* |
757 | * Prevents future blocking of tgt and unblocks it. | 752 | * Prevents future blocking of tgt and unblocks it. |
758 | * Note, it is harmless to run scsi_unblock_requests() outside the | 753 | * Note, it is harmless to run scsi_unblock_requests() outside the |
759 | * card->lock protected section. On the other hand, running it inside | 754 | * tgt->lock protected section. On the other hand, running it inside |
760 | * the section might clash with shost->host_lock. | 755 | * the section might clash with shost->host_lock. |
761 | */ | 756 | */ |
762 | static void sbp2_unblock(struct sbp2_target *tgt) | 757 | static void sbp2_unblock(struct sbp2_target *tgt) |
763 | { | 758 | { |
764 | struct fw_card *card = target_parent_device(tgt)->card; | ||
765 | struct Scsi_Host *shost = | 759 | struct Scsi_Host *shost = |
766 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 760 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
767 | unsigned long flags; | ||
768 | 761 | ||
769 | spin_lock_irqsave(&card->lock, flags); | 762 | spin_lock_irq(&tgt->lock); |
770 | ++tgt->dont_block; | 763 | ++tgt->dont_block; |
771 | spin_unlock_irqrestore(&card->lock, flags); | 764 | spin_unlock_irq(&tgt->lock); |
772 | 765 | ||
773 | scsi_unblock_requests(shost); | 766 | scsi_unblock_requests(shost); |
774 | } | 767 | } |
@@ -904,7 +897,7 @@ static void sbp2_login(struct work_struct *work) | |||
904 | /* No error during __scsi_add_device() */ | 897 | /* No error during __scsi_add_device() */ |
905 | lu->has_sdev = true; | 898 | lu->has_sdev = true; |
906 | scsi_device_put(sdev); | 899 | scsi_device_put(sdev); |
907 | sbp2_allow_block(lu); | 900 | sbp2_allow_block(tgt); |
908 | 901 | ||
909 | return; | 902 | return; |
910 | 903 | ||
@@ -1163,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) | |||
1163 | dev_set_drvdata(&unit->device, tgt); | 1156 | dev_set_drvdata(&unit->device, tgt); |
1164 | tgt->unit = unit; | 1157 | tgt->unit = unit; |
1165 | INIT_LIST_HEAD(&tgt->lu_list); | 1158 | INIT_LIST_HEAD(&tgt->lu_list); |
1159 | spin_lock_init(&tgt->lock); | ||
1166 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 1160 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
1167 | 1161 | ||
1168 | if (fw_device_enable_phys_dma(device) < 0) | 1162 | if (fw_device_enable_phys_dma(device) < 0) |
@@ -1359,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb, | |||
1359 | { | 1353 | { |
1360 | struct sbp2_command_orb *orb = | 1354 | struct sbp2_command_orb *orb = |
1361 | container_of(base_orb, struct sbp2_command_orb, base); | 1355 | container_of(base_orb, struct sbp2_command_orb, base); |
1362 | struct fw_device *device = target_parent_device(orb->lu->tgt); | 1356 | struct fw_device *device = target_parent_device(base_orb->lu->tgt); |
1363 | int result; | 1357 | int result; |
1364 | 1358 | ||
1365 | if (status != NULL) { | 1359 | if (status != NULL) { |
1366 | if (STATUS_GET_DEAD(*status)) | 1360 | if (STATUS_GET_DEAD(*status)) |
1367 | sbp2_agent_reset_no_wait(orb->lu); | 1361 | sbp2_agent_reset_no_wait(base_orb->lu); |
1368 | 1362 | ||
1369 | switch (STATUS_GET_RESPONSE(*status)) { | 1363 | switch (STATUS_GET_RESPONSE(*status)) { |
1370 | case SBP2_STATUS_REQUEST_COMPLETE: | 1364 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1390,7 +1384,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb, | |||
1390 | * or when sending the write (less likely). | 1384 | * or when sending the write (less likely). |
1391 | */ | 1385 | */ |
1392 | result = DID_BUS_BUSY << 16; | 1386 | result = DID_BUS_BUSY << 16; |
1393 | sbp2_conditionally_block(orb->lu); | 1387 | sbp2_conditionally_block(base_orb->lu); |
1394 | } | 1388 | } |
1395 | 1389 | ||
1396 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1390 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1487,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, | |||
1487 | /* Initialize rcode to something not RCODE_COMPLETE. */ | 1481 | /* Initialize rcode to something not RCODE_COMPLETE. */ |
1488 | orb->base.rcode = -1; | 1482 | orb->base.rcode = -1; |
1489 | kref_init(&orb->base.kref); | 1483 | kref_init(&orb->base.kref); |
1490 | orb->lu = lu; | ||
1491 | orb->cmd = cmd; | 1484 | orb->cmd = cmd; |
1492 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); | 1485 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); |
1493 | orb->request.misc = cpu_to_be32( | 1486 | orb->request.misc = cpu_to_be32( |