author     Jeff Garzik <jeff@garzik.org>  2006-06-22 23:46:10 -0400
committer  Jeff Garzik <jeff@garzik.org>  2006-06-22 23:46:10 -0400
commit     ba6a13083c1b720a47c05bee7bedbb6ef06c4611 (patch)
tree       26f9d8d37145fac426744f96ecf006ec0a481e31 /drivers/scsi/libata-core.c
parent     47005f255ed126a4b48a1a2f63164fb1d83bcb0a (diff)
[libata] Add host lock to struct ata_port
Prepare for changes required to support SATA devices attached to SAS HBAs.
For these devices we don't want to use host_set at all, since libata will
not be the owner of struct scsi_host.

Signed-off-by: Brian King <brking@us.ibm.com>
(with slight merge modifications made by...)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
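The shape of the change is easiest to see as a small sketch: struct ata_port gains a pointer to whichever spinlock protects it, ordinary ports simply aim that pointer at host_set->lock during init (as the ata_host_init() hunk below does), and all lock/unlock sites switch from &ap->host_set->lock to ap->lock. The struct layouts and function names in this sketch are simplified stand-ins, not the real libata definitions; only the ->lock indirection mirrors the patch.

/* Illustrative sketch only; simplified stand-in types, not libata's. */
#include <linux/spinlock.h>

struct example_host_set {
	spinlock_t		lock;
	/* ... */
};

struct example_port {
	spinlock_t		*lock;		/* new: points at the owning lock */
	struct example_host_set	*host_set;
	unsigned int		flags;
	/* ... */
};

static void example_port_init(struct example_port *ap,
			      struct example_host_set *host_set)
{
	ap->host_set = host_set;
	ap->lock = &host_set->lock;	/* libata-owned port: host_set lock */
}

static void example_locked_flag_set(struct example_port *ap, unsigned int flag)
{
	unsigned long flags;

	/* callers now take ap->lock instead of &ap->host_set->lock, so a
	 * SAS-attached port can later point ap->lock at a lock it does
	 * not own via host_set
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= flag;
	spin_unlock_irqrestore(ap->lock, flags);
}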
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r--  drivers/scsi/libata-core.c  55
1 file changed, 28 insertions(+), 27 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 425ab1493fd3..24d340aeb518 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -933,9 +933,9 @@ void ata_port_flush_task(struct ata_port *ap)
 
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("flush #1\n");
 	flush_workqueue(ata_wq);
@@ -950,9 +950,9 @@ void ata_port_flush_task(struct ata_port *ap)
 		flush_workqueue(ata_wq);
 	}
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("EXIT\n");
 }
@@ -999,11 +999,11 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	unsigned int err_mask;
 	int rc;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	/* no internal command while frozen */
 	if (ap->flags & ATA_FLAG_FROZEN) {
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 		return AC_ERR_SYSTEM;
 	}
 
@@ -1052,14 +1052,14 @@ unsigned ata_exec_internal(struct ata_device *dev,
 
 	ata_qc_issue(qc);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
 	ata_port_flush_task(ap);
 
 	if (!rc) {
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		/* We're racing with irq here.  If we lose, the
 		 * following test prevents us from completing the qc
@@ -1078,7 +1078,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 				       "qc timeout (cmd 0x%x)\n", command);
 		}
 
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 	}
 
 	/* do post_internal_cmd */
@@ -1092,7 +1092,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	}
 
 	/* finish up */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	*tf = qc->result_tf;
 	err_mask = qc->err_mask;
@@ -1118,7 +1118,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 		ata_port_probe(ap);
 	}
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	return err_mask;
 }
@@ -3912,7 +3912,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 
 	if (ap->ops->error_handler) {
 		if (in_wq) {
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 
 			/* EH might have kicked in while host_set lock
 			 * is released.
@@ -3926,7 +3926,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 				ata_port_freeze(ap);
 			}
 
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 		} else {
 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
 				ata_qc_complete(qc);
@@ -3935,10 +3935,10 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 		}
 	} else {
 		if (in_wq) {
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 			ata_irq_on(ap);
 			ata_qc_complete(qc);
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 		} else
 			ata_qc_complete(qc);
 	}
@@ -4018,7 +4018,7 @@ fsm_start:
 		 * hsm_task_state is changed. Hence, the following locking.
 		 */
 		if (in_wq)
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 
 		if (qc->tf.protocol == ATA_PROT_PIO) {
 			/* PIO data out protocol.
@@ -4037,7 +4037,7 @@ fsm_start:
 			atapi_send_cdb(ap, qc);
 
 		if (in_wq)
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 
 		/* if polling, ata_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
@@ -5130,9 +5130,9 @@ void ata_dev_init(struct ata_device *dev)
 	 * requests which occur asynchronously.  Synchronize using
 	 * host_set lock.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
@@ -5167,6 +5167,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	host->unique_id = ata_unique_id++;
 	host->max_cmd_len = 12;
 
+	ap->lock = &host_set->lock;
 	ap->flags = ATA_FLAG_DISABLED;
 	ap->id = host->unique_id;
 	ap->host = host;
@@ -5388,7 +5389,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 		ata_port_probe(ap);
 
 		/* kick EH for boot probing */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
 		ap->eh_info.action |= ATA_EH_SOFTRESET;
@@ -5396,7 +5397,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 		ap->flags |= ATA_FLAG_LOADING;
 		ata_port_schedule_eh(ap);
 
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 
 		/* wait for EH to finish */
 		ata_port_wait_eh(ap);
@@ -5460,29 +5461,29 @@ void ata_port_detach(struct ata_port *ap)
 		return;
 
 	/* tell EH we're leaving & flush EH */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags |= ATA_FLAG_UNLOADING;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
 
 	/* EH is now guaranteed to see UNLOADING, so no new device
 	 * will be attached.  Disable all existing devices.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		ata_dev_disable(&ap->device[i]);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
 	 * will be skipped and retrials will be terminated with bad
 	 * target.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ata_port_freeze(ap);	/* won't be thawed */
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
 