author		Jeff Garzik <jeff@garzik.org>	2006-06-22 23:46:10 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-06-22 23:46:10 -0400
commit		ba6a13083c1b720a47c05bee7bedbb6ef06c4611
tree		26f9d8d37145fac426744f96ecf006ec0a481e31
parent		47005f255ed126a4b48a1a2f63164fb1d83bcb0a
[libata] Add host lock to struct ata_port
Prepare for changes required to support SATA devices
attached to SAS HBAs. For these devices we don't want to
use host_set at all, since libata will not be the owner
of struct scsi_host.
Signed-off-by: Brian King <brking@us.ibm.com>
(with slight merge modifications made by...)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/scsi/libata-bmdma.c	  5
-rw-r--r--	drivers/scsi/libata-core.c	 55
-rw-r--r--	drivers/scsi/libata-eh.c	 65
-rw-r--r--	drivers/scsi/libata-scsi.c	 20
-rw-r--r--	include/linux/libata.h		  1

5 files changed, 73 insertions, 73 deletions
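
The change itself is mechanical: every locking site that used to reach through &ap->host_set->lock now takes ap->lock, and ata_host_init() points the new field back at the host_set lock, so behaviour is unchanged for libata-owned hosts while a SAS LLDD is free to hand the port a different lock later. A minimal sketch of the before/after pattern follows; the struct layouts and helper names are illustrative stand-ins, not the real libata definitions.

#include <linux/spinlock.h>

/* Illustrative, stripped-down stand-ins for the real libata structures. */
struct ata_host_set_sketch {
	spinlock_t lock;
};

struct ata_port_sketch {
	spinlock_t *lock;		/* new indirection added by this patch */
	struct ata_host_set_sketch *host_set;
	unsigned long flags;
};

/* Before: every caller reached through host_set for the lock. */
static void port_op_before(struct ata_port_sketch *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= 1;			/* some port state update */
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

/* After: callers use ap->lock.  For ports owned by libata it is simply
 * initialized to &host_set->lock, so locking behaviour is identical;
 * a SATA-on-SAS port could instead point it at the SCSI host's lock. */
static void port_init_sketch(struct ata_port_sketch *ap,
			     struct ata_host_set_sketch *host_set)
{
	ap->host_set = host_set;
	ap->lock = &host_set->lock;
}

static void port_op_after(struct ata_port_sketch *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= 1;
	spin_unlock_irqrestore(ap->lock, flags);
}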
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 13fab97c840e..004e1a0d8b71 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -715,7 +715,6 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 			ata_postreset_fn_t postreset)
 {
-	struct ata_host_set *host_set = ap->host_set;
 	struct ata_eh_context *ehc = &ap->eh_context;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
@@ -726,7 +725,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 		qc = NULL;
 
 	/* reset PIO HSM and stop DMA engine */
-	spin_lock_irqsave(&host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -755,7 +754,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 	ata_chk_status(ap);
 	ap->ops->irq_clear(ap);
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	if (thaw)
 		ata_eh_thaw_port(ap);
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 425ab1493fd3..24d340aeb518 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -933,9 +933,9 @@ void ata_port_flush_task(struct ata_port *ap)
 
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("flush #1\n");
 	flush_workqueue(ata_wq);
@@ -950,9 +950,9 @@ void ata_port_flush_task(struct ata_port *ap)
 		flush_workqueue(ata_wq);
 	}
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("EXIT\n");
 }
@@ -999,11 +999,11 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	unsigned int err_mask;
 	int rc;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	/* no internal command while frozen */
 	if (ap->flags & ATA_FLAG_FROZEN) {
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 		return AC_ERR_SYSTEM;
 	}
 
@@ -1052,14 +1052,14 @@ unsigned ata_exec_internal(struct ata_device *dev,
 
 	ata_qc_issue(qc);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
 	ata_port_flush_task(ap);
 
 	if (!rc) {
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		/* We're racing with irq here. If we lose, the
 		 * following test prevents us from completing the qc
@@ -1078,7 +1078,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 				       "qc timeout (cmd 0x%x)\n", command);
 		}
 
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 	}
 
 	/* do post_internal_cmd */
@@ -1092,7 +1092,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	}
 
 	/* finish up */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	*tf = qc->result_tf;
 	err_mask = qc->err_mask;
@@ -1118,7 +1118,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 		ata_port_probe(ap);
 	}
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	return err_mask;
 }
@@ -3912,7 +3912,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 
 	if (ap->ops->error_handler) {
 		if (in_wq) {
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 
 			/* EH might have kicked in while host_set lock
 			 * is released.
@@ -3926,7 +3926,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 					ata_port_freeze(ap);
 			}
 
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 		} else {
 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
 				ata_qc_complete(qc);
@@ -3935,10 +3935,10 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 		}
 	} else {
 		if (in_wq) {
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 			ata_irq_on(ap);
 			ata_qc_complete(qc);
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 		} else
 			ata_qc_complete(qc);
 	}
@@ -4018,7 +4018,7 @@ fsm_start:
 		 * hsm_task_state is changed. Hence, the following locking.
 		 */
 		if (in_wq)
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 
 		if (qc->tf.protocol == ATA_PROT_PIO) {
 			/* PIO data out protocol.
@@ -4037,7 +4037,7 @@ fsm_start:
 			atapi_send_cdb(ap, qc);
 
 		if (in_wq)
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 
 		/* if polling, ata_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
@@ -5130,9 +5130,9 @@ void ata_dev_init(struct ata_device *dev)
 	 * requests which occur asynchronously. Synchronize using
 	 * host_set lock.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
@@ -5167,6 +5167,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	host->unique_id = ata_unique_id++;
 	host->max_cmd_len = 12;
 
+	ap->lock = &host_set->lock;
 	ap->flags = ATA_FLAG_DISABLED;
 	ap->id = host->unique_id;
 	ap->host = host;
@@ -5388,7 +5389,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 		ata_port_probe(ap);
 
 		/* kick EH for boot probing */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
 		ap->eh_info.action |= ATA_EH_SOFTRESET;
@@ -5396,7 +5397,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 		ap->flags |= ATA_FLAG_LOADING;
 		ata_port_schedule_eh(ap);
 
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 
 		/* wait for EH to finish */
 		ata_port_wait_eh(ap);
@@ -5460,29 +5461,29 @@ void ata_port_detach(struct ata_port *ap)
 		return;
 
 	/* tell EH we're leaving & flush EH */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags |= ATA_FLAG_UNLOADING;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
 
 	/* EH is now guaranteed to see UNLOADING, so no new device
 	 * will be attached. Disable all existing devices.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		ata_dev_disable(&ap->device[i]);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* Final freeze & EH. All in-flight commands are aborted. EH
 	 * will be skipped and retrials will be terminated with bad
 	 * target.
 	 */
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ata_port_freeze(ap);	/* won't be thawed */
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
 
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 70b623988a9f..823385981a7a 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -128,7 +128,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 	}
 
 	ret = EH_HANDLED;
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
@@ -136,7 +136,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 		qc->err_mask |= AC_ERR_TIMEOUT;
 		ret = EH_NOT_HANDLED;
 	}
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 out:
 	DPRINTK("EXIT, ret=%d\n", ret);
@@ -158,7 +158,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
-	spinlock_t *hs_lock = &ap->host_set->lock;
+	spinlock_t *ap_lock = ap->lock;
 	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
 	unsigned long flags;
 
@@ -185,7 +185,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		struct scsi_cmnd *scmd, *tmp;
 		int nr_timedout = 0;
 
-		spin_lock_irqsave(hs_lock, flags);
+		spin_lock_irqsave(ap_lock, flags);
 
 		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
 			struct ata_queued_cmd *qc;
@@ -224,15 +224,15 @@ void ata_scsi_error(struct Scsi_Host *host)
 		if (nr_timedout)
 			__ata_port_freeze(ap);
 
-		spin_unlock_irqrestore(hs_lock, flags);
+		spin_unlock_irqrestore(ap_lock, flags);
 	} else
-		spin_unlock_wait(hs_lock);
+		spin_unlock_wait(ap_lock);
 
 repeat:
 	/* invoke error handler */
 	if (ap->ops->error_handler) {
 		/* fetch & clear EH info */
-		spin_lock_irqsave(hs_lock, flags);
+		spin_lock_irqsave(ap_lock, flags);
 
 		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
 		ap->eh_context.i = ap->eh_info;
@@ -241,7 +241,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
 		ap->flags &= ~ATA_FLAG_EH_PENDING;
 
-		spin_unlock_irqrestore(hs_lock, flags);
+		spin_unlock_irqrestore(ap_lock, flags);
 
 		/* invoke EH. if unloading, just finish failed qcs */
 		if (!(ap->flags & ATA_FLAG_UNLOADING))
@@ -253,14 +253,14 @@ void ata_scsi_error(struct Scsi_Host *host)
 		 * recovered the port but before this point. Repeat
 		 * EH in such case.
 		 */
-		spin_lock_irqsave(hs_lock, flags);
+		spin_lock_irqsave(ap_lock, flags);
 
 		if (ap->flags & ATA_FLAG_EH_PENDING) {
 			if (--repeat_cnt) {
 				ata_port_printk(ap, KERN_INFO,
 					"EH pending after completion, "
 					"repeating EH (cnt=%d)\n", repeat_cnt);
-				spin_unlock_irqrestore(hs_lock, flags);
+				spin_unlock_irqrestore(ap_lock, flags);
 				goto repeat;
 			}
 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
@@ -270,14 +270,14 @@ void ata_scsi_error(struct Scsi_Host *host)
 		/* this run is complete, make sure EH info is clear */
 		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
 
-		/* Clear host_eh_scheduled while holding hs_lock such
+		/* Clear host_eh_scheduled while holding ap_lock such
 		 * that if exception occurs after this point but
 		 * before EH completion, SCSI midlayer will
 		 * re-initiate EH.
 		 */
 		host->host_eh_scheduled = 0;
 
-		spin_unlock_irqrestore(hs_lock, flags);
+		spin_unlock_irqrestore(ap_lock, flags);
 	} else {
 		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
 		ap->ops->eng_timeout(ap);
@@ -289,7 +289,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 	scsi_eh_flush_done_q(&ap->eh_done_q);
 
 	/* clean up */
-	spin_lock_irqsave(hs_lock, flags);
+	spin_lock_irqsave(ap_lock, flags);
 
 	if (ap->flags & ATA_FLAG_LOADING) {
 		ap->flags &= ~ATA_FLAG_LOADING;
@@ -306,7 +306,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 	ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
 	wake_up_all(&ap->eh_wait_q);
 
-	spin_unlock_irqrestore(hs_lock, flags);
+	spin_unlock_irqrestore(ap_lock, flags);
 
 	DPRINTK("EXIT\n");
 }
@@ -326,17 +326,17 @@ void ata_port_wait_eh(struct ata_port *ap)
 	DEFINE_WAIT(wait);
 
 retry:
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 		schedule();
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 	}
 	finish_wait(&ap->eh_wait_q, &wait);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* make sure SCSI EH is complete */
 	if (scsi_host_in_recovery(ap->host)) {
@@ -368,7 +368,6 @@ void ata_port_wait_eh(struct ata_port *ap)
 static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct ata_host_set *host_set = ap->host_set;
 	u8 host_stat = 0, drv_stat;
 	unsigned long flags;
 
@@ -376,7 +375,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 
 	ap->hsm_task_state = HSM_ST_IDLE;
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	switch (qc->tf.protocol) {
 
@@ -405,7 +404,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		break;
 	}
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_eh_qc_complete(qc);
 
@@ -592,9 +591,9 @@ void ata_eh_freeze_port(struct ata_port *ap)
 	if (!ap->ops->error_handler)
 		return;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	__ata_port_freeze(ap);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -613,14 +612,14 @@ void ata_eh_thaw_port(struct ata_port *ap)
 	if (!ap->ops->error_handler)
 		return;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	ap->flags &= ~ATA_FLAG_FROZEN;
 
 	if (ap->ops->thaw)
 		ap->ops->thaw(ap);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("ata%u port thawed\n", ap->id);
 }
@@ -636,11 +635,11 @@ static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
 	struct scsi_cmnd *scmd = qc->scsicmd;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	qc->scsidone = ata_eh_scsidone;
 	__ata_qc_complete(qc);
 	WARN_ON(ata_tag_valid(qc->tag));
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
 }
@@ -694,7 +693,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
 
 	ata_dev_disable(dev);
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	dev->flags &= ~ATA_DFLAG_DETACH;
 
@@ -703,7 +702,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
 		ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
 	}
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 static void ata_eh_clear_action(struct ata_device *dev,
@@ -749,10 +748,10 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ata_eh_clear_action(dev, &ap->eh_info, action);
 	ap->flags |= ATA_FLAG_RECOVERED;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -1625,9 +1624,9 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 				break;
 			}
 
-			spin_lock_irqsave(&ap->host_set->lock, flags);
+			spin_lock_irqsave(ap->lock, flags);
 			ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
-			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
 		}
 	}
 
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 9698949fa52a..d86abed62007 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -752,7 +752,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
 	if (!ap->ops->error_handler)
 		return;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	dev = __ata_scsi_find_dev(ap, sdev);
 	if (dev && dev->sdev) {
 		/* SCSI device already in CANCEL state, no need to offline it */
@@ -760,7 +760,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
 		dev->flags |= ATA_DFLAG_DETACH;
 		ata_port_schedule_eh(ap);
 	}
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -2684,7 +2684,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	ap = ata_shost_to_port(shost);
 
 	spin_unlock(shost->host_lock);
-	spin_lock(&ap->host_set->lock);
+	spin_lock(ap->lock);
 
 	ata_scsi_dump_cdb(ap, cmd);
 
@@ -2696,7 +2696,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 		done(cmd);
 	}
 
-	spin_unlock(&ap->host_set->lock);
+	spin_unlock(ap->lock);
 	spin_lock(shost->host_lock);
 	return rc;
 }
@@ -2858,7 +2858,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 	 * increments reference counts regardless of device state.
 	 */
 	mutex_lock(&ap->host->scan_mutex);
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	/* clearing dev->sdev is protected by host_set lock */
 	sdev = dev->sdev;
@@ -2882,7 +2882,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 		}
 	}
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 	mutex_unlock(&ap->host->scan_mutex);
 
 	if (sdev) {
@@ -2926,9 +2926,9 @@ void ata_scsi_hotplug(void *data)
 		if (!(dev->flags & ATA_DFLAG_DETACHED))
 			continue;
 
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 		dev->flags &= ~ATA_DFLAG_DETACHED;
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 
 		ata_scsi_remove_dev(dev);
 	}
@@ -2981,7 +2981,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 	    (lun != SCAN_WILD_CARD && lun != 0))
 		return -EINVAL;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
 	if (id == SCAN_WILD_CARD) {
 		ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
@@ -2999,7 +2999,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 	if (rc == 0)
 		ata_port_schedule_eh(ap);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	return rc;
 }
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6b3c3af2c75f..20b1cf527c60 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -483,6 +483,7 @@ struct ata_eh_context {
 struct ata_port {
 	struct Scsi_Host	*host;		/* our co-allocated scsi host */
 	const struct ata_port_operations *ops;
+	spinlock_t		*lock;
 	unsigned long		flags;		/* ATA_FLAG_xxx */
 	unsigned int		id;		/* unique id req'd by scsi midlyr */
 	unsigned int		port_no;	/* unique port #; from zero */