author     Tejun Heo <htejun@gmail.com>     2006-07-03 03:07:27 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-07-05 22:16:28 -0400
commit     500530f652f9e5dabe7571b018dec47742ce0f16 (patch)
tree       cb9653c45a7e37d9bfe8dcc3923ed6b33ca134ea /drivers/scsi
parent     d6f26d1f1f1128a896f38a7f8426daed0a1205a2 (diff)
[PATCH] libata: reimplement controller-wide PM
Reimplement controller-wide PM. ata_host_set_suspend/resume() are
defined to suspend and resume a whole host_set. While suspended, EH on
every port in the host_set is held off via ATA_PFLAG_SUSPENDED and the
ports are kept frozen.

Because SCSI device hotplug runs asynchronously with respect to the
rest of libata EH, and the same mutex is used when adding a new device,
suspend cannot wait for hotplug to complete. So, if SCSI device hotplug
is in progress, suspend fails with -EBUSY.

In most cases, a host_set resume is followed by resume of its devices.
As each device resume requires a reset, a single host_set-wide resume
could otherwise trigger multiple resets. To avoid this, resume waits up
to one second for PM to request resume of the attached devices, so that
all of them can be handled in a single EH pass.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
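As an illustration only (not part of this patch): a PCI LLDD that needs controller-specific suspend work could combine the new host_set-wide helpers with the split ata_pci_device_do_suspend()/do_resume() helpers roughly as sketched below. The my_controller_*() calls are hypothetical placeholders; drivers without such work can simply keep using ata_pci_device_suspend()/ata_pci_device_resume() as their PCI PM entry points.

```c
/* Sketch of a hypothetical LLDD's PCI PM entry points built on the helpers
 * introduced by this patch.  my_controller_quiesce()/_reinit() are made-up
 * placeholders for controller-specific work and are not part of libata.
 */
#include <linux/pci.h>
#include <linux/libata.h>

static void my_controller_quiesce(struct ata_host_set *host_set);	/* hypothetical */
static void my_controller_reinit(struct ata_host_set *host_set);	/* hypothetical */

static int my_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	int rc;

	/* park EH on all ports; fails with -EBUSY if SCSI hotplug is in flight */
	rc = ata_host_set_suspend(host_set, mesg);
	if (rc)
		return rc;

	my_controller_quiesce(host_set);
	ata_pci_device_do_suspend(pdev, mesg);	/* save PCI state, maybe D3hot */
	return 0;
}

static int my_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);		/* D0, restore state, re-enable */
	my_controller_reinit(host_set);
	ata_host_set_resume(host_set);		/* request resume EH on all ports */
	return 0;
}
```

Splitting the PCI half into ata_pci_device_do_suspend()/do_resume() is what lets such a driver slot its own work between the libata EH handshake and the PCI power-state change.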
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/libata-core.c  165
-rw-r--r--  drivers/scsi/libata-eh.c    128
2 files changed, 286 insertions, 7 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 51dbc5221934..fa65b990f8b0 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -5009,6 +5009,122 @@ int ata_flush_cache(struct ata_device *dev) | |||
5009 | return 0; | 5009 | return 0; |
5010 | } | 5010 | } |
5011 | 5011 | ||
5012 | static int ata_host_set_request_pm(struct ata_host_set *host_set, | ||
5013 | pm_message_t mesg, unsigned int action, | ||
5014 | unsigned int ehi_flags, int wait) | ||
5015 | { | ||
5016 | unsigned long flags; | ||
5017 | int i, rc; | ||
5018 | |||
5019 | for (i = 0; i < host_set->n_ports; i++) { | ||
5020 | struct ata_port *ap = host_set->ports[i]; | ||
5021 | |||
5022 | /* Previous resume operation might still be in | ||
5023 | * progress. Wait for PM_PENDING to clear. | ||
5024 | */ | ||
5025 | if (ap->pflags & ATA_PFLAG_PM_PENDING) { | ||
5026 | ata_port_wait_eh(ap); | ||
5027 | WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); | ||
5028 | } | ||
5029 | |||
5030 | /* request PM ops to EH */ | ||
5031 | spin_lock_irqsave(ap->lock, flags); | ||
5032 | |||
5033 | ap->pm_mesg = mesg; | ||
5034 | if (wait) { | ||
5035 | rc = 0; | ||
5036 | ap->pm_result = &rc; | ||
5037 | } | ||
5038 | |||
5039 | ap->pflags |= ATA_PFLAG_PM_PENDING; | ||
5040 | ap->eh_info.action |= action; | ||
5041 | ap->eh_info.flags |= ehi_flags; | ||
5042 | |||
5043 | ata_port_schedule_eh(ap); | ||
5044 | |||
5045 | spin_unlock_irqrestore(ap->lock, flags); | ||
5046 | |||
5047 | /* wait and check result */ | ||
5048 | if (wait) { | ||
5049 | ata_port_wait_eh(ap); | ||
5050 | WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); | ||
5051 | if (rc) | ||
5052 | return rc; | ||
5053 | } | ||
5054 | } | ||
5055 | |||
5056 | return 0; | ||
5057 | } | ||
5058 | |||
5059 | /** | ||
5060 | * ata_host_set_suspend - suspend host_set | ||
5061 | * @host_set: host_set to suspend | ||
5062 | * @mesg: PM message | ||
5063 | * | ||
5064 | * Suspend @host_set. Actual operation is performed by EH. This | ||
5065 | * function requests EH to perform PM operations and waits for EH | ||
5066 | * to finish. | ||
5067 | * | ||
5068 | * LOCKING: | ||
5069 | * Kernel thread context (may sleep). | ||
5070 | * | ||
5071 | * RETURNS: | ||
5072 | * 0 on success, -errno on failure. | ||
5073 | */ | ||
5074 | int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg) | ||
5075 | { | ||
5076 | int i, j, rc; | ||
5077 | |||
5078 | rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1); | ||
5079 | if (rc) | ||
5080 | goto fail; | ||
5081 | |||
5082 | /* EH is quiescent now. Fail if we have any ready device. | ||
5083 | * This happens if hotplug occurs between completion of device | ||
5084 | * suspension and here. | ||
5085 | */ | ||
5086 | for (i = 0; i < host_set->n_ports; i++) { | ||
5087 | struct ata_port *ap = host_set->ports[i]; | ||
5088 | |||
5089 | for (j = 0; j < ATA_MAX_DEVICES; j++) { | ||
5090 | struct ata_device *dev = &ap->device[j]; | ||
5091 | |||
5092 | if (ata_dev_ready(dev)) { | ||
5093 | ata_port_printk(ap, KERN_WARNING, | ||
5094 | "suspend failed, device %d " | ||
5095 | "still active\n", dev->devno); | ||
5096 | rc = -EBUSY; | ||
5097 | goto fail; | ||
5098 | } | ||
5099 | } | ||
5100 | } | ||
5101 | |||
5102 | host_set->dev->power.power_state = mesg; | ||
5103 | return 0; | ||
5104 | |||
5105 | fail: | ||
5106 | ata_host_set_resume(host_set); | ||
5107 | return rc; | ||
5108 | } | ||
5109 | |||
5110 | /** | ||
5111 | * ata_host_set_resume - resume host_set | ||
5112 | * @host_set: host_set to resume | ||
5113 | * | ||
5114 | * Resume @host_set. Actual operation is performed by EH. This | ||
5115 | * function requests EH to perform PM operations and returns. | ||
5116 | * Note that all resume operations are performed parallely. | ||
5117 | * | ||
5118 | * LOCKING: | ||
5119 | * Kernel thread context (may sleep). | ||
5120 | */ | ||
5121 | void ata_host_set_resume(struct ata_host_set *host_set) | ||
5122 | { | ||
5123 | ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET, | ||
5124 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | ||
5125 | host_set->dev->power.power_state = PMSG_ON; | ||
5126 | } | ||
5127 | |||
5012 | /** | 5128 | /** |
5013 | * ata_port_start - Set port up for dma. | 5129 | * ata_port_start - Set port up for dma. |
5014 | * @ap: Port to initialize | 5130 | * @ap: Port to initialize |
@@ -5651,20 +5767,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) | |||
5651 | return (tmp == bits->val) ? 1 : 0; | 5767 | return (tmp == bits->val) ? 1 : 0; |
5652 | } | 5768 | } |
5653 | 5769 | ||
5654 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) | 5770 | void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state) |
5655 | { | 5771 | { |
5656 | pci_save_state(pdev); | 5772 | pci_save_state(pdev); |
5657 | pci_disable_device(pdev); | 5773 | |
5658 | pci_set_power_state(pdev, PCI_D3hot); | 5774 | if (state.event == PM_EVENT_SUSPEND) { |
5659 | return 0; | 5775 | pci_disable_device(pdev); |
5776 | pci_set_power_state(pdev, PCI_D3hot); | ||
5777 | } | ||
5660 | } | 5778 | } |
5661 | 5779 | ||
5662 | int ata_pci_device_resume(struct pci_dev *pdev) | 5780 | void ata_pci_device_do_resume(struct pci_dev *pdev) |
5663 | { | 5781 | { |
5664 | pci_set_power_state(pdev, PCI_D0); | 5782 | pci_set_power_state(pdev, PCI_D0); |
5665 | pci_restore_state(pdev); | 5783 | pci_restore_state(pdev); |
5666 | pci_enable_device(pdev); | 5784 | pci_enable_device(pdev); |
5667 | pci_set_master(pdev); | 5785 | pci_set_master(pdev); |
5786 | } | ||
5787 | |||
5788 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5789 | { | ||
5790 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
5791 | int rc = 0; | ||
5792 | |||
5793 | rc = ata_host_set_suspend(host_set, state); | ||
5794 | if (rc) | ||
5795 | return rc; | ||
5796 | |||
5797 | if (host_set->next) { | ||
5798 | rc = ata_host_set_suspend(host_set->next, state); | ||
5799 | if (rc) { | ||
5800 | ata_host_set_resume(host_set); | ||
5801 | return rc; | ||
5802 | } | ||
5803 | } | ||
5804 | |||
5805 | ata_pci_device_do_suspend(pdev, state); | ||
5806 | |||
5807 | return 0; | ||
5808 | } | ||
5809 | |||
5810 | int ata_pci_device_resume(struct pci_dev *pdev) | ||
5811 | { | ||
5812 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
5813 | |||
5814 | ata_pci_device_do_resume(pdev); | ||
5815 | ata_host_set_resume(host_set); | ||
5816 | if (host_set->next) | ||
5817 | ata_host_set_resume(host_set->next); | ||
5818 | |||
5668 | return 0; | 5819 | return 0; |
5669 | } | 5820 | } |
5670 | #endif /* CONFIG_PCI */ | 5821 | #endif /* CONFIG_PCI */ |
@@ -5844,6 +5995,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write); | |||
5844 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); | 5995 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); |
5845 | EXPORT_SYMBOL_GPL(ata_port_online); | 5996 | EXPORT_SYMBOL_GPL(ata_port_online); |
5846 | EXPORT_SYMBOL_GPL(ata_port_offline); | 5997 | EXPORT_SYMBOL_GPL(ata_port_offline); |
5998 | EXPORT_SYMBOL_GPL(ata_host_set_suspend); | ||
5999 | EXPORT_SYMBOL_GPL(ata_host_set_resume); | ||
5847 | EXPORT_SYMBOL_GPL(ata_id_string); | 6000 | EXPORT_SYMBOL_GPL(ata_id_string); |
5848 | EXPORT_SYMBOL_GPL(ata_id_c_string); | 6001 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
5849 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); | 6002 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
@@ -5858,6 +6011,8 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop); | |||
5858 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); | 6011 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); |
5859 | EXPORT_SYMBOL_GPL(ata_pci_init_one); | 6012 | EXPORT_SYMBOL_GPL(ata_pci_init_one); |
5860 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); | 6013 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
6014 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); | ||
6015 | EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); | ||
5861 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); | 6016 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); |
5862 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); | 6017 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); |
5863 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); | 6018 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); |
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index b9df49a36214..4b6aa30f4d68 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -47,6 +47,8 @@ | |||
47 | 47 | ||
48 | static void __ata_port_freeze(struct ata_port *ap); | 48 | static void __ata_port_freeze(struct ata_port *ap); |
49 | static void ata_eh_finish(struct ata_port *ap); | 49 | static void ata_eh_finish(struct ata_port *ap); |
50 | static void ata_eh_handle_port_suspend(struct ata_port *ap); | ||
51 | static void ata_eh_handle_port_resume(struct ata_port *ap); | ||
50 | 52 | ||
51 | static void ata_ering_record(struct ata_ering *ering, int is_io, | 53 | static void ata_ering_record(struct ata_ering *ering, int is_io, |
52 | unsigned int err_mask) | 54 | unsigned int err_mask) |
@@ -262,6 +264,9 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
262 | repeat: | 264 | repeat: |
263 | /* invoke error handler */ | 265 | /* invoke error handler */ |
264 | if (ap->ops->error_handler) { | 266 | if (ap->ops->error_handler) { |
267 | /* process port resume request */ | ||
268 | ata_eh_handle_port_resume(ap); | ||
269 | |||
265 | /* fetch & clear EH info */ | 270 | /* fetch & clear EH info */ |
266 | spin_lock_irqsave(ap->lock, flags); | 271 | spin_lock_irqsave(ap->lock, flags); |
267 | 272 | ||
@@ -274,12 +279,15 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
274 | 279 | ||
275 | spin_unlock_irqrestore(ap->lock, flags); | 280 | spin_unlock_irqrestore(ap->lock, flags); |
276 | 281 | ||
277 | /* invoke EH. if unloading, just finish failed qcs */ | 282 | /* invoke EH, skip if unloading or suspended */ |
278 | if (!(ap->pflags & ATA_PFLAG_UNLOADING)) | 283 | if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) |
279 | ap->ops->error_handler(ap); | 284 | ap->ops->error_handler(ap); |
280 | else | 285 | else |
281 | ata_eh_finish(ap); | 286 | ata_eh_finish(ap); |
282 | 287 | ||
288 | /* process port suspend request */ | ||
289 | ata_eh_handle_port_suspend(ap); | ||
290 | |||
283 | /* Exception might have happend after ->error_handler | 291 | /* Exception might have happend after ->error_handler |
284 | * recovered the port but before this point. Repeat | 292 | * recovered the port but before this point. Repeat |
285 | * EH in such case. | 293 | * EH in such case. |
@@ -2101,3 +2109,119 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2101 | ata_eh_recover(ap, prereset, softreset, hardreset, postreset); | 2109 | ata_eh_recover(ap, prereset, softreset, hardreset, postreset); |
2102 | ata_eh_finish(ap); | 2110 | ata_eh_finish(ap); |
2103 | } | 2111 | } |
2112 | |||
2113 | /** | ||
2114 | * ata_eh_handle_port_suspend - perform port suspend operation | ||
2115 | * @ap: port to suspend | ||
2116 | * | ||
2117 | * Suspend @ap. | ||
2118 | * | ||
2119 | * LOCKING: | ||
2120 | * Kernel thread context (may sleep). | ||
2121 | */ | ||
2122 | static void ata_eh_handle_port_suspend(struct ata_port *ap) | ||
2123 | { | ||
2124 | unsigned long flags; | ||
2125 | int rc = 0; | ||
2126 | |||
2127 | /* are we suspending? */ | ||
2128 | spin_lock_irqsave(ap->lock, flags); | ||
2129 | if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || | ||
2130 | ap->pm_mesg.event == PM_EVENT_ON) { | ||
2131 | spin_unlock_irqrestore(ap->lock, flags); | ||
2132 | return; | ||
2133 | } | ||
2134 | spin_unlock_irqrestore(ap->lock, flags); | ||
2135 | |||
2136 | WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); | ||
2137 | |||
2138 | /* suspend */ | ||
2139 | ata_eh_freeze_port(ap); | ||
2140 | |||
2141 | if (ap->ops->port_suspend) | ||
2142 | rc = ap->ops->port_suspend(ap, ap->pm_mesg); | ||
2143 | |||
2144 | /* report result */ | ||
2145 | spin_lock_irqsave(ap->lock, flags); | ||
2146 | |||
2147 | ap->pflags &= ~ATA_PFLAG_PM_PENDING; | ||
2148 | if (rc == 0) | ||
2149 | ap->pflags |= ATA_PFLAG_SUSPENDED; | ||
2150 | else | ||
2151 | ata_port_schedule_eh(ap); | ||
2152 | |||
2153 | if (ap->pm_result) { | ||
2154 | *ap->pm_result = rc; | ||
2155 | ap->pm_result = NULL; | ||
2156 | } | ||
2157 | |||
2158 | spin_unlock_irqrestore(ap->lock, flags); | ||
2159 | |||
2160 | return; | ||
2161 | } | ||
2162 | |||
2163 | /** | ||
2164 | * ata_eh_handle_port_resume - perform port resume operation | ||
2165 | * @ap: port to resume | ||
2166 | * | ||
2167 | * Resume @ap. | ||
2168 | * | ||
2169 | * This function also waits upto one second until all devices | ||
2170 | * hanging off this port requests resume EH action. This is to | ||
2171 | * prevent invoking EH and thus reset multiple times on resume. | ||
2172 | * | ||
2173 | * On DPM resume, where some of devices might not be resumed | ||
2174 | * together, this may delay port resume upto one second, but such | ||
2175 | * DPM resumes are rare and 1 sec delay isn't too bad. | ||
2176 | * | ||
2177 | * LOCKING: | ||
2178 | * Kernel thread context (may sleep). | ||
2179 | */ | ||
2180 | static void ata_eh_handle_port_resume(struct ata_port *ap) | ||
2181 | { | ||
2182 | unsigned long timeout; | ||
2183 | unsigned long flags; | ||
2184 | int i, rc = 0; | ||
2185 | |||
2186 | /* are we resuming? */ | ||
2187 | spin_lock_irqsave(ap->lock, flags); | ||
2188 | if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || | ||
2189 | ap->pm_mesg.event != PM_EVENT_ON) { | ||
2190 | spin_unlock_irqrestore(ap->lock, flags); | ||
2191 | return; | ||
2192 | } | ||
2193 | spin_unlock_irqrestore(ap->lock, flags); | ||
2194 | |||
2195 | /* spurious? */ | ||
2196 | if (!(ap->pflags & ATA_PFLAG_SUSPENDED)) | ||
2197 | goto done; | ||
2198 | |||
2199 | if (ap->ops->port_resume) | ||
2200 | rc = ap->ops->port_resume(ap); | ||
2201 | |||
2202 | /* give devices time to request EH */ | ||
2203 | timeout = jiffies + HZ; /* 1s max */ | ||
2204 | while (1) { | ||
2205 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
2206 | struct ata_device *dev = &ap->device[i]; | ||
2207 | unsigned int action = ata_eh_dev_action(dev); | ||
2208 | |||
2209 | if ((dev->flags & ATA_DFLAG_SUSPENDED) && | ||
2210 | !(action & ATA_EH_RESUME)) | ||
2211 | break; | ||
2212 | } | ||
2213 | |||
2214 | if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout)) | ||
2215 | break; | ||
2216 | msleep(10); | ||
2217 | } | ||
2218 | |||
2219 | done: | ||
2220 | spin_lock_irqsave(ap->lock, flags); | ||
2221 | ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); | ||
2222 | if (ap->pm_result) { | ||
2223 | *ap->pm_result = rc; | ||
2224 | ap->pm_result = NULL; | ||
2225 | } | ||
2226 | spin_unlock_irqrestore(ap->lock, flags); | ||
2227 | } | ||
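Again for illustration only (not part of the patch): the two EH helpers above call the optional ->port_suspend()/->port_resume() hooks of ata_port_operations, so a controller driver only has to supply the hardware-specific part. A minimal, hypothetical implementation for a controller that merely gates its PHY might look like the sketch below; the my_phy_*() helpers are made up.

```c
/* Hypothetical ->port_suspend()/->port_resume() hooks matching the calling
 * convention used by ata_eh_handle_port_suspend()/_resume() above.
 * my_phy_power_down()/_up() are made-up controller-specific placeholders.
 */
#include <linux/libata.h>

static void my_phy_power_down(struct ata_port *ap);	/* hypothetical */
static void my_phy_power_up(struct ata_port *ap);	/* hypothetical */

static int my_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* EH has already frozen the port at this point */
	my_phy_power_down(ap);
	return 0;	/* non-zero reschedules EH instead of marking the
			 * port ATA_PFLAG_SUSPENDED */
}

static int my_port_resume(struct ata_port *ap)
{
	/* called by EH before normal recovery runs; the resets themselves are
	 * requested separately (ATA_EH_SOFTRESET) by ata_host_set_resume()
	 */
	my_phy_power_up(ap);
	return 0;
}
```

Such hooks would then be wired into the driver's struct ata_port_operations via its .port_suspend and .port_resume members.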