author:    Elias Oltmanns <eo@nebensachen.de>  2008-09-21 05:54:08 -0400
committer: Jeff Garzik <jgarzik@redhat.com>    2008-09-29 00:27:54 -0400
commit:    45fabbb77bd95adff7a80bde1c7a0ace1075fde6
tree:      0e98efc190b25a11f84b8ae7d1ee0a17c41d3da8 /drivers/ata
parent:    ea6ce53cd5d005455ec0a3cc1d45d3af0cb90919
libata: Implement disk shock protection support
On user request (through sysfs), the IDLE IMMEDIATE command with UNLOAD FEATURE as specified in ATA-7 is issued to the device and processing of the request queue is stopped thereafter until the specified timeout expires or user space asks to resume normal operation. This is supposed to prevent the heads of a hard drive from accidentally crashing onto the platter when a heavy shock is anticipated (like a falling laptop expected to hit the floor). In fact, the whole port stops processing commands until the timeout has expired in order to avoid any resets due to failed commands on another device.

Signed-off-by: Elias Oltmanns <eo@nebensachen.de>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
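For illustration, a minimal user-space sketch of driving the new sysfs interface follows. The /sys/block/sda/device/unload_heads path and the 2000 ms timeout are assumptions for the example, not something dictated by the patch; the attribute itself is the unload_heads device attribute added below.

/*
 * Minimal user-space sketch: ask libata to unload the heads of the
 * disk behind /dev/sda for ~2 seconds, then read back how much park
 * time remains.  The sysfs path is an assumption for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define PARK_ATTR "/sys/block/sda/device/unload_heads"

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Request a head unload with a 2000 ms timeout (value in ms). */
	fd = open(PARK_ATTR, O_WRONLY);
	if (fd < 0) {
		perror("open for write");
		return 1;
	}
	if (write(fd, "2000\n", 5) < 0)
		perror("write");
	close(fd);

	/* Reading the attribute reports the remaining park time in ms. */
	fd = open(PARK_ATTR, O_RDONLY);
	if (fd < 0) {
		perror("open for read");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n > 0) {
		buf[n] = '\0';
		printf("remaining park time: %s", buf);
	}
	return 0;
}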
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/ahci.c          1
-rw-r--r--  drivers/ata/libata-core.c   1
-rw-r--r--  drivers/ata/libata-eh.c   126
-rw-r--r--  drivers/ata/libata-scsi.c 108
4 files changed, 235 insertions(+), 1 deletion(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6acea41eb7ca..aeadd00411a1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
 
 static struct device_attribute *ahci_sdev_attrs[] = {
 	&dev_attr_sw_activity,
+	&dev_attr_unload_heads,
 	NULL
 };
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6eed58e35e12..1ee9499bd343 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5378,6 +5378,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
+	init_completion(&ap->park_req_pending);
 	init_timer_deferrable(&ap->fastdrain_timer);
 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
 	ap->fastdrain_timer.data = (unsigned long)ap;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 99037a4860d9..33ac5ea4f531 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2536,6 +2536,80 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	goto retry;
 }
 
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/*
+	 * This function can be thought of as an extended version of
+	 * ata_eh_about_to_do() specially crafted to accommodate the
+	 * requirements of ATA_EH_PARK handling. Since the EH thread
+	 * does not leave the do {} while () loop in ata_eh_recover as
+	 * long as the timeout for a park request to *one* device on
+	 * the port has not expired, and since we still want to pick
+	 * up park requests to other devices on the same port or
+	 * timeout updates for the same device, we have to pull
+	 * ATA_EH_PARK actions from eh_info into eh_context.i
+	 * ourselves at the beginning of each pass over the loop.
+	 *
+	 * Additionally, all write accesses to &ap->park_req_pending
+	 * through INIT_COMPLETION() (see below) or complete_all()
+	 * (see ata_scsi_park_store()) are protected by the host lock.
+	 * As a result we have that park_req_pending.done is zero on
+	 * exit from this function, i.e. when ATA_EH_PARK actions for
+	 * *all* devices on port ap have been pulled into the
+	 * respective eh_context structs. If, and only if,
+	 * park_req_pending.done is non-zero by the time we reach
+	 * wait_for_completion_timeout(), another ATA_EH_PARK action
+	 * has been scheduled for at least one of the devices on port
+	 * ap and we have to cycle over the do {} while () loop in
+	 * ata_eh_recover() again.
+	 */
+
+	spin_lock_irqsave(ap->lock, flags);
+	INIT_COMPLETION(ap->park_req_pending);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			link->eh_context.i.dev_action[dev->devno] |=
+					ehi->dev_action[dev->devno] & ATA_EH_PARK;
+			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+		}
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+	if (park) {
+		ehc->unloaded_mask |= 1 << dev->devno;
+		tf.command = ATA_CMD_IDLEIMMEDIATE;
+		tf.feature = 0x44;
+		tf.lbal = 0x4c;
+		tf.lbam = 0x4e;
+		tf.lbah = 0x55;
+	} else {
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+		tf.command = ATA_CMD_CHK_POWER;
+	}
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (park && (err_mask || tf.lbal != 0xc4)) {
+		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+	}
+}
+
 static int ata_eh_revalidate_and_attach(struct ata_link *link,
 					struct ata_device **r_failed_dev)
 {
@@ -2845,7 +2919,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	struct ata_device *dev;
 	int nr_failed_devs;
 	int rc;
-	unsigned long flags;
+	unsigned long flags, deadline;
 
 	DPRINTK("ENTER\n");
 
@@ -2919,6 +2993,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		}
 	}
 
+	do {
+		unsigned long now;
+
+		/*
+		 * clears ATA_EH_PARK in eh_info and resets
+		 * ap->park_req_pending
+		 */
+		ata_eh_pull_park_action(ap);
+
+		deadline = jiffies;
+		ata_port_for_each_link(link, ap) {
+			ata_link_for_each_dev(dev, link) {
+				struct ata_eh_context *ehc = &link->eh_context;
+				unsigned long tmp;
+
+				if (dev->class != ATA_DEV_ATA)
+					continue;
+				if (!(ehc->i.dev_action[dev->devno] &
+				      ATA_EH_PARK))
+					continue;
+				tmp = dev->unpark_deadline;
+				if (time_before(deadline, tmp))
+					deadline = tmp;
+				else if (time_before_eq(tmp, jiffies))
+					continue;
+				if (ehc->unloaded_mask & (1 << dev->devno))
+					continue;
+
+				ata_eh_park_issue_cmd(dev, 1);
+			}
+		}
+
+		now = jiffies;
+		if (time_before_eq(deadline, now))
+			break;
+
+		deadline = wait_for_completion_timeout(&ap->park_req_pending,
+						       deadline - now);
+	} while (deadline);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (!(link->eh_context.unloaded_mask &
+			      (1 << dev->devno)))
+				continue;
+
+			ata_eh_park_issue_cmd(dev, 0);
+			ata_eh_done(link, dev, ATA_EH_PARK);
+		}
+	}
+
 	/* the rest */
 	ata_port_for_each_link(link, ap) {
 		struct ata_eh_context *ehc = &link->eh_context;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9d3ba423cb2..fccd5e496c62 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -183,6 +183,105 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
 		    ata_scsi_lpm_show, ata_scsi_lpm_put);
 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 
+static ssize_t ata_scsi_park_show(struct device *device,
+				  struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+	unsigned int uninitialized_var(msecs);
+	int rc = 0;
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irqsave(ap->lock, flags);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	link = dev->link;
+	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
+	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
+	    time_after(dev->unpark_deadline, jiffies))
+		msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies);
+	else
+		msecs = 0;
+
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+}
+
+static ssize_t ata_scsi_park_store(struct device *device,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	long int input;
+	unsigned long flags;
+	int rc;
+
+	rc = strict_strtol(buf, 10, &input);
+	if (rc || input < -2)
+		return -EINVAL;
+	if (input > ATA_TMOUT_MAX_PARK) {
+		rc = -EOVERFLOW;
+		input = ATA_TMOUT_MAX_PARK;
+	}
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irqsave(ap->lock, flags);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (unlikely(!dev)) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+	if (dev->class != ATA_DEV_ATA) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	if (input >= 0) {
+		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+			rc = -EOPNOTSUPP;
+			goto unlock;
+		}
+
+		dev->unpark_deadline = ata_deadline(jiffies, input);
+		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
+		ata_port_schedule_eh(ap);
+		complete(&ap->park_req_pending);
+	} else {
+		switch (input) {
+		case -1:
+			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
+			break;
+		case -2:
+			dev->flags |= ATA_DFLAG_NO_UNLOAD;
+			break;
+		}
+	}
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc ? rc : len;
+}
+DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
+	    ata_scsi_park_show, ata_scsi_park_store);
+EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+
 static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
@@ -269,6 +368,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
 		   ata_scsi_activity_store);
 EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
 
+struct device_attribute *ata_common_sdev_attrs[] = {
+	&dev_attr_unload_heads,
+	NULL
+};
+EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
+
 static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
 				   void (*done)(struct scsi_cmnd *))
 {
@@ -954,6 +1059,9 @@ static int atapi_drain_needed(struct request *rq)
 static int ata_scsi_dev_config(struct scsi_device *sdev,
 			       struct ata_device *dev)
 {
+	if (!ata_id_has_unload(dev->id))
+		dev->flags |= ATA_DFLAG_NO_UNLOAD;
+
 	/* configure max sectors */
 	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
 
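Beyond positive timeouts, ata_scsi_park_store() accepts two negative control values: writing -2 sets ATA_DFLAG_NO_UNLOAD (subsequent park requests are refused with -EOPNOTSUPP; ata_scsi_dev_config() sets the same flag automatically when the device does not advertise the unload feature), and -1 clears it again. A small sketch, again assuming the /sys/block/sdX/device/unload_heads path used in the example after the commit message:

/*
 * Sketch of the control values handled by ata_scsi_park_store():
 *   > 0  park the heads for that many milliseconds (values above
 *        ATA_TMOUT_MAX_PARK are clamped and the write returns -EOVERFLOW)
 *     0  end an active park and resume normal operation right away
 *    -1  clear ATA_DFLAG_NO_UNLOAD, allowing head unloads again
 *    -2  set ATA_DFLAG_NO_UNLOAD, refusing future park requests
 * The sysfs path is an assumption carried over from the earlier example.
 */
#include <stdio.h>

static int set_unload_heads(const char *attr, long val)
{
	FILE *f = fopen(attr, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f) ? -1 : 0;	/* a rejected write surfaces at fclose() */
}

int main(void)
{
	const char *attr = "/sys/block/sda/device/unload_heads";

	set_unload_heads(attr, -2);	/* disable head unloads for this disk */
	set_unload_heads(attr, -1);	/* ... and permit them again */
	return 0;
}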