aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata/libata-eh.c
diff options
context:
space:
mode:
authorElias Oltmanns <eo@nebensachen.de>2008-09-21 05:54:08 -0400
committerJeff Garzik <jgarzik@redhat.com>2008-09-29 00:27:54 -0400
commit45fabbb77bd95adff7a80bde1c7a0ace1075fde6 (patch)
tree0e98efc190b25a11f84b8ae7d1ee0a17c41d3da8 /drivers/ata/libata-eh.c
parentea6ce53cd5d005455ec0a3cc1d45d3af0cb90919 (diff)
libata: Implement disk shock protection support
On user request (through sysfs), the IDLE IMMEDIATE command with UNLOAD FEATURE as specified in ATA-7 is issued to the device and processing of the request queue is stopped thereafter until the specified timeout expires or user space asks to resume normal operation. This is supposed to prevent the heads of a hard drive from accidentally crashing onto the platter when a heavy shock is anticipated (like a falling laptop expected to hit the floor). In fact, the whole port stops processing commands until the timeout has expired in order to avoid any resets due to failed commands on another device. Signed-off-by: Elias Oltmanns <eo@nebensachen.de> Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/libata-eh.c')
-rw-r--r--drivers/ata/libata-eh.c126
1 file changed, 125 insertions(+), 1 deletion(-)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 99037a4860d9..33ac5ea4f531 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2536,6 +2536,80 @@ int ata_eh_reset(struct ata_link *link, int classify,
2536 goto retry; 2536 goto retry;
2537} 2537}
2538 2538
2539static inline void ata_eh_pull_park_action(struct ata_port *ap)
2540{
2541 struct ata_link *link;
2542 struct ata_device *dev;
2543 unsigned long flags;
2544
2545 /*
2546 * This function can be thought of as an extended version of
2547 * ata_eh_about_to_do() specially crafted to accommodate the
2548 * requirements of ATA_EH_PARK handling. Since the EH thread
2549 * does not leave the do {} while () loop in ata_eh_recover as
2550 * long as the timeout for a park request to *one* device on
2551 * the port has not expired, and since we still want to pick
2552 * up park requests to other devices on the same port or
2553 * timeout updates for the same device, we have to pull
2554 * ATA_EH_PARK actions from eh_info into eh_context.i
2555 * ourselves at the beginning of each pass over the loop.
2556 *
2557 * Additionally, all write accesses to &ap->park_req_pending
2558 * through INIT_COMPLETION() (see below) or complete_all()
2559 * (see ata_scsi_park_store()) are protected by the host lock.
2560 * As a result we have that park_req_pending.done is zero on
2561 * exit from this function, i.e. when ATA_EH_PARK actions for
2562 * *all* devices on port ap have been pulled into the
2563 * respective eh_context structs. If, and only if,
2564 * park_req_pending.done is non-zero by the time we reach
2565 * wait_for_completion_timeout(), another ATA_EH_PARK action
2566 * has been scheduled for at least one of the devices on port
2567 * ap and we have to cycle over the do {} while () loop in
2568 * ata_eh_recover() again.
2569 */
2570
2571 spin_lock_irqsave(ap->lock, flags);
2572 INIT_COMPLETION(ap->park_req_pending);
2573 ata_port_for_each_link(link, ap) {
2574 ata_link_for_each_dev(dev, link) {
2575 struct ata_eh_info *ehi = &link->eh_info;
2576
2577 link->eh_context.i.dev_action[dev->devno] |=
2578 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2579 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2580 }
2581 }
2582 spin_unlock_irqrestore(ap->lock, flags);
2583}
2584
2585static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2586{
2587 struct ata_eh_context *ehc = &dev->link->eh_context;
2588 struct ata_taskfile tf;
2589 unsigned int err_mask;
2590
2591 ata_tf_init(dev, &tf);
2592 if (park) {
2593 ehc->unloaded_mask |= 1 << dev->devno;
2594 tf.command = ATA_CMD_IDLEIMMEDIATE;
2595 tf.feature = 0x44;
2596 tf.lbal = 0x4c;
2597 tf.lbam = 0x4e;
2598 tf.lbah = 0x55;
2599 } else {
2600 ehc->unloaded_mask &= ~(1 << dev->devno);
2601 tf.command = ATA_CMD_CHK_POWER;
2602 }
2603
2604 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2605 tf.protocol |= ATA_PROT_NODATA;
2606 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2607 if (park && (err_mask || tf.lbal != 0xc4)) {
2608 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2609 ehc->unloaded_mask &= ~(1 << dev->devno);
2610 }
2611}
2612
2539static int ata_eh_revalidate_and_attach(struct ata_link *link, 2613static int ata_eh_revalidate_and_attach(struct ata_link *link,
2540 struct ata_device **r_failed_dev) 2614 struct ata_device **r_failed_dev)
2541{ 2615{
@@ -2845,7 +2919,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2845 struct ata_device *dev; 2919 struct ata_device *dev;
2846 int nr_failed_devs; 2920 int nr_failed_devs;
2847 int rc; 2921 int rc;
2848 unsigned long flags; 2922 unsigned long flags, deadline;
2849 2923
2850 DPRINTK("ENTER\n"); 2924 DPRINTK("ENTER\n");
2851 2925
@@ -2919,6 +2993,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2919 } 2993 }
2920 } 2994 }
2921 2995
2996 do {
2997 unsigned long now;
2998
2999 /*
3000 * clears ATA_EH_PARK in eh_info and resets
3001 * ap->park_req_pending
3002 */
3003 ata_eh_pull_park_action(ap);
3004
3005 deadline = jiffies;
3006 ata_port_for_each_link(link, ap) {
3007 ata_link_for_each_dev(dev, link) {
3008 struct ata_eh_context *ehc = &link->eh_context;
3009 unsigned long tmp;
3010
3011 if (dev->class != ATA_DEV_ATA)
3012 continue;
3013 if (!(ehc->i.dev_action[dev->devno] &
3014 ATA_EH_PARK))
3015 continue;
3016 tmp = dev->unpark_deadline;
3017 if (time_before(deadline, tmp))
3018 deadline = tmp;
3019 else if (time_before_eq(tmp, jiffies))
3020 continue;
3021 if (ehc->unloaded_mask & (1 << dev->devno))
3022 continue;
3023
3024 ata_eh_park_issue_cmd(dev, 1);
3025 }
3026 }
3027
3028 now = jiffies;
3029 if (time_before_eq(deadline, now))
3030 break;
3031
3032 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3033 deadline - now);
3034 } while (deadline);
3035 ata_port_for_each_link(link, ap) {
3036 ata_link_for_each_dev(dev, link) {
3037 if (!(link->eh_context.unloaded_mask &
3038 (1 << dev->devno)))
3039 continue;
3040
3041 ata_eh_park_issue_cmd(dev, 0);
3042 ata_eh_done(link, dev, ATA_EH_PARK);
3043 }
3044 }
3045
2922 /* the rest */ 3046 /* the rest */
2923 ata_port_for_each_link(link, ap) { 3047 ata_port_for_each_link(link, ap) {
2924 struct ata_eh_context *ehc = &link->eh_context; 3048 struct ata_eh_context *ehc = &link->eh_context;