author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-15 18:16:07 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-15 18:16:07 -0400
commit		2502991560dc8244dbe10e48473d85722c1e2ec1 (patch)
tree		63b1f3be2ed56ff06f1e8db709e4ce85d69c3add /drivers/ata/libata-eh.c
parent		7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4 (diff)
parent		a9ff8f6462635c8d9f8d64b7b10ddcea8404d77b (diff)
Merge branch 'fixes' into for-linus
Conflicts:
arch/arm/mach-versatile/core.c
Diffstat (limited to 'drivers/ata/libata-eh.c')
-rw-r--r--	drivers/ata/libata-eh.c	375
1 files changed, 342 insertions, 33 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2e..a93247cc395a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -79,6 +80,8 @@ enum {
 	 */
 	ATA_EH_PRERESET_TIMEOUT		= 10000,
 	ATA_EH_FASTDRAIN_INTERVAL	= 3000,
+
+	ATA_EH_UA_TRIES			= 5,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -457,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
  * RETURNS:
  *	EH_HANDLED or EH_NOT_HANDLED
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct ata_port *ap = ata_shost_to_port(host);
 	unsigned long flags;
 	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret;
+	enum blk_eh_timer_return ret;
 
 	DPRINTK("ENTER\n");
 
 	if (ap->ops->error_handler) {
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 		goto out;
 	}
 
-	ret = EH_HANDLED;
+	ret = BLK_EH_HANDLED;
 	spin_lock_irqsave(ap->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 
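The hunk above is part of the 2.6.28 move of command timeout handling from the SCSI midlayer's private return codes to the block layer's: the timed-out callback now answers with enum blk_eh_timer_return, and EH scheduling further down goes through blk_abort_request(). As a rough orientation sketch, not part of this patch and with made-up function and queue names (only blk_queue_rq_timed_out() and the BLK_EH_* values are the real interface of that era), a block driver wires the same mechanism up like this:

#include <linux/blkdev.h>

/* Hypothetical timeout handler: decides what happens to a request
 * whose timer fired before the driver completed it.
 */
static enum blk_eh_timer_return example_rq_timed_out(struct request *rq)
{
	/*
	 * BLK_EH_HANDLED: the driver finished the request itself.
	 * BLK_EH_RESET_TIMER: re-arm the timer and keep waiting.
	 * BLK_EH_NOT_HANDLED: hand the request to the error handler,
	 * which is what ata_scsi_timed_out() returns when it lets
	 * libata EH take over.
	 */
	return BLK_EH_NOT_HANDLED;
}

/* Hypothetical queue setup: register the handler with the queue. */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_rq_timed_out(q, example_rq_timed_out);
}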
@@ -831,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
-	scsi_req_abort_cmd(qc->scsicmd);
+	blk_abort_request(qc->scsicmd->request);
 }
 
 /**
@@ -1357,6 +1360,37 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 }
 
 /**
+ *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ *	@dev: target ATAPI device
+ *	@r_sense_key: out parameter for sense_key
+ *
+ *	Perform ATAPI TEST_UNIT_READY.
+ *
+ *	LOCKING:
+ *	EH context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+	if (err_mask == AC_ERR_DEV)
+		*r_sense_key = tf.feature >> 4;
+	return err_mask;
+}
+
+/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
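atapi_eh_tur() above wraps a SCSI TEST UNIT READY CDB inside an ATA PACKET command and, when the device itself reports an error (AC_ERR_DEV), recovers the sense key from the upper nibble of the returned Feature/Error register. A minimal caller-side sketch, not part of the patch (the helper name is hypothetical; UNIT_ATTENTION is the standard SCSI sense key from <scsi/scsi.h>), showing how the pair of outputs is typically interpreted:

/* Hypothetical helper: classify the device state from one TUR probe. */
static int example_check_ready(struct ata_device *dev)
{
	u8 sense_key = 0;
	unsigned int err_mask;

	err_mask = atapi_eh_tur(dev, &sense_key);
	if (err_mask && err_mask != AC_ERR_DEV)
		return -EIO;		/* the command itself failed */
	if (err_mask && sense_key == UNIT_ATTENTION)
		return -EAGAIN;		/* clear UA first, then retry */
	return 0;			/* device is ready */
}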
@@ -1756,7 +1790,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
 static unsigned int ata_eh_speed_down(struct ata_device *dev,
 				unsigned int eflags, unsigned int err_mask)
 {
-	struct ata_link *link = dev->link;
+	struct ata_link *link = ata_dev_phys_link(dev);
 	int xfer_ok = 0;
 	unsigned int verdict;
 	unsigned int action = 0;
@@ -1880,7 +1914,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link)
 			continue;
 
 		/* inherit upper level err_mask */
@@ -1967,6 +2002,23 @@ void ata_eh_autopsy(struct ata_port *ap)
 	ata_port_for_each_link(link, ap)
 		ata_eh_link_autopsy(link);
 
+	/* Handle the frigging slave link.  Autopsy is done similarly
+	 * but actions and flags are transferred over to the master
+	 * link and handled from there.
+	 */
+	if (ap->slave_link) {
+		struct ata_eh_context *mehc = &ap->link.eh_context;
+		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+		ata_eh_link_autopsy(ap->slave_link);
+
+		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+		mehc->i.action |= sehc->i.action;
+		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
+		mehc->i.flags |= sehc->i.flags;
+		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+	}
+
 	/* Autopsy of fanout ports can affect host link autopsy.
 	 * Perform host link autopsy last.
 	 */
@@ -2001,7 +2053,8 @@ static void ata_eh_link_report(struct ata_link *link)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link ||
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link ||
 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
 		      qc->err_mask == AC_ERR_DEV))
 			continue;
@@ -2068,7 +2121,7 @@ static void ata_eh_link_report(struct ata_link *link)
 		char cdb_buf[70] = "";
 
 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
-		    qc->dev->link != link || !qc->err_mask)
+		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
 			continue;
 
 		if (qc->dma_dir != DMA_NONE) {
@@ -2160,12 +2213,14 @@ void ata_eh_report(struct ata_port *ap)
 }
 
 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
-			unsigned int *classes, unsigned long deadline)
+			unsigned int *classes, unsigned long deadline,
+			bool clear_classes)
 {
 	struct ata_device *dev;
 
-	ata_link_for_each_dev(dev, link)
-		classes[dev->devno] = ATA_DEV_UNKNOWN;
+	if (clear_classes)
+		ata_link_for_each_dev(dev, link)
+			classes[dev->devno] = ATA_DEV_UNKNOWN;
 
 	return reset(link, classes, deadline);
 }
@@ -2187,17 +2242,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
 {
 	struct ata_port *ap = link->ap;
+	struct ata_link *slave = ap->slave_link;
 	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_eh_context *sehc = &slave->eh_context;
 	unsigned int *classes = ehc->classes;
 	unsigned int lflags = link->flags;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
 	int max_tries = 0, try = 0;
+	struct ata_link *failed_link;
 	struct ata_device *dev;
 	unsigned long deadline, now;
 	ata_reset_fn_t reset;
 	unsigned long flags;
 	u32 sstatus;
-	int nr_known, rc;
+	int nr_unknown, rc;
 
 	/*
 	 * Prepare to reset
@@ -2252,8 +2310,30 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	}
 
 	if (prereset) {
-		rc = prereset(link,
-			      ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT));
+		unsigned long deadline = ata_deadline(jiffies,
+						      ATA_EH_PRERESET_TIMEOUT);
+
+		if (slave) {
+			sehc->i.action &= ~ATA_EH_RESET;
+			sehc->i.action |= ehc->i.action;
+		}
+
+		rc = prereset(link, deadline);
+
+		/* If present, do prereset on slave link too.  Reset
+		 * is skipped iff both master and slave links report
+		 * -ENOENT or clear ATA_EH_RESET.
+		 */
+		if (slave && (rc == 0 || rc == -ENOENT)) {
+			int tmp;
+
+			tmp = prereset(slave, deadline);
+			if (tmp != -ENOENT)
+				rc = tmp;
+
+			ehc->i.action |= sehc->i.action;
+		}
+
 		if (rc) {
 			if (rc == -ENOENT) {
 				ata_link_printk(link, KERN_DEBUG,
@@ -2302,25 +2382,51 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		else
 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
-		rc = ata_do_reset(link, reset, classes, deadline);
-		if (rc && rc != -EAGAIN)
+		rc = ata_do_reset(link, reset, classes, deadline, true);
+		if (rc && rc != -EAGAIN) {
+			failed_link = link;
 			goto fail;
+		}
+
+		/* hardreset slave link if existent */
+		if (slave && reset == hardreset) {
+			int tmp;
+
+			if (verbose)
+				ata_link_printk(slave, KERN_INFO,
+						"hard resetting link\n");
 
+			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+			tmp = ata_do_reset(slave, reset, classes, deadline,
+					   false);
+			switch (tmp) {
+			case -EAGAIN:
+				rc = -EAGAIN;
+			case 0:
+				break;
+			default:
+				failed_link = slave;
+				rc = tmp;
+				goto fail;
+			}
+		}
+
+		/* perform follow-up SRST if necessary */
 		if (reset == hardreset &&
 		    ata_eh_followup_srst_needed(link, rc, classes)) {
-			/* okay, let's do follow-up softreset */
 			reset = softreset;
 
 			if (!reset) {
 				ata_link_printk(link, KERN_ERR,
 						"follow-up softreset required "
 						"but no softreset avaliable\n");
+				failed_link = link;
 				rc = -EINVAL;
 				goto fail;
 			}
 
 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-			rc = ata_do_reset(link, reset, classes, deadline);
+			rc = ata_do_reset(link, reset, classes, deadline, true);
 		}
 	} else {
 		if (verbose)
@@ -2341,7 +2447,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		dev->pio_mode = XFER_PIO_0;
 		dev->flags &= ~ATA_DFLAG_SLEEPING;
 
-		if (ata_link_offline(link))
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
 			continue;
 
 		/* apply class override */
@@ -2354,6 +2460,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	/* record current link speed */
 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 		link->sata_spd = (sstatus >> 4) & 0xf;
+	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
+		slave->sata_spd = (sstatus >> 4) & 0xf;
 
 	/* thaw the port */
 	if (ata_is_host_link(link))
@@ -2366,12 +2474,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	 * reset and here.  This race is mediated by cross checking
 	 * link onlineness and classification result later.
 	 */
-	if (postreset)
+	if (postreset) {
 		postreset(link, classes);
+		if (slave)
+			postreset(slave, classes);
+	}
 
 	/* clear cached SError */
 	spin_lock_irqsave(link->ap->lock, flags);
 	link->eh_info.serror = 0;
+	if (slave)
+		slave->eh_info.serror = 0;
 	spin_unlock_irqrestore(link->ap->lock, flags);
 
 	/* Make sure onlineness and classification result correspond.
@@ -2381,19 +2494,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	 * link onlineness and classification result, those conditions
 	 * can be reliably detected and retried.
 	 */
-	nr_known = 0;
+	nr_unknown = 0;
 	ata_link_for_each_dev(dev, link) {
 		/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
-		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+		if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
 			classes[dev->devno] = ATA_DEV_NONE;
-		else
-			nr_known++;
+			if (ata_phys_link_online(ata_dev_phys_link(dev)))
+				nr_unknown++;
+		}
 	}
 
-	if (classify && !nr_known && ata_link_online(link)) {
+	if (classify && nr_unknown) {
 		if (try < max_tries) {
 			ata_link_printk(link, KERN_WARNING, "link online but "
 					"device misclassified, retrying\n");
+			failed_link = link;
 			rc = -EAGAIN;
 			goto fail;
 		}
@@ -2404,6 +2519,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
 	/* reset successful, schedule revalidation */
 	ata_eh_done(link, NULL, ATA_EH_RESET);
+	if (slave)
+		ata_eh_done(slave, NULL, ATA_EH_RESET);
 	ehc->last_reset = jiffies;
 	ehc->i.action |= ATA_EH_REVALIDATE;
 
@@ -2411,6 +2528,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
  out:
 	/* clear hotplug flag */
 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+	if (slave)
+		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
 
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags &= ~ATA_PFLAG_RESETTING;
@@ -2431,7 +2550,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	if (time_before(now, deadline)) {
 		unsigned long delta = deadline - now;
 
-		ata_link_printk(link, KERN_WARNING,
+		ata_link_printk(failed_link, KERN_WARNING,
 			"reset failed (errno=%d), retrying in %u secs\n",
 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
 
@@ -2439,13 +2558,92 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			delta = schedule_timeout_uninterruptible(delta);
 		}
 
-		if (rc == -EPIPE || try == max_tries - 1)
+		if (try == max_tries - 1) {
 			sata_down_spd_limit(link);
+			if (slave)
+				sata_down_spd_limit(slave);
+		} else if (rc == -EPIPE)
+			sata_down_spd_limit(failed_link);
+
 		if (hardreset)
 			reset = hardreset;
 		goto retry;
 	}
 
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/*
+	 * This function can be thought of as an extended version of
+	 * ata_eh_about_to_do() specially crafted to accommodate the
+	 * requirements of ATA_EH_PARK handling. Since the EH thread
+	 * does not leave the do {} while () loop in ata_eh_recover as
+	 * long as the timeout for a park request to *one* device on
+	 * the port has not expired, and since we still want to pick
+	 * up park requests to other devices on the same port or
+	 * timeout updates for the same device, we have to pull
+	 * ATA_EH_PARK actions from eh_info into eh_context.i
+	 * ourselves at the beginning of each pass over the loop.
+	 *
+	 * Additionally, all write accesses to &ap->park_req_pending
+	 * through INIT_COMPLETION() (see below) or complete_all()
+	 * (see ata_scsi_park_store()) are protected by the host lock.
+	 * As a result we have that park_req_pending.done is zero on
+	 * exit from this function, i.e. when ATA_EH_PARK actions for
+	 * *all* devices on port ap have been pulled into the
+	 * respective eh_context structs. If, and only if,
+	 * park_req_pending.done is non-zero by the time we reach
+	 * wait_for_completion_timeout(), another ATA_EH_PARK action
+	 * has been scheduled for at least one of the devices on port
+	 * ap and we have to cycle over the do {} while () loop in
+	 * ata_eh_recover() again.
+	 */
+
+	spin_lock_irqsave(ap->lock, flags);
+	INIT_COMPLETION(ap->park_req_pending);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			link->eh_context.i.dev_action[dev->devno] |=
+				ehi->dev_action[dev->devno] & ATA_EH_PARK;
+			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+		}
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+	if (park) {
+		ehc->unloaded_mask |= 1 << dev->devno;
+		tf.command = ATA_CMD_IDLEIMMEDIATE;
+		tf.feature = 0x44;
+		tf.lbal = 0x4c;
+		tf.lbam = 0x4e;
+		tf.lbah = 0x55;
+	} else {
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+		tf.command = ATA_CMD_CHK_POWER;
+	}
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (park && (err_mask || tf.lbal != 0xc4)) {
+		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+	}
+}
+
 static int ata_eh_revalidate_and_attach(struct ata_link *link,
 				struct ata_device **r_failed_dev)
 {
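The magic numbers in ata_eh_park_issue_cmd() come from the ATA IDLE IMMEDIATE command with the unload feature: FEATURES is 0x44, the LBA bytes spell "UNL", and a drive that actually unloaded its heads echoes 0xc4 in LBA low, which is what the err_mask/tf.lbal check above verifies; CHECK POWER MODE is then issued as a harmless no-op to mark the device unparked. A reference sketch of those values, not part of the patch (the constant names are invented; the values mirror the hunk above and a reading of the ATA8-ACS unload description):

/* Hypothetical named constants for the head-unload taskfile. */
enum {
	EXAMPLE_UNLOAD_FEATURE		= 0x44,	/* FEATURES for IDLE IMMEDIATE */
	EXAMPLE_UNLOAD_LBA_LOW		= 0x4c,	/* 'L' */
	EXAMPLE_UNLOAD_LBA_MID		= 0x4e,	/* 'N' */
	EXAMPLE_UNLOAD_LBA_HIGH		= 0x55,	/* 'U' */
	EXAMPLE_UNLOAD_CONFIRM_LBA_LOW	= 0xc4,	/* echoed back on success */
};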
@@ -2472,7 +2670,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
 			WARN_ON(dev->class == ATA_DEV_PMP);
 
-			if (ata_link_offline(link)) {
+			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
 				rc = -EIO;
 				goto err;
 			}
@@ -2610,6 +2808,53 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 	return rc;
 }
 
+/**
+ *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
+ *	@dev: ATAPI device to clear UA for
+ *
+ *	Resets and other operations can make an ATAPI device raise
+ *	UNIT ATTENTION which causes the next operation to fail.  This
+ *	function clears UA.
+ *
+ *	LOCKING:
+ *	EH context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+	int i;
+
+	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+		u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+		u8 sense_key = 0;
+		unsigned int err_mask;
+
+		err_mask = atapi_eh_tur(dev, &sense_key);
+		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
+				"failed (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
+
+		if (!err_mask || sense_key != UNIT_ATTENTION)
+			return 0;
+
+		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+		if (err_mask) {
+			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
+	}
+
+	ata_dev_printk(dev, KERN_WARNING,
+		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+
+	return 0;
+}
+
 static int ata_link_nr_enabled(struct ata_link *link)
 {
 	struct ata_device *dev;
@@ -2697,7 +2942,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
 			/* This is the last chance, better to slow
 			 * down than lose it.
 			 */
-			sata_down_spd_limit(dev->link);
+			sata_down_spd_limit(ata_dev_phys_link(dev));
 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
 		}
 	}
@@ -2707,7 +2952,7 @@
 		ata_dev_disable(dev);
 
 		/* detach if offline */
-		if (ata_link_offline(dev->link))
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
 			ata_eh_detach_dev(dev);
 
 		/* schedule probe if necessary */
@@ -2755,7 +3000,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	struct ata_device *dev;
 	int nr_failed_devs;
 	int rc;
-	unsigned long flags;
+	unsigned long flags, deadline;
 
 	DPRINTK("ENTER\n");
 
@@ -2829,6 +3074,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		}
 	}
 
+	do {
+		unsigned long now;
+
+		/*
+		 * clears ATA_EH_PARK in eh_info and resets
+		 * ap->park_req_pending
+		 */
+		ata_eh_pull_park_action(ap);
+
+		deadline = jiffies;
+		ata_port_for_each_link(link, ap) {
+			ata_link_for_each_dev(dev, link) {
+				struct ata_eh_context *ehc = &link->eh_context;
+				unsigned long tmp;
+
+				if (dev->class != ATA_DEV_ATA)
+					continue;
+				if (!(ehc->i.dev_action[dev->devno] &
+				      ATA_EH_PARK))
+					continue;
+				tmp = dev->unpark_deadline;
+				if (time_before(deadline, tmp))
+					deadline = tmp;
+				else if (time_before_eq(tmp, jiffies))
+					continue;
+				if (ehc->unloaded_mask & (1 << dev->devno))
+					continue;
+
+				ata_eh_park_issue_cmd(dev, 1);
+			}
+		}
+
+		now = jiffies;
+		if (time_before_eq(deadline, now))
+			break;
+
+		deadline = wait_for_completion_timeout(&ap->park_req_pending,
+						       deadline - now);
+	} while (deadline);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (!(link->eh_context.unloaded_mask &
+			      (1 << dev->devno)))
+				continue;
+
+			ata_eh_park_issue_cmd(dev, 0);
+			ata_eh_done(link, dev, ATA_EH_PARK);
+		}
+	}
+
 	/* the rest */
 	ata_port_for_each_link(link, ap) {
 		struct ata_eh_context *ehc = &link->eh_context;
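The do/while loop added above keeps EH cycling only while a park timeout is still pending: wait_for_completion_timeout() returns 0 once the longest unpark deadline has passed, and a nonzero remaining-jiffies count when ata_scsi_park_store() signals a new request via complete_all(), so the while (deadline) test exits exactly when there is nothing left to wait for. A small sketch of that return convention, not part of the patch (the function name is hypothetical):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Returns true if woken by complete()/complete_all() before the
 * deadline, false if the timeout expired first.
 */
static bool example_woken_before_deadline(struct completion *done,
					  unsigned long deadline)
{
	unsigned long left = wait_for_completion_timeout(done,
							 deadline - jiffies);
	return left != 0;
}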
@@ -2852,6 +3147,20 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 			ehc->i.flags &= ~ATA_EHI_SETMODE;
 		}
 
+		/* If reset has been issued, clear UA to avoid
+		 * disrupting the current users of the device.
+		 */
+		if (ehc->i.flags & ATA_EHI_DID_RESET) {
+			ata_link_for_each_dev(dev, link) {
+				if (dev->class != ATA_DEV_ATAPI)
+					continue;
+				rc = atapi_eh_clear_ua(dev);
+				if (rc)
+					goto dev_fail;
+			}
+		}
+
+		/* configure link power saving */
 		if (ehc->i.action & ATA_EH_LPM)
 			ata_link_for_each_dev(dev, link)
 				ata_dev_enable_pm(dev, ap->pm_policy);