Diffstat (limited to 'drivers/ata/libata-eh.c')
 drivers/ata/libata-eh.c | 219
 1 file changed, 171 insertions(+), 48 deletions(-)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7894d83ea1eb..58bdc538d229 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -66,15 +66,19 @@ enum {
 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
 	ATA_ECAT_NR			= 8,
-};
 
-/* Waiting in ->prereset can never be reliable.  It's sometimes nice
- * to wait there but it can't be depended upon; otherwise, we wouldn't
- * be resetting.  Just give it enough time for most drives to spin up.
- */
-enum {
-	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
-	ATA_EH_FASTDRAIN_INTERVAL	= 3 * HZ,
+	ATA_EH_CMD_DFL_TIMEOUT		= 5000,
+
+	/* always put at least this amount of time between resets */
+	ATA_EH_RESET_COOL_DOWN		= 5000,
+
+	/* Waiting in ->prereset can never be reliable.  It's
+	 * sometimes nice to wait there but it can't be depended upon;
+	 * otherwise, we wouldn't be resetting.  Just give it enough
+	 * time for most drives to spin up.
+	 */
+	ATA_EH_PRERESET_TIMEOUT		= 10000,
+	ATA_EH_FASTDRAIN_INTERVAL	= 3000,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -84,12 +88,59 @@ enum {
  * are mostly for error handling, hotplug and retarded devices.
  */
 static const unsigned long ata_eh_reset_timeouts[] = {
-	10 * HZ,	/* most drives spin up by 10sec */
-	10 * HZ,	/* > 99% working drives spin up before 20sec */
-	35 * HZ,	/* give > 30 secs of idleness for retarded devices */
-	5 * HZ,		/* and sweet one last chance */
-	/* > 1 min has elapsed, give up */
+	10000,	/* most drives spin up by 10sec */
+	10000,	/* > 99% working drives spin up before 20sec */
+	35000,	/* give > 30 secs of idleness for retarded devices */
+	 5000,	/* and sweet one last chance */
+	ULONG_MAX, /* > 1 min has elapsed, give up */
+};
+
+static const unsigned long ata_eh_identify_timeouts[] = {
+	 5000,	/* covers > 99% of successes and not too boring on failures */
+	10000,	/* combined time till here is enough even for media access */
+	30000,	/* for true idiots */
+	ULONG_MAX,
+};
+
+static const unsigned long ata_eh_other_timeouts[] = {
+	 5000,	/* same rationale as identify timeout */
+	10000,	/* ditto */
+	/* but no merciful 30sec for other commands, it just isn't worth it */
+	ULONG_MAX,
+};
+
+struct ata_eh_cmd_timeout_ent {
+	const u8		*commands;
+	const unsigned long	*timeouts;
+};
+
+/* The following table determines timeouts to use for EH internal
+ * commands.  Each table entry is a command class and matches the
+ * commands the entry applies to and the timeout table to use.
+ *
+ * On the retry after a command timed out, the next timeout value from
+ * the table is used.  If the table doesn't contain further entries,
+ * the last value is used.
+ *
+ * ehc->cmd_timeout_idx keeps track of which timeout to use per
+ * command class, so if SET_FEATURES times out on the first try, the
+ * next try will use the second timeout value only for that class.
+ */
+#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
+static const struct ata_eh_cmd_timeout_ent
+ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
+	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
+	  .timeouts = ata_eh_identify_timeouts, },
+	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
+	  .timeouts = ata_eh_other_timeouts, },
 };
+#undef CMDS
 
 static void __ata_port_freeze(struct ata_port *ap);
 #ifdef CONFIG_PM
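
The CMDS() macro above packs each command list into a zero-terminated C99 compound literal, which is what lets the lookup helper added later in this diff walk ->commands until it reaches the 0 terminator. For illustration only, the first table entry expands to roughly:

	{ .commands = (const u8 []){ ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI, 0 },
	  .timeouts = ata_eh_identify_timeouts, },
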
@@ -236,6 +287,73 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
 
 #endif /* CONFIG_PCI */
 
+static int ata_lookup_timeout_table(u8 cmd)
+{
+	int i;
+
+	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
+		const u8 *cur;
+
+		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
+			if (*cur == cmd)
+				return i;
+	}
+
+	return -1;
+}
+
+/**
+ *	ata_internal_cmd_timeout - determine timeout for an internal command
+ *	@dev: target device
+ *	@cmd: internal command to be issued
+ *
+ *	Determine timeout for internal command @cmd for @dev.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	Determined timeout.
+ */
+unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return ATA_EH_CMD_DFL_TIMEOUT;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
+}
+
+/**
+ *	ata_internal_cmd_timed_out - notification for internal command timeout
+ *	@dev: target device
+ *	@cmd: internal command which timed out
+ *
+ *	Notify EH that internal command @cmd for @dev timed out.  This
+ *	function should be called only for commands whose timeouts are
+ *	determined using ata_internal_cmd_timeout().
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
+		ehc->cmd_timeout_idx[dev->devno][ent]++;
+}
+
 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
 			     unsigned int err_mask)
 {
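
ata_internal_cmd_timeout() and ata_internal_cmd_timed_out() depend on a per-device, per-command-class retry index and on ATA_EH_CMD_TIMEOUT_TABLE_SIZE, both of which come from the matching include/linux/libata.h change that is outside this diffstat. A sketch of what that header side is assumed to provide:

	/* include/linux/libata.h (not part of this diff) -- sketch only */
	enum {
		/* assumed to match the number of entries in ata_eh_cmd_timeout_table */
		ATA_EH_CMD_TIMEOUT_TABLE_SIZE	= 5,
	};

	struct ata_eh_context {
		/* ... existing fields ... */
		u8		cmd_timeout_idx[ATA_MAX_DEVICES]
					       [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
	};

The intended calling pattern is that the internal-command path (presumably ata_exec_internal() in libata-core.c) asks ata_internal_cmd_timeout() for the timeout before issuing, then reports back through ata_internal_cmd_timed_out() on timeout so the next retry of that command class moves on to the next, longer table entry.
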
@@ -486,6 +604,9 @@ void ata_scsi_error(struct Scsi_Host *host)
 			if (ata_ncq_enabled(dev))
 				ehc->saved_ncq_enabled |= 1 << devno;
 		}
+
+		/* set last reset timestamp to some time in the past */
+		ehc->last_reset = jiffies - 60 * HZ;
 	}
 
 	ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -641,7 +762,7 @@ void ata_eh_fastdrain_timerfn(unsigned long arg)
 		/* some qcs have finished, give it another chance */
 		ap->fastdrain_cnt = cnt;
 		ap->fastdrain_timer.expires =
-			jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
 		add_timer(&ap->fastdrain_timer);
 	}
 
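
ata_deadline() is the helper these hunks switch to now that the EH interval constants are plain milliseconds rather than jiffies. It is defined by the companion include/linux/libata.h change, not in this file; presumably something along the lines of:

	/* assumed definition, not part of this diff */
	static inline unsigned long ata_deadline(unsigned long from_jiffies,
						 unsigned long timeout_msecs)
	{
		return from_jiffies + msecs_to_jiffies(timeout_msecs);
	}
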
@@ -681,7 +802,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 
 	/* activate fast drain */
 	ap->fastdrain_cnt = cnt;
-	ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+	ap->fastdrain_timer.expires =
+		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
 	add_timer(&ap->fastdrain_timer);
 }
 
@@ -1238,6 +1360,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to
  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+ *	@dfl_sense_key: default sense key to use
  *
  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
  *	SENSE.  This function is EH helper.
@@ -1248,13 +1371,13 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
  *	RETURNS:
  *	0 on success, AC_ERR_* mask on failure
  */
-static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
+static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+					   u8 *sense_buf, u8 dfl_sense_key)
 {
-	struct ata_device *dev = qc->dev;
-	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
+	u8 cdb[ATAPI_CDB_LEN] =
+		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
 	struct ata_port *ap = dev->link->ap;
 	struct ata_taskfile tf;
-	u8 cdb[ATAPI_CDB_LEN];
 
 	DPRINTK("ATAPI request sense\n");
 
@@ -1265,15 +1388,11 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
 	 * for the case where they are -not- overwritten
 	 */
 	sense_buf[0] = 0x70;
-	sense_buf[2] = qc->result_tf.feature >> 4;
+	sense_buf[2] = dfl_sense_key;
 
 	/* some devices time out if garbage left in tf */
 	ata_tf_init(dev, &tf);
 
-	memset(cdb, 0, ATAPI_CDB_LEN);
-	cdb[0] = REQUEST_SENSE;
-	cdb[4] = SCSI_SENSE_BUFFERSIZE;
-
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 
@@ -1445,7 +1564,9 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 
 	case ATA_DEV_ATAPI:
 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-			tmp = atapi_eh_request_sense(qc);
+			tmp = atapi_eh_request_sense(qc->dev,
+						qc->scsicmd->sense_buffer,
+						qc->result_tf.feature >> 4);
 			if (!tmp) {
 				/* ATA_QCFLAG_SENSE_VALID is used to
 				 * tell atapi_qc_complete() that sense
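
Decoupling atapi_eh_request_sense() from struct ata_queued_cmd means EH can now fetch sense data for a device without a live qc. A hypothetical caller (illustration only, not from this patch) would look like:

	u8 sense_buf[SCSI_SENSE_BUFFERSIZE];
	unsigned int err_mask;

	/* ABORTED_COMMAND is used here purely as an example default sense key */
	err_mask = atapi_eh_request_sense(dev, sense_buf, ABORTED_COMMAND);
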
@@ -2071,13 +2192,12 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		   ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
 		   ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
 {
-	const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts);
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
 	unsigned int *classes = ehc->classes;
 	unsigned int lflags = link->flags;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
-	int try = 0;
+	int max_tries = 0, try = 0;
 	struct ata_device *dev;
 	unsigned long deadline, now;
 	ata_reset_fn_t reset;
@@ -2088,11 +2208,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	/*
 	 * Prepare to reset
 	 */
+	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
+		max_tries++;
+
+	now = jiffies;
+	deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
+	if (time_before(now, deadline))
+		schedule_timeout_uninterruptible(deadline - now);
+
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags |= ATA_PFLAG_RESETTING;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+	ehc->last_reset = jiffies;
 
 	ata_link_for_each_dev(dev, link) {
 		/* If we issue an SRST then an ATA drive (not ATAPI)
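
The cool-down block added above throttles back-to-back resets: ehc->last_reset is stamped when a reset is about to run, and the next reset sleeps out whatever remains of the ATA_EH_RESET_COOL_DOWN window. A worked example of why the first reset after EH entry is never delayed, given the last_reset initialization earlier in this diff:

	/* Illustration only.  On EH entry, ata_scsi_error() sets
	 *	ehc->last_reset = jiffies - 60 * HZ;
	 * so with ATA_EH_RESET_COOL_DOWN == 5000 (ms):
	 *	deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN)
	 *		 ~= jiffies - 55 * HZ
	 * which is already in the past; time_before(now, deadline) is false
	 * and no sleep happens.  Only a reset issued within 5 seconds of the
	 * previous one hits the cool-down sleep.
	 */
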
@@ -2125,7 +2254,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	}
 
 	if (prereset) {
-		rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
+		rc = prereset(link,
+			      ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT));
 		if (rc) {
 			if (rc == -ENOENT) {
 				ata_link_printk(link, KERN_DEBUG,
@@ -2157,10 +2287,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	/*
 	 * Perform reset
 	 */
+	ehc->last_reset = jiffies;
 	if (ata_is_host_link(link))
 		ata_eh_freeze_port(ap);
 
-	deadline = jiffies + ata_eh_reset_timeouts[try++];
+	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
 
 	if (reset) {
 		if (verbose)
@@ -2277,6 +2408,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
 	/* reset successful, schedule revalidation */
 	ata_eh_done(link, NULL, ATA_EH_RESET);
+	ehc->last_reset = jiffies;
 	ehc->i.action |= ATA_EH_REVALIDATE;
 
 	rc = 0;
@@ -2303,9 +2435,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		if (time_before(now, deadline)) {
 			unsigned long delta = deadline - now;
 
-			ata_link_printk(link, KERN_WARNING, "reset failed "
-				"(errno=%d), retrying in %u secs\n",
-				rc, (jiffies_to_msecs(delta) + 999) / 1000);
+			ata_link_printk(link, KERN_WARNING,
+				"reset failed (errno=%d), retrying in %u secs\n",
+				rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
 
 			while (delta)
 				delta = schedule_timeout_uninterruptible(delta);
@@ -2583,8 +2715,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
 		ata_eh_detach_dev(dev);
 
 		/* schedule probe if necessary */
-		if (ata_eh_schedule_probe(dev))
+		if (ata_eh_schedule_probe(dev)) {
 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+			memset(ehc->cmd_timeout_idx[dev->devno], 0,
+			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
+		}
 
 		return 1;
 	} else {
@@ -2622,7 +2757,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 {
 	struct ata_link *link;
 	struct ata_device *dev;
-	int nr_failed_devs, nr_disabled_devs;
+	int nr_failed_devs;
 	int rc;
 	unsigned long flags;
 
@@ -2665,7 +2800,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
  retry:
 	rc = 0;
 	nr_failed_devs = 0;
-	nr_disabled_devs = 0;
 
 	/* if UNLOADING, finish immediately */
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
@@ -2732,8 +2866,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 
  dev_fail:
 		nr_failed_devs++;
-		if (ata_eh_handle_dev_fail(dev, rc))
-			nr_disabled_devs++;
+		ata_eh_handle_dev_fail(dev, rc);
 
 		if (ap->pflags & ATA_PFLAG_FROZEN) {
 			/* PMP reset requires working host port.
@@ -2745,18 +2878,8 @@ dev_fail:
 		}
 	}
 
-	if (nr_failed_devs) {
-		if (nr_failed_devs != nr_disabled_devs) {
-			ata_port_printk(ap, KERN_WARNING, "failed to recover "
-				"some devices, retrying in 5 secs\n");
-			ssleep(5);
-		} else {
-			/* no device left to recover, repeat fast */
-			msleep(500);
-		}
-
+	if (nr_failed_devs)
 		goto retry;
-	}
 
  out:
 	if (rc && r_failed_link)