author     Linus Torvalds <torvalds@linux-foundation.org>   2008-07-15 14:18:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-07-15 14:18:10 -0400
commit     b312bf359e20cc39c00d480fd40a24c245d80bf7 (patch)
tree       756247f113688403cf35d1d7437fc92d390279d2 /drivers/ata
parent     dc221eae08eea3b0db127d1f152fac24d10b6a52 (diff)
parent     2640d7c0b8d5d9d9ee303b8cd09f5124176f6239 (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  AHCI: Remove an unnecessary flush from ahci_qc_issue
  AHCI: speed up resume
  [libata] Add support for VPD page b1
  ata: endianness annotations in pata drivers
  libata-eh: update atapi_eh_request_sense() to take @dev instead of @qc
  [libata] sata_svw: update code comments relating to data corruption
  libata/ahci: enclosure management support
  libata: improve EH internal command timeout handling
  libata: use ULONG_MAX to terminate reset timeout table
  libata: improve EH retry delay handling
  libata: consistently use msecs for time durations
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/ahci.c          | 331
-rw-r--r--  drivers/ata/libata-core.c   |  56
-rw-r--r--  drivers/ata/libata-eh.c     | 219
-rw-r--r--  drivers/ata/libata-pmp.c    |  13
-rw-r--r--  drivers/ata/libata-scsi.c   |  99
-rw-r--r--  drivers/ata/libata-sff.c    |  15
-rw-r--r--  drivers/ata/libata.h        |   2
-rw-r--r--  drivers/ata/pata_bf54x.c    |   6
-rw-r--r--  drivers/ata/pata_legacy.c   |  10
-rw-r--r--  drivers/ata/pata_qdi.c      |   2
-rw-r--r--  drivers/ata/pata_scc.c      |   2
-rw-r--r--  drivers/ata/pata_winbond.c  |   2
-rw-r--r--  drivers/ata/sata_svw.c      |  38
13 files changed, 671 insertions(+), 124 deletions(-)
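A recurring theme in the diff below ("libata: consistently use msecs for time durations") is that timeout constants become millisecond values and deadlines are computed with ata_deadline() instead of jiffies + N * HZ arithmetic. The helper itself is outside this drivers/ata-limited diffstat (presumably added to include/linux/libata.h); a minimal sketch of the shape every call site here assumes:

#include <linux/jiffies.h>

/* assumed helper: turn a msecs timeout into a jiffies deadline measured
 * from @from; this is what calls like ata_deadline(jiffies, 40000) in
 * the hunks below rely on */
static inline unsigned long ata_deadline(unsigned long from,
					 unsigned long timeout_msecs)
{
	return from + msecs_to_jiffies(timeout_msecs);
}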
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5e6468a7ca4..dc7596f028b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -56,6 +56,12 @@ MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)
56static int ahci_enable_alpm(struct ata_port *ap, 56static int ahci_enable_alpm(struct ata_port *ap,
57 enum link_pm policy); 57 enum link_pm policy);
58static void ahci_disable_alpm(struct ata_port *ap); 58static void ahci_disable_alpm(struct ata_port *ap);
59static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
60static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
61 size_t size);
62static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
63 ssize_t size);
64#define MAX_SLOTS 8
59 65
60enum { 66enum {
61 AHCI_PCI_BAR = 5, 67 AHCI_PCI_BAR = 5,
@@ -98,6 +104,8 @@ enum {
98 HOST_IRQ_STAT = 0x08, /* interrupt status */ 104 HOST_IRQ_STAT = 0x08, /* interrupt status */
99 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ 105 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
100 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */ 106 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
107 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
108 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
101 109
102 /* HOST_CTL bits */ 110 /* HOST_CTL bits */
103 HOST_RESET = (1 << 0), /* reset controller; self-clear */ 111 HOST_RESET = (1 << 0), /* reset controller; self-clear */
@@ -105,6 +113,7 @@ enum {
105 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ 113 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
106 114
107 /* HOST_CAP bits */ 115 /* HOST_CAP bits */
116 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
108 HOST_CAP_SSC = (1 << 14), /* Slumber capable */ 117 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
109 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ 118 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
110 HOST_CAP_CLO = (1 << 24), /* Command List Override support */ 119 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
@@ -202,6 +211,11 @@ enum {
202 ATA_FLAG_IPM, 211 ATA_FLAG_IPM,
203 212
204 ICH_MAP = 0x90, /* ICH MAP register */ 213 ICH_MAP = 0x90, /* ICH MAP register */
214
215 /* em_ctl bits */
216 EM_CTL_RST = (1 << 9), /* Reset */
217 EM_CTL_TM = (1 << 8), /* Transmit Message */
218 EM_CTL_ALHD = (1 << 26), /* Activity LED */
205}; 219};
206 220
207struct ahci_cmd_hdr { 221struct ahci_cmd_hdr {
@@ -219,12 +233,21 @@ struct ahci_sg {
219 __le32 flags_size; 233 __le32 flags_size;
220}; 234};
221 235
236struct ahci_em_priv {
237 enum sw_activity blink_policy;
238 struct timer_list timer;
239 unsigned long saved_activity;
240 unsigned long activity;
241 unsigned long led_state;
242};
243
222struct ahci_host_priv { 244struct ahci_host_priv {
223 unsigned int flags; /* AHCI_HFLAG_* */ 245 unsigned int flags; /* AHCI_HFLAG_* */
224 u32 cap; /* cap to use */ 246 u32 cap; /* cap to use */
225 u32 port_map; /* port map to use */ 247 u32 port_map; /* port map to use */
226 u32 saved_cap; /* saved initial cap */ 248 u32 saved_cap; /* saved initial cap */
227 u32 saved_port_map; /* saved initial port_map */ 249 u32 saved_port_map; /* saved initial port_map */
250 u32 em_loc; /* enclosure management location */
228}; 251};
229 252
230struct ahci_port_priv { 253struct ahci_port_priv {
@@ -240,6 +263,8 @@ struct ahci_port_priv {
240 unsigned int ncq_saw_dmas:1; 263 unsigned int ncq_saw_dmas:1;
241 unsigned int ncq_saw_sdb:1; 264 unsigned int ncq_saw_sdb:1;
242 u32 intr_mask; /* interrupts to enable */ 265 u32 intr_mask; /* interrupts to enable */
266 struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
267 * per PM slot */
243}; 268};
244 269
245static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 270static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
@@ -277,9 +302,20 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
277static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 302static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
278static int ahci_pci_device_resume(struct pci_dev *pdev); 303static int ahci_pci_device_resume(struct pci_dev *pdev);
279#endif 304#endif
305static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
306static ssize_t ahci_activity_store(struct ata_device *dev,
307 enum sw_activity val);
308static void ahci_init_sw_activity(struct ata_link *link);
280 309
281static struct device_attribute *ahci_shost_attrs[] = { 310static struct device_attribute *ahci_shost_attrs[] = {
282 &dev_attr_link_power_management_policy, 311 &dev_attr_link_power_management_policy,
312 &dev_attr_em_message_type,
313 &dev_attr_em_message,
314 NULL
315};
316
317static struct device_attribute *ahci_sdev_attrs[] = {
318 &dev_attr_sw_activity,
283 NULL 319 NULL
284}; 320};
285 321
@@ -289,6 +325,7 @@ static struct scsi_host_template ahci_sht = {
289 .sg_tablesize = AHCI_MAX_SG, 325 .sg_tablesize = AHCI_MAX_SG,
290 .dma_boundary = AHCI_DMA_BOUNDARY, 326 .dma_boundary = AHCI_DMA_BOUNDARY,
291 .shost_attrs = ahci_shost_attrs, 327 .shost_attrs = ahci_shost_attrs,
328 .sdev_attrs = ahci_sdev_attrs,
292}; 329};
293 330
294static struct ata_port_operations ahci_ops = { 331static struct ata_port_operations ahci_ops = {
@@ -316,6 +353,10 @@ static struct ata_port_operations ahci_ops = {
316 353
317 .enable_pm = ahci_enable_alpm, 354 .enable_pm = ahci_enable_alpm,
318 .disable_pm = ahci_disable_alpm, 355 .disable_pm = ahci_disable_alpm,
356 .em_show = ahci_led_show,
357 .em_store = ahci_led_store,
358 .sw_activity_show = ahci_activity_show,
359 .sw_activity_store = ahci_activity_store,
319#ifdef CONFIG_PM 360#ifdef CONFIG_PM
320 .port_suspend = ahci_port_suspend, 361 .port_suspend = ahci_port_suspend,
321 .port_resume = ahci_port_resume, 362 .port_resume = ahci_port_resume,
@@ -561,6 +602,11 @@ static struct pci_driver ahci_pci_driver = {
561#endif 602#endif
562}; 603};
563 604
605static int ahci_em_messages = 1;
606module_param(ahci_em_messages, int, 0444);
607/* add other LED protocol types when they become supported */
608MODULE_PARM_DESC(ahci_em_messages,
609 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
564 610
565static inline int ahci_nr_ports(u32 cap) 611static inline int ahci_nr_ports(u32 cap)
566{ 612{
@@ -1031,11 +1077,28 @@ static void ahci_power_down(struct ata_port *ap)
1031 1077
1032static void ahci_start_port(struct ata_port *ap) 1078static void ahci_start_port(struct ata_port *ap)
1033{ 1079{
1080 struct ahci_port_priv *pp = ap->private_data;
1081 struct ata_link *link;
1082 struct ahci_em_priv *emp;
1083
1034 /* enable FIS reception */ 1084 /* enable FIS reception */
1035 ahci_start_fis_rx(ap); 1085 ahci_start_fis_rx(ap);
1036 1086
1037 /* enable DMA */ 1087 /* enable DMA */
1038 ahci_start_engine(ap); 1088 ahci_start_engine(ap);
1089
1090 /* turn on LEDs */
1091 if (ap->flags & ATA_FLAG_EM) {
1092 ata_port_for_each_link(link, ap) {
1093 emp = &pp->em_priv[link->pmp];
1094 ahci_transmit_led_message(ap, emp->led_state, 4);
1095 }
1096 }
1097
1098 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1099 ata_port_for_each_link(link, ap)
1100 ahci_init_sw_activity(link);
1101
1039} 1102}
1040 1103
1041static int ahci_deinit_port(struct ata_port *ap, const char **emsg) 1104static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
@@ -1079,12 +1142,15 @@ static int ahci_reset_controller(struct ata_host *host)
1079 readl(mmio + HOST_CTL); /* flush */ 1142 readl(mmio + HOST_CTL); /* flush */
1080 } 1143 }
1081 1144
1082 /* reset must complete within 1 second, or 1145 /*
1146 * to perform host reset, OS should set HOST_RESET
1147 * and poll until this bit is read to be "0".
1148 * reset must complete within 1 second, or
1083 * the hardware should be considered fried. 1149 * the hardware should be considered fried.
1084 */ 1150 */
1085 ssleep(1); 1151 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1152 HOST_RESET, 10, 1000);
1086 1153
1087 tmp = readl(mmio + HOST_CTL);
1088 if (tmp & HOST_RESET) { 1154 if (tmp & HOST_RESET) {
1089 dev_printk(KERN_ERR, host->dev, 1155 dev_printk(KERN_ERR, host->dev,
1090 "controller reset failed (0x%x)\n", tmp); 1156 "controller reset failed (0x%x)\n", tmp);
@@ -1116,6 +1182,230 @@ static int ahci_reset_controller(struct ata_host *host)
1116 return 0; 1182 return 0;
1117} 1183}
1118 1184
1185static void ahci_sw_activity(struct ata_link *link)
1186{
1187 struct ata_port *ap = link->ap;
1188 struct ahci_port_priv *pp = ap->private_data;
1189 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1190
1191 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1192 return;
1193
1194 emp->activity++;
1195 if (!timer_pending(&emp->timer))
1196 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1197}
1198
1199static void ahci_sw_activity_blink(unsigned long arg)
1200{
1201 struct ata_link *link = (struct ata_link *)arg;
1202 struct ata_port *ap = link->ap;
1203 struct ahci_port_priv *pp = ap->private_data;
1204 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1205 unsigned long led_message = emp->led_state;
1206 u32 activity_led_state;
1207
1208 led_message &= 0xffff0000;
1209 led_message |= ap->port_no | (link->pmp << 8);
1210
1211 /* check to see if we've had activity. If so,
1212 * toggle state of LED and reset timer. If not,
1213 * turn LED to desired idle state.
1214 */
1215 if (emp->saved_activity != emp->activity) {
1216 emp->saved_activity = emp->activity;
1217 /* get the current LED state */
1218 activity_led_state = led_message & 0x00010000;
1219
1220 if (activity_led_state)
1221 activity_led_state = 0;
1222 else
1223 activity_led_state = 1;
1224
1225 /* clear old state */
1226 led_message &= 0xfff8ffff;
1227
1228 /* toggle state */
1229 led_message |= (activity_led_state << 16);
1230 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1231 } else {
1232 /* switch to idle */
1233 led_message &= 0xfff8ffff;
1234 if (emp->blink_policy == BLINK_OFF)
1235 led_message |= (1 << 16);
1236 }
1237 ahci_transmit_led_message(ap, led_message, 4);
1238}
1239
1240static void ahci_init_sw_activity(struct ata_link *link)
1241{
1242 struct ata_port *ap = link->ap;
1243 struct ahci_port_priv *pp = ap->private_data;
1244 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1245
1246 /* init activity stats, setup timer */
1247 emp->saved_activity = emp->activity = 0;
1248 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1249
1250 /* check our blink policy and set flag for link if it's enabled */
1251 if (emp->blink_policy)
1252 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1253}
1254
1255static int ahci_reset_em(struct ata_host *host)
1256{
1257 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1258 u32 em_ctl;
1259
1260 em_ctl = readl(mmio + HOST_EM_CTL);
1261 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1262 return -EINVAL;
1263
1264 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1265 return 0;
1266}
1267
1268static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1269 ssize_t size)
1270{
1271 struct ahci_host_priv *hpriv = ap->host->private_data;
1272 struct ahci_port_priv *pp = ap->private_data;
1273 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1274 u32 em_ctl;
1275 u32 message[] = {0, 0};
1276 unsigned int flags;
1277 int pmp;
1278 struct ahci_em_priv *emp;
1279
1280 /* get the slot number from the message */
1281 pmp = (state & 0x0000ff00) >> 8;
1282 if (pmp < MAX_SLOTS)
1283 emp = &pp->em_priv[pmp];
1284 else
1285 return -EINVAL;
1286
1287 spin_lock_irqsave(ap->lock, flags);
1288
1289 /*
1290 * if we are still busy transmitting a previous message,
1291 * do not allow
1292 */
1293 em_ctl = readl(mmio + HOST_EM_CTL);
1294 if (em_ctl & EM_CTL_TM) {
1295 spin_unlock_irqrestore(ap->lock, flags);
1296 return -EINVAL;
1297 }
1298
1299 /*
1300 * create message header - this is all zero except for
1301 * the message size, which is 4 bytes.
1302 */
1303 message[0] |= (4 << 8);
1304
1305 /* ignore 0:4 of byte zero, fill in port info yourself */
1306 message[1] = ((state & 0xfffffff0) | ap->port_no);
1307
1308 /* write message to EM_LOC */
1309 writel(message[0], mmio + hpriv->em_loc);
1310 writel(message[1], mmio + hpriv->em_loc+4);
1311
1312 /* save off new led state for port/slot */
1313 emp->led_state = message[1];
1314
1315 /*
1316 * tell hardware to transmit the message
1317 */
1318 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1319
1320 spin_unlock_irqrestore(ap->lock, flags);
1321 return size;
1322}
1323
1324static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1325{
1326 struct ahci_port_priv *pp = ap->private_data;
1327 struct ata_link *link;
1328 struct ahci_em_priv *emp;
1329 int rc = 0;
1330
1331 ata_port_for_each_link(link, ap) {
1332 emp = &pp->em_priv[link->pmp];
1333 rc += sprintf(buf, "%lx\n", emp->led_state);
1334 }
1335 return rc;
1336}
1337
1338static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1339 size_t size)
1340{
1341 int state;
1342 int pmp;
1343 struct ahci_port_priv *pp = ap->private_data;
1344 struct ahci_em_priv *emp;
1345
1346 state = simple_strtoul(buf, NULL, 0);
1347
1348 /* get the slot number from the message */
1349 pmp = (state & 0x0000ff00) >> 8;
1350 if (pmp < MAX_SLOTS)
1351 emp = &pp->em_priv[pmp];
1352 else
1353 return -EINVAL;
1354
1355 /* mask off the activity bits if we are in sw_activity
1356 * mode, user should turn off sw_activity before setting
1357 * activity led through em_message
1358 */
1359 if (emp->blink_policy)
1360 state &= 0xfff8ffff;
1361
1362 return ahci_transmit_led_message(ap, state, size);
1363}
1364
1365static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1366{
1367 struct ata_link *link = dev->link;
1368 struct ata_port *ap = link->ap;
1369 struct ahci_port_priv *pp = ap->private_data;
1370 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1371 u32 port_led_state = emp->led_state;
1372
1373 /* save the desired Activity LED behavior */
1374 if (val == OFF) {
1375 /* clear LFLAG */
1376 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1377
1378 /* set the LED to OFF */
1379 port_led_state &= 0xfff80000;
1380 port_led_state |= (ap->port_no | (link->pmp << 8));
1381 ahci_transmit_led_message(ap, port_led_state, 4);
1382 } else {
1383 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1384 if (val == BLINK_OFF) {
1385 /* set LED to ON for idle */
1386 port_led_state &= 0xfff80000;
1387 port_led_state |= (ap->port_no | (link->pmp << 8));
1388 port_led_state |= 0x00010000; /* check this */
1389 ahci_transmit_led_message(ap, port_led_state, 4);
1390 }
1391 }
1392 emp->blink_policy = val;
1393 return 0;
1394}
1395
1396static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1397{
1398 struct ata_link *link = dev->link;
1399 struct ata_port *ap = link->ap;
1400 struct ahci_port_priv *pp = ap->private_data;
1401 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1402
1403 /* display the saved value of activity behavior for this
1404 * disk.
1405 */
1406 return sprintf(buf, "%d\n", emp->blink_policy);
1407}
1408
1119static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, 1409static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1120 int port_no, void __iomem *mmio, 1410 int port_no, void __iomem *mmio,
1121 void __iomem *port_mmio) 1411 void __iomem *port_mmio)
@@ -1846,7 +2136,8 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1846 if (qc->tf.protocol == ATA_PROT_NCQ) 2136 if (qc->tf.protocol == ATA_PROT_NCQ)
1847 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 2137 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1848 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); 2138 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1849 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 2139
2140 ahci_sw_activity(qc->dev->link);
1850 2141
1851 return 0; 2142 return 0;
1852} 2143}
@@ -2154,7 +2445,8 @@ static void ahci_print_info(struct ata_host *host)
2154 dev_printk(KERN_INFO, &pdev->dev, 2445 dev_printk(KERN_INFO, &pdev->dev,
2155 "flags: " 2446 "flags: "
2156 "%s%s%s%s%s%s%s" 2447 "%s%s%s%s%s%s%s"
2157 "%s%s%s%s%s%s%s\n" 2448 "%s%s%s%s%s%s%s"
2449 "%s\n"
2158 , 2450 ,
2159 2451
2160 cap & (1 << 31) ? "64bit " : "", 2452 cap & (1 << 31) ? "64bit " : "",
@@ -2171,7 +2463,8 @@ static void ahci_print_info(struct ata_host *host)
2171 cap & (1 << 17) ? "pmp " : "", 2463 cap & (1 << 17) ? "pmp " : "",
2172 cap & (1 << 15) ? "pio " : "", 2464 cap & (1 << 15) ? "pio " : "",
2173 cap & (1 << 14) ? "slum " : "", 2465 cap & (1 << 14) ? "slum " : "",
2174 cap & (1 << 13) ? "part " : "" 2466 cap & (1 << 13) ? "part " : "",
2467 cap & (1 << 6) ? "ems ": ""
2175 ); 2468 );
2176} 2469}
2177 2470
@@ -2291,6 +2584,24 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2291 if (hpriv->cap & HOST_CAP_PMP) 2584 if (hpriv->cap & HOST_CAP_PMP)
2292 pi.flags |= ATA_FLAG_PMP; 2585 pi.flags |= ATA_FLAG_PMP;
2293 2586
2587 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2588 u8 messages;
2589 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2590 u32 em_loc = readl(mmio + HOST_EM_LOC);
2591 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2592
2593 messages = (em_ctl & 0x000f0000) >> 16;
2594
2595 /* we only support LED message type right now */
2596 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2597 /* store em_loc */
2598 hpriv->em_loc = ((em_loc >> 16) * 4);
2599 pi.flags |= ATA_FLAG_EM;
2600 if (!(em_ctl & EM_CTL_ALHD))
2601 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2602 }
2603 }
2604
2294 /* CAP.NP sometimes indicate the index of the last enabled 2605 /* CAP.NP sometimes indicate the index of the last enabled
2295 * port, at other times, that of the last possible port, so 2606 * port, at other times, that of the last possible port, so
2296 * determining the maximum port number requires looking at 2607 * determining the maximum port number requires looking at
@@ -2304,6 +2615,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2304 host->iomap = pcim_iomap_table(pdev); 2615 host->iomap = pcim_iomap_table(pdev);
2305 host->private_data = hpriv; 2616 host->private_data = hpriv;
2306 2617
2618 if (pi.flags & ATA_FLAG_EM)
2619 ahci_reset_em(host);
2620
2307 for (i = 0; i < host->n_ports; i++) { 2621 for (i = 0; i < host->n_ports; i++) {
2308 struct ata_port *ap = host->ports[i]; 2622 struct ata_port *ap = host->ports[i];
2309 2623
@@ -2314,6 +2628,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2314 /* set initial link pm policy */ 2628 /* set initial link pm policy */
2315 ap->pm_policy = NOT_AVAILABLE; 2629 ap->pm_policy = NOT_AVAILABLE;
2316 2630
2631 /* set enclosure management message type */
2632 if (ap->flags & ATA_FLAG_EM)
2633 ap->em_message_type = ahci_em_messages;
2634
2635
2317 /* disabled/not-implemented port */ 2636 /* disabled/not-implemented port */
2318 if (!(hpriv->port_map & (1 << i))) 2637 if (!(hpriv->port_map & (1 << i)))
2319 ap->ops = &ata_dummy_port_ops; 2638 ap->ops = &ata_dummy_port_ops;
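The enclosure management code above accepts LED messages through the new em_message host attribute and ahci_led_store(). From that path and ahci_transmit_led_message(), the stored value is interpreted as: bits 8-15 select the PM slot, bits 16 and up carry the LED state (bit 16 is the one the software-activity blink toggles), and the low four bits are overwritten with the port number by the driver. A hedged user-space sketch composing such a value; the helper name and the sysfs location are illustrative, only the bit layout is taken from the code above:

#include <stdio.h>

/* illustrative helper, not from the patch: pack a PM slot number and
 * LED state bits the way ahci_led_store()/ahci_transmit_led_message()
 * unpack them (slot in bits 8-15, LED state from bit 16 up) */
static unsigned long ahci_em_message(unsigned int pmp_slot,
				     unsigned int led_bits)
{
	return ((unsigned long)led_bits << 16) | ((pmp_slot & 0xff) << 8);
}

int main(void)
{
	/* e.g. light the slot-0 LED controlled by bit 16; the value would
	 * be written to the host's em_message attribute (the exact sysfs
	 * path is an assumption, not shown in this diff) */
	printf("0x%lx\n", ahci_em_message(0, 1));
	return 0;
}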
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 303fc0d2b97..9bef1a84fe3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -54,7 +54,6 @@
54#include <linux/completion.h> 54#include <linux/completion.h>
55#include <linux/suspend.h> 55#include <linux/suspend.h>
56#include <linux/workqueue.h> 56#include <linux/workqueue.h>
57#include <linux/jiffies.h>
58#include <linux/scatterlist.h> 57#include <linux/scatterlist.h>
59#include <linux/io.h> 58#include <linux/io.h>
60#include <scsi/scsi.h> 59#include <scsi/scsi.h>
@@ -145,7 +144,7 @@ static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CF
145module_param_named(dma, libata_dma_mask, int, 0444); 144module_param_named(dma, libata_dma_mask, int, 0444);
146MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); 145MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147 146
148static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; 147static int ata_probe_timeout;
149module_param(ata_probe_timeout, int, 0444); 148module_param(ata_probe_timeout, int, 0444);
150MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 149MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151 150
@@ -1533,7 +1532,7 @@ unsigned long ata_id_xfermask(const u16 *id)
1533 * @ap: The ata_port to queue port_task for 1532 * @ap: The ata_port to queue port_task for
1534 * @fn: workqueue function to be scheduled 1533 * @fn: workqueue function to be scheduled
1535 * @data: data for @fn to use 1534 * @data: data for @fn to use
1536 * @delay: delay time for workqueue function 1535 * @delay: delay time in msecs for workqueue function
1537 * 1536 *
1538 * Schedule @fn(@data) for execution after @delay jiffies using 1537 * Schedule @fn(@data) for execution after @delay jiffies using
1539 * port_task. There is one port_task per port and it's the 1538 * port_task. There is one port_task per port and it's the
@@ -1552,7 +1551,7 @@ void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1552 ap->port_task_data = data; 1551 ap->port_task_data = data;
1553 1552
1554 /* may fail if ata_port_flush_task() in progress */ 1553 /* may fail if ata_port_flush_task() in progress */
1555 queue_delayed_work(ata_wq, &ap->port_task, delay); 1554 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1556} 1555}
1557 1556
1558/** 1557/**
@@ -1612,6 +1611,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1612 struct ata_link *link = dev->link; 1611 struct ata_link *link = dev->link;
1613 struct ata_port *ap = link->ap; 1612 struct ata_port *ap = link->ap;
1614 u8 command = tf->command; 1613 u8 command = tf->command;
1614 int auto_timeout = 0;
1615 struct ata_queued_cmd *qc; 1615 struct ata_queued_cmd *qc;
1616 unsigned int tag, preempted_tag; 1616 unsigned int tag, preempted_tag;
1617 u32 preempted_sactive, preempted_qc_active; 1617 u32 preempted_sactive, preempted_qc_active;
@@ -1684,8 +1684,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1684 1684
1685 spin_unlock_irqrestore(ap->lock, flags); 1685 spin_unlock_irqrestore(ap->lock, flags);
1686 1686
1687 if (!timeout) 1687 if (!timeout) {
1688 timeout = ata_probe_timeout * 1000 / HZ; 1688 if (ata_probe_timeout)
1689 timeout = ata_probe_timeout * 1000;
1690 else {
1691 timeout = ata_internal_cmd_timeout(dev, command);
1692 auto_timeout = 1;
1693 }
1694 }
1689 1695
1690 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1696 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1691 1697
@@ -1761,6 +1767,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1761 1767
1762 spin_unlock_irqrestore(ap->lock, flags); 1768 spin_unlock_irqrestore(ap->lock, flags);
1763 1769
1770 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1771 ata_internal_cmd_timed_out(dev, command);
1772
1764 return err_mask; 1773 return err_mask;
1765} 1774}
1766 1775
@@ -3319,7 +3328,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3319 int (*check_ready)(struct ata_link *link)) 3328 int (*check_ready)(struct ata_link *link))
3320{ 3329{
3321 unsigned long start = jiffies; 3330 unsigned long start = jiffies;
3322 unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT; 3331 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3323 int warned = 0; 3332 int warned = 0;
3324 3333
3325 if (time_after(nodev_deadline, deadline)) 3334 if (time_after(nodev_deadline, deadline))
@@ -3387,7 +3396,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3387int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3396int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3388 int (*check_ready)(struct ata_link *link)) 3397 int (*check_ready)(struct ata_link *link))
3389{ 3398{
3390 msleep(ATA_WAIT_AFTER_RESET_MSECS); 3399 msleep(ATA_WAIT_AFTER_RESET);
3391 3400
3392 return ata_wait_ready(link, deadline, check_ready); 3401 return ata_wait_ready(link, deadline, check_ready);
3393} 3402}
@@ -3417,13 +3426,13 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3417int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3426int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3418 unsigned long deadline) 3427 unsigned long deadline)
3419{ 3428{
3420 unsigned long interval_msec = params[0]; 3429 unsigned long interval = params[0];
3421 unsigned long duration = msecs_to_jiffies(params[1]); 3430 unsigned long duration = params[1];
3422 unsigned long last_jiffies, t; 3431 unsigned long last_jiffies, t;
3423 u32 last, cur; 3432 u32 last, cur;
3424 int rc; 3433 int rc;
3425 3434
3426 t = jiffies + msecs_to_jiffies(params[2]); 3435 t = ata_deadline(jiffies, params[2]);
3427 if (time_before(t, deadline)) 3436 if (time_before(t, deadline))
3428 deadline = t; 3437 deadline = t;
3429 3438
@@ -3435,7 +3444,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3435 last_jiffies = jiffies; 3444 last_jiffies = jiffies;
3436 3445
3437 while (1) { 3446 while (1) {
3438 msleep(interval_msec); 3447 msleep(interval);
3439 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3448 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3440 return rc; 3449 return rc;
3441 cur &= 0xf; 3450 cur &= 0xf;
@@ -3444,7 +3453,8 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3444 if (cur == last) { 3453 if (cur == last) {
3445 if (cur == 1 && time_before(jiffies, deadline)) 3454 if (cur == 1 && time_before(jiffies, deadline))
3446 continue; 3455 continue;
3447 if (time_after(jiffies, last_jiffies + duration)) 3456 if (time_after(jiffies,
3457 ata_deadline(last_jiffies, duration)))
3448 return 0; 3458 return 0;
3449 continue; 3459 continue;
3450 } 3460 }
@@ -3636,7 +3646,8 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3636 if (check_ready) { 3646 if (check_ready) {
3637 unsigned long pmp_deadline; 3647 unsigned long pmp_deadline;
3638 3648
3639 pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT; 3649 pmp_deadline = ata_deadline(jiffies,
3650 ATA_TMOUT_PMP_SRST_WAIT);
3640 if (time_after(pmp_deadline, deadline)) 3651 if (time_after(pmp_deadline, deadline))
3641 pmp_deadline = deadline; 3652 pmp_deadline = deadline;
3642 ata_wait_ready(link, pmp_deadline, check_ready); 3653 ata_wait_ready(link, pmp_deadline, check_ready);
@@ -6073,8 +6084,6 @@ static void __init ata_parse_force_param(void)
6073 6084
6074static int __init ata_init(void) 6085static int __init ata_init(void)
6075{ 6086{
6076 ata_probe_timeout *= HZ;
6077
6078 ata_parse_force_param(); 6087 ata_parse_force_param();
6079 6088
6080 ata_wq = create_workqueue("ata"); 6089 ata_wq = create_workqueue("ata");
@@ -6127,8 +6136,8 @@ int ata_ratelimit(void)
6127 * @reg: IO-mapped register 6136 * @reg: IO-mapped register
6128 * @mask: Mask to apply to read register value 6137 * @mask: Mask to apply to read register value
6129 * @val: Wait condition 6138 * @val: Wait condition
6130 * @interval_msec: polling interval in milliseconds 6139 * @interval: polling interval in milliseconds
6131 * @timeout_msec: timeout in milliseconds 6140 * @timeout: timeout in milliseconds
6132 * 6141 *
6133 * Waiting for some bits of register to change is a common 6142 * Waiting for some bits of register to change is a common
6134 * operation for ATA controllers. This function reads 32bit LE 6143 * operation for ATA controllers. This function reads 32bit LE
@@ -6146,10 +6155,9 @@ int ata_ratelimit(void)
6146 * The final register value. 6155 * The final register value.
6147 */ 6156 */
6148u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 6157u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6149 unsigned long interval_msec, 6158 unsigned long interval, unsigned long timeout)
6150 unsigned long timeout_msec)
6151{ 6159{
6152 unsigned long timeout; 6160 unsigned long deadline;
6153 u32 tmp; 6161 u32 tmp;
6154 6162
6155 tmp = ioread32(reg); 6163 tmp = ioread32(reg);
@@ -6158,10 +6166,10 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6158 * preceding writes reach the controller before starting to 6166 * preceding writes reach the controller before starting to
6159 * eat away the timeout. 6167 * eat away the timeout.
6160 */ 6168 */
6161 timeout = jiffies + (timeout_msec * HZ) / 1000; 6169 deadline = ata_deadline(jiffies, timeout);
6162 6170
6163 while ((tmp & mask) == val && time_before(jiffies, timeout)) { 6171 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6164 msleep(interval_msec); 6172 msleep(interval);
6165 tmp = ioread32(reg); 6173 tmp = ioread32(reg);
6166 } 6174 }
6167 6175
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7894d83ea1e..58bdc538d22 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -66,15 +66,19 @@ enum {
66 ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 66 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
67 ATA_ECAT_DUBIOUS_UNK_DEV = 7, 67 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
68 ATA_ECAT_NR = 8, 68 ATA_ECAT_NR = 8,
69};
70 69
71/* Waiting in ->prereset can never be reliable. It's sometimes nice 70 ATA_EH_CMD_DFL_TIMEOUT = 5000,
72 * to wait there but it can't be depended upon; otherwise, we wouldn't 71
73 * be resetting. Just give it enough time for most drives to spin up. 72 /* always put at least this amount of time between resets */
74 */ 73 ATA_EH_RESET_COOL_DOWN = 5000,
75enum { 74
76 ATA_EH_PRERESET_TIMEOUT = 10 * HZ, 75 /* Waiting in ->prereset can never be reliable. It's
77 ATA_EH_FASTDRAIN_INTERVAL = 3 * HZ, 76 * sometimes nice to wait there but it can't be depended upon;
77 * otherwise, we wouldn't be resetting. Just give it enough
78 * time for most drives to spin up.
79 */
80 ATA_EH_PRERESET_TIMEOUT = 10000,
81 ATA_EH_FASTDRAIN_INTERVAL = 3000,
78}; 82};
79 83
80/* The following table determines how we sequence resets. Each entry 84/* The following table determines how we sequence resets. Each entry
@@ -84,12 +88,59 @@ enum {
84 * are mostly for error handling, hotplug and retarded devices. 88 * are mostly for error handling, hotplug and retarded devices.
85 */ 89 */
86static const unsigned long ata_eh_reset_timeouts[] = { 90static const unsigned long ata_eh_reset_timeouts[] = {
87 10 * HZ, /* most drives spin up by 10sec */ 91 10000, /* most drives spin up by 10sec */
88 10 * HZ, /* > 99% working drives spin up before 20sec */ 92 10000, /* > 99% working drives spin up before 20sec */
89 35 * HZ, /* give > 30 secs of idleness for retarded devices */ 93 35000, /* give > 30 secs of idleness for retarded devices */
90 5 * HZ, /* and sweet one last chance */ 94 5000, /* and sweet one last chance */
91 /* > 1 min has elapsed, give up */ 95 ULONG_MAX, /* > 1 min has elapsed, give up */
96};
97
98static const unsigned long ata_eh_identify_timeouts[] = {
99 5000, /* covers > 99% of successes and not too boring on failures */
100 10000, /* combined time till here is enough even for media access */
101 30000, /* for true idiots */
102 ULONG_MAX,
103};
104
105static const unsigned long ata_eh_other_timeouts[] = {
106 5000, /* same rationale as identify timeout */
107 10000, /* ditto */
108 /* but no merciful 30sec for other commands, it just isn't worth it */
109 ULONG_MAX,
110};
111
112struct ata_eh_cmd_timeout_ent {
113 const u8 *commands;
114 const unsigned long *timeouts;
115};
116
117/* The following table determines timeouts to use for EH internal
118 * commands. Each table entry is a command class and matches the
119 * commands the entry applies to and the timeout table to use.
120 *
121 * On the retry after a command timed out, the next timeout value from
122 * the table is used. If the table doesn't contain further entries,
123 * the last value is used.
124 *
125 * ehc->cmd_timeout_idx keeps track of which timeout to use per
126 * command class, so if SET_FEATURES times out on the first try, the
127 * next try will use the second timeout value only for that class.
128 */
129#define CMDS(cmds...) (const u8 []){ cmds, 0 }
130static const struct ata_eh_cmd_timeout_ent
131ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
132 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
133 .timeouts = ata_eh_identify_timeouts, },
134 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
135 .timeouts = ata_eh_other_timeouts, },
136 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
137 .timeouts = ata_eh_other_timeouts, },
138 { .commands = CMDS(ATA_CMD_SET_FEATURES),
139 .timeouts = ata_eh_other_timeouts, },
140 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
141 .timeouts = ata_eh_other_timeouts, },
92}; 142};
143#undef CMDS
93 144
94static void __ata_port_freeze(struct ata_port *ap); 145static void __ata_port_freeze(struct ata_port *ap);
95#ifdef CONFIG_PM 146#ifdef CONFIG_PM
@@ -236,6 +287,73 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
236 287
237#endif /* CONFIG_PCI */ 288#endif /* CONFIG_PCI */
238 289
290static int ata_lookup_timeout_table(u8 cmd)
291{
292 int i;
293
294 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
295 const u8 *cur;
296
297 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
298 if (*cur == cmd)
299 return i;
300 }
301
302 return -1;
303}
304
305/**
306 * ata_internal_cmd_timeout - determine timeout for an internal command
307 * @dev: target device
308 * @cmd: internal command to be issued
309 *
310 * Determine timeout for internal command @cmd for @dev.
311 *
312 * LOCKING:
313 * EH context.
314 *
315 * RETURNS:
316 * Determined timeout.
317 */
318unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
319{
320 struct ata_eh_context *ehc = &dev->link->eh_context;
321 int ent = ata_lookup_timeout_table(cmd);
322 int idx;
323
324 if (ent < 0)
325 return ATA_EH_CMD_DFL_TIMEOUT;
326
327 idx = ehc->cmd_timeout_idx[dev->devno][ent];
328 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
329}
330
331/**
332 * ata_internal_cmd_timed_out - notification for internal command timeout
333 * @dev: target device
334 * @cmd: internal command which timed out
335 *
336 * Notify EH that internal command @cmd for @dev timed out. This
337 * function should be called only for commands whose timeouts are
338 * determined using ata_internal_cmd_timeout().
339 *
340 * LOCKING:
341 * EH context.
342 */
343void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
344{
345 struct ata_eh_context *ehc = &dev->link->eh_context;
346 int ent = ata_lookup_timeout_table(cmd);
347 int idx;
348
349 if (ent < 0)
350 return;
351
352 idx = ehc->cmd_timeout_idx[dev->devno][ent];
353 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
354 ehc->cmd_timeout_idx[dev->devno][ent]++;
355}
356
239static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 357static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
240 unsigned int err_mask) 358 unsigned int err_mask)
241{ 359{
@@ -486,6 +604,9 @@ void ata_scsi_error(struct Scsi_Host *host)
486 if (ata_ncq_enabled(dev)) 604 if (ata_ncq_enabled(dev))
487 ehc->saved_ncq_enabled |= 1 << devno; 605 ehc->saved_ncq_enabled |= 1 << devno;
488 } 606 }
607
608 /* set last reset timestamp to some time in the past */
609 ehc->last_reset = jiffies - 60 * HZ;
489 } 610 }
490 611
491 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 612 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -641,7 +762,7 @@ void ata_eh_fastdrain_timerfn(unsigned long arg)
641 /* some qcs have finished, give it another chance */ 762 /* some qcs have finished, give it another chance */
642 ap->fastdrain_cnt = cnt; 763 ap->fastdrain_cnt = cnt;
643 ap->fastdrain_timer.expires = 764 ap->fastdrain_timer.expires =
644 jiffies + ATA_EH_FASTDRAIN_INTERVAL; 765 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
645 add_timer(&ap->fastdrain_timer); 766 add_timer(&ap->fastdrain_timer);
646 } 767 }
647 768
@@ -681,7 +802,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
681 802
682 /* activate fast drain */ 803 /* activate fast drain */
683 ap->fastdrain_cnt = cnt; 804 ap->fastdrain_cnt = cnt;
684 ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL; 805 ap->fastdrain_timer.expires =
806 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
685 add_timer(&ap->fastdrain_timer); 807 add_timer(&ap->fastdrain_timer);
686} 808}
687 809
@@ -1238,6 +1360,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1238 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1360 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1239 * @dev: device to perform REQUEST_SENSE to 1361 * @dev: device to perform REQUEST_SENSE to
1240 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1362 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1363 * @dfl_sense_key: default sense key to use
1241 * 1364 *
1242 * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1365 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
1243 * SENSE. This function is EH helper. 1366 * SENSE. This function is EH helper.
@@ -1248,13 +1371,13 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1248 * RETURNS: 1371 * RETURNS:
1249 * 0 on success, AC_ERR_* mask on failure 1372 * 0 on success, AC_ERR_* mask on failure
1250 */ 1373 */
1251static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) 1374static unsigned int atapi_eh_request_sense(struct ata_device *dev,
1375 u8 *sense_buf, u8 dfl_sense_key)
1252{ 1376{
1253 struct ata_device *dev = qc->dev; 1377 u8 cdb[ATAPI_CDB_LEN] =
1254 unsigned char *sense_buf = qc->scsicmd->sense_buffer; 1378 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1255 struct ata_port *ap = dev->link->ap; 1379 struct ata_port *ap = dev->link->ap;
1256 struct ata_taskfile tf; 1380 struct ata_taskfile tf;
1257 u8 cdb[ATAPI_CDB_LEN];
1258 1381
1259 DPRINTK("ATAPI request sense\n"); 1382 DPRINTK("ATAPI request sense\n");
1260 1383
@@ -1265,15 +1388,11 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
1265 * for the case where they are -not- overwritten 1388 * for the case where they are -not- overwritten
1266 */ 1389 */
1267 sense_buf[0] = 0x70; 1390 sense_buf[0] = 0x70;
1268 sense_buf[2] = qc->result_tf.feature >> 4; 1391 sense_buf[2] = dfl_sense_key;
1269 1392
1270 /* some devices time out if garbage left in tf */ 1393 /* some devices time out if garbage left in tf */
1271 ata_tf_init(dev, &tf); 1394 ata_tf_init(dev, &tf);
1272 1395
1273 memset(cdb, 0, ATAPI_CDB_LEN);
1274 cdb[0] = REQUEST_SENSE;
1275 cdb[4] = SCSI_SENSE_BUFFERSIZE;
1276
1277 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1396 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1278 tf.command = ATA_CMD_PACKET; 1397 tf.command = ATA_CMD_PACKET;
1279 1398
@@ -1445,7 +1564,9 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1445 1564
1446 case ATA_DEV_ATAPI: 1565 case ATA_DEV_ATAPI:
1447 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 1566 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1448 tmp = atapi_eh_request_sense(qc); 1567 tmp = atapi_eh_request_sense(qc->dev,
1568 qc->scsicmd->sense_buffer,
1569 qc->result_tf.feature >> 4);
1449 if (!tmp) { 1570 if (!tmp) {
1450 /* ATA_QCFLAG_SENSE_VALID is used to 1571 /* ATA_QCFLAG_SENSE_VALID is used to
1451 * tell atapi_qc_complete() that sense 1572 * tell atapi_qc_complete() that sense
@@ -2071,13 +2192,12 @@ int ata_eh_reset(struct ata_link *link, int classify,
2071 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2192 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2072 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2193 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2073{ 2194{
2074 const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts);
2075 struct ata_port *ap = link->ap; 2195 struct ata_port *ap = link->ap;
2076 struct ata_eh_context *ehc = &link->eh_context; 2196 struct ata_eh_context *ehc = &link->eh_context;
2077 unsigned int *classes = ehc->classes; 2197 unsigned int *classes = ehc->classes;
2078 unsigned int lflags = link->flags; 2198 unsigned int lflags = link->flags;
2079 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2199 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2080 int try = 0; 2200 int max_tries = 0, try = 0;
2081 struct ata_device *dev; 2201 struct ata_device *dev;
2082 unsigned long deadline, now; 2202 unsigned long deadline, now;
2083 ata_reset_fn_t reset; 2203 ata_reset_fn_t reset;
@@ -2088,11 +2208,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
2088 /* 2208 /*
2089 * Prepare to reset 2209 * Prepare to reset
2090 */ 2210 */
2211 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2212 max_tries++;
2213
2214 now = jiffies;
2215 deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
2216 if (time_before(now, deadline))
2217 schedule_timeout_uninterruptible(deadline - now);
2218
2091 spin_lock_irqsave(ap->lock, flags); 2219 spin_lock_irqsave(ap->lock, flags);
2092 ap->pflags |= ATA_PFLAG_RESETTING; 2220 ap->pflags |= ATA_PFLAG_RESETTING;
2093 spin_unlock_irqrestore(ap->lock, flags); 2221 spin_unlock_irqrestore(ap->lock, flags);
2094 2222
2095 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2223 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2224 ehc->last_reset = jiffies;
2096 2225
2097 ata_link_for_each_dev(dev, link) { 2226 ata_link_for_each_dev(dev, link) {
2098 /* If we issue an SRST then an ATA drive (not ATAPI) 2227 /* If we issue an SRST then an ATA drive (not ATAPI)
@@ -2125,7 +2254,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2125 } 2254 }
2126 2255
2127 if (prereset) { 2256 if (prereset) {
2128 rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); 2257 rc = prereset(link,
2258 ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT));
2129 if (rc) { 2259 if (rc) {
2130 if (rc == -ENOENT) { 2260 if (rc == -ENOENT) {
2131 ata_link_printk(link, KERN_DEBUG, 2261 ata_link_printk(link, KERN_DEBUG,
@@ -2157,10 +2287,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
2157 /* 2287 /*
2158 * Perform reset 2288 * Perform reset
2159 */ 2289 */
2290 ehc->last_reset = jiffies;
2160 if (ata_is_host_link(link)) 2291 if (ata_is_host_link(link))
2161 ata_eh_freeze_port(ap); 2292 ata_eh_freeze_port(ap);
2162 2293
2163 deadline = jiffies + ata_eh_reset_timeouts[try++]; 2294 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2164 2295
2165 if (reset) { 2296 if (reset) {
2166 if (verbose) 2297 if (verbose)
@@ -2277,6 +2408,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2277 2408
2278 /* reset successful, schedule revalidation */ 2409 /* reset successful, schedule revalidation */
2279 ata_eh_done(link, NULL, ATA_EH_RESET); 2410 ata_eh_done(link, NULL, ATA_EH_RESET);
2411 ehc->last_reset = jiffies;
2280 ehc->i.action |= ATA_EH_REVALIDATE; 2412 ehc->i.action |= ATA_EH_REVALIDATE;
2281 2413
2282 rc = 0; 2414 rc = 0;
@@ -2303,9 +2435,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
2303 if (time_before(now, deadline)) { 2435 if (time_before(now, deadline)) {
2304 unsigned long delta = deadline - now; 2436 unsigned long delta = deadline - now;
2305 2437
2306 ata_link_printk(link, KERN_WARNING, "reset failed " 2438 ata_link_printk(link, KERN_WARNING,
2307 "(errno=%d), retrying in %u secs\n", 2439 "reset failed (errno=%d), retrying in %u secs\n",
2308 rc, (jiffies_to_msecs(delta) + 999) / 1000); 2440 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2309 2441
2310 while (delta) 2442 while (delta)
2311 delta = schedule_timeout_uninterruptible(delta); 2443 delta = schedule_timeout_uninterruptible(delta);
@@ -2583,8 +2715,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2583 ata_eh_detach_dev(dev); 2715 ata_eh_detach_dev(dev);
2584 2716
2585 /* schedule probe if necessary */ 2717 /* schedule probe if necessary */
2586 if (ata_eh_schedule_probe(dev)) 2718 if (ata_eh_schedule_probe(dev)) {
2587 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2719 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2720 memset(ehc->cmd_timeout_idx[dev->devno], 0,
2721 sizeof(ehc->cmd_timeout_idx[dev->devno]));
2722 }
2588 2723
2589 return 1; 2724 return 1;
2590 } else { 2725 } else {
@@ -2622,7 +2757,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2622{ 2757{
2623 struct ata_link *link; 2758 struct ata_link *link;
2624 struct ata_device *dev; 2759 struct ata_device *dev;
2625 int nr_failed_devs, nr_disabled_devs; 2760 int nr_failed_devs;
2626 int rc; 2761 int rc;
2627 unsigned long flags; 2762 unsigned long flags;
2628 2763
@@ -2665,7 +2800,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2665 retry: 2800 retry:
2666 rc = 0; 2801 rc = 0;
2667 nr_failed_devs = 0; 2802 nr_failed_devs = 0;
2668 nr_disabled_devs = 0;
2669 2803
2670 /* if UNLOADING, finish immediately */ 2804 /* if UNLOADING, finish immediately */
2671 if (ap->pflags & ATA_PFLAG_UNLOADING) 2805 if (ap->pflags & ATA_PFLAG_UNLOADING)
@@ -2732,8 +2866,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2732 2866
2733dev_fail: 2867dev_fail:
2734 nr_failed_devs++; 2868 nr_failed_devs++;
2735 if (ata_eh_handle_dev_fail(dev, rc)) 2869 ata_eh_handle_dev_fail(dev, rc);
2736 nr_disabled_devs++;
2737 2870
2738 if (ap->pflags & ATA_PFLAG_FROZEN) { 2871 if (ap->pflags & ATA_PFLAG_FROZEN) {
2739 /* PMP reset requires working host port. 2872 /* PMP reset requires working host port.
@@ -2745,18 +2878,8 @@ dev_fail:
2745 } 2878 }
2746 } 2879 }
2747 2880
2748 if (nr_failed_devs) { 2881 if (nr_failed_devs)
2749 if (nr_failed_devs != nr_disabled_devs) {
2750 ata_port_printk(ap, KERN_WARNING, "failed to recover "
2751 "some devices, retrying in 5 secs\n");
2752 ssleep(5);
2753 } else {
2754 /* no device left to recover, repeat fast */
2755 msleep(500);
2756 }
2757
2758 goto retry; 2882 goto retry;
2759 }
2760 2883
2761 out: 2884 out:
2762 if (rc && r_failed_link) 2885 if (rc && r_failed_link)
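The table machinery above replaces the single fixed internal-command timeout: ata_internal_cmd_timeout() returns the entry selected by the per-device, per-command-class index, and ata_internal_cmd_timed_out() advances that index until the next entry is the ULONG_MAX terminator. A standalone sketch of the same escalation logic, with illustrative names and a single index where the kernel keeps one per device and command class in ehc->cmd_timeout_idx:

#include <stdio.h>
#include <limits.h>

/* one command class, ULONG_MAX-terminated, mirroring ata_eh_identify_timeouts */
static const unsigned long identify_timeouts[] = { 5000, 10000, 30000, ULONG_MAX };
static int idx;	/* per device and command class in the real code */

static unsigned long cmd_timeout(void)
{
	return identify_timeouts[idx];
}

static void cmd_timed_out(void)
{
	/* advance only while a further (non-terminator) entry exists */
	if (identify_timeouts[idx + 1] != ULONG_MAX)
		idx++;
}

int main(void)
{
	int i;

	/* four consecutive timeouts escalate 5s -> 10s -> 30s and then stay */
	for (i = 0; i < 4; i++) {
		printf("try %d: %lu ms\n", i + 1, cmd_timeout());
		cmd_timed_out();
	}
	return 0;
}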
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7daf4c0f621..b65db309c18 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -727,19 +727,12 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
727 } 727 }
728 728
729 if (tries) { 729 if (tries) {
730 int sleep = ehc->i.flags & ATA_EHI_DID_RESET;
731
732 /* consecutive revalidation failures? speed down */ 730 /* consecutive revalidation failures? speed down */
733 if (reval_failed) 731 if (reval_failed)
734 sata_down_spd_limit(link); 732 sata_down_spd_limit(link);
735 else 733 else
736 reval_failed = 1; 734 reval_failed = 1;
737 735
738 ata_dev_printk(dev, KERN_WARNING,
739 "retrying reset%s\n",
740 sleep ? " in 5 secs" : "");
741 if (sleep)
742 ssleep(5);
743 ehc->i.action |= ATA_EH_RESET; 736 ehc->i.action |= ATA_EH_RESET;
744 goto retry; 737 goto retry;
745 } else { 738 } else {
@@ -785,7 +778,8 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
785 * SError.N working. 778 * SError.N working.
786 */ 779 */
787 sata_link_hardreset(link, sata_deb_timing_normal, 780 sata_link_hardreset(link, sata_deb_timing_normal,
788 jiffies + ATA_TMOUT_INTERNAL_QUICK, NULL, NULL); 781 ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK),
782 NULL, NULL);
789 783
790 /* unconditionally clear SError.N */ 784 /* unconditionally clear SError.N */
791 rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 785 rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
@@ -990,10 +984,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
990 goto retry; 984 goto retry;
991 985
992 if (--pmp_tries) { 986 if (--pmp_tries) {
993 ata_port_printk(ap, KERN_WARNING,
994 "failed to recover PMP, retrying in 5 secs\n");
995 pmp_ehc->i.action |= ATA_EH_RESET; 987 pmp_ehc->i.action |= ATA_EH_RESET;
996 ssleep(5);
997 goto retry; 988 goto retry;
998 } 989 }
999 990
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 499ccc628d8..f3b4b15a8dc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -190,6 +190,85 @@ static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
190 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); 190 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
191} 191}
192 192
193static ssize_t
194ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
195 const char *buf, size_t count)
196{
197 struct Scsi_Host *shost = class_to_shost(dev);
198 struct ata_port *ap = ata_shost_to_port(shost);
199 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
200 return ap->ops->em_store(ap, buf, count);
201 return -EINVAL;
202}
203
204static ssize_t
205ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
206 char *buf)
207{
208 struct Scsi_Host *shost = class_to_shost(dev);
209 struct ata_port *ap = ata_shost_to_port(shost);
210
211 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
212 return ap->ops->em_show(ap, buf);
213 return -EINVAL;
214}
215DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO,
216 ata_scsi_em_message_show, ata_scsi_em_message_store);
217EXPORT_SYMBOL_GPL(dev_attr_em_message);
218
219static ssize_t
220ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
221 char *buf)
222{
223 struct Scsi_Host *shost = class_to_shost(dev);
224 struct ata_port *ap = ata_shost_to_port(shost);
225
226 return snprintf(buf, 23, "%d\n", ap->em_message_type);
227}
228DEVICE_ATTR(em_message_type, S_IRUGO,
229 ata_scsi_em_message_type_show, NULL);
230EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
231
232static ssize_t
233ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
234 char *buf)
235{
236 struct scsi_device *sdev = to_scsi_device(dev);
237 struct ata_port *ap = ata_shost_to_port(sdev->host);
238 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
239
240 if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
241 return ap->ops->sw_activity_show(atadev, buf);
242 return -EINVAL;
243}
244
245static ssize_t
246ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
247 const char *buf, size_t count)
248{
249 struct scsi_device *sdev = to_scsi_device(dev);
250 struct ata_port *ap = ata_shost_to_port(sdev->host);
251 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
252 enum sw_activity val;
253 int rc;
254
255 if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
256 val = simple_strtoul(buf, NULL, 0);
257 switch (val) {
258 case OFF: case BLINK_ON: case BLINK_OFF:
259 rc = ap->ops->sw_activity_store(atadev, val);
260 if (!rc)
261 return count;
262 else
263 return rc;
264 }
265 }
266 return -EINVAL;
267}
268DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
269 ata_scsi_activity_store);
270EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
271
193static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, 272static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
194 void (*done)(struct scsi_cmnd *)) 273 void (*done)(struct scsi_cmnd *))
195{ 274{
@@ -1779,7 +1858,9 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1779 const u8 pages[] = { 1858 const u8 pages[] = {
1780 0x00, /* page 0x00, this page */ 1859 0x00, /* page 0x00, this page */
1781 0x80, /* page 0x80, unit serial no page */ 1860 0x80, /* page 0x80, unit serial no page */
1782 0x83 /* page 0x83, device ident page */ 1861 0x83, /* page 0x83, device ident page */
1862 0x89, /* page 0x89, ata info page */
1863 0xb1, /* page 0xb1, block device characteristics page */
1783 }; 1864 };
1784 1865
1785 rbuf[3] = sizeof(pages); /* number of supported VPD pages */ 1866 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
@@ -1900,6 +1981,19 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
1900 return 0; 1981 return 0;
1901} 1982}
1902 1983
1984static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
1985{
1986 rbuf[1] = 0xb1;
1987 rbuf[3] = 0x3c;
1988 if (ata_id_major_version(args->id) > 7) {
1989 rbuf[4] = args->id[217] >> 8;
1990 rbuf[5] = args->id[217];
1991 rbuf[7] = args->id[168] & 0xf;
1992 }
1993
1994 return 0;
1995}
1996
1903/** 1997/**
1904 * ata_scsiop_noop - Command handler that simply returns success. 1998 * ata_scsiop_noop - Command handler that simply returns success.
1905 * @args: device IDENTIFY data / SCSI command of interest. 1999 * @args: device IDENTIFY data / SCSI command of interest.
@@ -2921,6 +3015,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2921 case 0x89: 3015 case 0x89:
2922 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 3016 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
2923 break; 3017 break;
3018 case 0xb1:
3019 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
3020 break;
2924 default: 3021 default:
2925 ata_scsi_invalid_field(cmd, done); 3022 ata_scsi_invalid_field(cmd, done);
2926 break; 3023 break;
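ata_scsiop_inq_b1() above builds SCSI VPD page 0xb1 (Block Device Characteristics) from IDENTIFY words 217 and 168. Reading those as the nominal media rotation rate and the form factor is my reading of ATA8-ACS rather than something the patch spells out; a hedged sketch decoding a page filled in that way:

#include <stdio.h>

static void decode_vpd_b1(const unsigned char *rbuf)
{
	/* bytes 4-5 and the low nibble of byte 7 are what
	 * ata_scsiop_inq_b1() fills from IDENTIFY words 217 and 168 */
	unsigned int rate = (rbuf[4] << 8) | rbuf[5];

	if (rate == 0x0001)
		printf("non-rotating medium (SSD)\n");
	else if (rate)
		printf("nominal rotation rate: %u rpm\n", rate);
	else
		printf("rotation rate not reported\n");
	printf("form factor code: %u\n", rbuf[7] & 0xf);
}

int main(void)
{
	/* example buffer: page 0xb1, length 0x3c, rate 0x0001 (SSD),
	 * form factor code 3 (assumed to mean 2.5") */
	unsigned char page[64] = { 0x00, 0xb1, 0x00, 0x3c, 0x00, 0x01, 0x00, 0x03 };

	decode_vpd_b1(page);
	return 0;
}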
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c0908c22548..304fdc6f1dc 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -345,8 +345,8 @@ void ata_sff_dma_pause(struct ata_port *ap)
345/** 345/**
346 * ata_sff_busy_sleep - sleep until BSY clears, or timeout 346 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
347 * @ap: port containing status register to be polled 347 * @ap: port containing status register to be polled
348 * @tmout_pat: impatience timeout 348 * @tmout_pat: impatience timeout in msecs
349 * @tmout: overall timeout 349 * @tmout: overall timeout in msecs
350 * 350 *
351 * Sleep until ATA Status register bit BSY clears, 351 * Sleep until ATA Status register bit BSY clears,
352 * or a timeout occurs. 352 * or a timeout occurs.
@@ -365,7 +365,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
365 365
366 status = ata_sff_busy_wait(ap, ATA_BUSY, 300); 366 status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
367 timer_start = jiffies; 367 timer_start = jiffies;
368 timeout = timer_start + tmout_pat; 368 timeout = ata_deadline(timer_start, tmout_pat);
369 while (status != 0xff && (status & ATA_BUSY) && 369 while (status != 0xff && (status & ATA_BUSY) &&
370 time_before(jiffies, timeout)) { 370 time_before(jiffies, timeout)) {
371 msleep(50); 371 msleep(50);
@@ -377,7 +377,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
377 "port is slow to respond, please be patient " 377 "port is slow to respond, please be patient "
378 "(Status 0x%x)\n", status); 378 "(Status 0x%x)\n", status);
379 379
380 timeout = timer_start + tmout; 380 timeout = ata_deadline(timer_start, tmout);
381 while (status != 0xff && (status & ATA_BUSY) && 381 while (status != 0xff && (status & ATA_BUSY) &&
382 time_before(jiffies, timeout)) { 382 time_before(jiffies, timeout)) {
383 msleep(50); 383 msleep(50);
@@ -390,7 +390,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
390 if (status & ATA_BUSY) { 390 if (status & ATA_BUSY) {
391 ata_port_printk(ap, KERN_ERR, "port failed to respond " 391 ata_port_printk(ap, KERN_ERR, "port failed to respond "
392 "(%lu secs, Status 0x%x)\n", 392 "(%lu secs, Status 0x%x)\n",
393 tmout / HZ, status); 393 DIV_ROUND_UP(tmout, 1000), status);
394 return -EBUSY; 394 return -EBUSY;
395 } 395 }
396 396
@@ -1888,7 +1888,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1888 unsigned int dev1 = devmask & (1 << 1); 1888 unsigned int dev1 = devmask & (1 << 1);
1889 int rc, ret = 0; 1889 int rc, ret = 0;
1890 1890
1891 msleep(ATA_WAIT_AFTER_RESET_MSECS); 1891 msleep(ATA_WAIT_AFTER_RESET);
1892 1892
1893 /* always check readiness of the master device */ 1893 /* always check readiness of the master device */
1894 rc = ata_sff_wait_ready(link, deadline); 1894 rc = ata_sff_wait_ready(link, deadline);
@@ -2371,7 +2371,8 @@ void ata_bus_reset(struct ata_port *ap)
2371 2371
2372 /* issue bus reset */ 2372 /* issue bus reset */
2373 if (ap->flags & ATA_FLAG_SRST) { 2373 if (ap->flags & ATA_FLAG_SRST) {
2374 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); 2374 rc = ata_bus_softreset(ap, devmask,
2375 ata_deadline(jiffies, 40000));
2375 if (rc && rc != -ENODEV) 2376 if (rc && rc != -ENODEV)
2376 goto err_out; 2377 goto err_out;
2377 } 2378 }
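
The libata-sff.c hunks above are part of moving libata durations from raw jiffies to milliseconds: callers now pass msec values and obtain an absolute deadline via ata_deadline(). The helper itself is defined elsewhere (libata.h) and is not shown in this diff; a minimal sketch, assuming it is a thin wrapper around msecs_to_jiffies():

#include <linux/jiffies.h>

/* Sketch: turn a relative timeout in msecs into an absolute jiffies deadline. */
static inline unsigned long ata_deadline(unsigned long from_jiffies,
                                         unsigned long timeout_msecs)
{
        return from_jiffies + msecs_to_jiffies(timeout_msecs);
}

With tmout now expressed in msecs, the failure message correspondingly reports DIV_ROUND_UP(tmout, 1000) seconds rather than tmout / HZ.
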
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 1cf803adbc9..f6f9c28ec7f 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -151,6 +151,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
151extern int ata_bus_probe(struct ata_port *ap); 151extern int ata_bus_probe(struct ata_port *ap);
152 152
153/* libata-eh.c */ 153/* libata-eh.c */
154extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
155extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
154extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 156extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
155extern void ata_scsi_error(struct Scsi_Host *host); 157extern void ata_scsi_error(struct Scsi_Host *host);
156extern void ata_port_wait_eh(struct ata_port *ap); 158extern void ata_port_wait_eh(struct ata_port *ap);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 55516103626..d3932901a3b 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1011,7 +1011,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1011 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1011 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1012 unsigned int dev0 = devmask & (1 << 0); 1012 unsigned int dev0 = devmask & (1 << 0);
1013 unsigned int dev1 = devmask & (1 << 1); 1013 unsigned int dev1 = devmask & (1 << 1);
1014 unsigned long timeout; 1014 unsigned long deadline;
1015 1015
1016 /* if device 0 was found in ata_devchk, wait for its 1016 /* if device 0 was found in ata_devchk, wait for its
1017 * BSY bit to clear 1017 * BSY bit to clear
@@ -1022,7 +1022,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1022 /* if device 1 was found in ata_devchk, wait for 1022 /* if device 1 was found in ata_devchk, wait for
1023 * register access, then wait for BSY to clear 1023 * register access, then wait for BSY to clear
1024 */ 1024 */
1025 timeout = jiffies + ATA_TMOUT_BOOT; 1025 deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
1026 while (dev1) { 1026 while (dev1) {
1027 u8 nsect, lbal; 1027 u8 nsect, lbal;
1028 1028
@@ -1031,7 +1031,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1031 lbal = read_atapi_register(base, ATA_REG_LBAL); 1031 lbal = read_atapi_register(base, ATA_REG_LBAL);
1032 if ((nsect == 1) && (lbal == 1)) 1032 if ((nsect == 1) && (lbal == 1))
1033 break; 1033 break;
1034 if (time_after(jiffies, timeout)) { 1034 if (time_after(jiffies, deadline)) {
1035 dev1 = 0; 1035 dev1 = 0;
1036 break; 1036 break;
1037 } 1037 }
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index fe7cc8ed4ea..bc037ffce20 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -305,7 +305,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
305 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 305 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
306 306
307 if (unlikely(slop)) { 307 if (unlikely(slop)) {
308 u32 pad; 308 __le32 pad;
309 if (rw == READ) { 309 if (rw == READ) {
310 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 310 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
311 memcpy(buf + buflen - slop, &pad, slop); 311 memcpy(buf + buflen - slop, &pad, slop);
@@ -746,14 +746,12 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
746 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 746 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
747 747
748 if (unlikely(slop)) { 748 if (unlikely(slop)) {
749 u32 pad; 749 __le32 pad;
750 if (rw == WRITE) { 750 if (rw == WRITE) {
751 memcpy(&pad, buf + buflen - slop, slop); 751 memcpy(&pad, buf + buflen - slop, slop);
752 pad = le32_to_cpu(pad); 752 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
753 iowrite32(pad, ap->ioaddr.data_addr);
754 } else { 753 } else {
755 pad = ioread32(ap->ioaddr.data_addr); 754 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
756 pad = cpu_to_le32(pad);
757 memcpy(buf + buflen - slop, &pad, slop); 755 memcpy(buf + buflen - slop, &pad, slop);
758 } 756 }
759 } 757 }
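
The pata_legacy.c change (and the matching ones in pata_qdi.c and pata_winbond.c below) annotates the temporary used for the trailing "slop" bytes as __le32 instead of u32, so sparse can check that the 32-bit data-FIFO accesses keep the bytes in little-endian bus order. A self-contained sketch of the shared pattern; the function name is illustrative and rw takes READ/WRITE as in <linux/fs.h>:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/io.h>

/* Sketch: 32-bit PIO transfer with an annotated little-endian tail word. */
static void pio32_slop_sketch(void __iomem *data_addr, unsigned char *buf,
                              unsigned int buflen, int rw)
{
        unsigned int slop = buflen & 3;         /* bytes left after whole dwords */

        if (rw == READ)
                ioread32_rep(data_addr, buf, buflen >> 2);
        else
                iowrite32_rep(data_addr, buf, buflen >> 2);

        if (unlikely(slop)) {
                __le32 pad;     /* FIFO data is little-endian on the wire */

                if (rw == WRITE) {
                        /* only 'slop' bytes of pad are meaningful; the rest is
                         * don't-care filler, as in the drivers above */
                        memcpy(&pad, buf + buflen - slop, slop);
                        iowrite32(le32_to_cpu(pad), data_addr);
                } else {
                        pad = cpu_to_le32(ioread32(data_addr));
                        memcpy(buf + buflen - slop, &pad, slop);
                }
        }
}

Folding the conversions into the iowrite32()/memcpy() calls, as the new pata_legacy.c code does, also keeps the __le32 temporary from ever holding a cpu-endian value, which is exactly the kind of mixup the annotation lets sparse catch.
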
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 97e5b090d7c..63b7a1c165a 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -137,7 +137,7 @@ static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
137 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 137 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
138 138
139 if (unlikely(slop)) { 139 if (unlikely(slop)) {
140 u32 pad; 140 __le32 pad;
141 if (rw == READ) { 141 if (rw == READ) {
142 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 142 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
143 memcpy(buf + buflen - slop, &pad, slop); 143 memcpy(buf + buflen - slop, &pad, slop);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index bbf5aa345e6..16673d16857 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -696,7 +696,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
696 696
697 if (reg & INTSTS_BMSINT) { 697 if (reg & INTSTS_BMSINT) {
698 unsigned int classes; 698 unsigned int classes;
699 unsigned long deadline = jiffies + ATA_TMOUT_BOOT; 699 unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
700 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); 700 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
701 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); 701 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
702 /* TBD: SW reset */ 702 /* TBD: SW reset */
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 474528f8fe3..a7606b044a6 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -105,7 +105,7 @@ static unsigned int winbond_data_xfer(struct ata_device *dev,
105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
106 106
107 if (unlikely(slop)) { 107 if (unlikely(slop)) {
108 u32 pad; 108 __le32 pad;
109 if (rw == READ) { 109 if (rw == READ) {
110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
111 memcpy(buf + buflen - slop, &pad, slop); 111 memcpy(buf + buflen - slop, &pad, slop);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 16aa6839aa5..fb13b82aacb 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -253,21 +253,29 @@ static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
253 /* start host DMA transaction */ 253 /* start host DMA transaction */
254 dmactl = readb(mmio + ATA_DMA_CMD); 254 dmactl = readb(mmio + ATA_DMA_CMD);
255 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); 255 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
256 /* There is a race condition in certain SATA controllers that can 256 /* This works around possible data corruption.
257 be seen when the r/w command is given to the controller before the 257
258 host DMA is started. On a Read command, the controller would initiate 258 On certain SATA controllers, this can be seen when the r/w
259 the command to the drive even before it sees the DMA start. When there 259 command is given to the controller before the host DMA is
260 are very fast drives connected to the controller, or when the data request 260 started.
261 hits in the drive cache, there is the possibility that the drive returns a part 261
262 or all of the requested data to the controller before the DMA start is issued. 262 On a Read command, the controller would initiate the
263 In this case, the controller would become confused as to what to do with the data. 263 command to the drive even before it sees the DMA
264 In the worst case when all the data is returned back to the controller, the 264 start. When there are very fast drives connected to the
265 controller could hang. In other cases it could return partial data returning 265 controller, or when the data request hits in the drive
266 in data corruption. This problem has been seen in PPC systems and can also appear 266 cache, there is the possibility that the drive returns a
267 on an system with very fast disks, where the SATA controller is sitting behind a 267 part or all of the requested data to the controller before
268 number of bridges, and hence there is significant latency between the r/w command 268 the DMA start is issued. In this case, the controller
269 and the start command. */ 269 would become confused as to what to do with the data. In
270 /* issue r/w command if the access is to ATA*/ 270 the worst case when all the data is returned back to the
271 controller, the controller could hang. In other cases it
272 could return partial data, resulting in data
273 corruption. This problem has been seen in PPC systems and
274 can also appear on a system with very fast disks, where
275 the SATA controller is sitting behind a number of bridges,
276 and hence there is significant latency between the r/w
277 command and the start command. */
278 /* issue r/w command if the access is to ATA */
271 if (qc->tf.protocol == ATA_PROT_DMA) 279 if (qc->tf.protocol == ATA_PROT_DMA)
272 ap->ops->sff_exec_command(ap, &qc->tf); 280 ap->ops->sff_exec_command(ap, &qc->tf);
273} 281}
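
The rewrapped comment documents why k2_bmdma_start_mmio() only sends the r/w command to the drive after the host DMA engine has been started. A simplified sketch of the ordering on the DMA path; it assumes (as the comment implies, though this hunk does not show it) that the driver's bmdma_setup callback skips sff_exec_command() for ATA_PROT_DMA so that the start routine can issue it last:

#include <linux/io.h>
#include <linux/libata.h>

/* Sketch of the issue ordering only; mirrors the workaround above. */
static void k2_dma_start_order_sketch(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *mmio = ap->ioaddr.bmdma_addr;
        u8 dmactl;

        /* 1. PRD table and direction were programmed in bmdma_setup (not shown) */

        /* 2. arm the host DMA engine first ... */
        dmactl = readb(mmio + ATA_DMA_CMD);
        writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

        /* 3. ... and only then let the drive see the r/w command, so it cannot
         * return data before the controller is ready to accept it */
        if (qc->tf.protocol == ATA_PROT_DMA)
                ap->ops->sff_exec_command(ap, &qc->tf);
}
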