Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r--  drivers/ata/libata-core.c  142
1 file changed, 99 insertions(+), 43 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index dc72690ed5db..49cffb6094a3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -58,6 +58,7 @@
 #include <linux/io.h>
 #include <linux/async.h>
 #include <linux/log2.h>
+#include <linux/slab.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -1493,6 +1494,7 @@ static int ata_hpa_resize(struct ata_device *dev)
 {
         struct ata_eh_context *ehc = &dev->link->eh_context;
         int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+        bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
         u64 sectors = ata_id_n_sectors(dev->id);
         u64 native_sectors;
         int rc;
@@ -1509,7 +1511,7 @@ static int ata_hpa_resize(struct ata_device *dev)
         /* If device aborted the command or HPA isn't going to
          * be unlocked, skip HPA resizing.
          */
-        if (rc == -EACCES || !ata_ignore_hpa) {
+        if (rc == -EACCES || !unlock_hpa) {
                 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
                                "broken, skipping HPA handling\n");
                 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
@@ -1524,7 +1526,7 @@ static int ata_hpa_resize(struct ata_device *dev)
         dev->n_native_sectors = native_sectors;
 
         /* nothing to do? */
-        if (native_sectors <= sectors || !ata_ignore_hpa) {
+        if (native_sectors <= sectors || !unlock_hpa) {
                 if (!print_info || native_sectors == sectors)
                         return 0;
 
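
For illustration only (this sketch is not part of the patch): the new unlock_hpa value simply ORs the global ata_ignore_hpa module parameter with a per-device ATA_DFLAG_UNLOCK_HPA request, so both call sites above honour an unlock request set during error handling, not just the module parameter. A minimal standalone version of that decision; the scaffolding and the flag's bit value here are invented for the example.

/* Sketch only, not kernel code: how the unlock_hpa decision combines the
 * global knob with the per-device request.  The bit value of the flag and the
 * struct layout are placeholders for this example. */
#include <stdbool.h>
#include <stdio.h>

#define ATA_DFLAG_UNLOCK_HPA (1 << 4)  /* placeholder bit for the sketch */

static bool ata_ignore_hpa;            /* stands in for the module parameter */

struct ata_device {
        unsigned long flags;
};

static bool should_unlock_hpa(const struct ata_device *dev)
{
        /* unlock if either the global override or the per-device flag asks */
        return ata_ignore_hpa || (dev->flags & ATA_DFLAG_UNLOCK_HPA);
}

int main(void)
{
        struct ata_device dev = { .flags = ATA_DFLAG_UNLOCK_HPA };

        ata_ignore_hpa = false;
        printf("per-device request only: %d\n", should_unlock_hpa(&dev)); /* 1 */

        dev.flags = 0;
        printf("no request at all:       %d\n", should_unlock_hpa(&dev)); /* 0 */
        return 0;
}
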
@@ -2232,7 +2234,7 @@ retry:
          * Some drives were very specific about that exact sequence.
          *
          * Note that ATA4 says lba is mandatory so the second check
-         * shoud never trigger.
+         * should never trigger.
          */
         if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                 err_mask = ata_dev_init_params(dev, id[3], id[6]);
@@ -3211,6 +3213,7 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                        struct ata_timing *t, int T, int UT)
 {
+        const u16 *id = adev->id;
         const struct ata_timing *s;
         struct ata_timing p;
 
@@ -3228,14 +3231,18 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
          * PIO/MW_DMA cycle timing.
          */
 
-        if (adev->id[ATA_ID_FIELD_VALID] & 2) {        /* EIDE drive */
+        if (id[ATA_ID_FIELD_VALID] & 2) {        /* EIDE drive */
                 memset(&p, 0, sizeof(p));
+
                 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
-                        if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
-                                else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
-                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
-                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
-                }
+                        if (speed <= XFER_PIO_2)
+                                p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+                        else if ((speed <= XFER_PIO_4) ||
+                                 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+                                p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+                        p.cycle = id[ATA_ID_EIDE_DMA_MIN];
+
                 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
         }
 
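
For illustration only (not part of the patch): the rewritten check above trusts the IORDY cycle-time word up to PIO4, or PIO5 on non-CFA devices, instead of for every mode above PIO2. A standalone sketch of that selection follows; the mode constants only preserve the ordering used by the comparisons and are not the kernel's XFER_* values.

/* Sketch only: which EIDE timing field the patched ata_timing_compute()
 * consults for a given transfer mode.  Enum values are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum xfer_mode {                 /* simplified stand-ins for XFER_* */
        PIO_0, PIO_1, PIO_2, PIO_3, PIO_4, PIO_5, PIO_6,
        SW_DMA_0, SW_DMA_1, SW_DMA_2,
        MW_DMA_0, MW_DMA_1, MW_DMA_2,
};

enum timing_word {
        WORD_NONE,               /* no EIDE cycle time applies       */
        WORD_EIDE_PIO,           /* id[ATA_ID_EIDE_PIO]              */
        WORD_EIDE_PIO_IORDY,     /* id[ATA_ID_EIDE_PIO_IORDY]        */
        WORD_EIDE_DMA_MIN,       /* id[ATA_ID_EIDE_DMA_MIN]          */
};

static enum timing_word pick_timing_word(enum xfer_mode speed, bool is_cfa)
{
        if (speed >= PIO_0 && speed <= SW_DMA_0) {
                if (speed <= PIO_2)
                        return WORD_EIDE_PIO;
                /* IORDY time applies up to PIO4, or PIO5 on non-CFA devices */
                if (speed <= PIO_4 || (speed == PIO_5 && !is_cfa))
                        return WORD_EIDE_PIO_IORDY;
                return WORD_NONE;
        }
        if (speed >= MW_DMA_0 && speed <= MW_DMA_2)
                return WORD_EIDE_DMA_MIN;
        return WORD_NONE;
}

int main(void)
{
        printf("PIO5 on a CFA card uses word: %d (0 = none)\n",
               pick_timing_word(PIO_5, true));
        return 0;
}
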
@@ -3790,21 +3797,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
 int sata_link_resume(struct ata_link *link, const unsigned long *params,
                      unsigned long deadline)
 {
+        int tries = ATA_LINK_RESUME_TRIES;
         u32 scontrol, serror;
         int rc;
 
         if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                 return rc;
 
-        scontrol = (scontrol & 0x0f0) | 0x300;
-
-        if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
-                return rc;
+        /*
+         * Writes to SControl sometimes get ignored under certain
+         * controllers (ata_piix SIDPR). Make sure DET actually is
+         * cleared.
+         */
+        do {
+                scontrol = (scontrol & 0x0f0) | 0x300;
+                if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+                        return rc;
+                /*
+                 * Some PHYs react badly if SStatus is pounded
+                 * immediately after resuming. Delay 200ms before
+                 * debouncing.
+                 */
+                msleep(200);
 
-        /* Some PHYs react badly if SStatus is pounded immediately
-         * after resuming. Delay 200ms before debouncing.
-         */
-        msleep(200);
+                /* is SControl restored correctly? */
+                if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+                        return rc;
+        } while ((scontrol & 0xf0f) != 0x300 && --tries);
+
+        if ((scontrol & 0xf0f) != 0x300) {
+                ata_link_printk(link, KERN_ERR,
+                                "failed to resume link (SControl %X)\n",
+                                scontrol);
+                return 0;
+        }
+
+        if (tries < ATA_LINK_RESUME_TRIES)
+                ata_link_printk(link, KERN_WARNING,
+                                "link resume succeeded after %d retries\n",
+                                ATA_LINK_RESUME_TRIES - tries);
 
         if ((rc = sata_link_debounce(link, params, deadline)))
                 return rc;
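
For illustration only (not part of the patch): the loop added above is a write-then-read-back-and-retry pattern, needed because, per the comment, some controllers (ata_piix SIDPR) can silently drop the SControl write. A standalone sketch of the same pattern against a fake register; the 200ms delay and the read/write error handling of the real code are omitted here.

/* Sketch only: retry a register write until the readback shows it stuck,
 * bounded by a retry budget, against a simulated controller that drops the
 * first couple of writes. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESUME_TRIES 5                  /* mirrors ATA_LINK_RESUME_TRIES */

static uint32_t fake_reg;               /* pretend hardware register */
static int ignored_writes = 2;          /* simulate a controller dropping writes */

static uint32_t reg_read(void) { return fake_reg; }

static void reg_write(uint32_t v)
{
        if (ignored_writes > 0)
                ignored_writes--;       /* write silently ignored */
        else
                fake_reg = v;
}

static bool resume_link(void)
{
        int tries = RESUME_TRIES;
        uint32_t scontrol = reg_read();

        do {
                /* request "no action" DET, as the kernel code does */
                scontrol = (scontrol & 0x0f0) | 0x300;
                reg_write(scontrol);
                /* the real code sleeps 200ms here before re-reading */
                scontrol = reg_read();
        } while ((scontrol & 0xf0f) != 0x300 && --tries);

        return (scontrol & 0xf0f) == 0x300;
}

int main(void)
{
        printf("link resumed: %s\n", resume_link() ? "yes" : "no");
        return 0;
}
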
@@ -4156,36 +4187,51 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
                 goto fail;
 
         /* verify n_sectors hasn't changed */
-        if (dev->class == ATA_DEV_ATA && n_sectors &&
-            dev->n_sectors != n_sectors) {
-                ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
-                               "%llu != %llu\n",
-                               (unsigned long long)n_sectors,
-                               (unsigned long long)dev->n_sectors);
-                /*
-                 * Something could have caused HPA to be unlocked
-                 * involuntarily. If n_native_sectors hasn't changed
-                 * and the new size matches it, keep the device.
-                 */
-                if (dev->n_native_sectors == n_native_sectors &&
-                    dev->n_sectors > n_sectors &&
-                    dev->n_sectors == n_native_sectors) {
-                        ata_dev_printk(dev, KERN_WARNING,
-                                       "new n_sectors matches native, probably "
-                                       "late HPA unlock, continuing\n");
-                        /* keep using the old n_sectors */
-                        dev->n_sectors = n_sectors;
-                } else {
-                        /* restore original n_[native]_sectors and fail */
-                        dev->n_native_sectors = n_native_sectors;
-                        dev->n_sectors = n_sectors;
-                        rc = -ENODEV;
-                        goto fail;
-                }
+        if (dev->class != ATA_DEV_ATA || !n_sectors ||
+            dev->n_sectors == n_sectors)
+                return 0;
+
+        /* n_sectors has changed */
+        ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
+                       (unsigned long long)n_sectors,
+                       (unsigned long long)dev->n_sectors);
+
+        /*
+         * Something could have caused HPA to be unlocked
+         * involuntarily. If n_native_sectors hasn't changed and the
+         * new size matches it, keep the device.
+         */
+        if (dev->n_native_sectors == n_native_sectors &&
+            dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
+                ata_dev_printk(dev, KERN_WARNING,
+                               "new n_sectors matches native, probably "
+                               "late HPA unlock, continuing\n");
+                /* keep using the old n_sectors */
+                dev->n_sectors = n_sectors;
+                return 0;
         }
 
-        return 0;
+        /*
+         * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
+         * unlocking HPA in those cases.
+         *
+         * https://bugzilla.kernel.org/show_bug.cgi?id=15396
+         */
+        if (dev->n_native_sectors == n_native_sectors &&
+            dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
+            !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
+                ata_dev_printk(dev, KERN_WARNING,
+                               "old n_sectors matches native, probably "
+                               "late HPA lock, will try to unlock HPA\n");
+                /* try unlocking HPA */
+                dev->flags |= ATA_DFLAG_UNLOCK_HPA;
+                rc = -EIO;
+        } else
+                rc = -ENODEV;
 
+        /* restore original n_[native_]sectors and fail */
+        dev->n_native_sectors = n_native_sectors;
+        dev->n_sectors = n_sectors;
  fail:
         ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
         return rc;
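
For illustration only (not part of the patch): after the restructuring above, a changed sector count leads to one of three outcomes: keep the device (size unchanged, or it grew to its native size after a late HPA unlock), fail with -EIO while requesting an HPA unlock (it shrank away from its native size, presumably a late HPA lock by firmware on resume, so EH can retry with ATA_DFLAG_UNLOCK_HPA set), or fail with -ENODEV. A standalone sketch of that decision as a pure function; the enum and helper names are invented for the example.

/* Sketch only: the three-way n_sectors decision.  "old" values were cached
 * before revalidation, "new" values are what IDENTIFY just returned. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reval_action {
        REVAL_KEEP,              /* keep device and the old n_sectors         */
        REVAL_RETRY_UNLOCK_HPA,  /* fail with -EIO, retry with HPA unlocked   */
        REVAL_FAIL,              /* fail with -ENODEV, treat as a new device  */
};

static enum reval_action check_n_sectors(uint64_t old_sectors, uint64_t new_sectors,
                                         uint64_t old_native, uint64_t new_native,
                                         bool broken_hpa)
{
        if (old_sectors == new_sectors)
                return REVAL_KEEP;

        if (old_native != new_native)
                return REVAL_FAIL;

        /* grew to the native size: probably a late HPA unlock, keep going */
        if (new_sectors > old_sectors && new_sectors == new_native)
                return REVAL_KEEP;

        /* shrank away from the native size: probably a late HPA lock */
        if (new_sectors < old_sectors && old_sectors == new_native && !broken_hpa)
                return REVAL_RETRY_UNLOCK_HPA;

        return REVAL_FAIL;
}

int main(void)
{
        /* device came back smaller after resume; native size unchanged */
        printf("%d\n", check_n_sectors(1000, 900, 1000, 1000, false)); /* 1: retry */
        return 0;
}
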
@@ -4324,6 +4370,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         { "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
         { "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
 
+        /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+        { "C300-CTFDDAC128MAG", "0001",         ATA_HORKAGE_NONCQ, },
+
         /* devices which puke on READ_NATIVE_MAX */
         { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
         { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -6616,6 +6665,13 @@ static int __init ata_init(void)
 {
         ata_parse_force_param();
 
+        /*
+         * FIXME: In UP case, there is only one workqueue thread and if you
+         * have more than one PIO device, latency is bloody awful, with
+         * occasional multi-second "hiccups" as one PIO device waits for
+         * another. It's an ugly wart that users DO occasionally complain
+         * about; luckily most users have at most one PIO polled device.
+         */
         ata_wq = create_workqueue("ata");
         if (!ata_wq)
                 goto free_force_tbl;