author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 16:38:50 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 16:38:50 -0400
commit	57399ec9077a4b962b81037aaa279fab52f5e989 (patch)
tree	847dfb9304001ab9ffc5eef5f20ae514cd01bb90
parent	e1bd2ac5a6b7a8b625e40c9e9f8b6dea4cf22f85 (diff)
parent	c6e54a578133fb353a50fb44d650768b3b9eb18e (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (21 commits)
libata: remove irq_on from ata_bus_reset() and ata_std_postreset()
ata_piix: kill incorrect invalid map value warning
libata: add another Maxtor drive with broken NCQ to the list
[libata] sata_mv: Fix and clean up per-chip-generation tests
[libata] sata_mv: Convert to new exception handling (EH) infrastructure
[libata] sata_mv: minor bug fixes, enhancements, and cleanups (prep for new EH)
[libata] sata_mv: Minor cleanups and renaming, preparing for new EH & NCQ
libata-link: add PMP related ATA constants
libata-link: separate out ata_eh_handle_dev_fail()
pata_hpt3x3: fix DMA Kconfig option to actually have a hope of working
Add Hitachi HDS7250SASUN500G 0621KTAWSD to NCQ blacklist
pata_scc.c: Workaround for errata A308
libata: add FUJITSU MHV2080BH to NCQ blacklist
pata_hpt3x3: major reworking and testing
libata: clean up horkage handling
libata: quirk IOMEGA ZIP 250 ATAPI FLOPPY
libata: simplify PCI legacy SFF host handling
pata_mpc52xx: suspend/resume support
sata_promise: SATA hotplug support, take 2
pata_sis: FIFO whack
...
-rw-r--r--	drivers/ata/Kconfig		10
-rw-r--r--	drivers/ata/ata_piix.c		2
-rw-r--r--	drivers/ata/libata-core.c	74
-rw-r--r--	drivers/ata/libata-eh.c		96
-rw-r--r--	drivers/ata/libata-sff.c	288
-rw-r--r--	drivers/ata/pata_hpt3x3.c	93
-rw-r--r--	drivers/ata/pata_mpc52xx.c	18
-rw-r--r--	drivers/ata/pata_scc.c		54
-rw-r--r--	drivers/ata/pata_sis.c		3
-rw-r--r--	drivers/ata/sata_mv.c		887
-rw-r--r--	drivers/ata/sata_nv.c		2
-rw-r--r--	drivers/ata/sata_promise.c	41
-rw-r--r--	drivers/ata/sata_sis.c		2
-rw-r--r--	drivers/ata/sata_uli.c		2
-rw-r--r--	drivers/ata/sata_via.c		2
-rw-r--r--	include/linux/ata.h		29
-rw-r--r--	include/linux/libata.h		12
17 files changed, 890 insertions, 725 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ad8675f5a16..d8046a113c37 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -309,7 +309,7 @@ config PATA_HPT3X2N
 	  If unsure, say N.
 
 config PATA_HPT3X3
-	tristate "HPT 343/363 PATA support (Experimental)"
+	tristate "HPT 343/363 PATA support"
 	depends on PCI
 	help
 	  This option enables support for the HPT 343/363
@@ -317,6 +317,14 @@ config PATA_HPT3X3
 
 	  If unsure, say N.
 
+config PATA_HPT3X3_DMA
+	bool "HPT 343/363 DMA support (Experimental)"
+	depends on PATA_HPT3X3
+	help
+	  This option enables DMA support for the HPT343/363
+	  controllers. Enable with care as there are still some
+	  problems with DMA on this chipset.
+
 config PATA_ISAPNP
 	tristate "ISA Plug and Play PATA support (Experimental)"
 	depends on EXPERIMENTAL && ISAPNP
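Since PATA_HPT3X3_DMA is a bool that depends on the PATA_HPT3X3 tristate, it only appears once the base driver is selected, and it reaches C code as an ordinary preprocessor symbol. A minimal sketch of the resulting guard pattern (illustration only, mirroring the #if defined() guards in the pata_hpt3x3.c hunks further down):

	/* CONFIG_PATA_HPT3X3_DMA is defined only when the Kconfig bool is
	 * set to y, so DMA-only code can be compiled out wholesale. */
	#if defined(CONFIG_PATA_HPT3X3_DMA)
	static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev);
	#endif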
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6a3bfef58e13..21a7ca4936b4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -414,7 +414,7 @@ static const struct piix_map_db ich6m_map_db = {
 	 */
 	.map = {
 		/* PM   PS   SM   SS       MAP */
-		{  P0,  P2,  RV,  RV }, /* 00b */
+		{  P0,  P2,  NA,  NA }, /* 00b */
 		{  IDE, IDE, P1,  P3 }, /* 01b */
 		{  P0,  P2,  IDE, IDE }, /* 10b */
 		{  RV,  RV,  RV,  RV },
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5b25311ba885..88e2dd0983b5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -71,6 +71,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 					u16 heads, u16 sectors);
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_device *dev);
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
 unsigned int ata_print_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -1283,18 +1284,11 @@ static unsigned int ata_id_xfermask(const u16 *id)
 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
-	int rc;
-
-	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
-		return;
-
 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
 	ap->port_task_data = data;
 
-	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
-
-	/* rc == 0 means that another user is using port task */
-	WARN_ON(rc == 0);
+	/* may fail if ata_port_flush_task() in progress */
+	queue_delayed_work(ata_wq, &ap->port_task, delay);
 }
 
 /**
@@ -1309,32 +1303,9 @@ void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
  */
 void ata_port_flush_task(struct ata_port *ap)
 {
-	unsigned long flags;
-
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(ap->lock, flags);
-	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	DPRINTK("flush #1\n");
-	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
-
-	/*
-	 * At this point, if a task is running, it's guaranteed to see
-	 * the FLUSH flag; thus, it will never queue pio tasks again.
-	 * Cancel and flush.
-	 */
-	if (!cancel_delayed_work(&ap->port_task)) {
-		if (ata_msg_ctl(ap))
-			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
-					__FUNCTION__);
-		cancel_work_sync(&ap->port_task.work);
-	}
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(ap->lock, flags);
+	cancel_rearming_delayed_work(&ap->port_task);
 
 	if (ata_msg_ctl(ap))
 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
@@ -1814,7 +1785,7 @@ static void ata_dev_config_ncq(struct ata_device *dev,
 		desc[0] = '\0';
 		return;
 	}
-	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
+	if (dev->horkage & ATA_HORKAGE_NONCQ) {
 		snprintf(desc, desc_sz, "NCQ (not used)");
 		return;
 	}
@@ -1863,6 +1834,9 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
 
+	/* set horkage */
+	dev->horkage |= ata_dev_blacklisted(dev);
+
 	/* let ACPI work its magic */
 	rc = ata_acpi_on_devcfg(dev);
 	if (rc)
@@ -2038,7 +2012,7 @@ int ata_dev_configure(struct ata_device *dev)
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
 
-	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
 					 dev->max_sectors);
 
@@ -3190,9 +3164,6 @@ void ata_bus_reset(struct ata_port *ap)
 	if ((slave_possible) && (err != 0x81))
 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
 
-	/* re-enable interrupts */
-	ap->ops->irq_on(ap);
-
 	/* is double-select really necessary? */
 	if (ap->device[1].class != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 1);
@@ -3577,10 +3548,6 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 		sata_scr_write(ap, SCR_ERROR, serror);
 
-	/* re-enable interrupts */
-	if (!ap->ops->error_handler)
-		ap->ops->irq_on(ap);
-
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 1);
@@ -3770,6 +3737,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
 	{ "IOMEGA ZIP 250 ATAPI", NULL,		ATA_HORKAGE_NODMA }, /* temporary fix */
+	{ "IOMEGA ZIP 250 ATAPI Floppy",
+				NULL,		ATA_HORKAGE_NODMA },
 
 	/* Weird ATAPI devices */
 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
@@ -3783,7 +3752,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
 	/* NCQ is broken */
 	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
+	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
+	  ATA_HORKAGE_NONCQ },
 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
 	/* Blacklist entries taken from Silicon Image 3124/3132
@@ -3796,6 +3768,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
+	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
 
 	/* Devices with NCQ limits */
 
@@ -3803,7 +3776,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ }
 };
 
-unsigned long ata_device_blacklisted(const struct ata_device *dev)
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
@@ -3833,7 +3806,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
 	    (dev->flags & ATA_DFLAG_CDB_INTR))
 		return 1;
-	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
+	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
 }
 
 /**
@@ -6557,13 +6530,7 @@ void ata_port_detach(struct ata_port *ap)
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
-
-	/* Flush hotplug task.  The sequence is similar to
-	 * ata_port_flush_task().
-	 */
-	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
-	cancel_delayed_work(&ap->hotplug_task);
-	cancel_work_sync(&ap->hotplug_task.work);
+	cancel_rearming_delayed_work(&ap->hotplug_task);
 
  skip_eh:
 	/* remove the associated SCSI host */
@@ -6952,7 +6919,6 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
-EXPORT_SYMBOL_GPL(ata_device_blacklisted);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
@@ -6961,9 +6927,9 @@ EXPORT_SYMBOL_GPL(ata_timing_merge);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
-EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
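The recurring libata-core change above is a caching pattern: the blacklist table is scanned once in ata_dev_configure() and the result is OR-ed into dev->horkage, so every later consumer does a cheap bit test instead of re-matching model strings. A standalone C sketch of the same idea (names are illustrative, not libata's):

	#include <stdio.h>

	/* hypothetical flag bits, in the style of ATA_HORKAGE_* */
	#define HORKAGE_NODMA (1UL << 0)
	#define HORKAGE_NONCQ (1UL << 1)

	struct dev { unsigned long horkage; };

	/* stand-in for the one-time blacklist scan */
	static unsigned long blacklist_lookup(void) { return HORKAGE_NONCQ; }

	int main(void)
	{
		struct dev d = { 0 };

		d.horkage |= blacklist_lookup();   /* once, at configure time */

		if (d.horkage & HORKAGE_NONCQ)     /* later checks are bit tests */
			printf("NCQ (not used)\n");
		return 0;
	}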
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9ee0a8c08d96..9aa62a0754f6 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1897,6 +1897,57 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
 	return 1;
 }
 
+static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+	struct ata_port *ap = dev->ap;
+	struct ata_eh_context *ehc = &ap->eh_context;
+
+	ehc->tries[dev->devno]--;
+
+	switch (err) {
+	case -ENODEV:
+		/* device missing or wrong IDENTIFY data, schedule probing */
+		ehc->i.probe_mask |= (1 << dev->devno);
+	case -EINVAL:
+		/* give it just one more chance */
+		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+	case -EIO:
+		if (ehc->tries[dev->devno] == 1) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(ap);
+			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
+	}
+
+	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
+		/* disable device if it has used up all its chances */
+		ata_dev_disable(dev);
+
+		/* detach if offline */
+		if (ata_port_offline(ap))
+			ata_eh_detach_dev(dev);
+
+		/* probe if requested */
+		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
+		    !(ehc->did_probe_mask & (1 << dev->devno))) {
+			ata_eh_detach_dev(dev);
+			ata_dev_init(dev);
+
+			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+			ehc->did_probe_mask |= (1 << dev->devno);
+			ehc->i.action |= ATA_EH_SOFTRESET;
+		}
+	} else {
+		/* soft didn't work?  be haaaaard */
+		if (ehc->i.flags & ATA_EHI_DID_RESET)
+			ehc->i.action |= ATA_EH_HARDRESET;
+		else
+			ehc->i.action |= ATA_EH_SOFTRESET;
+	}
+}
+
 /**
  * ata_eh_recover - recover host port after error
  * @ap: host port to recover
@@ -1997,50 +2048,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	goto out;
 
  dev_fail:
-	ehc->tries[dev->devno]--;
-
-	switch (rc) {
-	case -ENODEV:
-		/* device missing or wrong IDENTIFY data, schedule probing */
-		ehc->i.probe_mask |= (1 << dev->devno);
-	case -EINVAL:
-		/* give it just one more chance */
-		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
-	case -EIO:
-		if (ehc->tries[dev->devno] == 1) {
-			/* This is the last chance, better to slow
-			 * down than lose it.
-			 */
-			sata_down_spd_limit(ap);
-			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
-		}
-	}
-
-	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
-		/* disable device if it has used up all its chances */
-		ata_dev_disable(dev);
-
-		/* detach if offline */
-		if (ata_port_offline(ap))
-			ata_eh_detach_dev(dev);
-
-		/* probe if requested */
-		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
-		    !(ehc->did_probe_mask & (1 << dev->devno))) {
-			ata_eh_detach_dev(dev);
-			ata_dev_init(dev);
-
-			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
-			ehc->did_probe_mask |= (1 << dev->devno);
-			ehc->i.action |= ATA_EH_SOFTRESET;
-		}
-	} else {
-		/* soft didn't work?  be haaaaard */
-		if (ehc->i.flags & ATA_EHI_DID_RESET)
-			ehc->i.action |= ATA_EH_HARDRESET;
-		else
-			ehc->i.action |= ATA_EH_SOFTRESET;
-	}
+	ata_eh_handle_dev_fail(dev, rc);
 
 	if (ata_port_nr_enabled(ap)) {
 		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
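The switch in the newly split-out ata_eh_handle_dev_fail() is moved verbatim, including its deliberate case fallthrough: -ENODEV also gets the -EINVAL and -EIO treatment, and -EINVAL also gets the -EIO treatment. A tiny standalone C example of that accumulation pattern (illustrative only, not libata code):

	#include <stdio.h>
	#include <errno.h>

	/* each case deliberately falls through, so a "worse" error also
	 * triggers every handling step of the milder ones */
	static void classify(int err)
	{
		switch (err) {
		case -ENODEV:
			printf("schedule probe, ");
			/* fall through */
		case -EINVAL:
			printf("allow one more try, ");
			/* fall through */
		case -EIO:
			printf("consider slowing the link down\n");
		}
	}

	int main(void)
	{
		classify(-ENODEV);  /* all three steps */
		classify(-EIO);     /* last step only */
		return 0;
	}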
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index fa1c22c7b38f..ca7d2245d684 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -604,13 +604,17 @@ int ata_pci_init_bmdma(struct ata_host *host)
 }
 
 /**
- *	ata_pci_init_native_host - acquire native ATA resources and init host
+ *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
  *	@host: target ATA host
  *
  *	Acquire native PCI ATA resources for @host and initialize the
  *	first two ports of @host accordingly.  Ports marked dummy are
  *	skipped and allocation failure makes the port dummy.
  *
+ *	Note that native PCI resources are valid even for legacy hosts
+ *	as we fix up pdev resources array early in boot, so this
+ *	function can be used for both native and legacy SFF hosts.
+ *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
@@ -618,7 +622,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
-int ata_pci_init_native_host(struct ata_host *host)
+int ata_pci_init_sff_host(struct ata_host *host)
 {
 	struct device *gdev = host->dev;
 	struct pci_dev *pdev = to_pci_dev(gdev);
@@ -673,7 +677,7 @@ int ata_pci_init_native_host(struct ata_host *host)
 }
 
 /**
- *	ata_pci_prepare_native_host - helper to prepare native PCI ATA host
+ *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
@@ -687,9 +691,9 @@ int ata_pci_init_native_host(struct ata_host *host)
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
-int ata_pci_prepare_native_host(struct pci_dev *pdev,
-				const struct ata_port_info * const * ppi,
-				struct ata_host **r_host)
+int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+			     const struct ata_port_info * const * ppi,
+			     struct ata_host **r_host)
 {
 	struct ata_host *host;
 	int rc;
@@ -705,7 +709,7 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
 		goto err_out;
 	}
 
-	rc = ata_pci_init_native_host(host);
+	rc = ata_pci_init_sff_host(host);
 	if (rc)
 		goto err_out;
 
@@ -730,221 +734,6 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
 	return rc;
 }
 
-struct ata_legacy_devres {
-	unsigned int mask;
-	unsigned long cmd_port[2];
-	void __iomem *cmd_addr[2];
-	void __iomem *ctl_addr[2];
-	unsigned int irq[2];
-	void *irq_dev_id[2];
-};
-
-static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
-{
-	int i;
-
-	for (i = 0; i < 2; i++) {
-		if (!legacy_dr->irq[i])
-			continue;
-
-		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
-		legacy_dr->irq[i] = 0;
-		legacy_dr->irq_dev_id[i] = NULL;
-	}
-}
-
-static void ata_legacy_release(struct device *gdev, void *res)
-{
-	struct ata_legacy_devres *this = res;
-	int i;
-
-	ata_legacy_free_irqs(this);
-
-	for (i = 0; i < 2; i++) {
-		if (this->cmd_addr[i])
-			ioport_unmap(this->cmd_addr[i]);
-		if (this->ctl_addr[i])
-			ioport_unmap(this->ctl_addr[i]);
-		if (this->cmd_port[i])
-			release_region(this->cmd_port[i], 8);
-	}
-}
-
-static int ata_init_legacy_port(struct ata_port *ap,
-				struct ata_legacy_devres *legacy_dr)
-{
-	struct ata_host *host = ap->host;
-	int port_no = ap->port_no;
-	unsigned long cmd_port, ctl_port;
-
-	if (port_no == 0) {
-		cmd_port = ATA_PRIMARY_CMD;
-		ctl_port = ATA_PRIMARY_CTL;
-	} else {
-		cmd_port = ATA_SECONDARY_CMD;
-		ctl_port = ATA_SECONDARY_CTL;
-	}
-
-	/* request cmd_port */
-	if (request_region(cmd_port, 8, "libata"))
-		legacy_dr->cmd_port[port_no] = cmd_port;
-	else {
-		dev_printk(KERN_WARNING, host->dev,
-			   "0x%0lX IDE port busy\n", cmd_port);
-		return -EBUSY;
-	}
-
-	/* iomap cmd and ctl ports */
-	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
-	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
-	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) {
-		dev_printk(KERN_WARNING, host->dev,
-			   "failed to map cmd/ctl ports\n");
-		return -ENOMEM;
-	}
-
-	/* init IO addresses */
-	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
-	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
-	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
-	ata_std_ports(&ap->ioaddr);
-
-	return 0;
-}
-
-/**
- *	ata_init_legacy_host - acquire legacy ATA resources and init ATA host
- *	@host: target ATA host
- *	@was_busy: out parameter, indicates whether any port was busy
- *
- *	Acquire legacy ATA resources for the first two ports of @host
- *	and initialize it accordingly.  Ports marked dummy are skipped
- *	and resource acquistion failure makes the port dummy.
- *
- *	LOCKING:
- *	Inherited from calling layer (may sleep).
- *
- *	RETURNS:
- *	0 if at least one port is initialized, -ENODEV if no port is
- *	available.
- */
-static int ata_init_legacy_host(struct ata_host *host, int *was_busy)
-{
-	struct device *gdev = host->dev;
-	struct ata_legacy_devres *legacy_dr;
-	int i, rc;
-
-	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
-		return -ENOMEM;
-
-	rc = -ENOMEM;
-	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
-				 GFP_KERNEL);
-	if (!legacy_dr)
-		goto err_out;
-	devres_add(gdev, legacy_dr);
-
-	for (i = 0; i < 2; i++) {
-		if (ata_port_is_dummy(host->ports[i]))
-			continue;
-
-		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
-		if (rc == 0)
-			legacy_dr->mask |= 1 << i;
-		else {
-			if (rc == -EBUSY)
-				(*was_busy)++;
-			host->ports[i]->ops = &ata_dummy_port_ops;
-		}
-	}
-
-	if (!legacy_dr->mask) {
-		dev_printk(KERN_ERR, gdev, "no available legacy port\n");
-		return -ENODEV;
-	}
-
-	devres_remove_group(gdev, NULL);
-	return 0;
-
- err_out:
-	devres_release_group(gdev, NULL);
-	return rc;
-}
-
-/**
- *	ata_request_legacy_irqs - request legacy ATA IRQs
- *	@host: target ATA host
- *	@handler: array of IRQ handlers
- *	@irq_flags: array of IRQ flags
- *	@dev_id: array of IRQ dev_ids
- *
- *	Request legacy IRQs for non-dummy legacy ports in @host.  All
- *	IRQ parameters are passed as array to allow ports to have
- *	separate IRQ handlers.
- *
- *	LOCKING:
- *	Inherited from calling layer (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-static int ata_request_legacy_irqs(struct ata_host *host,
-				   irq_handler_t const *handler,
-				   const unsigned int *irq_flags,
-				   void * const *dev_id)
-{
-	struct device *gdev = host->dev;
-	struct ata_legacy_devres *legacy_dr;
-	int i, rc;
-
-	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
-	BUG_ON(!legacy_dr);
-
-	for (i = 0; i < 2; i++) {
-		unsigned int irq;
-
-		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
-		if (i == 0)
-			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
-		else
-			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
-
-		if (!(legacy_dr->mask & (1 << i)))
-			continue;
-
-		if (!handler[i]) {
-			dev_printk(KERN_ERR, gdev,
-				   "NULL handler specified for port %d\n", i);
-			rc = -EINVAL;
-			goto err_out;
-		}
-
-		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
-				 dev_id[i]);
-		if (rc) {
-			dev_printk(KERN_ERR, gdev,
-				   "irq %u request failed (errno=%d)\n", irq, rc);
-			goto err_out;
-		}
-
-		/* record irq allocation in legacy_dr */
-		legacy_dr->irq[i] = irq;
-		legacy_dr->irq_dev_id[i] = dev_id[i];
-
-		/* only used to print info */
-		if (i == 0)
-			host->irq = irq;
-		else
-			host->irq2 = irq;
-	}
-
-	return 0;
-
- err_out:
-	ata_legacy_free_irqs(legacy_dr);
-	return rc;
-}
-
 /**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
@@ -1029,35 +818,11 @@ int ata_pci_init_one(struct pci_dev *pdev,
 #endif
 	}
 
-	/* alloc and init host */
-	host = ata_host_alloc_pinfo(dev, ppi, 2);
-	if (!host) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "failed to allocate ATA host\n");
-		rc = -ENOMEM;
+	/* prepare host */
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	if (rc)
 		goto err_out;
-	}
 
-	if (!legacy_mode) {
-		rc = ata_pci_init_native_host(host);
-		if (rc)
-			goto err_out;
-	} else {
-		int was_busy = 0;
-
-		rc = ata_init_legacy_host(host, &was_busy);
-		if (was_busy)
-			pcim_pin_device(pdev);
-		if (rc)
-			goto err_out;
-
-		/* request respective PCI regions, may fail */
-		rc = pci_request_region(pdev, 1, DRV_NAME);
-		rc = pci_request_region(pdev, 3, DRV_NAME);
-	}
-
-	/* init BMDMA, may fail */
-	ata_pci_init_bmdma(host);
 	pci_set_master(pdev);
 
 	/* start host and request IRQ */
@@ -1068,17 +833,28 @@ int ata_pci_init_one(struct pci_dev *pdev,
 	if (!legacy_mode) {
 		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
 				      IRQF_SHARED, DRV_NAME, host);
+		if (rc)
+			goto err_out;
 		host->irq = pdev->irq;
 	} else {
-		irq_handler_t handler[2] = { host->ops->irq_handler,
-					     host->ops->irq_handler };
-		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
-		void *dev_id[2] = { host, host };
+		if (!ata_port_is_dummy(host->ports[0])) {
+			host->irq = ATA_PRIMARY_IRQ(pdev);
+			rc = devm_request_irq(dev, host->irq,
+					      pi->port_ops->irq_handler,
+					      IRQF_SHARED, DRV_NAME, host);
+			if (rc)
+				goto err_out;
+		}
 
-		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
+		if (!ata_port_is_dummy(host->ports[1])) {
+			host->irq2 = ATA_SECONDARY_IRQ(pdev);
+			rc = devm_request_irq(dev, host->irq2,
+					      pi->port_ops->irq_handler,
+					      IRQF_SHARED, DRV_NAME, host);
+			if (rc)
+				goto err_out;
+		}
 	}
-	if (rc)
-		goto err_out;
 
 	/* register */
 	rc = ata_host_register(host, pi->sht);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index d928c9105034..be0f05efac6d 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x3"
-#define DRV_VERSION	"0.4.3"
+#define DRV_VERSION	"0.5.3"
 
 /**
 *	hpt3x3_set_piomode	-	PIO setup
@@ -52,6 +52,7 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	pci_write_config_dword(pdev, 0x48, r2);
 }
 
+#if defined(CONFIG_PATA_HPT3X3_DMA)
 /**
 *	hpt3x3_set_dmamode - DMA timing setup
 *	@ap: ATA interface
@@ -59,6 +60,9 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *
 *	Set up the channel for MWDMA or UDMA modes. Much the same as with
 *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ *
+ *	0x44 : bit 0-2 master mode, 3-5 slave mode, etc
+ *	0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc
 */
 
 static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
@@ -76,13 +80,26 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
 
 	if (adev->dma_mode >= XFER_UDMA_0)
-		r2 |= 0x01 << dn;	/* Ultra mode */
+		r2 |= (0x10 << dn);	/* Ultra mode */
 	else
-		r2 |= 0x10 << dn;	/* MWDMA */
+		r2 |= (0x01 << dn);	/* MWDMA */
 
 	pci_write_config_dword(pdev, 0x44, r1);
 	pci_write_config_dword(pdev, 0x48, r2);
 }
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+/**
+ *	hpt3x3_atapi_dma	-	ATAPI DMA check
+ *	@qc: Queued command
+ *
+ *	Just say no - we don't do ATAPI DMA
+ */
+
+static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;
+}
 
 static struct scsi_host_template hpt3x3_sht = {
 	.module			= THIS_MODULE,
@@ -105,7 +122,9 @@ static struct scsi_host_template hpt3x3_sht = {
 static struct ata_port_operations hpt3x3_port_ops = {
 	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt3x3_set_piomode,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
 	.set_dmamode	= hpt3x3_set_dmamode,
+#endif
 	.mode_filter	= ata_pci_default_filter,
 
 	.tf_load	= ata_tf_load,
@@ -124,6 +143,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
 	.bmdma_start	= ata_bmdma_start,
 	.bmdma_stop	= ata_bmdma_stop,
 	.bmdma_status	= ata_bmdma_status,
+	.check_atapi_dma= hpt3x3_atapi_dma,
 
 	.qc_prep	= ata_qc_prep,
 	.qc_issue	= ata_qc_issue_prot,
@@ -158,32 +178,79 @@ static void hpt3x3_init_chipset(struct pci_dev *dev)
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
 }
 
-
 /**
 *	hpt3x3_init_one		-	Initialise an HPT343/363
- *	@dev: PCI device
+ *	@pdev: PCI device
 *	@id: Entry in match table
 *
- *	Perform basic initialisation. The chip has a quirk that it won't
- *	function unless it is at XX00. The old ATA driver touched this up
- *	but we leave it for pci quirks to do properly.
+ *	Perform basic initialisation. We set the device up so we access all
+ *	ports via BAR4. This is necessary to work around errata.
 */
 
-static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht = &hpt3x3_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+		/* Further debug needed */
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,
+#endif
 		.port_ops = &hpt3x3_port_ops
 	};
+	/* Register offsets of taskfiles in BAR4 area */
+	static const u8 offset_cmd[2] = { 0x20, 0x28 };
+	static const u8 offset_ctl[2] = { 0x36, 0x3E };
 	const struct ata_port_info *ppi[] = { &info, NULL };
-
-	hpt3x3_init_chipset(dev);
-	/* Now kick off ATA set up */
-	return ata_pci_init_one(dev, ppi);
+	struct ata_host *host;
+	int i, rc;
+	void __iomem *base;
+
+	hpt3x3_init_chipset(pdev);
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* Everything is relative to BAR4 if we set up this way */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	base = host->iomap[4];	/* Bus mastering base */
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;
+
+		ioaddr->cmd_addr = base + offset_cmd[i];
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = base + offset_ctl[i];
+		ioaddr->scr_addr = NULL;
+		ata_std_ports(ioaddr);
+		ioaddr->bmdma_addr = base + 8 * i;
+	}
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &hpt3x3_sht);
 }
 
 #ifdef CONFIG_PM
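The functional fix in hpt3x3_set_dmamode() is easy to miss in the noise: per the new register comment, the UDMA enables of register 0x48 sit in the high nibble and the MWDMA enables in the low nibble, and the old code had the two shifts swapped. A standalone check of the corrected masks (dn is just an illustrative device index, as in the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int dn;

		/* old code: UDMA used 0x01 << dn and MWDMA used 0x10 << dn */
		for (dn = 0; dn < 4; dn++)
			printf("dn=%u  UDMA=0x%02X  MWDMA=0x%02X\n",
			       dn, 0x10 << dn, 0x01 << dn);
		return 0;
	}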
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 368fac7d168b..182e83c9047b 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -467,13 +467,27 @@ mpc52xx_ata_remove(struct of_device *op)
 static int
 mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
 {
-	return 0; /* FIXME : What to do here ? */
+	struct ata_host *host = dev_get_drvdata(&op->dev);
+
+	return ata_host_suspend(host, state);
 }
 
 static int
 mpc52xx_ata_resume(struct of_device *op)
 {
-	return 0; /* FIXME : What to do here ? */
+	struct ata_host *host = dev_get_drvdata(&op->dev);
+	struct mpc52xx_ata_priv *priv = host->private_data;
+	int rv;
+
+	rv = mpc52xx_ata_hw_init(priv);
+	if (rv) {
+		printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+		return rv;
+	}
+
+	ata_host_resume(host);
+
+	return 0;
 }
 
 #endif
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 61502bc7bf1d..c55667e0eb65 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -238,6 +238,12 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	else
 		offset = 0;	/* 100MHz */
 
+	/* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
+	if (adev->class == ATA_DEV_ATAPI && speed > XFER_UDMA_4) {
+		printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
+		speed = XFER_UDMA_4;
+	}
+
 	if (speed >= XFER_UDMA_0)
 		idx = speed - XFER_UDMA_0;
 	else
@@ -724,22 +730,36 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 
 static u8 scc_bmdma_status (struct ata_port *ap)
 {
-	u8 host_stat;
 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	host_stat = in_be32(mmio + SCC_DMA_STATUS);
-
-	/* Workaround for PTERADD: emulate DMA_INTR when
-	 * - IDE_STATUS[ERR] = 1
-	 * - INT_STATUS[INTRQ] = 1
-	 * - DMA_STATUS[IORACTA] = 1
-	 */
-	if (!(host_stat & ATA_DMA_INTR)) {
-		u32 int_status = in_be32(mmio + SCC_DMA_INTST);
-		if (ata_altstatus(ap) & ATA_ERR &&
-		    int_status & INTSTS_INTRQ &&
-		    host_stat & ATA_DMA_ACTIVE)
-			host_stat |= ATA_DMA_INTR;
+	u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
+	u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	static int retry = 0;
+
+	/* return if IOS_SS is cleared */
+	if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
+		return host_stat;
+
+	/* errata A252,A308 workaround: Step4 */
+	if (ata_altstatus(ap) & ATA_ERR && int_status & INTSTS_INTRQ)
+		return (host_stat | ATA_DMA_INTR);
+
+	/* errata A308 workaround Step5 */
+	if (int_status & INTSTS_IOIRQS) {
+		host_stat |= ATA_DMA_INTR;
+
+		/* We don't check ATAPI DMA because it is limited to UDMA4 */
+		if ((qc->tf.protocol == ATA_PROT_DMA &&
+		     qc->dev->xfer_mode > XFER_UDMA_4)) {
+			if (!(int_status & INTSTS_ACTEINT)) {
+				printk(KERN_WARNING "ata%u: data lost occurred. (ACTEINT==0, retry:%d)\n",
+					ap->print_id, retry);
+				host_stat |= ATA_DMA_ERR;
+				if (retry++)
+					ap->udma_mask >>= 1;
+			} else
+				retry = 0;
+		}
 	}
 
 	return host_stat;
@@ -892,10 +912,6 @@ static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
 {
 	DPRINTK("ENTER\n");
 
-	/* re-enable interrupts */
-	if (!ap->ops->error_handler)
-		ap->ops->irq_on(ap);
-
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 1);
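In the new errata A308 Step5 path, a repeated ACTEINT==0 data-loss event steps the port down one UDMA mode via ap->udma_mask >>= 1. A quick standalone illustration of what that shift does to the mask (assuming, as in libata, that bit N set means UDMA N is allowed):

	#include <stdio.h>

	int main(void)
	{
		unsigned int udma_mask = 0x7F;  /* UDMA0..UDMA6 allowed */
		int step;

		for (step = 0; step < 3; step++) {
			printf("mask=0x%02X, top mode UDMA%d\n",
			       udma_mask, 31 - __builtin_clz(udma_mask));
			udma_mask >>= 1;  /* drop the highest mode */
		}
		return 0;
	}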
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 2b4508206a6c..657b1ee2f5c1 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -149,6 +149,9 @@ static int sis_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
 		return -ENOENT;
 
+	/* Clear the FIFO settings. We can't enable the FIFO until
+	   we know we are poking at a disk */
+	pci_write_config_byte(pdev, 0x4B, 0);
 	return ata_std_prereset(ap, deadline);
 }
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3873b29c80d6..8a77a0ae83ad 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,11 +29,6 @@
   I distinctly remember a couple workarounds (one related to PCI-X)
   are still needed.
 
-  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
-  probing/error handling in general.  MUST HAVE.
-
-  3) Add hotplug support (easy, once new-EH support appears)
-
   4) Add NCQ support (easy to intermediate, once new-EH support appears)
 
   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
@@ -108,8 +103,6 @@ enum {
 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 
-	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
-
 	MV_MAX_Q_DEPTH		= 32,
 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
 
@@ -133,18 +126,22 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
+	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+				  ATA_FLAG_PIO_POLLING,
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
+	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 	CRQB_CMD_ADDR_SHIFT	= 8,
 	CRQB_CMD_CS		= (0x2 << 11),
 	CRQB_CMD_LAST		= (1 << 15),
 
 	CRPB_FLAG_STATUS_SHIFT	= 8,
+	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
+	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
 
 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
 
@@ -236,8 +233,10 @@ enum {
 	EDMA_ERR_DEV_DCON	= (1 << 3),
 	EDMA_ERR_DEV_CON	= (1 << 4),
 	EDMA_ERR_SERR		= (1 << 5),
-	EDMA_ERR_SELF_DIS	= (1 << 7),
+	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
+	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
+	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
 	EDMA_ERR_CRPB_PAR	= (1 << 10),
 	EDMA_ERR_INTRL_PAR	= (1 << 11),
@@ -248,13 +247,33 @@ enum {
 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
-	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
-				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
-				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
-				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
-				   EDMA_ERR_LNK_DATA_RX |
-				   EDMA_ERR_LNK_DATA_TX |
-				   EDMA_ERR_TRANS_PROTO),
+	EDMA_ERR_OVERRUN_5	= (1 << 5),
+	EDMA_ERR_UNDERRUN_5	= (1 << 6),
+	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_SERR |
+				  EDMA_ERR_SELF_DIS |
+				  EDMA_ERR_CRBQ_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY |
+				  EDMA_ERR_LNK_CTRL_RX_2 |
+				  EDMA_ERR_LNK_DATA_RX |
+				  EDMA_ERR_LNK_DATA_TX |
+				  EDMA_ERR_TRANS_PROTO,
+	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_OVERRUN_5 |
+				  EDMA_ERR_UNDERRUN_5 |
+				  EDMA_ERR_SELF_DIS_5 |
+				  EDMA_ERR_CRBQ_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY,
 
 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
@@ -282,18 +301,18 @@ enum {
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
 	MV_HP_ERRATA_XX42A0	= (1 << 5),
-	MV_HP_50XX		= (1 << 6),
-	MV_HP_GEN_IIE		= (1 << 7),
+	MV_HP_GEN_I		= (1 << 6),
+	MV_HP_GEN_II		= (1 << 7),
+	MV_HP_GEN_IIE		= (1 << 8),
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
+	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
 };
 
-#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
-#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
-#define IS_GEN_I(hpriv) IS_50XX(hpriv)
-#define IS_GEN_II(hpriv) IS_60XX(hpriv)
+#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
+#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
@@ -352,6 +371,10 @@ struct mv_port_priv {
 	dma_addr_t		crpb_dma;
 	struct mv_sg		*sg_tbl;
 	dma_addr_t		sg_tbl_dma;
+
+	unsigned int		req_idx;
+	unsigned int		resp_idx;
+
 	u32			pp_flags;
 };
 
@@ -384,14 +407,15 @@ static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static void mv_phy_reset(struct ata_port *ap);
-static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_eng_timeout(struct ata_port *ap);
+static void mv_error_handler(struct ata_port *ap);
+static void mv_post_int_cmd(struct ata_queued_cmd *qc);
+static void mv_eh_freeze(struct ata_port *ap);
+static void mv_eh_thaw(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -415,14 +439,31 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
-static void mv_stop_and_reset(struct ata_port *ap);
 
-static struct scsi_host_template mv_sht = {
+static struct scsi_host_template mv5_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= MV_MAX_SG_CT,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= 1,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= MV_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct scsi_host_template mv6_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= MV_USE_Q_DEPTH,
+	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -444,19 +485,21 @@ static const struct ata_port_operations mv5_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= mv_phy_reset,
 	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
 	.data_xfer		= ata_data_xfer,
453 | 493 | ||
454 | .eng_timeout = mv_eng_timeout, | ||
455 | |||
456 | .irq_clear = mv_irq_clear, | 494 | .irq_clear = mv_irq_clear, |
457 | .irq_on = ata_irq_on, | 495 | .irq_on = ata_irq_on, |
458 | .irq_ack = ata_irq_ack, | 496 | .irq_ack = ata_irq_ack, |
459 | 497 | ||
498 | .error_handler = mv_error_handler, | ||
499 | .post_internal_cmd = mv_post_int_cmd, | ||
500 | .freeze = mv_eh_freeze, | ||
501 | .thaw = mv_eh_thaw, | ||
502 | |||
460 | .scr_read = mv5_scr_read, | 503 | .scr_read = mv5_scr_read, |
461 | .scr_write = mv5_scr_write, | 504 | .scr_write = mv5_scr_write, |
462 | 505 | ||
@@ -473,19 +516,21 @@ static const struct ata_port_operations mv6_ops = { | |||
473 | .exec_command = ata_exec_command, | 516 | .exec_command = ata_exec_command, |
474 | .dev_select = ata_std_dev_select, | 517 | .dev_select = ata_std_dev_select, |
475 | 518 | ||
476 | .phy_reset = mv_phy_reset, | ||
477 | .cable_detect = ata_cable_sata, | 519 | .cable_detect = ata_cable_sata, |
478 | 520 | ||
479 | .qc_prep = mv_qc_prep, | 521 | .qc_prep = mv_qc_prep, |
480 | .qc_issue = mv_qc_issue, | 522 | .qc_issue = mv_qc_issue, |
481 | .data_xfer = ata_data_xfer, | 523 | .data_xfer = ata_data_xfer, |
482 | 524 | ||
483 | .eng_timeout = mv_eng_timeout, | ||
484 | |||
485 | .irq_clear = mv_irq_clear, | 525 | .irq_clear = mv_irq_clear, |
486 | .irq_on = ata_irq_on, | 526 | .irq_on = ata_irq_on, |
487 | .irq_ack = ata_irq_ack, | 527 | .irq_ack = ata_irq_ack, |
488 | 528 | ||
529 | .error_handler = mv_error_handler, | ||
530 | .post_internal_cmd = mv_post_int_cmd, | ||
531 | .freeze = mv_eh_freeze, | ||
532 | .thaw = mv_eh_thaw, | ||
533 | |||
489 | .scr_read = mv_scr_read, | 534 | .scr_read = mv_scr_read, |
490 | .scr_write = mv_scr_write, | 535 | .scr_write = mv_scr_write, |
491 | 536 | ||
@@ -502,19 +547,21 @@ static const struct ata_port_operations mv_iie_ops = { | |||
502 | .exec_command = ata_exec_command, | 547 | .exec_command = ata_exec_command, |
503 | .dev_select = ata_std_dev_select, | 548 | .dev_select = ata_std_dev_select, |
504 | 549 | ||
505 | .phy_reset = mv_phy_reset, | ||
506 | .cable_detect = ata_cable_sata, | 550 | .cable_detect = ata_cable_sata, |
507 | 551 | ||
508 | .qc_prep = mv_qc_prep_iie, | 552 | .qc_prep = mv_qc_prep_iie, |
509 | .qc_issue = mv_qc_issue, | 553 | .qc_issue = mv_qc_issue, |
510 | .data_xfer = ata_data_xfer, | 554 | .data_xfer = ata_data_xfer, |
511 | 555 | ||
512 | .eng_timeout = mv_eng_timeout, | ||
513 | |||
514 | .irq_clear = mv_irq_clear, | 556 | .irq_clear = mv_irq_clear, |
515 | .irq_on = ata_irq_on, | 557 | .irq_on = ata_irq_on, |
516 | .irq_ack = ata_irq_ack, | 558 | .irq_ack = ata_irq_ack, |
517 | 559 | ||
560 | .error_handler = mv_error_handler, | ||
561 | .post_internal_cmd = mv_post_int_cmd, | ||
562 | .freeze = mv_eh_freeze, | ||
563 | .thaw = mv_eh_thaw, | ||
564 | |||
518 | .scr_read = mv_scr_read, | 565 | .scr_read = mv_scr_read, |
519 | .scr_write = mv_scr_write, | 566 | .scr_write = mv_scr_write, |
520 | 567 | ||
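
Editor's note: all three ops tables above make the same swap — the old .phy_reset/.eng_timeout pair gives way to the new-EH hook set (.error_handler, .post_internal_cmd, .freeze, .thaw). A rough miniature of that wiring is sketched below with a toy struct of function pointers; this is an illustration, not libata's real ata_port_operations.

```c
#include <stdio.h>

struct port { const char *name; };

/* Toy stand-in for the EH-related slice of ata_port_operations;
 * not the real libata struct. */
struct eh_ops {
	void (*error_handler)(struct port *p); /* owns recovery policy */
	void (*freeze)(struct port *p);        /* mask port interrupts */
	void (*thaw)(struct port *p);          /* unmask + clear stale events */
};

static void mv_eh(struct port *p)     { printf("%s: run EH\n", p->name); }
static void mv_freeze(struct port *p) { printf("%s: freeze\n", p->name); }
static void mv_thaw(struct port *p)   { printf("%s: thaw\n", p->name); }

static const struct eh_ops mv_like_ops = {
	.error_handler = mv_eh,
	.freeze        = mv_freeze,
	.thaw          = mv_thaw,
};

int main(void)
{
	struct port p = { "port 0" };

	/* the core freezes a port on a fatal IRQ, thaws it when recovery
	 * starts, and delegates the actual policy to error_handler */
	mv_like_ops.freeze(&p);
	mv_like_ops.thaw(&p);
	mv_like_ops.error_handler(&p);
	return 0;
}
```

The point of the conversion: instead of one timeout callback that must do everything itself, the driver exposes small cooperative hooks and lets the libata EH core sequence them.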
@@ -530,38 +577,38 @@ static const struct ata_port_info mv_port_info[] = { | |||
530 | .port_ops = &mv5_ops, | 577 | .port_ops = &mv5_ops, |
531 | }, | 578 | }, |
532 | { /* chip_508x */ | 579 | { /* chip_508x */ |
533 | .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), | 580 | .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, |
534 | .pio_mask = 0x1f, /* pio0-4 */ | 581 | .pio_mask = 0x1f, /* pio0-4 */ |
535 | .udma_mask = ATA_UDMA6, | 582 | .udma_mask = ATA_UDMA6, |
536 | .port_ops = &mv5_ops, | 583 | .port_ops = &mv5_ops, |
537 | }, | 584 | }, |
538 | { /* chip_5080 */ | 585 | { /* chip_5080 */ |
539 | .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), | 586 | .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, |
540 | .pio_mask = 0x1f, /* pio0-4 */ | 587 | .pio_mask = 0x1f, /* pio0-4 */ |
541 | .udma_mask = ATA_UDMA6, | 588 | .udma_mask = ATA_UDMA6, |
542 | .port_ops = &mv5_ops, | 589 | .port_ops = &mv5_ops, |
543 | }, | 590 | }, |
544 | { /* chip_604x */ | 591 | { /* chip_604x */ |
545 | .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), | 592 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, |
546 | .pio_mask = 0x1f, /* pio0-4 */ | 593 | .pio_mask = 0x1f, /* pio0-4 */ |
547 | .udma_mask = ATA_UDMA6, | 594 | .udma_mask = ATA_UDMA6, |
548 | .port_ops = &mv6_ops, | 595 | .port_ops = &mv6_ops, |
549 | }, | 596 | }, |
550 | { /* chip_608x */ | 597 | { /* chip_608x */ |
551 | .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 598 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
552 | MV_FLAG_DUAL_HC), | 599 | MV_FLAG_DUAL_HC, |
553 | .pio_mask = 0x1f, /* pio0-4 */ | 600 | .pio_mask = 0x1f, /* pio0-4 */ |
554 | .udma_mask = ATA_UDMA6, | 601 | .udma_mask = ATA_UDMA6, |
555 | .port_ops = &mv6_ops, | 602 | .port_ops = &mv6_ops, |
556 | }, | 603 | }, |
557 | { /* chip_6042 */ | 604 | { /* chip_6042 */ |
558 | .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), | 605 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, |
559 | .pio_mask = 0x1f, /* pio0-4 */ | 606 | .pio_mask = 0x1f, /* pio0-4 */ |
560 | .udma_mask = ATA_UDMA6, | 607 | .udma_mask = ATA_UDMA6, |
561 | .port_ops = &mv_iie_ops, | 608 | .port_ops = &mv_iie_ops, |
562 | }, | 609 | }, |
563 | { /* chip_7042 */ | 610 | { /* chip_7042 */ |
564 | .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), | 611 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, |
565 | .pio_mask = 0x1f, /* pio0-4 */ | 612 | .pio_mask = 0x1f, /* pio0-4 */ |
566 | .udma_mask = ATA_UDMA6, | 613 | .udma_mask = ATA_UDMA6, |
567 | .port_ops = &mv_iie_ops, | 614 | .port_ops = &mv_iie_ops, |
@@ -709,6 +756,46 @@ static void mv_irq_clear(struct ata_port *ap) | |||
709 | { | 756 | { |
710 | } | 757 | } |
711 | 758 | ||
759 | static void mv_set_edma_ptrs(void __iomem *port_mmio, | ||
760 | struct mv_host_priv *hpriv, | ||
761 | struct mv_port_priv *pp) | ||
762 | { | ||
763 | u32 index; | ||
764 | |||
765 | /* | ||
766 | * initialize request queue | ||
767 | */ | ||
768 | index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; | ||
769 | |||
770 | WARN_ON(pp->crqb_dma & 0x3ff); | ||
771 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | ||
772 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, | ||
773 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
774 | |||
775 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
776 | writelfl((pp->crqb_dma & 0xffffffff) | index, | ||
777 | port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
778 | else | ||
779 | writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
780 | |||
781 | /* | ||
782 | * initialize response queue | ||
783 | */ | ||
784 | index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT; | ||
785 | |||
786 | WARN_ON(pp->crpb_dma & 0xff); | ||
787 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | ||
788 | |||
789 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
790 | writelfl((pp->crpb_dma & 0xffffffff) | index, | ||
791 | port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
792 | else | ||
793 | writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
794 | |||
795 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, | ||
796 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
797 | } | ||
798 | |||
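
Editor's note: the new mv_set_edma_ptrs() above folds a 64-bit queue base address and a software ring index into the EDMA pointer registers. A minimal userspace model of that packing follows; the mask/shift constants are assumptions consistent with a 32-entry, 1 KB-aligned request ring (the driver WARNs on crqb_dma & 0x3ff), not values quoted from the patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed constants for a 32-entry request ring aligned to 1 KB;
 * the real EDMA_* values live in sata_mv.c's enum block. */
#define Q_DEPTH_MASK        0x1fu
#define REQ_Q_PTR_SHIFT     5
#define REQ_Q_BASE_LO_MASK  0xfffffc00u

/* Fold a 64-bit DMA base and a software producer index into the
 * BASE_HI and IN_PTR register values.  The "(dma >> 16) >> 16"
 * double shift is the same trick the driver uses: it stays defined
 * even when dma_addr_t is a 32-bit type, where ">> 32" would be
 * undefined behaviour. */
static void pack_req_ptrs(uint64_t crqb_dma, unsigned int req_idx,
			  uint32_t *base_hi, uint32_t *in_ptr)
{
	uint32_t index = (req_idx & Q_DEPTH_MASK) << REQ_Q_PTR_SHIFT;

	*base_hi = (uint32_t)((crqb_dma >> 16) >> 16);
	*in_ptr  = ((uint32_t)crqb_dma & REQ_Q_BASE_LO_MASK) | index;
}

int main(void)
{
	uint32_t hi, in;

	pack_req_ptrs(0x1ffff0400ull, 7, &hi, &in);
	printf("BASE_HI=0x%08x IN_PTR=0x%08x\n", hi, in);
	return 0;	/* prints BASE_HI=0x00000001 IN_PTR=0xffff04e0 */
}
```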
712 | /** | 799 | /** |
713 | * mv_start_dma - Enable eDMA engine | 800 | * mv_start_dma - Enable eDMA engine |
714 | * @base: port base address | 801 | * @base: port base address |
@@ -720,9 +807,15 @@ static void mv_irq_clear(struct ata_port *ap) | |||
720 | * LOCKING: | 807 | * LOCKING: |
721 | * Inherited from caller. | 808 | * Inherited from caller. |
722 | */ | 809 | */ |
723 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) | 810 | static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, |
811 | struct mv_port_priv *pp) | ||
724 | { | 812 | { |
725 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { | 813 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
814 | /* clear EDMA event indicators, if any */ | ||
815 | writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); | ||
816 | |||
817 | mv_set_edma_ptrs(base, hpriv, pp); | ||
818 | |||
726 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); | 819 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); |
727 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | 820 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; |
728 | } | 821 | } |
@@ -739,14 +832,14 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) | |||
739 | * LOCKING: | 832 | * LOCKING: |
740 | * Inherited from caller. | 833 | * Inherited from caller. |
741 | */ | 834 | */ |
742 | static void mv_stop_dma(struct ata_port *ap) | 835 | static int mv_stop_dma(struct ata_port *ap) |
743 | { | 836 | { |
744 | void __iomem *port_mmio = mv_ap_base(ap); | 837 | void __iomem *port_mmio = mv_ap_base(ap); |
745 | struct mv_port_priv *pp = ap->private_data; | 838 | struct mv_port_priv *pp = ap->private_data; |
746 | u32 reg; | 839 | u32 reg; |
747 | int i; | 840 | int i, err = 0; |
748 | 841 | ||
749 | if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { | 842 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
750 | /* Disable EDMA if active. The disable bit auto clears. | 843 | /* Disable EDMA if active. The disable bit auto clears. |
751 | */ | 844 | */ |
752 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | 845 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); |
@@ -758,16 +851,18 @@ static void mv_stop_dma(struct ata_port *ap) | |||
758 | /* now properly wait for the eDMA to stop */ | 851 | /* now properly wait for the eDMA to stop */ |
759 | for (i = 1000; i > 0; i--) { | 852 | for (i = 1000; i > 0; i--) { |
760 | reg = readl(port_mmio + EDMA_CMD_OFS); | 853 | reg = readl(port_mmio + EDMA_CMD_OFS); |
761 | if (!(EDMA_EN & reg)) { | 854 | if (!(reg & EDMA_EN)) |
762 | break; | 855 | break; |
763 | } | 856 | |
764 | udelay(100); | 857 | udelay(100); |
765 | } | 858 | } |
766 | 859 | ||
767 | if (EDMA_EN & reg) { | 860 | if (reg & EDMA_EN) { |
768 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | 861 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); |
769 | /* FIXME: Consider doing a reset here to recover */ | 862 | err = -EIO; |
770 | } | 863 | } |
864 | |||
865 | return err; | ||
771 | } | 866 | } |
772 | 867 | ||
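
Editor's note: mv_stop_dma() now reports failure (-EIO) instead of only logging it, so callers such as the new prereset can escalate. The bounded-poll idiom it uses — request the disable, spin until the enable bit clears, error out on timeout — is sketched below against a stubbed register; stop_req()/read_status() merely stand in for the writelfl()/readl() MMIO accessors.

```c
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define EDMA_EN (1u << 0)

/* Stubbed "hardware": the enable bit drops a few polls after the
 * disable request is issued. */
static uint32_t edma_cmd = EDMA_EN;
static int polls_until_idle;

static void stop_req(void)        { polls_until_idle = 3; }
static uint32_t read_status(void)
{
	if (polls_until_idle && --polls_until_idle == 0)
		edma_cmd &= ~EDMA_EN;
	return edma_cmd;
}

/* Same shape as mv_stop_dma(): bounded poll, hard error on timeout. */
static int stop_edma(void)
{
	int i;

	stop_req();				/* disable bit auto-clears */
	for (i = 1000; i > 0; i--) {
		if (!(read_status() & EDMA_EN))
			return 0;		/* engine confirmed idle */
		/* udelay(100) would go here */
	}
	return -EIO;				/* caller escalates to reset */
}

int main(void)
{
	printf("stop_edma() = %d\n", stop_edma());
	return 0;
}
```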
773 | #ifdef ATA_DEBUG | 868 | #ifdef ATA_DEBUG |
@@ -884,12 +979,13 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | |||
884 | writelfl(val, mv_ap_base(ap) + ofs); | 979 | writelfl(val, mv_ap_base(ap) + ofs); |
885 | } | 980 | } |
886 | 981 | ||
887 | static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) | 982 | static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, |
983 | void __iomem *port_mmio) | ||
888 | { | 984 | { |
889 | u32 cfg = readl(port_mmio + EDMA_CFG_OFS); | 985 | u32 cfg = readl(port_mmio + EDMA_CFG_OFS); |
890 | 986 | ||
891 | /* set up non-NCQ EDMA configuration */ | 987 | /* set up non-NCQ EDMA configuration */ |
892 | cfg &= ~(1 << 9); /* disable equeue */ | 988 | cfg &= ~(1 << 9); /* disable eQue */ |
893 | 989 | ||
894 | if (IS_GEN_I(hpriv)) { | 990 | if (IS_GEN_I(hpriv)) { |
895 | cfg &= ~0x1f; /* clear queue depth */ | 991 | cfg &= ~0x1f; /* clear queue depth */ |
@@ -909,7 +1005,7 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) | |||
909 | cfg |= (1 << 18); /* enab early completion */ | 1005 | cfg |= (1 << 18); /* enab early completion */ |
910 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ | 1006 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ |
911 | cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ | 1007 | cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ |
912 | cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */ | 1008 | cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */ |
913 | } | 1009 | } |
914 | 1010 | ||
915 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); | 1011 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); |
@@ -971,28 +1067,9 @@ static int mv_port_start(struct ata_port *ap) | |||
971 | pp->sg_tbl = mem; | 1067 | pp->sg_tbl = mem; |
972 | pp->sg_tbl_dma = mem_dma; | 1068 | pp->sg_tbl_dma = mem_dma; |
973 | 1069 | ||
974 | mv_edma_cfg(hpriv, port_mmio); | 1070 | mv_edma_cfg(ap, hpriv, port_mmio); |
975 | 1071 | ||
976 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | 1072 | mv_set_edma_ptrs(port_mmio, hpriv, pp); |
977 | writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, | ||
978 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
979 | |||
980 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
981 | writelfl(pp->crqb_dma & 0xffffffff, | ||
982 | port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
983 | else | ||
984 | writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
985 | |||
986 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | ||
987 | |||
988 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
989 | writelfl(pp->crpb_dma & 0xffffffff, | ||
990 | port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
991 | else | ||
992 | writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
993 | |||
994 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | ||
995 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
996 | 1073 | ||
997 | /* Don't turn on EDMA here...do it before DMA commands only. Else | 1074 | /* Don't turn on EDMA here...do it before DMA commands only. Else |
998 | * we'll be unable to send non-data, PIO, etc due to restricted access | 1075 | * we'll be unable to send non-data, PIO, etc due to restricted access |
@@ -1055,11 +1132,6 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc) | |||
1055 | return n_sg; | 1132 | return n_sg; |
1056 | } | 1133 | } |
1057 | 1134 | ||
1058 | static inline unsigned mv_inc_q_index(unsigned index) | ||
1059 | { | ||
1060 | return (index + 1) & MV_MAX_Q_DEPTH_MASK; | ||
1061 | } | ||
1062 | |||
1063 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | 1135 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
1064 | { | 1136 | { |
1065 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | 1137 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
@@ -1088,7 +1160,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1088 | u16 flags = 0; | 1160 | u16 flags = 0; |
1089 | unsigned in_index; | 1161 | unsigned in_index; |
1090 | 1162 | ||
1091 | if (ATA_PROT_DMA != qc->tf.protocol) | 1163 | if (qc->tf.protocol != ATA_PROT_DMA) |
1092 | return; | 1164 | return; |
1093 | 1165 | ||
1094 | /* Fill in command request block | 1166 | /* Fill in command request block |
@@ -1097,10 +1169,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1097 | flags |= CRQB_FLAG_READ; | 1169 | flags |= CRQB_FLAG_READ; |
1098 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1170 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1099 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1171 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1172 | flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ | ||
1100 | 1173 | ||
1101 | /* get current queue index from hardware */ | 1174 | /* get current queue index from software */ |
1102 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | 1175 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
1103 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1104 | 1176 | ||
1105 | pp->crqb[in_index].sg_addr = | 1177 | pp->crqb[in_index].sg_addr = |
1106 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1178 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
@@ -1180,7 +1252,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1180 | unsigned in_index; | 1252 | unsigned in_index; |
1181 | u32 flags = 0; | 1253 | u32 flags = 0; |
1182 | 1254 | ||
1183 | if (ATA_PROT_DMA != qc->tf.protocol) | 1255 | if (qc->tf.protocol != ATA_PROT_DMA) |
1184 | return; | 1256 | return; |
1185 | 1257 | ||
1186 | /* Fill in Gen IIE command request block | 1258 | /* Fill in Gen IIE command request block |
@@ -1190,10 +1262,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1190 | 1262 | ||
1191 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1263 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1192 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1264 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1265 | flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- | ||
1266 | what we use as our tag */ | ||
1193 | 1267 | ||
1194 | /* get current queue index from hardware */ | 1268 | /* get current queue index from software */ |
1195 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | 1269 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
1196 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1197 | 1270 | ||
1198 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | 1271 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; |
1199 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1272 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
@@ -1241,83 +1314,41 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1241 | */ | 1314 | */ |
1242 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | 1315 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) |
1243 | { | 1316 | { |
1244 | void __iomem *port_mmio = mv_ap_base(qc->ap); | 1317 | struct ata_port *ap = qc->ap; |
1245 | struct mv_port_priv *pp = qc->ap->private_data; | 1318 | void __iomem *port_mmio = mv_ap_base(ap); |
1246 | unsigned in_index; | 1319 | struct mv_port_priv *pp = ap->private_data; |
1247 | u32 in_ptr; | 1320 | struct mv_host_priv *hpriv = ap->host->private_data; |
1321 | u32 in_index; | ||
1248 | 1322 | ||
1249 | if (ATA_PROT_DMA != qc->tf.protocol) { | 1323 | if (qc->tf.protocol != ATA_PROT_DMA) { |
1250 | /* We're about to send a non-EDMA capable command to the | 1324 | /* We're about to send a non-EDMA capable command to the |
1251 | * port. Turn off EDMA so there won't be problems accessing | 1325 | * port. Turn off EDMA so there won't be problems accessing |
1252 | * shadow block, etc registers. | 1326 | * shadow block, etc registers. |
1253 | */ | 1327 | */ |
1254 | mv_stop_dma(qc->ap); | 1328 | mv_stop_dma(ap); |
1255 | return ata_qc_issue_prot(qc); | 1329 | return ata_qc_issue_prot(qc); |
1256 | } | 1330 | } |
1257 | 1331 | ||
1258 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1332 | mv_start_dma(port_mmio, hpriv, pp); |
1259 | in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | 1333 | |
1334 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; | ||
1260 | 1335 | ||
1261 | /* until we do queuing, the queue should be empty at this point */ | 1336 | /* until we do queuing, the queue should be empty at this point */ |
1262 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) | 1337 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) |
1263 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | 1338 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
1264 | 1339 | ||
1265 | in_index = mv_inc_q_index(in_index); /* now incr producer index */ | 1340 | pp->req_idx++; |
1266 | 1341 | ||
1267 | mv_start_dma(port_mmio, pp); | 1342 | in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; |
1268 | 1343 | ||
1269 | /* and write the request in pointer to kick the EDMA to life */ | 1344 | /* and write the request in pointer to kick the EDMA to life */ |
1270 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | 1345 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, |
1271 | in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; | 1346 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
1272 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
1273 | 1347 | ||
1274 | return 0; | 1348 | return 0; |
1275 | } | 1349 | } |
1276 | 1350 | ||
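
Editor's note: mv_qc_issue() now derives the ring slot from the driver-private req_idx counter instead of reading the hardware IN pointer back, and only publishes the new entry by rewriting IN_PTR after the CRQB is filled in. A sketch of that producer-side pattern, with a plain array standing in for the CRQB ring and a variable for the doorbell register (the shift value is assumed, matching the pointer-packing sketch earlier):

```c
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH       32u
#define Q_DEPTH_MASK  (Q_DEPTH - 1)
#define PTR_SHIFT     5		/* assumed register field position */

struct crqb { uint32_t sg_addr; uint16_t flags; };

static struct crqb ring[Q_DEPTH];
static unsigned int req_idx;	/* software producer count, free-running */
static uint32_t in_ptr_reg;	/* stands in for EDMA_REQ_Q_IN_PTR_OFS */

static void issue(uint32_t sg_addr, uint16_t flags)
{
	unsigned int slot = req_idx & Q_DEPTH_MASK;

	/* fill the request slot first... */
	ring[slot].sg_addr = sg_addr;
	ring[slot].flags = flags;

	/* ...then advance the count and kick the doorbell last, so the
	 * hardware never sees the new IN pointer before the CRQB body */
	req_idx++;
	in_ptr_reg = (req_idx & Q_DEPTH_MASK) << PTR_SHIFT;
}

int main(void)
{
	issue(0x1000, 0x1);
	printf("req_idx=%u doorbell=0x%03x\n", req_idx, in_ptr_reg);
	return 0;
}
```

Tracking the index in software is what lets the later NCQ work queue multiple requests without a read-modify-write race against the hardware pointer register.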
1277 | /** | 1351 | /** |
1278 | * mv_get_crpb_status - get status from most recently completed cmd | ||
1279 | * @ap: ATA channel to manipulate | ||
1280 | * | ||
1281 | * This routine is for use when the port is in DMA mode, when it | ||
1282 | * will be using the CRPB (command response block) method of | ||
1283 | * returning command completion information. We check indices | ||
1284 | * are good, grab status, and bump the response consumer index to | ||
1285 | * prove that we're up to date. | ||
1286 | * | ||
1287 | * LOCKING: | ||
1288 | * Inherited from caller. | ||
1289 | */ | ||
1290 | static u8 mv_get_crpb_status(struct ata_port *ap) | ||
1291 | { | ||
1292 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1293 | struct mv_port_priv *pp = ap->private_data; | ||
1294 | unsigned out_index; | ||
1295 | u32 out_ptr; | ||
1296 | u8 ata_status; | ||
1297 | |||
1298 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1299 | out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1300 | |||
1301 | ata_status = le16_to_cpu(pp->crpb[out_index].flags) | ||
1302 | >> CRPB_FLAG_STATUS_SHIFT; | ||
1303 | |||
1304 | /* increment our consumer index... */ | ||
1305 | out_index = mv_inc_q_index(out_index); | ||
1306 | |||
1307 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | ||
1308 | WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) | ||
1309 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
1310 | |||
1311 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | ||
1312 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | ||
1313 | out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; | ||
1314 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1315 | |||
1316 | /* Return ATA status register for completed CRPB */ | ||
1317 | return ata_status; | ||
1318 | } | ||
1319 | |||
1320 | /** | ||
1321 | * mv_err_intr - Handle error interrupts on the port | 1352 | * mv_err_intr - Handle error interrupts on the port |
1322 | * @ap: ATA channel to manipulate | 1353 | * @ap: ATA channel to manipulate |
1323 | * @reset_allowed: bool: 0 == don't trigger from reset here | 1354 | * @reset_allowed: bool: 0 == don't trigger from reset here |
@@ -1331,30 +1362,191 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
1331 | * LOCKING: | 1362 | * LOCKING: |
1332 | * Inherited from caller. | 1363 | * Inherited from caller. |
1333 | */ | 1364 | */ |
1334 | static void mv_err_intr(struct ata_port *ap, int reset_allowed) | 1365 | static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
1335 | { | 1366 | { |
1336 | void __iomem *port_mmio = mv_ap_base(ap); | 1367 | void __iomem *port_mmio = mv_ap_base(ap); |
1337 | u32 edma_err_cause, serr = 0; | 1368 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
1369 | struct mv_port_priv *pp = ap->private_data; | ||
1370 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1371 | unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | ||
1372 | unsigned int action = 0, err_mask = 0; | ||
1373 | struct ata_eh_info *ehi = &ap->eh_info; | ||
1338 | 1374 | ||
1339 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1375 | ata_ehi_clear_desc(ehi); |
1340 | 1376 | ||
1341 | if (EDMA_ERR_SERR & edma_err_cause) { | 1377 | if (!edma_enabled) { |
1378 | /* just a guess: do we need to do this? should we | ||
1379 | * expand this, and do it in all cases? | ||
1380 | */ | ||
1342 | sata_scr_read(ap, SCR_ERROR, &serr); | 1381 | sata_scr_read(ap, SCR_ERROR, &serr); |
1343 | sata_scr_write_flush(ap, SCR_ERROR, serr); | 1382 | sata_scr_write_flush(ap, SCR_ERROR, serr); |
1344 | } | 1383 | } |
1345 | if (EDMA_ERR_SELF_DIS & edma_err_cause) { | 1384 | |
1346 | struct mv_port_priv *pp = ap->private_data; | 1385 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1347 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 1386 | |
1387 | ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause); | ||
1388 | |||
1389 | /* | ||
1390 | * all generations share these EDMA error cause bits | ||
1391 | */ | ||
1392 | |||
1393 | if (edma_err_cause & EDMA_ERR_DEV) | ||
1394 | err_mask |= AC_ERR_DEV; | ||
1395 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | ||
1396 | EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR | | ||
1397 | EDMA_ERR_INTRL_PAR)) { | ||
1398 | err_mask |= AC_ERR_ATA_BUS; | ||
1399 | action |= ATA_EH_HARDRESET; | ||
1400 | ata_ehi_push_desc(ehi, ", parity error"); | ||
1401 | } | ||
1402 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | ||
1403 | ata_ehi_hotplugged(ehi); | ||
1404 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | ||
1405 | ", dev disconnect" : ", dev connect"); | ||
1406 | } | ||
1407 | |||
1408 | if (IS_GEN_I(hpriv)) { | ||
1409 | eh_freeze_mask = EDMA_EH_FREEZE_5; | ||
1410 | |||
1411 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { | ||
1412 | struct mv_port_priv *pp = ap->private_data; | ||
1413 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
1414 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | ||
1415 | } | ||
1416 | } else { | ||
1417 | eh_freeze_mask = EDMA_EH_FREEZE; | ||
1418 | |||
1419 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | ||
1420 | struct mv_port_priv *pp = ap->private_data; | ||
1421 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
1422 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | ||
1423 | } | ||
1424 | |||
1425 | if (edma_err_cause & EDMA_ERR_SERR) { | ||
1426 | sata_scr_read(ap, SCR_ERROR, &serr); | ||
1427 | sata_scr_write_flush(ap, SCR_ERROR, serr); | ||
1428 | err_mask = AC_ERR_ATA_BUS; | ||
1429 | action |= ATA_EH_HARDRESET; | ||
1430 | } | ||
1348 | } | 1431 | } |
1349 | DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " | ||
1350 | "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr); | ||
1351 | 1432 | ||
1352 | /* Clear EDMA now that SERR cleanup done */ | 1433 | /* Clear EDMA now that SERR cleanup done */ |
1353 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1434 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1354 | 1435 | ||
1355 | /* check for fatal here and recover if needed */ | 1436 | if (!err_mask) { |
1356 | if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) | 1437 | err_mask = AC_ERR_OTHER; |
1357 | mv_stop_and_reset(ap); | 1438 | action |= ATA_EH_HARDRESET; |
1439 | } | ||
1440 | |||
1441 | ehi->serror |= serr; | ||
1442 | ehi->action |= action; | ||
1443 | |||
1444 | if (qc) | ||
1445 | qc->err_mask |= err_mask; | ||
1446 | else | ||
1447 | ehi->err_mask |= err_mask; | ||
1448 | |||
1449 | if (edma_err_cause & eh_freeze_mask) | ||
1450 | ata_port_freeze(ap); | ||
1451 | else | ||
1452 | ata_port_abort(ap); | ||
1453 | } | ||
1454 | |||
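
Editor's note: the rewritten mv_err_intr() above translates raw EDMA_ERR_IRQ_CAUSE bits into a libata err_mask plus an EH action, then chooses between freezing the port (causes in the per-generation freeze mask) and merely aborting outstanding commands. The table-driven core of that translation can be modelled as below; the bit values and outcome codes are illustrative stand-ins, not the driver's real enums.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative cause bits and outcome flags (not the driver's values) */
#define ERR_DEV      (1u << 0)
#define ERR_PARITY   (1u << 1)
#define ERR_DISCON   (1u << 2)

#define ACT_HARDRESET (1u << 0)
#define ACT_FREEZE    (1u << 1)

struct decode { uint32_t err_mask; uint32_t action; };

static struct decode decode_cause(uint32_t cause, uint32_t freeze_mask)
{
	struct decode d = { 0, 0 };

	if (cause & ERR_DEV)
		d.err_mask |= 0x01;		/* models AC_ERR_DEV */
	if (cause & ERR_PARITY) {
		d.err_mask |= 0x02;		/* models AC_ERR_ATA_BUS */
		d.action |= ACT_HARDRESET;
	}
	if (cause & ERR_DISCON)
		d.action |= ACT_HARDRESET;	/* hotplug event */

	if (!d.err_mask) {			/* unknown cause: play safe */
		d.err_mask = 0x80;		/* models AC_ERR_OTHER */
		d.action |= ACT_HARDRESET;
	}
	if (cause & freeze_mask)		/* per-generation fatal set */
		d.action |= ACT_FREEZE;
	return d;
}

int main(void)
{
	struct decode d = decode_cause(ERR_PARITY, ERR_PARITY | ERR_DISCON);
	printf("err_mask=0x%02x action=0x%x\n", d.err_mask, d.action);
	return 0;
}
```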
1455 | static void mv_intr_pio(struct ata_port *ap) | ||
1456 | { | ||
1457 | struct ata_queued_cmd *qc; | ||
1458 | u8 ata_status; | ||
1459 | |||
1460 | /* ignore spurious intr if drive still BUSY */ | ||
1461 | ata_status = readb(ap->ioaddr.status_addr); | ||
1462 | if (unlikely(ata_status & ATA_BUSY)) | ||
1463 | return; | ||
1464 | |||
1465 | /* get active ATA command */ | ||
1466 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1467 | if (unlikely(!qc)) /* no active tag */ | ||
1468 | return; | ||
1469 | if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ | ||
1470 | return; | ||
1471 | |||
1472 | /* and finally, complete the ATA command */ | ||
1473 | qc->err_mask |= ac_err_mask(ata_status); | ||
1474 | ata_qc_complete(qc); | ||
1475 | } | ||
1476 | |||
1477 | static void mv_intr_edma(struct ata_port *ap) | ||
1478 | { | ||
1479 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1480 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1481 | struct mv_port_priv *pp = ap->private_data; | ||
1482 | struct ata_queued_cmd *qc; | ||
1483 | u32 out_index, in_index; | ||
1484 | bool work_done = false; | ||
1485 | |||
1486 | /* get h/w response queue pointer */ | ||
1487 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) | ||
1488 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1489 | |||
1490 | while (1) { | ||
1491 | u16 status; | ||
1492 | |||
1493 | /* get s/w response queue last-read pointer, and compare */ | ||
1494 | out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; | ||
1495 | if (in_index == out_index) | ||
1496 | break; | ||
1497 | |||
1498 | |||
1499 | /* 50xx: get active ATA command */ | ||
1500 | if (IS_GEN_I(hpriv)) | ||
1501 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1502 | |||
1503 | /* 60xx: get active ATA command via tag, to enable support | ||
1504 | * for queueing. this works transparently for queued and | ||
1505 | * non-queued modes. | ||
1506 | */ | ||
1507 | else { | ||
1508 | unsigned int tag; | ||
1509 | |||
1510 | if (IS_GEN_II(hpriv)) | ||
1511 | tag = (le16_to_cpu(pp->crpb[out_index].id) | ||
1512 | >> CRPB_IOID_SHIFT_6) & 0x3f; | ||
1513 | else | ||
1514 | tag = (le16_to_cpu(pp->crpb[out_index].id) | ||
1515 | >> CRPB_IOID_SHIFT_7) & 0x3f; | ||
1516 | |||
1517 | qc = ata_qc_from_tag(ap, tag); | ||
1518 | } | ||
1519 | |||
1520 | /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS | ||
1521 | * bits (WARNING: might not necessarily be associated | ||
1522 | * with this command), which -should- be clear | ||
1523 | * if all is well | ||
1524 | */ | ||
1525 | status = le16_to_cpu(pp->crpb[out_index].flags); | ||
1526 | if (unlikely(status & 0xff)) { | ||
1527 | mv_err_intr(ap, qc); | ||
1528 | return; | ||
1529 | } | ||
1530 | |||
1531 | /* and finally, complete the ATA command */ | ||
1532 | if (qc) { | ||
1533 | qc->err_mask |= | ||
1534 | ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT); | ||
1535 | ata_qc_complete(qc); | ||
1536 | } | ||
1537 | |||
1538 | /* advance software response queue pointer, to | ||
1539 | * indicate (after the loop completes) to hardware | ||
1540 | * that we have consumed a response queue entry. | ||
1541 | */ | ||
1542 | work_done = true; | ||
1543 | pp->resp_idx++; | ||
1544 | } | ||
1545 | |||
1546 | if (work_done) | ||
1547 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | | ||
1548 | (out_index << EDMA_RSP_Q_PTR_SHIFT), | ||
1549 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1358 | } | 1550 | } |
1359 | 1551 | ||
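
Editor's note: mv_intr_edma() is the matching consumer side. It snapshots the hardware IN pointer once, drains CRPB entries until the software resp_idx catches up, and writes the OUT pointer back a single time after the loop rather than once per entry. A compact model of that drain loop, with the ring and registers stubbed as plain variables:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define Q_DEPTH       32u
#define Q_DEPTH_MASK  (Q_DEPTH - 1)

static uint16_t crpb_flags[Q_DEPTH];	/* stands in for the CRPB ring */
static unsigned int resp_idx;		/* software consumer count */
static unsigned int hw_in_index = 3;	/* snapshot of EDMA_RSP_Q_IN_PTR */
static unsigned int out_ptr_reg;	/* stands in for ..._OUT_PTR_OFS */

static void drain_responses(void)
{
	unsigned int out_index = 0;
	bool work_done = false;

	while (1) {
		out_index = resp_idx & Q_DEPTH_MASK;
		if (out_index == hw_in_index)
			break;			/* caught up with hardware */

		/* a set low byte here would mean an error CRPB; the real
		 * handler diverts to mv_err_intr() at this point */
		(void)crpb_flags[out_index];

		work_done = true;
		resp_idx++;			/* consume one entry */
	}

	/* single posted write tells the EDMA how far we have read */
	if (work_done)
		out_ptr_reg = out_index;
}

int main(void)
{
	drain_responses();
	printf("resp_idx=%u out_ptr=%u\n", resp_idx, out_ptr_reg);
	return 0;
}
```

Batching the OUT-pointer write keeps MMIO traffic down and is safe because the hardware only needs to know the final consumed position.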
1360 | /** | 1552 | /** |
@@ -1377,10 +1569,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1377 | { | 1569 | { |
1378 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; | 1570 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; |
1379 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); | 1571 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
1380 | struct ata_queued_cmd *qc; | ||
1381 | u32 hc_irq_cause; | 1572 | u32 hc_irq_cause; |
1382 | int shift, port, port0, hard_port, handled; | 1573 | int port, port0; |
1383 | unsigned int err_mask; | ||
1384 | 1574 | ||
1385 | if (hc == 0) | 1575 | if (hc == 0) |
1386 | port0 = 0; | 1576 | port0 = 0; |
@@ -1389,79 +1579,95 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1389 | 1579 | ||
1390 | /* we'll need the HC success int register in most cases */ | 1580 | /* we'll need the HC success int register in most cases */ |
1391 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 1581 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); |
1392 | if (hc_irq_cause) | 1582 | if (!hc_irq_cause) |
1393 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | 1583 | return; |
1584 | |||
1585 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1394 | 1586 | ||
1395 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | 1587 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", |
1396 | hc,relevant,hc_irq_cause); | 1588 | hc,relevant,hc_irq_cause); |
1397 | 1589 | ||
1398 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | 1590 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { |
1399 | u8 ata_status = 0; | ||
1400 | struct ata_port *ap = host->ports[port]; | 1591 | struct ata_port *ap = host->ports[port]; |
1401 | struct mv_port_priv *pp = ap->private_data; | 1592 | struct mv_port_priv *pp = ap->private_data; |
1593 | int have_err_bits, hard_port, shift; | ||
1594 | |||
1595 | if ((!ap) || (ap->flags & ATA_FLAG_DISABLED)) | ||
1596 | continue; | ||
1597 | |||
1598 | shift = port << 1; /* (port * 2) */ | ||
1599 | if (port >= MV_PORTS_PER_HC) { | ||
1600 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | ||
1601 | } | ||
1602 | have_err_bits = ((PORT0_ERR << shift) & relevant); | ||
1603 | |||
1604 | if (unlikely(have_err_bits)) { | ||
1605 | struct ata_queued_cmd *qc; | ||
1606 | |||
1607 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1608 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
1609 | continue; | ||
1610 | |||
1611 | mv_err_intr(ap, qc); | ||
1612 | continue; | ||
1613 | } | ||
1402 | 1614 | ||
1403 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ | 1615 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ |
1404 | handled = 0; /* ensure ata_status is set if handled++ */ | ||
1405 | 1616 | ||
1406 | /* Note that DEV_IRQ might happen spuriously during EDMA, | ||
1407 | * and should be ignored in such cases. | ||
1408 | * The cause of this is still under investigation. | ||
1409 | */ | ||
1410 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1617 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1411 | /* EDMA: check for response queue interrupt */ | 1618 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) |
1412 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { | 1619 | mv_intr_edma(ap); |
1413 | ata_status = mv_get_crpb_status(ap); | ||
1414 | handled = 1; | ||
1415 | } | ||
1416 | } else { | 1620 | } else { |
1417 | /* PIO: check for device (drive) interrupt */ | 1621 | if ((DEV_IRQ << hard_port) & hc_irq_cause) |
1418 | if ((DEV_IRQ << hard_port) & hc_irq_cause) { | 1622 | mv_intr_pio(ap); |
1419 | ata_status = readb(ap->ioaddr.status_addr); | ||
1420 | handled = 1; | ||
1421 | /* ignore spurious intr if drive still BUSY */ | ||
1422 | if (ata_status & ATA_BUSY) { | ||
1423 | ata_status = 0; | ||
1424 | handled = 0; | ||
1425 | } | ||
1426 | } | ||
1427 | } | 1623 | } |
1624 | } | ||
1625 | VPRINTK("EXIT\n"); | ||
1626 | } | ||
1428 | 1627 | ||
1429 | if (ap && (ap->flags & ATA_FLAG_DISABLED)) | 1628 | static void mv_pci_error(struct ata_host *host, void __iomem *mmio) |
1430 | continue; | 1629 | { |
1630 | struct ata_port *ap; | ||
1631 | struct ata_queued_cmd *qc; | ||
1632 | struct ata_eh_info *ehi; | ||
1633 | unsigned int i, err_mask, printed = 0; | ||
1634 | u32 err_cause; | ||
1431 | 1635 | ||
1432 | err_mask = ac_err_mask(ata_status); | 1636 | err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); |
1433 | 1637 | ||
1434 | shift = port << 1; /* (port * 2) */ | 1638 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", |
1435 | if (port >= MV_PORTS_PER_HC) { | 1639 | err_cause); |
1436 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1640 | |
1437 | } | 1641 | DPRINTK("All regs @ PCI error\n"); |
1438 | if ((PORT0_ERR << shift) & relevant) { | 1642 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); |
1439 | mv_err_intr(ap, 1); | 1643 | |
1440 | err_mask |= AC_ERR_OTHER; | 1644 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); |
1441 | handled = 1; | ||
1442 | } | ||
1443 | 1645 | ||
1444 | if (handled) { | 1646 | for (i = 0; i < host->n_ports; i++) { |
1647 | ap = host->ports[i]; | ||
1648 | if (!ata_port_offline(ap)) { | ||
1649 | ehi = &ap->eh_info; | ||
1650 | ata_ehi_clear_desc(ehi); | ||
1651 | if (!printed++) | ||
1652 | ata_ehi_push_desc(ehi, | ||
1653 | "PCI err cause 0x%08x", err_cause); | ||
1654 | err_mask = AC_ERR_HOST_BUS; | ||
1655 | ehi->action = ATA_EH_HARDRESET; | ||
1445 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1656 | qc = ata_qc_from_tag(ap, ap->active_tag); |
1446 | if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { | 1657 | if (qc) |
1447 | VPRINTK("port %u IRQ found for qc, " | 1658 | qc->err_mask |= err_mask; |
1448 | "ata_status 0x%x\n", port,ata_status); | 1659 | else |
1449 | /* mark qc status appropriately */ | 1660 | ehi->err_mask |= err_mask; |
1450 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { | 1661 | |
1451 | qc->err_mask |= err_mask; | 1662 | ata_port_freeze(ap); |
1452 | ata_qc_complete(qc); | ||
1453 | } | ||
1454 | } | ||
1455 | } | 1663 | } |
1456 | } | 1664 | } |
1457 | VPRINTK("EXIT\n"); | ||
1458 | } | 1665 | } |
1459 | 1666 | ||
1460 | /** | 1667 | /** |
1461 | * mv_interrupt - | 1668 | * mv_interrupt - Main interrupt event handler |
1462 | * @irq: unused | 1669 | * @irq: unused |
1463 | * @dev_instance: private data; in this case the host structure | 1670 | * @dev_instance: private data; in this case the host structure |
1464 | * @regs: unused | ||
1465 | * | 1671 | * |
1466 | * Read the read only register to determine if any host | 1672 | * Read the read only register to determine if any host |
1467 | * controllers have pending interrupts. If so, call lower level | 1673 | * controllers have pending interrupts. If so, call lower level |
@@ -1477,7 +1683,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
1477 | struct ata_host *host = dev_instance; | 1683 | struct ata_host *host = dev_instance; |
1478 | unsigned int hc, handled = 0, n_hcs; | 1684 | unsigned int hc, handled = 0, n_hcs; |
1479 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; | 1685 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; |
1480 | struct mv_host_priv *hpriv; | ||
1481 | u32 irq_stat; | 1686 | u32 irq_stat; |
1482 | 1687 | ||
1483 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); | 1688 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
@@ -1491,34 +1696,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
1491 | n_hcs = mv_get_hc_count(host->ports[0]->flags); | 1696 | n_hcs = mv_get_hc_count(host->ports[0]->flags); |
1492 | spin_lock(&host->lock); | 1697 | spin_lock(&host->lock); |
1493 | 1698 | ||
1699 | if (unlikely(irq_stat & PCI_ERR)) { | ||
1700 | mv_pci_error(host, mmio); | ||
1701 | handled = 1; | ||
1702 | goto out_unlock; /* skip all other HC irq handling */ | ||
1703 | } | ||
1704 | |||
1494 | for (hc = 0; hc < n_hcs; hc++) { | 1705 | for (hc = 0; hc < n_hcs; hc++) { |
1495 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); | 1706 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); |
1496 | if (relevant) { | 1707 | if (relevant) { |
1497 | mv_host_intr(host, relevant, hc); | 1708 | mv_host_intr(host, relevant, hc); |
1498 | handled++; | 1709 | handled = 1; |
1499 | } | ||
1500 | } | ||
1501 | |||
1502 | hpriv = host->private_data; | ||
1503 | if (IS_60XX(hpriv)) { | ||
1504 | /* deal with the interrupt coalescing bits */ | ||
1505 | if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { | ||
1506 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); | ||
1507 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); | ||
1508 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE); | ||
1509 | } | 1710 | } |
1510 | } | 1711 | } |
1511 | 1712 | ||
1512 | if (PCI_ERR & irq_stat) { | 1713 | out_unlock: |
1513 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", | ||
1514 | readl(mmio + PCI_IRQ_CAUSE_OFS)); | ||
1515 | |||
1516 | DPRINTK("All regs @ PCI error\n"); | ||
1517 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | ||
1518 | |||
1519 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | ||
1520 | handled++; | ||
1521 | } | ||
1522 | spin_unlock(&host->lock); | 1714 | spin_unlock(&host->lock); |
1523 | 1715 | ||
1524 | return IRQ_RETVAL(handled); | 1716 | return IRQ_RETVAL(handled); |
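
Editor's note: the reworked mv_interrupt() reads one top-level cause register, short-circuits straight to mv_pci_error() when PCI_ERR is set (skipping all per-HC work via out_unlock), and otherwise dispatches each host controller whose bits are pending. That "read once, classify, dispatch" shape is sketched below; the bit positions are illustrative, not the driver's constants.

```c
#include <stdint.h>
#include <stdio.h>

#define PCI_ERR      (1u << 31)	/* illustrative bit positions */
#define HC_SHIFT     8
#define HC_IRQ_PEND  0xffu

static void handle_pci_error(void) { printf("PCI error: freeze all\n"); }
static void handle_hc(unsigned int hc, uint32_t relevant)
{
	printf("hc%u: cause 0x%02x\n", hc, relevant >> (hc * HC_SHIFT));
}

static int top_level_irq(uint32_t irq_stat, unsigned int n_hcs)
{
	unsigned int hc, handled = 0;

	if (!irq_stat)
		return 0;			/* not our interrupt */

	if (irq_stat & PCI_ERR) {		/* fatal: skip HC handling */
		handle_pci_error();
		return 1;
	}

	for (hc = 0; hc < n_hcs; hc++) {
		uint32_t relevant = irq_stat & (HC_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			handle_hc(hc, relevant);
			handled = 1;
		}
	}
	return handled;
}

int main(void)
{
	printf("handled=%d\n", top_level_irq(0x0100, 2));
	return 0;
}
```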
@@ -1907,7 +2099,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1907 | 2099 | ||
1908 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2100 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
1909 | 2101 | ||
1910 | if (IS_60XX(hpriv)) { | 2102 | if (IS_GEN_II(hpriv)) { |
1911 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2103 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); |
1912 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2104 | ifctl |= (1 << 7); /* enable gen2i speed */ |
1913 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | 2105 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ |
@@ -1923,32 +2115,12 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1923 | 2115 | ||
1924 | hpriv->ops->phy_errata(hpriv, mmio, port_no); | 2116 | hpriv->ops->phy_errata(hpriv, mmio, port_no); |
1925 | 2117 | ||
1926 | if (IS_50XX(hpriv)) | 2118 | if (IS_GEN_I(hpriv)) |
1927 | mdelay(1); | 2119 | mdelay(1); |
1928 | } | 2120 | } |
1929 | 2121 | ||
1930 | static void mv_stop_and_reset(struct ata_port *ap) | ||
1931 | { | ||
1932 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1933 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
1934 | |||
1935 | mv_stop_dma(ap); | ||
1936 | |||
1937 | mv_channel_reset(hpriv, mmio, ap->port_no); | ||
1938 | |||
1939 | __mv_phy_reset(ap, 0); | ||
1940 | } | ||
1941 | |||
1942 | static inline void __msleep(unsigned int msec, int can_sleep) | ||
1943 | { | ||
1944 | if (can_sleep) | ||
1945 | msleep(msec); | ||
1946 | else | ||
1947 | mdelay(msec); | ||
1948 | } | ||
1949 | |||
1950 | /** | 2122 | /** |
1951 | * __mv_phy_reset - Perform eDMA reset followed by COMRESET | 2123 | * mv_phy_reset - Perform eDMA reset followed by COMRESET |
1952 | * @ap: ATA channel to manipulate | 2124 | * @ap: ATA channel to manipulate |
1953 | * | 2125 | * |
1954 | * Part of this is taken from __sata_phy_reset and modified to | 2126 | * Part of this is taken from __sata_phy_reset and modified to |
@@ -1958,14 +2130,12 @@ static inline void __msleep(unsigned int msec, int can_sleep) | |||
1958 | * Inherited from caller. This is coded to be safe to call at | 2130 |
1959 | * interrupt level, i.e. it does not sleep. | 2131 | * interrupt level, i.e. it does not sleep. |
1960 | */ | 2132 | */ |
1961 | static void __mv_phy_reset(struct ata_port *ap, int can_sleep) | 2133 | static void mv_phy_reset(struct ata_port *ap, unsigned int *class, |
2134 | unsigned long deadline) | ||
1962 | { | 2135 | { |
1963 | struct mv_port_priv *pp = ap->private_data; | 2136 | struct mv_port_priv *pp = ap->private_data; |
1964 | struct mv_host_priv *hpriv = ap->host->private_data; | 2137 | struct mv_host_priv *hpriv = ap->host->private_data; |
1965 | void __iomem *port_mmio = mv_ap_base(ap); | 2138 | void __iomem *port_mmio = mv_ap_base(ap); |
1966 | struct ata_taskfile tf; | ||
1967 | struct ata_device *dev = &ap->device[0]; | ||
1968 | unsigned long timeout; | ||
1969 | int retry = 5; | 2139 | int retry = 5; |
1970 | u32 sstatus; | 2140 | u32 sstatus; |
1971 | 2141 | ||
@@ -1978,22 +2148,21 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep) | |||
1978 | /* Issue COMRESET via SControl */ | 2148 | /* Issue COMRESET via SControl */ |
1979 | comreset_retry: | 2149 | comreset_retry: |
1980 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); | 2150 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); |
1981 | __msleep(1, can_sleep); | 2151 | msleep(1); |
1982 | 2152 | ||
1983 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); | 2153 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); |
1984 | __msleep(20, can_sleep); | 2154 | msleep(20); |
1985 | 2155 | ||
1986 | timeout = jiffies + msecs_to_jiffies(200); | ||
1987 | do { | 2156 | do { |
1988 | sata_scr_read(ap, SCR_STATUS, &sstatus); | 2157 | sata_scr_read(ap, SCR_STATUS, &sstatus); |
1989 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) | 2158 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) |
1990 | break; | 2159 | break; |
1991 | 2160 | ||
1992 | __msleep(1, can_sleep); | 2161 | msleep(1); |
1993 | } while (time_before(jiffies, timeout)); | 2162 | } while (time_before(jiffies, deadline)); |
1994 | 2163 | ||
1995 | /* work around errata */ | 2164 | /* work around errata */ |
1996 | if (IS_60XX(hpriv) && | 2165 | if (IS_GEN_II(hpriv) && |
1997 | (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) && | 2166 | (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) && |
1998 | (retry-- > 0)) | 2167 | (retry-- > 0)) |
1999 | goto comreset_retry; | 2168 | goto comreset_retry; |
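
Editor's note: the COMRESET sequence above is pure SControl/SStatus choreography: write DET=1 to force reset signalling, drop back to DET=0, then poll SStatus until the DET field reads "device present and PHY up" (3) or "nothing attached" (0) — now bounded by the EH deadline instead of a private 200 ms timeout. A userspace model of the polling logic, with the SCR accesses stubbed:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* SStatus.DET values (SATA spec): 0 = no device, 3 = device + PHY up */
#define DET_MASK 0x3u

static uint32_t sstatus = 0x1;	/* device seen, PHY not yet established */
static int settle = 2;		/* fake PHY settling time, in polls */

static void scontrol_write(uint32_t v) { (void)v; }	/* 0x301, then 0x300 */
static uint32_t sstatus_read(void)
{
	if (settle && --settle == 0)
		sstatus = 0x123;	/* link comes up at gen1, say */
	return sstatus;
}

static bool comreset(unsigned int deadline_polls)
{
	uint32_t det;

	scontrol_write(0x301);		/* DET=1: issue COMRESET */
	scontrol_write(0x300);		/* DET=0: release */

	while (deadline_polls--) {	/* models time_before(jiffies, deadline) */
		det = sstatus_read() & DET_MASK;
		if (det == 3 || det == 0)
			return det == 3; /* settled: present or absent */
		/* msleep(1) would go here */
	}
	return false;			/* deadline passed, link state unknown */
}

int main(void)
{
	printf("device present: %d\n", comreset(200));
	return 0;
}
```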
@@ -2002,13 +2171,8 @@ comreset_retry: | |||
2002 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | 2171 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), |
2003 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | 2172 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); |
2004 | 2173 | ||
2005 | if (ata_port_online(ap)) { | 2174 | if (ata_port_offline(ap)) { |
2006 | ata_port_probe(ap); | 2175 | *class = ATA_DEV_NONE; |
2007 | } else { | ||
2008 | sata_scr_read(ap, SCR_STATUS, &sstatus); | ||
2009 | ata_port_printk(ap, KERN_INFO, | ||
2010 | "no device found (phy stat %08x)\n", sstatus); | ||
2011 | ata_port_disable(ap); | ||
2012 | return; | 2176 | return; |
2013 | } | 2177 | } |
2014 | 2178 | ||
@@ -2022,68 +2186,152 @@ comreset_retry: | |||
2022 | u8 drv_stat = ata_check_status(ap); | 2186 | u8 drv_stat = ata_check_status(ap); |
2023 | if ((drv_stat != 0x80) && (drv_stat != 0x7f)) | 2187 | if ((drv_stat != 0x80) && (drv_stat != 0x7f)) |
2024 | break; | 2188 | break; |
2025 | __msleep(500, can_sleep); | 2189 | msleep(500); |
2026 | if (retry-- <= 0) | 2190 | if (retry-- <= 0) |
2027 | break; | 2191 | break; |
2192 | if (time_after(jiffies, deadline)) | ||
2193 | break; | ||
2028 | } | 2194 | } |
2029 | 2195 | ||
2030 | tf.lbah = readb(ap->ioaddr.lbah_addr); | 2196 | /* FIXME: if we passed the deadline, the following |
2031 | tf.lbam = readb(ap->ioaddr.lbam_addr); | 2197 | * code probably produces an invalid result |
2032 | tf.lbal = readb(ap->ioaddr.lbal_addr); | 2198 | */ |
2033 | tf.nsect = readb(ap->ioaddr.nsect_addr); | ||
2034 | 2199 | ||
2035 | dev->class = ata_dev_classify(&tf); | 2200 | /* finally, read device signature from TF registers */ |
2036 | if (!ata_dev_enabled(dev)) { | 2201 | *class = ata_dev_try_classify(ap, 0, NULL); |
2037 | VPRINTK("Port disabled post-sig: No device present.\n"); | ||
2038 | ata_port_disable(ap); | ||
2039 | } | ||
2040 | 2202 | ||
2041 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 2203 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
2042 | 2204 | ||
2043 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 2205 | WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN); |
2044 | 2206 | ||
2045 | VPRINTK("EXIT\n"); | 2207 | VPRINTK("EXIT\n"); |
2046 | } | 2208 | } |
2047 | 2209 | ||
2048 | static void mv_phy_reset(struct ata_port *ap) | 2210 | static int mv_prereset(struct ata_port *ap, unsigned long deadline) |
2049 | { | 2211 | { |
2050 | __mv_phy_reset(ap, 1); | 2212 | struct mv_port_priv *pp = ap->private_data; |
2213 | struct ata_eh_context *ehc = &ap->eh_context; | ||
2214 | int rc; | ||
2215 | |||
2216 | rc = mv_stop_dma(ap); | ||
2217 | if (rc) | ||
2218 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2219 | |||
2220 | if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) { | ||
2221 | pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET; | ||
2222 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2223 | } | ||
2224 | |||
2225 | /* if we're about to do hardreset, nothing more to do */ | ||
2226 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
2227 | return 0; | ||
2228 | |||
2229 | if (ata_port_online(ap)) | ||
2230 | rc = ata_wait_ready(ap, deadline); | ||
2231 | else | ||
2232 | rc = -ENODEV; | ||
2233 | |||
2234 | return rc; | ||
2051 | } | 2235 | } |
2052 | 2236 | ||
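
Editor's note: mv_prereset() above encodes the driver's reset policy — always hard-reset the first time a port goes through EH (the HAD_A_RESET flag) or whenever the EDMA engine would not stop, and only fall through to waiting for a ready device when a plain soft reset remains viable. That decision tree, reduced to a pure function (the flag names are the driver's, the encoding below is illustrative):

```c
#include <stdio.h>
#include <stdbool.h>

enum action { EH_SOFTRESET, EH_HARDRESET };

/* Mirrors mv_prereset()'s policy: a first EH pass or a stuck EDMA
 * engine escalates straight to hard reset; otherwise soft reset is
 * allowed and the caller waits for the device to become ready. */
static enum action prereset_policy(bool *had_a_reset, bool edma_stuck)
{
	enum action act = EH_SOFTRESET;

	if (edma_stuck)
		act = EH_HARDRESET;

	if (!*had_a_reset) {		/* force a known-good state once */
		*had_a_reset = true;
		act = EH_HARDRESET;
	}
	return act;
}

int main(void)
{
	bool had_reset = false;

	printf("pass 1: %d\n", prereset_policy(&had_reset, false)); /* 1 */
	printf("pass 2: %d\n", prereset_policy(&had_reset, false)); /* 0 */
	return 0;
}
```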
2053 | /** | 2237 | static int mv_hardreset(struct ata_port *ap, unsigned int *class, |
2054 | * mv_eng_timeout - Routine called by libata when SCSI times out I/O | 2238 | unsigned long deadline) |
2055 | * @ap: ATA channel to manipulate | ||
2056 | * | ||
2057 | * Intent is to clear all pending error conditions, reset the | ||
2058 | * chip/bus, fail the command, and move on. | ||
2059 | * | ||
2060 | * LOCKING: | ||
2061 | * This routine holds the host lock while failing the command. | ||
2062 | */ | ||
2063 | static void mv_eng_timeout(struct ata_port *ap) | ||
2064 | { | 2239 | { |
2240 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
2065 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | 2241 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; |
2066 | struct ata_queued_cmd *qc; | ||
2067 | unsigned long flags; | ||
2068 | 2242 | ||
2069 | ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); | 2243 | mv_stop_dma(ap); |
2070 | DPRINTK("All regs @ start of eng_timeout\n"); | ||
2071 | mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev)); | ||
2072 | 2244 | ||
2073 | qc = ata_qc_from_tag(ap, ap->active_tag); | 2245 | mv_channel_reset(hpriv, mmio, ap->port_no); |
2074 | printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", | ||
2075 | mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); | ||
2076 | 2246 | ||
2077 | spin_lock_irqsave(&ap->host->lock, flags); | 2247 | mv_phy_reset(ap, class, deadline); |
2078 | mv_err_intr(ap, 0); | 2248 | |
2079 | mv_stop_and_reset(ap); | 2249 | return 0; |
2080 | spin_unlock_irqrestore(&ap->host->lock, flags); | 2250 | } |
2251 | |||
2252 | static void mv_postreset(struct ata_port *ap, unsigned int *classes) | ||
2253 | { | ||
2254 | u32 serr; | ||
2255 | |||
2256 | /* print link status */ | ||
2257 | sata_print_link_status(ap); | ||
2258 | |||
2259 | /* clear SError */ | ||
2260 | sata_scr_read(ap, SCR_ERROR, &serr); | ||
2261 | sata_scr_write_flush(ap, SCR_ERROR, serr); | ||
2081 | 2262 | ||
2082 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 2263 | /* bail out if no device is present */ |
2083 | if (qc->flags & ATA_QCFLAG_ACTIVE) { | 2264 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { |
2084 | qc->err_mask |= AC_ERR_TIMEOUT; | 2265 | DPRINTK("EXIT, no device\n"); |
2085 | ata_eh_qc_complete(qc); | 2266 | return; |
2086 | } | 2267 | } |
2268 | |||
2269 | /* set up device control */ | ||
2270 | iowrite8(ap->ctl, ap->ioaddr.ctl_addr); | ||
2271 | } | ||
2272 | |||
2273 | static void mv_error_handler(struct ata_port *ap) | ||
2274 | { | ||
2275 | ata_do_eh(ap, mv_prereset, ata_std_softreset, | ||
2276 | mv_hardreset, mv_postreset); | ||
2277 | } | ||
2278 | |||
2279 | static void mv_post_int_cmd(struct ata_queued_cmd *qc) | ||
2280 | { | ||
2281 | mv_stop_dma(qc->ap); | ||
2282 | } | ||
2283 | |||
2284 | static void mv_eh_freeze(struct ata_port *ap) | ||
2285 | { | ||
2286 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
2287 | unsigned int hc = (ap->port_no > 3) ? 1 : 0; | ||
2288 | u32 tmp, mask; | ||
2289 | unsigned int shift; | ||
2290 | |||
2291 | /* FIXME: handle coalescing completion events properly */ | ||
2292 | |||
2293 | shift = ap->port_no * 2; | ||
2294 | if (hc > 0) | ||
2295 | shift++; | ||
2296 | |||
2297 | mask = 0x3 << shift; | ||
2298 | |||
2299 | /* disable assertion of portN err, done events */ | ||
2300 | tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2301 | writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2302 | } | ||
2303 | |||
2304 | static void mv_eh_thaw(struct ata_port *ap) | ||
2305 | { | ||
2306 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
2307 | unsigned int hc = (ap->port_no > 3) ? 1 : 0; | ||
2308 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); | ||
2309 | void __iomem *port_mmio = mv_ap_base(ap); | ||
2310 | u32 tmp, mask, hc_irq_cause; | ||
2311 | unsigned int shift, hc_port_no = ap->port_no; | ||
2312 | |||
2313 | /* FIXME: handle coalescing completion events properly */ | ||
2314 | |||
2315 | shift = ap->port_no * 2; | ||
2316 | if (hc > 0) { | ||
2317 | shift++; | ||
2318 | hc_port_no -= 4; | ||
2319 | } | ||
2320 | |||
2321 | mask = 0x3 << shift; | ||
2322 | |||
2323 | /* clear EDMA errors on this port */ | ||
2324 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
2325 | |||
2326 | /* clear pending irq events */ | ||
2327 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | ||
2328 | hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */ | ||
2329 | hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */ | ||
2330 | writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
2331 | |||
2332 | /* enable assertion of portN err, done events */ | ||
2333 | tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2334 | writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2087 | } | 2335 | } |
2088 | 2336 | ||
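
Editor's note: mv_eh_freeze() and mv_eh_thaw() compute the same two-bit mask in the main IRQ mask register — each port owns its err/done pair at bits (port*2, port*2+1), with one extra skip on the second host controller because bit 8 of the register is not a port bit. A small model of that mask arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Per-port err/done mask in the HC main IRQ mask register: two bits
 * per port, skipping bit 8 for ports on the second host controller
 * (same arithmetic as mv_eh_freeze()/mv_eh_thaw()). */
static uint32_t port_irq_mask(unsigned int port_no)
{
	unsigned int shift = port_no * 2;

	if (port_no > 3)	/* second HC: skip bit 8 */
		shift++;
	return 0x3u << shift;
}

int main(void)
{
	unsigned int p;

	/* freeze clears these bits in the mask register; thaw sets them
	 * back after clearing the port's stale cause bits */
	for (p = 0; p < 8; p++)
		printf("port %u: mask 0x%05x\n", p, port_irq_mask(p));
	return 0;
}
```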
2089 | /** | 2337 | /** |
@@ -2147,7 +2395,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2147 | switch(board_idx) { | 2395 | switch(board_idx) { |
2148 | case chip_5080: | 2396 | case chip_5080: |
2149 | hpriv->ops = &mv5xxx_ops; | 2397 | hpriv->ops = &mv5xxx_ops; |
2150 | hp_flags |= MV_HP_50XX; | 2398 | hp_flags |= MV_HP_GEN_I; |
2151 | 2399 | ||
2152 | switch (rev_id) { | 2400 | switch (rev_id) { |
2153 | case 0x1: | 2401 | case 0x1: |
@@ -2167,7 +2415,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2167 | case chip_504x: | 2415 | case chip_504x: |
2168 | case chip_508x: | 2416 | case chip_508x: |
2169 | hpriv->ops = &mv5xxx_ops; | 2417 | hpriv->ops = &mv5xxx_ops; |
2170 | hp_flags |= MV_HP_50XX; | 2418 | hp_flags |= MV_HP_GEN_I; |
2171 | 2419 | ||
2172 | switch (rev_id) { | 2420 | switch (rev_id) { |
2173 | case 0x0: | 2421 | case 0x0: |
@@ -2187,6 +2435,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2187 | case chip_604x: | 2435 | case chip_604x: |
2188 | case chip_608x: | 2436 | case chip_608x: |
2189 | hpriv->ops = &mv6xxx_ops; | 2437 | hpriv->ops = &mv6xxx_ops; |
2438 | hp_flags |= MV_HP_GEN_II; | ||
2190 | 2439 | ||
2191 | switch (rev_id) { | 2440 | switch (rev_id) { |
2192 | case 0x7: | 2441 | case 0x7: |
@@ -2206,7 +2455,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 	case chip_7042:
 	case chip_6042:
 		hpriv->ops = &mv6xxx_ops;
-
 		hp_flags |= MV_HP_GEN_IIE;
 
 		switch (rev_id) {
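(Aside: the renames in this hunk replace the old MV_HP_50XX / IS_50XX / IS_60XX naming with per-chip-generation flags. A sketch of the scheme the renames converge on; the bit values and the hp_flags field name here are placeholders, since the actual definitions fall outside this excerpt:)

/* Illustrative only -- bit positions are guesses, not sata_mv's. */
enum {
	MV_HP_GEN_I	= (1 << 6),	/* 50xx */
	MV_HP_GEN_II	= (1 << 7),	/* 60xx */
	MV_HP_GEN_IIE	= (1 << 8),	/* 6042/7042 */
};

#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)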
@@ -2273,7 +2521,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	hpriv->ops->enable_leds(hpriv, mmio);
 
 	for (port = 0; port < host->n_ports; port++) {
-		if (IS_60XX(hpriv)) {
+		if (IS_GEN_II(hpriv)) {
 			void __iomem *port_mmio = mv_port_base(mmio, port);
 
 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
@@ -2308,7 +2556,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	/* and unmask interrupt generation for host regs */
 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
 
-	if (IS_50XX(hpriv))
+	if (IS_GEN_I(hpriv))
 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
 	else
 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
@@ -2426,8 +2674,9 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mv_print_info(host);
 
 	pci_set_master(pdev);
+	pci_set_mwi(pdev);
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
-				 &mv_sht);
+				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
 
 static int __init mv_init(void)
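(Aside: several MMIO writes in the hunks above go through writelfl() rather than plain writel(). A sketch of the helper as sata_mv conventionally defines it, assuming the usual read-back-to-flush idiom; treat this as illustrative, not the verbatim driver source:)

/* Write-with-flush: the dummy read-back forces the posted MMIO
 * write out to the device before the function returns. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void)readl(addr);	/* flush the posted write */
}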
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index b2656867c647..db81e3efa5ec 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1560,7 +1560,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	ppi[0] = &nv_port_info[type];
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2ad5872fe90c..d2fcb9a6bec2 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -45,7 +45,7 @@
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_promise"
-#define DRV_VERSION	"2.08"
+#define DRV_VERSION	"2.09"
 
 enum {
 	PDC_MAX_PORTS		= 4,
@@ -716,6 +716,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
 	unsigned int i, tmp;
 	unsigned int handled = 0;
 	void __iomem *mmio_base;
+	unsigned int hotplug_offset, ata_no;
+	u32 hotplug_status;
+	int is_sataii_tx4;
 
 	VPRINTK("ENTER\n");
 
@@ -726,10 +729,20 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
 
 	mmio_base = host->iomap[PDC_MMIO_BAR];
 
+	/* read and clear hotplug flags for all ports */
+	if (host->ports[0]->flags & PDC_FLAG_GEN_II)
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+	else
+		hotplug_offset = PDC_SATA_PLUG_CSR;
+	hotplug_status = readl(mmio_base + hotplug_offset);
+	if (hotplug_status & 0xff)
+		writel(hotplug_status | 0xff, mmio_base + hotplug_offset);
+	hotplug_status &= 0xff;	/* clear uninteresting bits */
+
 	/* reading should also clear interrupts */
 	mask = readl(mmio_base + PDC_INT_SEQMASK);
 
-	if (mask == 0xffffffff) {
+	if (mask == 0xffffffff && hotplug_status == 0) {
 		VPRINTK("QUICK EXIT 2\n");
 		return IRQ_NONE;
 	}
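(Aside: the hotplug block added above reads the plug/unplug status byte and writes the set bits back to acknowledge them, i.e. the write-1-to-clear idiom. A minimal sketch of just that idiom, with the register layout assumed from the patch itself, status latched in bits 0-7:)

/* Sketch of the write-1-to-clear acknowledge used above.  Layout
 * assumed from the patch: plug/unplug event bits live in bits 0-7
 * of the CSR; writing a 1 to a latched bit clears it. */
static u32 pdc_read_and_ack_hotplug(void __iomem *csr)
{
	u32 status = readl(csr);

	if (status & 0xff)
		writel(status | 0xff, csr);	/* ack all latched events */
	return status & 0xff;			/* keep only event bits */
}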
@@ -737,16 +750,34 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
 	spin_lock(&host->lock);
 
 	mask &= 0xffff;		/* only 16 tags possible */
-	if (!mask) {
+	if (mask == 0 && hotplug_status == 0) {
 		VPRINTK("QUICK EXIT 3\n");
 		goto done_irq;
 	}
 
 	writel(mask, mmio_base + PDC_INT_SEQMASK);
 
+	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
+
 	for (i = 0; i < host->n_ports; i++) {
 		VPRINTK("port %u\n", i);
 		ap = host->ports[i];
+
+		/* check for a plug or unplug event */
+		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+		tmp = hotplug_status & (0x11 << ata_no);
+		if (tmp && ap &&
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct ata_eh_info *ehi = &ap->eh_info;
+			ata_ehi_clear_desc(ehi);
+			ata_ehi_hotplugged(ehi);
+			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
+			ata_port_freeze(ap);
+			++handled;
+			continue;
+		}
+
+		/* check for a packet interrupt */
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
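(Aside: the 0x11 << ata_no test above selects two bits per port at once, since 0x11 sets bits 0 and 4. For ata_no == 2, 0x11 << 2 == 0x44, testing bits 2 and 6. Presumably, and this is an inference from the mask shape rather than anything stated in the patch, one nibble of the status byte latches plug events and the other unplug events. A worked restatement:)

/* Worked example of the per-port test above: bit ata_no and bit
 * (ata_no + 4) together cover one port's plug/unplug pair --
 * which nibble means which is an assumption, not patch-stated. */
static int pdc_port_has_hotplug_event(u32 hotplug_status,
				      unsigned int ata_no)
{
	return (hotplug_status & (0x11 << ata_no)) != 0;
}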
@@ -902,9 +933,9 @@ static void pdc_host_init(struct ata_host *host)
 	tmp = readl(mmio + hotplug_offset);
 	writel(tmp | 0xff, mmio + hotplug_offset);
 
-	/* mask plug/unplug ints */
+	/* unmask plug/unplug ints */
 	tmp = readl(mmio + hotplug_offset);
-	writel(tmp | 0xff0000, mmio + hotplug_offset);
+	writel(tmp & ~0xff0000, mmio + hotplug_offset);
 
 	/* don't initialise TBG or SLEW on 2nd generation chips */
 	if (is_gen2)
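(Aside: the fix above flips `tmp | 0xff0000` to `tmp & ~0xff0000`, which together with the corrected comment implies bits 16-23 of the plug CSR are interrupt mask bits where 1 = masked; the old code was masking the very interrupts hotplug support needs. A sketch of the corrected init sequence under that layout assumption:)

/* Plug CSR layout implied by pdc_host_init() above: bits 0-7
 * latch events (write 1 to clear), bits 16-23 mask the matching
 * interrupts (1 = masked), so unmasking clears the high byte. */
static void pdc_unmask_hotplug(void __iomem *csr)
{
	u32 tmp;

	tmp = readl(csr);
	writel(tmp | 0xff, csr);	/* clear stale events */

	tmp = readl(csr);
	writel(tmp & ~0xff0000, csr);	/* 0 in bits 16-23 = unmasked */
}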
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fd80bcf1b236..33716b00c6b7 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -334,7 +334,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}
 
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index aca71819f6e8..b52f83ab056a 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -213,7 +213,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->private_data = hpriv;
 
 	/* the first two ports are standard SFF */
-	rc = ata_pci_init_native_host(host);
+	rc = ata_pci_init_sff_host(host);
 	if (rc)
 		return rc;
 
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index a4c0832033d8..c4124475f754 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -412,7 +412,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 	struct ata_host *host;
 	int rc;
 
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	*r_host = host;
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 407dc7e098bc..b5a20162af32 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -164,6 +164,8 @@ enum {
 	ATA_CMD_SET_MAX		= 0xF9,
 	ATA_CMD_SET_MAX_EXT	= 0x37,
 	ATA_CMD_READ_LOG_EXT	= 0x2f,
+	ATA_CMD_PMP_READ	= 0xE4,
+	ATA_CMD_PMP_WRITE	= 0xE8,
 
 	/* READ_LOG_EXT pages */
 	ATA_LOG_SATA_NCQ	= 0x10,
@@ -212,6 +214,28 @@ enum {
 					      0=to device, 1=to host */
 	ATAPI_CDB_LEN	= 16,
 
+	/* PMP stuff */
+	SATA_PMP_MAX_PORTS	= 15,
+	SATA_PMP_CTRL_PORT	= 15,
+
+	SATA_PMP_GSCR_DWORDS	= 128,
+	SATA_PMP_GSCR_PROD_ID	= 0,
+	SATA_PMP_GSCR_REV	= 1,
+	SATA_PMP_GSCR_PORT_INFO	= 2,
+	SATA_PMP_GSCR_ERROR	= 32,
+	SATA_PMP_GSCR_ERROR_EN	= 33,
+	SATA_PMP_GSCR_FEAT	= 64,
+	SATA_PMP_GSCR_FEAT_EN	= 96,
+
+	SATA_PMP_PSCR_STATUS	= 0,
+	SATA_PMP_PSCR_ERROR	= 1,
+	SATA_PMP_PSCR_CONTROL	= 2,
+
+	SATA_PMP_FEAT_BIST	= (1 << 0),
+	SATA_PMP_FEAT_PMREQ	= (1 << 1),
+	SATA_PMP_FEAT_DYNSSC	= (1 << 2),
+	SATA_PMP_FEAT_NOTIFY	= (1 << 3),
+
 	/* cable types */
 	ATA_CBL_NONE		= 0,
 	ATA_CBL_PATA40		= 1,
@@ -418,4 +442,9 @@ static inline int lba_48_ok(u64 block, u32 n_block)
 	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
 }
 
+#define sata_pmp_gscr_vendor(gscr)	((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff)
+#define sata_pmp_gscr_devid(gscr)	((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16)
+#define sata_pmp_gscr_rev(gscr)		(((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff)
+#define sata_pmp_gscr_ports(gscr)	((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf)
+
 #endif /* __LINUX_ATA_H__ */
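(Aside: the sata_pmp_gscr_* macros added above unpack the port multiplier's General Status and Control Registers: vendor and device ID share the PROD_ID dword, the revision sits in bits 8-15 of the REV dword, and the downstream port count in the low nibble of PORT_INFO. A standalone decode of a made-up GSCR dump, restating the same bit operations:)

#include <stdio.h>
#include <stdint.h>

/* Indices follow the SATA_PMP_GSCR_* constants added above. */
#define GSCR_PROD_ID	0
#define GSCR_REV	1
#define GSCR_PORT_INFO	2

int main(void)
{
	uint32_t gscr[128] = {0};

	gscr[GSCR_PROD_ID]   = 0x37261095;	/* made-up: devid 0x3726, vendor 0x1095 */
	gscr[GSCR_REV]       = 0x0100;		/* rev in bits 8-15 */
	gscr[GSCR_PORT_INFO] = 0x5;		/* 5 downstream ports */

	printf("vendor 0x%04x devid 0x%04x rev %u ports %u\n",
	       (unsigned)(gscr[GSCR_PROD_ID] & 0xffff),
	       (unsigned)(gscr[GSCR_PROD_ID] >> 16),
	       (unsigned)((gscr[GSCR_REV] >> 8) & 0xff),
	       (unsigned)(gscr[GSCR_PORT_INFO] & 0xf));
	return 0;
}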
diff --git a/include/linux/libata.h b/include/linux/libata.h
index a3df64677ac3..47cd2a1c5544 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -196,7 +196,6 @@ enum {
 	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
 	ATA_PFLAG_INITIALIZING	= (1 << 7), /* being initialized, don't touch */
 
-	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
 	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
 	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
 	ATA_PFLAG_GTM_VALID	= (1 << 19), /* acpi_gtm data valid */
@@ -435,6 +434,7 @@ struct ata_device {
 	struct ata_port		*ap;
 	unsigned int		devno;		/* 0 or 1 */
 	unsigned long		flags;		/* ATA_DFLAG_xxx */
+	unsigned int		horkage;	/* List of broken features */
 	struct scsi_device	*sdev;		/* attached SCSI device */
 #ifdef CONFIG_ATA_ACPI
 	acpi_handle		acpi_handle;
@@ -466,7 +466,6 @@ struct ata_device {
 	/* error history */
 	struct ata_ering	ering;
 	int			spdn_cnt;
-	unsigned int		horkage;	/* List of broken features */
 };
 
 /* Offset into struct ata_device.  Fields above it are maintained
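(Aside: the comment following the struct explains that fields above the clear offset are maintained while those below are wiped during error handling, so relocating `horkage` above that boundary keeps per-device quirks sticky across EH's device re-probe. A sketch of the typical consumer, simplified from how libata applies its NCQ blacklist rather than copied verbatim:)

/* Simplified consumer of the relocated field: quirk bits in
 * dev->horkage veto features per device.  ATA_HORKAGE_NONCQ is
 * the flag libata's NCQ blacklist sets. */
static int ata_dev_ncq_ok(const struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_NONCQ)
		return 0;	/* device blacklisted for NCQ */
	return 1;
}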
@@ -794,7 +793,6 @@ extern void ata_id_string(const u16 *id, unsigned char *s,
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
 			    unsigned int ofs, unsigned int len);
 extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown);
-extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
@@ -871,11 +869,11 @@ struct pci_bits {
 	unsigned long		val;
 };
 
-extern int ata_pci_init_native_host(struct ata_host *host);
+extern int ata_pci_init_sff_host(struct ata_host *host);
 extern int ata_pci_init_bmdma(struct ata_host *host);
-extern int ata_pci_prepare_native_host(struct pci_dev *pdev,
-				       const struct ata_port_info * const * ppi,
-				       struct ata_host **r_host);
+extern int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+				    const struct ata_port_info * const * ppi,
+				    struct ata_host **r_host);
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
 extern unsigned long ata_pci_default_filter(struct ata_device *, unsigned long);
 #endif /* CONFIG_PCI */
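(Aside: the prototypes above are the rename behind the sata_nv, sata_sis, and sata_via hunks earlier in this diff: "native" PCI hosts are now called SFF hosts. A sketch of a driver init path using the renamed helper, modelled on those call sites; the port_info contents and example_sht host template are hypothetical placeholders:)

/* Modelled on the nv_init_one()/sis_init_one() call sites in this
 * patch.  The info/example_sht definitions are illustrative only. */
static struct scsi_host_template example_sht;	/* hypothetical */

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	static const struct ata_port_info info = { /* driver-specific */ };
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* ... driver-specific register and BMDMA setup ... */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, &example_sht);
}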