author		Linus Torvalds <torvalds@linux-foundation.org>	2008-02-01 16:47:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-02-01 16:47:54 -0500
commit		cbb51afa6d69be003cc827a89e023906885f241e (patch)
tree		fc38ae723b6080f5073729fe5306ec47d81078fe
parent		dd5f5fed6c9458a7aa81eeef3732cc3a9891cfdf (diff)
parent		b95d58eaf20eb33c245a2172ec4ecf46bd832309 (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (24 commits)
  pci: allow multiple calls to pcim_enable_device()
  Blackfin pata-bf54x driver: fix compiling bug - no ata_port struct in struct ata_device any more
  Blackfin pata-bf54x driver: should cover all possible interrupt sources
  Blackfin pata-bf54x driver: Add debug information
  Blackfin pata-bf54x driver: Remove obsolete PM function
  pata_sl82c105: dual channel support
  ata_piix.c: make piix_merge_scr() static
  sata_nv: fix for completion handling
  sata_mv: Remove PCI dependency
  sata_mv ncq Comments and version bump
  sata_mv ncq Remove post internal cmd op
  sata_mv ncq Enable NCQ operation
  sata_mv ncq Introduce per-tag SG tables
  ata_piix: IDE mode SATA patch for Intel ICH10 DeviceID's
  ahci: RAID mode SATA patch for Intel ICH10 DeviceID's
  sata_mv ncq Use DMA memory pools for hardware memory tables
  sata_mv ncq Restrict max sectors to 8-bits on GenII NCQ
  sata_mv ncq Ignore response status LSB on NCQ
  sata_mv ncq Use hqtag instead of ioid
  sata_mv ncq Add want ncq parameter for EDMA configuration
  ...
-rw-r--r--	drivers/ata/Kconfig          |   2
-rw-r--r--	drivers/ata/ahci.c           |   2
-rw-r--r--	drivers/ata/ata_piix.c       |  10
-rw-r--r--	drivers/ata/pata_bf54x.c     |  53
-rw-r--r--	drivers/ata/pata_sl82c105.c  |  33
-rw-r--r--	drivers/ata/sata_mv.c        | 486
-rw-r--r--	drivers/ata/sata_nv.c        |  18
-rw-r--r--	drivers/pci/pci.c            |   3
8 files changed, 389 insertions(+), 218 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ae19c9b30d15..ba8f7f4dfa11 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -69,7 +69,7 @@ config ATA_PIIX
 
 config SATA_MV
 	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	depends on EXPERIMENTAL
 	help
 	  This option enables support for the Marvell Serial ATA family.
 	  Currently supports 88SX[56]0[48][01] chips.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6f089b899a1a..27c8d56111c2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -475,6 +475,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
+	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
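For readers cross-referencing this table with the long-form entries in ata_piix.c below: PCI_VDEVICE() is just shorthand. In kernels of this vintage it expands roughly as follows (as defined in include/linux/pci.h; worth re-checking against the exact tree):

#define PCI_VDEVICE(vendor, device)		\
	PCI_VENDOR_ID_##vendor, (device),	\
	PCI_ANY_ID, PCI_ANY_ID, 0, 0

So each new ICH10 line above supplies vendor 0x8086, the listed device ID, wildcard subvendor/subdevice, and zero class/class-mask values, equivalent to the explicit { 0x8086, 0x3a00, PCI_ANY_ID, ... } initializers used in the ata_piix table.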
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index a65c8ae5c461..47892e6f5ded 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -267,6 +267,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller IDE (Tolapai) */
 	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
 	{ }	/* terminate list */
 };
@@ -1068,7 +1076,7 @@ static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
 	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
 }
 
-u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
+static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
 {
 	u32 val = 0;
 	int i, mi;
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index a32e3c44a606..7f87f105c2f6 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -299,7 +299,7 @@ static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	 */
 	n6 = num_clocks_min(t6min, fsclk);
 	if (mode >= 0 && mode <= 4 && n6 >= 1) {
-		pr_debug("set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
+		dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
 		/* calculate the timing values for register transfers. */
 		while (mode > 0 && pio_fsclk[mode] > fsclk)
 			mode--;
@@ -376,7 +376,7 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 
 	mode = adev->dma_mode - XFER_UDMA_0;
 	if (mode >= 0 && mode <= 5) {
-		pr_debug("set udmamode: mode=%d\n", mode);
+		dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
 		/* the most restrictive timing value is t6 and tc,
 		 * the DIOW - data hold. If one SCLK pulse is longer
 		 * than this minimum value then register
@@ -433,7 +433,7 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 
 	mode = adev->dma_mode - XFER_MW_DMA_0;
 	if (mode >= 0 && mode <= 2) {
-		pr_debug("set mdmamode: mode=%d\n", mode);
+		dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
 		/* the most restrictive timing value is tf, the DMACK to
 		 * read data released. If one SCLK pulse is longer than
 		 * this maximum value then the MDMA mode
@@ -697,7 +697,7 @@ static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 		write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
 		write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
 		write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
-		pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X "
+		dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
 			 "0x%X 0x%X\n",
 			tf->hob_feature,
 			tf->hob_nsect,
@@ -711,7 +711,7 @@ static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 		write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
 		write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
 		write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
-		pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+		dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 			tf->feature,
 			tf->nsect,
 			tf->lbal,
@@ -721,7 +721,7 @@ static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 
 	if (tf->flags & ATA_TFLAG_DEVICE) {
 		write_atapi_register(base, ATA_REG_DEVICE, tf->device);
-		pr_debug("device 0x%X\n", tf->device);
+		dev_dbg(ap->dev, "device 0x%X\n", tf->device);
 	}
 
 	ata_wait_idle(ap);
@@ -782,7 +782,7 @@ static void bfin_exec_command(struct ata_port *ap,
 			      const struct ata_taskfile *tf)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-	pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+	dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 
 	write_atapi_register(base, ATA_REG_CMD, tf->command);
 	ata_pause(ap);
@@ -834,7 +834,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma setup\n");
+	dev_dbg(qc->ap->dev, "in atapi dma setup\n");
 	/* Program the ATA_CTRL register with dir */
 	if (qc->tf.flags & ATA_TFLAG_WRITE) {
 		/* fill the ATAPI DMA controller */
@@ -870,7 +870,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma start\n");
+	dev_dbg(qc->ap->dev, "in atapi dma start\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return;
 
@@ -888,7 +888,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 				sg_dma_address(sg) + sg_dma_len(sg));
 	}
 	enable_dma(CH_ATAPI_TX);
-	pr_debug("enable udma write\n");
+	dev_dbg(qc->ap->dev, "enable udma write\n");
 
 	/* Send ATA DMA write command */
 	bfin_exec_command(ap, &qc->tf);
@@ -898,7 +898,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 			| XFER_DIR));
 	} else {
 		enable_dma(CH_ATAPI_RX);
-		pr_debug("enable udma read\n");
+		dev_dbg(qc->ap->dev, "enable udma read\n");
 
 		/* Send ATA DMA read command */
 		bfin_exec_command(ap, &qc->tf);
@@ -936,7 +936,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma stop\n");
+	dev_dbg(qc->ap->dev, "in atapi dma stop\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return;
 
@@ -1147,15 +1147,15 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	unsigned short int_status = ATAPI_GET_INT_STATUS(base);
 
-	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) {
+	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
 		host_stat |= ATA_DMA_ACTIVE;
-	}
-	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) {
+	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
+		ATAPI_DEV_INT))
 		host_stat |= ATA_DMA_INTR;
-	}
-	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) {
-		host_stat |= ATA_DMA_ERR;
-	}
+	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
+		host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
+
+	dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
 
 	return host_stat;
 }
@@ -1213,8 +1213,7 @@ static void bfin_irq_clear(struct ata_port *ap)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 
-	pr_debug("in atapi irq clear\n");
-
+	dev_dbg(ap->dev, "in atapi irq clear\n");
 	ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
 		| MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
 		| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
@@ -1232,7 +1231,7 @@ static unsigned char bfin_irq_on(struct ata_port *ap)
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	u8 tmp;
 
-	pr_debug("in atapi irq on\n");
+	dev_dbg(ap->dev, "in atapi irq on\n");
 	ap->ctl &= ~ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
@@ -1255,7 +1254,7 @@ static void bfin_bmdma_freeze(struct ata_port *ap)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 
-	pr_debug("in atapi dma freeze\n");
+	dev_dbg(ap->dev, "in atapi dma freeze\n");
 	ap->ctl |= ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
@@ -1328,7 +1327,7 @@ static void bfin_error_handler(struct ata_port *ap)
 
 static void bfin_port_stop(struct ata_port *ap)
 {
-	pr_debug("in atapi port stop\n");
+	dev_dbg(ap->dev, "in atapi port stop\n");
 	if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
 		free_dma(CH_ATAPI_RX);
 		free_dma(CH_ATAPI_TX);
@@ -1337,7 +1336,7 @@ static void bfin_port_stop(struct ata_port *ap)
 
 static int bfin_port_start(struct ata_port *ap)
 {
-	pr_debug("in atapi port start\n");
+	dev_dbg(ap->dev, "in atapi port start\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return 0;
 
@@ -1373,10 +1372,6 @@ static struct scsi_host_template bfin_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-#ifdef CONFIG_PM
-	.resume			= ata_scsi_device_resume,
-	.suspend		= ata_scsi_device_suspend,
-#endif
 };
 
 static const struct ata_port_operations bfin_pata_ops = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 1388cef52c07..81ef207f8265 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
 
 enum {
 	/*
@@ -206,6 +206,34 @@ static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
 	sl82c105_set_piomode(ap, qc->dev);
 }
 
+/**
+ *	sl82c105_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	We must issue one command per host not per channel because
+ *	of the reset bug.
+ *
+ *	Q: is the scsi host lock sufficient ?
+ */
+
+static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
 static struct scsi_host_template sl82c105_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
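The host-wide gate in sl82c105_qc_defer() layers on top of ata_std_qc_defer(), which only serializes within a single link. For reference, the standard helper in libata of this period looks approximately like the sketch below (paraphrased from libata-core.c, not part of this patch; re-check the exact tree before relying on it):

int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	/* NCQ commands may coexist with other NCQ commands on the same
	 * link; everything else needs the link to be completely idle.
	 */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}
	return ATA_DEFER_LINK;
}

The driver then adds the stricter cross-channel rule: defer with ATA_DEFER_PORT whenever the sibling port has any command active.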
@@ -245,6 +273,7 @@ static struct ata_port_operations sl82c105_port_ops = {
 	.bmdma_stop	= sl82c105_bmdma_stop,
 	.bmdma_status 	= ata_bmdma_status,
 
+	.qc_defer	= sl82c105_qc_defer,
 	.qc_prep 	= ata_qc_prep,
 	.qc_issue	= ata_qc_issue_prot,
 
@@ -312,7 +341,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
 	};
 	/* for now use only the first port */
 	const struct ata_port_info *ppi[] = { &info_early,
-					       &ata_dummy_port_info };
+					       NULL };
 	u32 val;
 	int rev;
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7e72463a90eb..3c1b5c9027db 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,7 +29,13 @@
   I distinctly remember a couple workarounds (one related to PCI-X)
   are still needed.
 
-  4) Add NCQ support (easy to intermediate, once new-EH support appears)
+  2) Improve/fix IRQ and error handling sequences.
+
+  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
+
+  4) Think about TCQ support here, and for libata in general
+  with controllers that support it via host-queuing hardware
+  (a software-only implementation could be a nightmare).
 
   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
@@ -53,8 +59,6 @@
   Target mode, for those without docs, is the ability to directly
   connect two SATA controllers.
 
-  13) Verify that 7042 is fully supported.  I only have a 6042.
-
 */
 
 
@@ -73,7 +77,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.01"
+#define DRV_VERSION	"1.20"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -107,14 +111,12 @@ enum {
 
 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
 	 * CRPB needs alignment on a 256B boundary. Size == 256B
-	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 	 */
 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
-	MV_MAX_SG_CT		= 176,
+	MV_MAX_SG_CT		= 256,
 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
-	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
 
 	MV_PORTS_PER_HC		= 4,
 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
@@ -125,6 +127,9 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
+	/* SoC integrated controllers, no PCI interface */
+	MV_FLAG_SOC		= (1 << 28),
+
 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
 				  ATA_FLAG_PIO_POLLING,
@@ -170,7 +175,7 @@ enum {
 
 	PCIE_IRQ_CAUSE_OFS	= 0x1900,
 	PCIE_IRQ_MASK_OFS	= 0x1910,
-	PCIE_UNMASK_ALL_IRQS	= 0x70a,	/* assorted bits */
+	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
 
 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
@@ -210,6 +215,7 @@ enum {
 	/* SATA registers */
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
+	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
 	PHY_MODE3		= 0x310,
 	PHY_MODE4		= 0x314,
 	PHY_MODE2		= 0x330,
@@ -222,11 +228,11 @@ enum {
 
 	/* Port registers */
 	EDMA_CFG_OFS		= 0,
-	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
-	EDMA_CFG_NCQ		= (1 << 5),
+	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
+	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -244,14 +250,33 @@ enum {
 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
+
 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
-	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
+	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
+	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
+	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
+	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
+
 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
+
 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
+	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
+	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
+	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
+	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
+	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
+
 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
+
 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
 	EDMA_ERR_OVERRUN_5	= (1 << 5),
 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
+
+	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
+				  EDMA_ERR_LNK_CTRL_RX_1 |
+				  EDMA_ERR_LNK_CTRL_RX_3 |
+				  EDMA_ERR_LNK_CTRL_TX,
+
 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
 				  EDMA_ERR_PRD_PAR |
 				  EDMA_ERR_DEV_DCON |
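As a quick sanity check of the new grouping (plain userspace arithmetic, restating the bit positions from the enum above), the transient set resolves to 0x03e16000:

#include <stdio.h>

int main(void)
{
	/* EDMA_ERR_LNK_CTRL_RX_0 | _RX_1 | _RX_3 plus the whole
	 * 5-bit EDMA_ERR_LNK_CTRL_TX field
	 */
	unsigned int transient = (1u << 13) | (1u << 14) | (1u << 16)
			       | (0x1fu << 21);
	printf("EDMA_ERR_IRQ_TRANSIENT = 0x%08x\n", transient);
	return 0;	/* prints 0x03e16000 */
}

This is the value that mv_port_init() later masks out when it writes ~EDMA_ERR_IRQ_TRANSIENT to EDMA_ERR_IRQ_MASK_OFS (see the hunk near the end of this diff).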
@@ -311,12 +336,14 @@ enum {
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
+	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
 
 enum {
 	/* DMA boundary 0xffff is required by the s/g splitting
@@ -379,8 +406,8 @@ struct mv_port_priv {
 	dma_addr_t		crqb_dma;
 	struct mv_crpb		*crpb;
 	dma_addr_t		crpb_dma;
-	struct mv_sg		*sg_tbl;
-	dma_addr_t		sg_tbl_dma;
+	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
+	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
 
 	unsigned int		req_idx;
 	unsigned int		resp_idx;
@@ -400,6 +427,14 @@ struct mv_host_priv {
 	u32			irq_cause_ofs;
 	u32			irq_mask_ofs;
 	u32			unmask_all_irqs;
+	/*
+	 * These consistent DMA memory pools give us guaranteed
+	 * alignment for hardware-accessed data structures,
+	 * and less memory waste in accomplishing the alignment.
+	 */
+	struct dma_pool		*crqb_pool;
+	struct dma_pool		*crpb_pool;
+	struct dma_pool		*sg_tbl_pool;
 };
 
 struct mv_hw_ops {
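The matching pool creation happens later in the patch (not shown in this excerpt); a sketch of the dmam_pool_create() calls involved, assuming the MV_CRQB_Q_SZ/MV_CRPB_Q_SZ/MV_SG_TBL_SZ constants from the enum above and using each element size as its own alignment:

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	/* size == align here, so every CRQB/CRPB/ePRD table comes back
	 * naturally aligned for the EDMA engine
	 */
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;
	return 0;
}

Using the devres-managed variant means the pools are torn down automatically with the host, matching the dma_pool_alloc()/dma_pool_free() pairs in mv_port_start() and mv_port_free_dma_mem() below.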
@@ -411,7 +446,7 @@ struct mv_hw_ops {
 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
 			unsigned int n_hc);
 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
-	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
+	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 };
 
 static void mv_irq_clear(struct ata_port *ap);
@@ -425,10 +460,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static void mv_error_handler(struct ata_port *ap);
-static void mv_post_int_cmd(struct ata_queued_cmd *qc);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void mv6_dev_config(struct ata_device *dev);
 
 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 			   unsigned int port);
@@ -438,7 +472,7 @@ static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 			unsigned int n_hc);
 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 
 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 			   unsigned int port);
@@ -448,10 +482,17 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 			unsigned int n_hc);
 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio, int want_ncq);
+static int __mv_stop_dma(struct ata_port *ap);
 
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
+ */
 static struct scsi_host_template mv5_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -475,7 +516,8 @@ static struct scsi_host_template mv6_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= MV_MAX_Q_DEPTH - 1,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -505,7 +547,6 @@ static const struct ata_port_operations mv5_ops = {
 	.irq_on			= ata_irq_on,
 
 	.error_handler		= mv_error_handler,
-	.post_internal_cmd	= mv_post_int_cmd,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
 
@@ -517,6 +558,7 @@ static const struct ata_port_operations mv5_ops = {
 };
 
 static const struct ata_port_operations mv6_ops = {
+	.dev_config             = mv6_dev_config,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -533,9 +575,9 @@ static const struct ata_port_operations mv6_ops = {
 	.irq_on			= ata_irq_on,
 
 	.error_handler		= mv_error_handler,
-	.post_internal_cmd	= mv_post_int_cmd,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.qc_defer		= ata_std_qc_defer,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -561,9 +603,9 @@ static const struct ata_port_operations mv_iie_ops = {
 	.irq_on			= ata_irq_on,
 
 	.error_handler		= mv_error_handler,
-	.post_internal_cmd	= mv_post_int_cmd,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.qc_defer		= ata_std_qc_defer,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -592,26 +634,29 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  MV_FLAG_DUAL_HC,
+				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
@@ -648,13 +693,6 @@ static const struct pci_device_id mv_pci_tbl[] = {
 	{ }			/* terminate list */
 };
 
-static struct pci_driver mv_pci_driver = {
-	.name			= DRV_NAME,
-	.id_table		= mv_pci_tbl,
-	.probe			= mv_init_one,
-	.remove			= ata_pci_remove_one,
-};
-
 static const struct mv_hw_ops mv5xxx_ops = {
 	.phy_errata		= mv5_phy_errata,
 	.enable_leds		= mv5_enable_leds,
@@ -674,45 +712,6 @@ static const struct mv_hw_ops mv6xxx_ops = {
 };
 
 /*
- * module options
- */
-static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
-
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
-	int rc;
-
-	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
-/*
  * Functions
  */
 
@@ -815,19 +814,46 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
  * LOCKING:
  *      Inherited from caller.
  */
-static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
-			 struct mv_port_priv *pp)
+static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
+			 struct mv_port_priv *pp, u8 protocol)
 {
+	int want_ncq = (protocol == ATA_PROT_NCQ);
+
+	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+		if (want_ncq != using_ncq)
+			__mv_stop_dma(ap);
+	}
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+		struct mv_host_priv *hpriv = ap->host->private_data;
+		int hard_port = mv_hardport_from_port(ap->port_no);
+		void __iomem *hc_mmio = mv_hc_base_from_port(
+				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
+		u32 hc_irq_cause, ipending;
+
 		/* clear EDMA event indicators, if any */
-		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
+		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+		/* clear EDMA interrupt indicator, if any */
+		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+		ipending = (DEV_IRQ << hard_port) |
+				(CRPB_DMA_DONE << hard_port);
+		if (hc_irq_cause & ipending) {
+			writelfl(hc_irq_cause & ~ipending,
+				 hc_mmio + HC_IRQ_CAUSE_OFS);
+		}
+
+		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+
+		/* clear FIS IRQ Cause */
+		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
 
-		mv_set_edma_ptrs(base, hpriv, pp);
+		mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
-		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
+		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
-	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
+	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
 }
 
 /**
@@ -1003,38 +1029,76 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 		return -EINVAL;
 }
 
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio)
+static void mv6_dev_config(struct ata_device *adev)
 {
-	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
+	/*
+	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
+	 * See mv_qc_prep() for more info.
+	 */
+	if (adev->flags & ATA_DFLAG_NCQ)
+		if (adev->max_sectors > ATA_MAX_SECTORS)
+			adev->max_sectors = ATA_MAX_SECTORS;
+}
+
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio, int want_ncq)
+{
+	u32 cfg;
 
 	/* set up non-NCQ EDMA configuration */
-	cfg &= ~(1 << 9);	/* disable eQue */
+	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
 
-	if (IS_GEN_I(hpriv)) {
-		cfg &= ~0x1f;		/* clear queue depth */
+	if (IS_GEN_I(hpriv))
 		cfg |= (1 << 8);	/* enab config burst size mask */
-	}
 
-	else if (IS_GEN_II(hpriv)) {
-		cfg &= ~0x1f;		/* clear queue depth */
+	else if (IS_GEN_II(hpriv))
 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
-		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
-	}
 
 	else if (IS_GEN_IIE(hpriv)) {
 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
-		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
 		cfg |= (1 << 18);	/* enab early completion */
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
-		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
-		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
 	}
 
+	if (want_ncq) {
+		cfg |= EDMA_CFG_NCQ;
+		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
+	} else
+		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 }
 
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
+	int tag;
+
+	if (pp->crqb) {
+		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+		pp->crqb = NULL;
+	}
+	if (pp->crpb) {
+		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+		pp->crpb = NULL;
+	}
+	/*
+	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+	 * For later hardware, we have one unique sg_tbl per NCQ tag.
+	 */
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (pp->sg_tbl[tag]) {
+			if (tag == 0 || !IS_GEN_I(hpriv))
+				dma_pool_free(hpriv->sg_tbl_pool,
+					      pp->sg_tbl[tag],
+					      pp->sg_tbl_dma[tag]);
+			pp->sg_tbl[tag] = NULL;
+		}
+	}
+}
+
 /**
  * mv_port_start - Port specific init/start routine.
  * @ap: ATA channel to manipulate
@@ -1051,51 +1115,47 @@ static int mv_port_start(struct ata_port *ap)
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
-	void *mem;
-	dma_addr_t mem_dma;
 	unsigned long flags;
-	int rc;
+	int tag, rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
-
-	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
-				  GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+	ap->private_data = pp;
 
 	rc = ata_pad_alloc(ap, dev);
 	if (rc)
 		return rc;
 
-	/* First item in chunk of DMA memory:
-	 * 32-slot command request table (CRQB), 32 bytes each in size
-	 */
-	pp->crqb = mem;
-	pp->crqb_dma = mem_dma;
-	mem += MV_CRQB_Q_SZ;
-	mem_dma += MV_CRQB_Q_SZ;
+	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+	if (!pp->crqb)
+		return -ENOMEM;
+	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
 
-	/* Second item:
-	 * 32-slot command response table (CRPB), 8 bytes each in size
-	 */
-	pp->crpb = mem;
-	pp->crpb_dma = mem_dma;
-	mem += MV_CRPB_Q_SZ;
-	mem_dma += MV_CRPB_Q_SZ;
+	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+	if (!pp->crpb)
+		goto out_port_free_dma_mem;
+	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
-	/* Third item:
-	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
+	/*
+	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+	 * For later hardware, we need one unique sg_tbl per NCQ tag.
 	 */
-	pp->sg_tbl = mem;
-	pp->sg_tbl_dma = mem_dma;
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (tag == 0 || !IS_GEN_I(hpriv)) {
+			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+			if (!pp->sg_tbl[tag])
+				goto out_port_free_dma_mem;
+		} else {
+			pp->sg_tbl[tag] = pp->sg_tbl[0];
+			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+		}
+	}
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 
-	mv_edma_cfg(ap, hpriv, port_mmio);
-
+	mv_edma_cfg(pp, hpriv, port_mmio, 0);
 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
 	spin_unlock_irqrestore(&ap->host->lock, flags);
@@ -1104,8 +1164,11 @@ static int mv_port_start(struct ata_port *ap)
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
 	 * to shadow regs.
 	 */
-	ap->private_data = pp;
 	return 0;
+
+out_port_free_dma_mem:
+	mv_port_free_dma_mem(ap);
+	return -ENOMEM;
 }
 
 /**
@@ -1120,6 +1183,7 @@ static int mv_port_start(struct ata_port *ap)
 static void mv_port_stop(struct ata_port *ap)
 {
 	mv_stop_dma(ap);
+	mv_port_free_dma_mem(ap);
 }
 
 /**
@@ -1138,7 +1202,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 	struct mv_sg *mv_sg, *last_sg = NULL;
 	unsigned int si;
 
-	mv_sg = pp->sg_tbl;
+	mv_sg = pp->sg_tbl[qc->tag];
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -1194,7 +1258,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 flags = 0;
 	unsigned in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
 	/* Fill in command request block
@@ -1203,15 +1268,14 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		flags |= CRQB_FLAG_READ;
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
-	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/
 
 	/* get current queue index from software */
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	pp->crqb[in_index].sg_addr =
-		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
 	pp->crqb[in_index].sg_addr_hi =
-		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
 	cw = &pp->crqb[in_index].ata_cmd[0];
@@ -1231,13 +1295,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	case ATA_CMD_WRITE_FUA_EXT:
 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
 		break;
-#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
 	case ATA_CMD_FPDMA_READ:
 	case ATA_CMD_FPDMA_WRITE:
 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
 		break;
-#endif				/* FIXME: remove this line when NCQ added */
 	default:
 		/* The only other commands EDMA supports in non-queued and
 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
@@ -1286,7 +1348,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	unsigned in_index;
 	u32 flags = 0;
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
 	/* Fill in Gen IIE command request block
1292 /* Fill in Gen IIE command request block 1355 /* Fill in Gen IIE command request block
@@ -1296,15 +1359,14 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1296 1359
1297 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1360 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1298 flags |= qc->tag << CRQB_TAG_SHIFT; 1361 flags |= qc->tag << CRQB_TAG_SHIFT;
1299 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- 1362 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1300 what we use as our tag */
1301 1363
1302 /* get current queue index from software */ 1364 /* get current queue index from software */
1303 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1365 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1304 1366
1305 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1367 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1306 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1368 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1307 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1369 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1308 crqb->flags = cpu_to_le32(flags); 1370 crqb->flags = cpu_to_le32(flags);
1309 1371
1310 tf = &qc->tf; 1372 tf = &qc->tf;
@@ -1351,10 +1413,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
-	struct mv_host_priv *hpriv = ap->host->private_data;
 	u32 in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA) {
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ)) {
 		/* We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
@@ -1363,13 +1425,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		return ata_qc_issue_prot(qc);
 	}
 
-	mv_start_dma(port_mmio, hpriv, pp);
-
-	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
-
-	/* until we do queuing, the queue should be empty at this point */
-	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
-		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
 	pp->req_idx++;
 
@@ -1437,6 +1493,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		ata_ehi_hotplugged(ehi);
 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
 			"dev disconnect" : "dev connect");
+		action |= ATA_EH_HARDRESET;
 	}
 
 	if (IS_GEN_I(hpriv)) {
@@ -1465,7 +1522,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	}
 
 	/* Clear EDMA now that SERR cleanup done */
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	if (!err_mask) {
 		err_mask = AC_ERR_OTHER;
@@ -1538,23 +1595,17 @@ static void mv_intr_edma(struct ata_port *ap)
 		 * support for queueing.  this works transparently for
 		 * queued and non-queued modes.
 		 */
-		else if (IS_GEN_II(hpriv))
-			tag = (le16_to_cpu(pp->crpb[out_index].id)
-				>> CRPB_IOID_SHIFT_6) & 0x3f;
-
-		else /* IS_GEN_IIE */
-			tag = (le16_to_cpu(pp->crpb[out_index].id)
-				>> CRPB_IOID_SHIFT_7) & 0x3f;
+		else
+			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
 
 		qc = ata_qc_from_tag(ap, tag);
 
-		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
-		 * bits (WARNING: might not necessarily be associated
-		 * with this command), which -should- be clear
-		 * if all is well
+		/* For non-NCQ mode, the lower 8 bits of status
+		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
+		 * which should be zero if all went well.
 		 */
 		status = le16_to_cpu(pp->crpb[out_index].flags);
-		if (unlikely(status & 0xff)) {
+		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
 			mv_err_intr(ap, qc);
 			return;
 		}
@@ -1715,20 +1766,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct ata_host *host = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
-	u32 irq_stat;
+	u32 irq_stat, irq_mask;
 
+	spin_lock(&host->lock);
 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
+	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
 
 	/* check the cases where we either have nothing pending or have read
 	 * a bogus register value which can indicate HW removal or PCI fault
 	 */
-	if (!irq_stat || (0xffffffffU == irq_stat))
-		return IRQ_NONE;
+	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
+		goto out_unlock;
 
 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
-	spin_lock(&host->lock);
 
-	if (unlikely(irq_stat & PCI_ERR)) {
+	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
 		mv_pci_error(host, mmio);
 		handled = 1;
 		goto out_unlock;	/* skip all other HC irq handling */
@@ -1799,8 +1851,9 @@ static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 	return -EINVAL;
 }
 
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
 	int early_5080;
 
 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
@@ -1811,7 +1864,7 @@ static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
 	}
 
-	mv_reset_pci_bus(pdev, mmio);
+	mv_reset_pci_bus(host, mmio);
 }
 
 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
@@ -1935,9 +1988,8 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 
 #undef ZERO
 #define ZERO(reg) writel(0, mmio + (reg))
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
 {
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	struct mv_host_priv *hpriv = host->private_data;
 	u32 tmp;
 
@@ -2329,11 +2381,6 @@ static void mv_error_handler(struct ata_port *ap)
 			  mv_hardreset, mv_postreset);
 }
 
-static void mv_post_int_cmd(struct ata_queued_cmd *qc)
-{
-	mv_stop_dma(qc->ap);
-}
-
 static void mv_eh_freeze(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
@@ -2427,8 +2474,8 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	/* unmask all EDMA error interrupts */
-	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
+	/* unmask all non-transient EDMA error interrupts */
+	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
 
 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
 		readl(port_mmio + EDMA_CFG_OFS),
@@ -2586,7 +2633,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 {
 	int rc = 0, n_hc, port, hc;
-	struct pci_dev *pdev = to_pci_dev(host->dev);
 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	struct mv_host_priv *hpriv = host->private_data;
 
@@ -2607,7 +2653,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		goto done;
 
 	hpriv->ops->reset_flash(hpriv, mmio);
-	hpriv->ops->reset_bus(pdev, mmio);
+	hpriv->ops->reset_bus(host, mmio);
 	hpriv->ops->enable_leds(hpriv, mmio);
 
 	for (port = 0; port < host->n_ports; port++) {
@@ -2630,8 +2676,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 
 		mv_port_init(&ap->ioaddr, port_mmio);
 
+#ifdef CONFIG_PCI
 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+#endif
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
@@ -2668,6 +2716,55 @@ done:
 	return rc;
 }
 
+#ifdef CONFIG_PCI
+static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static struct pci_driver mv_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= mv_pci_tbl,
+	.probe			= mv_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+/*
+ * module options
+ */
+static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
+
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 /**
  *      mv_print_info - Dump key info to kernel log for perusal.
  *      @host: ATA host to print info about
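pci_go_64() above is the usual widest-mask-first DMA setup: try a 64-bit streaming mask, and if the matching consistent mask is unavailable fall back to a 32-bit consistent mask while keeping 64-bit streaming DMA; otherwise use 32 bits for both. A hedged sketch of the call order in a probe path (the wrapper function is invented; the driver's mv_init_one() is assumed to do roughly this):

/* Sketch: managed enable first, then pick the widest usable DMA mask. */
static int example_pci_setup(struct pci_dev *pdev)
{
	int rc = pcim_enable_device(pdev);

	if (rc)
		return rc;
	return pci_go_64(pdev);
}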
@@ -2710,6 +2807,26 @@ static void mv_print_info(struct ata_host *host)
 		scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+							     MV_CRQB_Q_SZ, 0);
+	if (!hpriv->crqb_pool)
+		return -ENOMEM;
+
+	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+							     MV_CRPB_Q_SZ, 0);
+	if (!hpriv->crpb_pool)
+		return -ENOMEM;
+
+	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+							     MV_SG_TBL_SZ, 0);
+	if (!hpriv->sg_tbl_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
 /**
  *      mv_init_one - handle a positive probe of a Marvell host
  *      @pdev: PCI device found
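Because these pools are created with the managed dmam_ variant, they are released automatically when the device goes away; only the per-tag buffers carved from them need explicit handling. A sketch of how a port might draw one scatter/gather table per NCQ tag from the sg_tbl pool (the sg_tbl/sg_tbl_dma field names on pp are assumptions here):

/* Sketch: one SG table per queue tag, all cut from the same pool. */
static int example_alloc_sg_tables(struct mv_host_priv *hpriv,
				   struct mv_port_priv *pp)
{
	int tag;

	for (tag = 0; tag < MV_MAX_Q_DEPTH; tag++) {
		pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
						 GFP_KERNEL,
						 &pp->sg_tbl_dma[tag]);
		if (!pp->sg_tbl[tag])
			return -ENOMEM;	/* caller unwinds earlier tags */
	}
	return 0;
}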
@@ -2755,6 +2872,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
+	rc = mv_create_dma_pools(hpriv, &pdev->dev);
+	if (rc)
+		return rc;
+
 	/* initialize adapter */
 	rc = mv_init_host(host, board_idx);
 	if (rc)
@@ -2772,15 +2893,22 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
+#endif
 
 static int __init mv_init(void)
 {
-	return pci_register_driver(&mv_pci_driver);
+	int rc = -ENODEV;
+#ifdef CONFIG_PCI
+	rc = pci_register_driver(&mv_pci_driver);
+#endif
+	return rc;
 }
 
 static void __exit mv_exit(void)
 {
+#ifdef CONFIG_PCI
 	pci_unregister_driver(&mv_pci_driver);
+#endif
 }
 
 MODULE_AUTHOR("Brett Russ");
@@ -2789,8 +2917,10 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+#ifdef CONFIG_PCI
 module_param(msi, int, 0444);
 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
+#endif
 
 module_init(mv_init);
 module_exit(mv_exit);
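With every PCI-only piece now fenced off, the core of sata_mv no longer presumes a PCI bus: reset_bus and friends go through hpriv->ops, and module init/exit degrade gracefully when CONFIG_PCI is off. A purely hypothetical sketch of the kind of non-PCI front end this refactor makes room for; none of these names exist in this commit:

#ifndef CONFIG_PCI
/* Hypothetical SoC glue: same core init, different bus plumbing. */
static int mv_soc_probe_sketch(struct platform_device *pdev)
{
	struct ata_host *host = mv_alloc_soc_host(pdev);	/* assumed helper */

	if (!host)
		return -ENOMEM;
	return mv_init_host(host, 0 /* board id, assumed */);
}
#endif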
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index a0f98fdab7a0..bfe92a43cf89 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1011,14 +1011,20 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 		}
 
 		if (status & (NV_ADMA_STAT_DONE |
-			      NV_ADMA_STAT_CPBERR)) {
-			u32 check_commands;
+			      NV_ADMA_STAT_CPBERR |
+			      NV_ADMA_STAT_CMD_COMPLETE)) {
+			u32 check_commands = notifier_clears[i];
 			int pos, error = 0;
 
-			if (ata_tag_valid(ap->link.active_tag))
-				check_commands = 1 << ap->link.active_tag;
-			else
-				check_commands = ap->link.sactive;
+			if (status & NV_ADMA_STAT_CPBERR) {
+				/* Check all active commands */
+				if (ata_tag_valid(ap->link.active_tag))
+					check_commands = 1 <<
+						ap->link.active_tag;
+				else
+					check_commands = ap->
+						link.sactive;
+			}
 
 			/** Check CPBs for completed commands */
 			while ((pos = ffs(check_commands)) && !error) {
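The fix seeds check_commands from the notifier bits and only widens it to every active command when a CPB error is reported; the loop that follows consumes the mask lowest set bit first via ffs(). A self-contained userspace illustration of that walk (tags invented):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int check_commands = (1u << 0) | (1u << 5) | (1u << 13);
	int pos;

	/* ffs() gives the 1-based index of the lowest set bit, 0 when empty. */
	while ((pos = ffs(check_commands))) {
		pos--;					/* back to a 0-based tag */
		printf("completing tag %d\n", pos);
		check_commands &= ~(1u << pos);		/* retire this tag */
	}
	return 0;
}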
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 71d561fda0a2..7d4ce906d207 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -823,7 +823,8 @@ int pcim_enable_device(struct pci_dev *pdev)
 	dr = get_pci_dr(pdev);
 	if (unlikely(!dr))
 		return -ENOMEM;
-	WARN_ON(!!dr->enabled);
+	if (dr->enabled)
+		return 0;
 
 	rc = pci_enable_device(pdev);
 	if (!rc) {
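The pcim_enable_device() change turns a repeat call from a warning into a successful no-op, which is what allows, say, a midlayer helper and the driver's own probe to both enable the same device. A sketch of the now-legal pattern; example_probe is illustrative only:

/* Sketch: the second call returns 0 immediately instead of warning. */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	int rc = pcim_enable_device(pdev);	/* really enables the device */

	if (rc)
		return rc;
	return pcim_enable_device(pdev);	/* idempotent no-op now */
}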