author     Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
committer  Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
commit     bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree       5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /drivers/ata
parent     4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent     5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig              |    9
-rw-r--r--  drivers/ata/Makefile             |    1
-rw-r--r--  drivers/ata/ahci.c               |   72
-rw-r--r--  drivers/ata/ata_generic.c        |   51
-rw-r--r--  drivers/ata/ata_piix.c           |  393
-rw-r--r--  drivers/ata/libata-acpi.c        |  117
-rw-r--r--  drivers/ata/libata-core.c        |  809
-rw-r--r--  drivers/ata/libata-eh.c          |  299
-rw-r--r--  drivers/ata/libata-scsi.c        |   51
-rw-r--r--  drivers/ata/libata-sff.c         |  199
-rw-r--r--  drivers/ata/libata.h             |    6
-rw-r--r--  drivers/ata/pata_acpi.c          |   67
-rw-r--r--  drivers/ata/pata_ali.c           |    2
-rw-r--r--  drivers/ata/pata_amd.c           |  128
-rw-r--r--  drivers/ata/pata_bf54x.c         |   41
-rw-r--r--  drivers/ata/pata_cs5520.c        |    2
-rw-r--r--  drivers/ata/pata_hpt37x.c        |    7
-rw-r--r--  drivers/ata/pata_icside.c        |    3
-rw-r--r--  drivers/ata/pata_it821x.c        |   35
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c     |   26
-rw-r--r--  drivers/ata/pata_legacy.c        |  912
-rw-r--r--  drivers/ata/pata_mpc52xx.c       |    2
-rw-r--r--  drivers/ata/pata_ninja32.c       |  214
-rw-r--r--  drivers/ata/pata_pcmcia.c        |  101
-rw-r--r--  drivers/ata/pata_pdc2027x.c      |    2
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c  |    5
-rw-r--r--  drivers/ata/pata_qdi.c           |   30
-rw-r--r--  drivers/ata/pata_scc.c           |   30
-rw-r--r--  drivers/ata/pata_serverworks.c   |    9
-rw-r--r--  drivers/ata/pata_via.c           |    3
-rw-r--r--  drivers/ata/pata_winbond.c       |   30
-rw-r--r--  drivers/ata/pdc_adma.c           |    5
-rw-r--r--  drivers/ata/sata_fsl.c           |    5
-rw-r--r--  drivers/ata/sata_inic162x.c      |    2
-rw-r--r--  drivers/ata/sata_mv.c            |    3
-rw-r--r--  drivers/ata/sata_nv.c            |   25
-rw-r--r--  drivers/ata/sata_promise.c       |   98
-rw-r--r--  drivers/ata/sata_promise.h       |    2
-rw-r--r--  drivers/ata/sata_qstor.c         |   15
-rw-r--r--  drivers/ata/sata_sil.c           |   10
-rw-r--r--  drivers/ata/sata_sil24.c         |   30
-rw-r--r--  drivers/ata/sata_sx4.c           |   15
42 files changed, 2636 insertions(+), 1230 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 75939dd06295..ae19c9b30d15 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -459,6 +459,15 @@ config PATA_NETCELL
 
 	  If unsure, say N.
 
+config PATA_NINJA32
+	tristate "Ninja32/Delkin Cardbus ATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Ninja32, Delkin and
+	  possibly other brands of Cardbus ATA adapter
+
+	  If unsure, say N.
+
 config PATA_NS87410
 	tristate "Nat Semi NS87410 PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index ebcee64dd5e2..701651e37c89 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
 obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
 obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
 obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
+obj-$(CONFIG_PATA_NINJA32)	+= pata_ninja32.o
 obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
 obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
 obj-$(CONFIG_PATA_OPTI)	+= pata_opti.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 54f38c21dd95..6f089b899a1a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -198,18 +198,18 @@ enum {
 };
 
 struct ahci_cmd_hdr {
-	u32			opts;
-	u32			status;
-	u32			tbl_addr;
-	u32			tbl_addr_hi;
-	u32			reserved[4];
+	__le32			opts;
+	__le32			status;
+	__le32			tbl_addr;
+	__le32			tbl_addr_hi;
+	__le32			reserved[4];
 };
 
 struct ahci_sg {
-	u32			addr;
-	u32			addr_hi;
-	u32			reserved;
-	u32			flags_size;
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			reserved;
+	__le32			flags_size;
 };
 
 struct ahci_host_priv {
@@ -597,6 +597,20 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
 	return __ahci_port_base(ap->host, ap->port_no);
 }
 
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+	u32 tmp;
+
+	/* turn on AHCI_EN */
+	tmp = readl(mmio + HOST_CTL);
+	if (!(tmp & HOST_AHCI_EN)) {
+		tmp |= HOST_AHCI_EN;
+		writel(tmp, mmio + HOST_CTL);
+		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
+		WARN_ON(!(tmp & HOST_AHCI_EN));
+	}
+}
+
 /**
  *	ahci_save_initial_config - Save and fixup initial config values
  *	@pdev: target PCI device
@@ -619,6 +633,9 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 	u32 cap, port_map;
 	int i;
 
+	/* make sure AHCI mode is enabled before accessing CAP */
+	ahci_enable_ahci(mmio);
+
 	/* Values prefixed with saved_ are written back to host after
 	 * reset.  Values without are used for driver operation.
 	 */
@@ -1036,19 +1053,17 @@ static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
 static int ahci_reset_controller(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 tmp;
 
 	/* we must be in AHCI mode, before using anything
 	 * AHCI-specific, such as HOST_RESET.
 	 */
-	tmp = readl(mmio + HOST_CTL);
-	if (!(tmp & HOST_AHCI_EN)) {
-		tmp |= HOST_AHCI_EN;
-		writel(tmp, mmio + HOST_CTL);
-	}
+	ahci_enable_ahci(mmio);
 
 	/* global controller reset */
+	tmp = readl(mmio + HOST_CTL);
 	if ((tmp & HOST_RESET) == 0) {
 		writel(tmp | HOST_RESET, mmio + HOST_CTL);
 		readl(mmio + HOST_CTL); /* flush */
@@ -1067,8 +1082,7 @@ static int ahci_reset_controller(struct ata_host *host)
 	}
 
 	/* turn on AHCI mode */
-	writel(HOST_AHCI_EN, mmio + HOST_CTL);
-	(void) readl(mmio + HOST_CTL);	/* flush */
+	ahci_enable_ahci(mmio);
 
 	/* some registers might be cleared on reset.  restore initial values */
 	ahci_restore_initial_config(host);
@@ -1078,8 +1092,10 @@ static int ahci_reset_controller(struct ata_host *host)
 
 		/* configure PCS */
 		pci_read_config_word(pdev, 0x92, &tmp16);
-		tmp16 |= 0xf;
-		pci_write_config_word(pdev, 0x92, tmp16);
+		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+			tmp16 |= hpriv->port_map;
+			pci_write_config_word(pdev, 0x92, tmp16);
+		}
 	}
 
 	return 0;
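
The PCS hunk above stops blindly setting all four port-enable bits (0xf) and instead enables only the ports that exist in hpriv->port_map, skipping the config-space write when nothing needs changing. A minimal standalone C sketch of that read-modify-write guard (illustration only, not part of the patch; names are invented):

#include <stdbool.h>
#include <stdint.h>

/* Return true when the PCS register still lacks some port-enable bits. */
static bool pcs_needs_update(uint16_t pcs, uint16_t port_map)
{
        return (pcs & port_map) != port_map;
}

/* Usage sketch: if (pcs_needs_update(pcs, map)) { pcs |= map; write back; } */
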
@@ -1480,35 +1496,31 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
 	struct scatterlist *sg;
-	struct ahci_sg *ahci_sg;
-	unsigned int n_sg = 0;
+	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+	unsigned int si;
 
 	VPRINTK("ENTER\n");
 
 	/*
 	 * Next, the S/G list.
 	 */
-	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
 
-		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
-		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
-
-		ahci_sg++;
-		n_sg++;
+		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
 	}
 
-	return n_sg;
+	return si;
 }
 
 static void ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
-	int is_atapi = is_atapi_taskfile(&qc->tf);
+	int is_atapi = ata_is_atapi(qc->tf.protocol);
 	void *cmd_tbl;
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
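
The ahci_fill_sg() rewrite above indexes the scatter/gather table with the for_each_sg() iterator instead of a moving pointer and returns the iterator count directly. A standalone userspace sketch of the address-split idiom it keeps (illustration only; the struct and function names here are invented, not the driver's):

#include <stdint.h>

struct example_sg_entry {
        uint32_t addr;          /* low 32 bits of the DMA address */
        uint32_t addr_hi;       /* high 32 bits of the DMA address */
        uint32_t reserved;
        uint32_t flags_size;    /* AHCI stores byte count minus one */
};

static void example_fill(struct example_sg_entry *e, uint64_t addr,
                         uint32_t len)
{
        e->addr = (uint32_t)(addr & 0xffffffff);
        /* two 16-bit shifts instead of ">> 32": stays well-defined even
           when the DMA address type is only 32 bits wide */
        e->addr_hi = (uint32_t)((addr >> 16) >> 16);
        e->flags_size = len - 1;
}
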
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 90329982bef7..20534202fc79 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.13"
+#define DRV_VERSION "0.2.15"
 
 /*
  * A generic parallel ATA driver using libata
@@ -48,27 +48,47 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
 	struct ata_port *ap = link->ap;
 	int dma_enabled = 0;
 	struct ata_device *dev;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	/* Bits 5 and 6 indicate if DMA is active on master/slave */
 	if (ap->ioaddr.bmdma_addr)
 		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 
+	if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
+		dma_enabled = 0xFF;
+
 	ata_link_for_each_dev(dev, link) {
-		if (ata_dev_enabled(dev)) {
-			/* We don't really care */
-			dev->pio_mode = XFER_PIO_0;
-			dev->dma_mode = XFER_MW_DMA_0;
-			/* We do need the right mode information for DMA or PIO
-			   and this comes from the current configuration flags */
-			if (dma_enabled & (1 << (5 + dev->devno))) {
-				ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
-				dev->flags &= ~ATA_DFLAG_PIO;
-			} else {
-				ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
-				dev->xfer_mode = XFER_PIO_0;
-				dev->xfer_shift = ATA_SHIFT_PIO;
-				dev->flags |= ATA_DFLAG_PIO;
+		if (!ata_dev_enabled(dev))
+			continue;
+
+		/* We don't really care */
+		dev->pio_mode = XFER_PIO_0;
+		dev->dma_mode = XFER_MW_DMA_0;
+		/* We do need the right mode information for DMA or PIO
+		   and this comes from the current configuration flags */
+		if (dma_enabled & (1 << (5 + dev->devno))) {
+			unsigned int xfer_mask = ata_id_xfermask(dev->id);
+			const char *name;
+
+			if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+				name = ata_mode_string(xfer_mask);
+			else {
+				/* SWDMA perhaps? */
+				name = "DMA";
+				xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
 			}
+
+			ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+				       name);
+
+			dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
+			dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
+			dev->flags &= ~ATA_DFLAG_PIO;
+		} else {
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
 		}
 	}
 	return 0;
@@ -185,6 +205,7 @@ static struct pci_device_id ata_generic[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
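
In generic_set_mode() above, bits 5 and 6 of the BMDMA status register report whether the firmware enabled DMA for the master and slave device respectively, and forcing dma_enabled to 0xFF for the Cenatek device makes every device take the DMA path regardless. A tiny sketch of the bit test (illustration only; the helper name is invented):

#include <stdbool.h>
#include <stdint.h>

/* devno is 0 for the master device, 1 for the slave */
static bool firmware_enabled_dma(uint8_t bmdma_status, unsigned int devno)
{
        return (bmdma_status & (1u << (5 + devno))) != 0;
}
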
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b406b39b878e..a65c8ae5c461 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -101,39 +101,21 @@ enum {
 	ICH5_PMR		= 0x90, /* port mapping register */
 	ICH5_PCS		= 0x92, /* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */
+	PIIX_SIDPR_BAR		= 5,
+	PIIX_SIDPR_LEN		= 16,
+	PIIX_SIDPR_IDX		= 0,
+	PIIX_SIDPR_DATA		= 4,
 
-	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
 	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
+	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
 
 	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
 	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
 
-	/* combined mode.  if set, PATA is channel 0.
-	 * if clear, PATA is channel 1.
-	 */
-	PIIX_PORT_ENABLED	= (1 << 0),
-	PIIX_PORT_PRESENT	= (1 << 4),
-
 	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
 
-	/* controller IDs */
-	piix_pata_mwdma		= 0,	/* PIIX3 MWDMA only */
-	piix_pata_33,		/* PIIX4 at 33Mhz */
-	ich_pata_33,		/* ICH up to UDMA 33 only */
-	ich_pata_66,		/* ICH up to 66 Mhz */
-	ich_pata_100,		/* ICH up to UDMA 100 */
-	ich5_sata,
-	ich6_sata,
-	ich6_sata_ahci,
-	ich6m_sata_ahci,
-	ich8_sata_ahci,
-	ich8_2port_sata,
-	ich8m_apple_sata_ahci,	/* locks up on second port enable */
-	tolapai_sata_ahci,
-	piix_pata_vmw,		/* PIIX4 for VMware, spurious DMA_ERR */
-
 	/* constants for mapping table */
 	P0			= 0,  /* port 0 */
 	P1			= 1,  /* port 1 */
@@ -149,6 +131,24 @@ enum {
 	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
 };
 
+enum piix_controller_ids {
+	/* controller IDs */
+	piix_pata_mwdma,	/* PIIX3 MWDMA only */
+	piix_pata_33,		/* PIIX4 at 33Mhz */
+	ich_pata_33,		/* ICH up to UDMA 33 only */
+	ich_pata_66,		/* ICH up to 66 Mhz */
+	ich_pata_100,		/* ICH up to UDMA 100 */
+	ich5_sata,
+	ich6_sata,
+	ich6_sata_ahci,
+	ich6m_sata_ahci,
+	ich8_sata_ahci,
+	ich8_2port_sata,
+	ich8m_apple_sata_ahci,	/* locks up on second port enable */
+	tolapai_sata_ahci,
+	piix_pata_vmw,		/* PIIX4 for VMware, spurious DMA_ERR */
+};
+
 struct piix_map_db {
 	const u32 mask;
 	const u16 port_enable;
@@ -157,6 +157,7 @@ struct piix_map_db {
 
 struct piix_host_priv {
 	const int *map;
+	void __iomem *sidpr;
 };
 
 static int piix_init_one(struct pci_dev *pdev,
@@ -167,6 +168,9 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static int ich_pata_cable_detect(struct ata_port *ap);
 static u8 piix_vmw_bmdma_status(struct ata_port *ap);
+static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
+static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
+static void piix_sidpr_error_handler(struct ata_port *ap);
 #ifdef CONFIG_PM
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -321,7 +325,6 @@ static const struct ata_port_operations piix_pata_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= ata_cable_40wire,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
 
@@ -353,7 +356,6 @@ static const struct ata_port_operations ich_pata_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= ich_pata_cable_detect,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
 
@@ -380,7 +382,6 @@ static const struct ata_port_operations piix_sata_ops = {
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
 
@@ -419,6 +420,35 @@ static const struct ata_port_operations piix_vmw_ops = {
 	.port_start		= ata_port_start,
 };
 
+static const struct ata_port_operations piix_sidpr_sata_ops = {
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.scr_read		= piix_sidpr_scr_read,
+	.scr_write		= piix_sidpr_scr_write,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_sidpr_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_port_start,
+};
+
 static const struct piix_map_db ich5_map_db = {
 	.mask = 0x7,
 	.port_enable = 0x3,
@@ -526,7 +556,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
 static struct ata_port_info piix_port_info[] = {
 	[piix_pata_mwdma] =	/* PIIX3 MWDMA only */
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
@@ -535,7 +564,6 @@ static struct ata_port_info piix_port_info[] = {
 
 	[piix_pata_33] =	/* PIIX4 at 33MHz */
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
@@ -545,7 +573,6 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich_pata_33] =		/* ICH0 - ICH at 33Mhz*/
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio 0-4 */
 		.mwdma_mask	= 0x06, /* Check: maybe 0x07 */
@@ -555,7 +582,6 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich_pata_66] =		/* ICH controllers up to 66MHz */
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio 0-4 */
 		.mwdma_mask	= 0x06, /* MWDMA0 is broken on chip */
@@ -565,7 +591,6 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich_pata_100] =
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 */
@@ -575,7 +600,6 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich5_sata] =
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -585,8 +609,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich6_sata] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -595,9 +618,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich6_sata_ahci] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -606,9 +627,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich6m_sata_ahci] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -617,9 +636,8 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich8_sata_ahci] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
+				  PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -628,9 +646,8 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich8_2port_sata] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
+				  PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -639,9 +656,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[tolapai_sata_ahci] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -650,9 +665,8 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich8m_apple_sata_ahci] =
 	{
-		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
+				  PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -1001,6 +1015,180 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	do_pata_set_dmamode(ap, adev, 1);
 }
 
+/*
+ * Serial ATA Index/Data Pair Superset Registers access
+ *
+ * Beginning from ICH8, there's a sane way to access SCRs using index
+ * and data register pair located at BAR5.  This creates an
+ * interesting problem of mapping two SCRs to one port.
+ *
+ * Although they have separate SCRs, the master and slave aren't
+ * independent enough to be treated as separate links - e.g. softreset
+ * resets both.  Also, there's no protocol defined for hard resetting
+ * singled device sharing the virtual port (no defined way to acquire
+ * device signature).  This is worked around by merging the SCR values
+ * into one sensible value and requesting follow-up SRST after
+ * hardreset.
+ *
+ * SCR merging is perfomed in nibbles which is the unit contents in
+ * SCRs are organized.  If two values are equal, the value is used.
+ * When they differ, merge table which lists precedence of possible
+ * values is consulted and the first match or the last entry when
+ * nothing matches is used.  When there's no merge table for the
+ * specific nibble, value from the first port is used.
+ */
+static const int piix_sidx_map[] = {
+	[SCR_STATUS]	= 0,
+	[SCR_ERROR]	= 2,
+	[SCR_CONTROL]	= 1,
+};
+
+static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct piix_host_priv *hpriv = ap->host->private_data;
+
+	iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg],
+		  hpriv->sidpr + PIIX_SIDPR_IDX);
+}
+
+static int piix_sidpr_read(struct ata_device *dev, unsigned int reg)
+{
+	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
+
+	piix_sidpr_sel(dev, reg);
+	return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+}
+
+static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
+{
+	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
+
+	piix_sidpr_sel(dev, reg);
+	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+}
+
+u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
+{
+	u32 val = 0;
+	int i, mi;
+
+	for (i = 0, mi = 0; i < 32 / 4; i++) {
+		u8 c0 = (val0 >> (i * 4)) & 0xf;
+		u8 c1 = (val1 >> (i * 4)) & 0xf;
+		u8 merged = c0;
+		const int *cur;
+
+		/* if no merge preference, assume the first value */
+		cur = merge_tbl[mi];
+		if (!cur)
+			goto done;
+		mi++;
+
+		/* if two values equal, use it */
+		if (c0 == c1)
+			goto done;
+
+		/* choose the first match or the last from the merge table */
+		while (*cur != -1) {
+			if (c0 == *cur || c1 == *cur)
+				break;
+			cur++;
+		}
+		if (*cur == -1)
+			cur--;
+		merged = *cur;
+	done:
+		val |= merged << (i * 4);
+	}
+
+	return val;
+}
+
+static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
+{
+	const int * const sstatus_merge_tbl[] = {
+		/* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
+		/* SPD */ (const int []){ 2, 1, 0, -1 },
+		/* IPM */ (const int []){ 6, 2, 1, 0, -1 },
+		NULL,
+	};
+	const int * const scontrol_merge_tbl[] = {
+		/* DET */ (const int []){ 1, 0, 4, 0, -1 },
+		/* SPD */ (const int []){ 0, 2, 1, 0, -1 },
+		/* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
+		NULL,
+	};
+	u32 v0, v1;
+
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) {
+		*val = piix_sidpr_read(&ap->link.device[0], reg);
+		return 0;
+	}
+
+	v0 = piix_sidpr_read(&ap->link.device[0], reg);
+	v1 = piix_sidpr_read(&ap->link.device[1], reg);
+
+	switch (reg) {
+	case SCR_STATUS:
+		*val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
+		break;
+	case SCR_ERROR:
+		*val = v0 | v1;
+		break;
+	case SCR_CONTROL:
+		*val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
+		break;
+	}
+
+	return 0;
+}
+
+static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val)
+{
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	piix_sidpr_write(&ap->link.device[0], reg, val);
+
+	if (ap->flags & ATA_FLAG_SLAVE_POSS)
+		piix_sidpr_write(&ap->link.device[1], reg, val);
+
+	return 0;
+}
+
+static int piix_sidpr_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	int rc;
+
+	/* do hardreset */
+	rc = sata_link_hardreset(link, timing, deadline);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR,
+				"COMRESET failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* TODO: phy layer with polling, timeouts, etc. */
+	if (ata_link_offline(link)) {
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
+	return -EAGAIN;
+}
+
+static void piix_sidpr_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+			   piix_sidpr_hardreset, ata_std_postreset);
+}
+
 #ifdef CONFIG_PM
 static int piix_broken_suspend(void)
 {
@@ -1034,6 +1222,13 @@ static int piix_broken_suspend(void)
 		},
 	},
 	{
+		.ident = "TECRA M6",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
+		},
+	},
+	{
 		.ident = "TECRA M7",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
@@ -1048,6 +1243,13 @@ static int piix_broken_suspend(void)
 		},
 	},
 	{
+		.ident = "Satellite R20",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
+		},
+	},
+	{
 		.ident = "Satellite R25",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
@@ -1253,10 +1455,10 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 	return no_piix_dma;
 }
 
-static void __devinit piix_init_pcs(struct pci_dev *pdev,
-				    struct ata_port_info *pinfo,
+static void __devinit piix_init_pcs(struct ata_host *host,
 				    const struct piix_map_db *map_db)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
 	u16 pcs, new_pcs;
 
 	pci_read_config_word(pdev, ICH5_PCS, &pcs);
@@ -1270,11 +1472,10 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
 	}
 }
 
-static void __devinit piix_init_sata_map(struct pci_dev *pdev,
-					 struct ata_port_info *pinfo,
-					 const struct piix_map_db *map_db)
+static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
+					struct ata_port_info *pinfo,
+					const struct piix_map_db *map_db)
 {
-	struct piix_host_priv *hpriv = pinfo[0].private_data;
 	const int *map;
 	int i, invalid_map = 0;
 	u8 map_value;
@@ -1298,7 +1499,6 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
 		case IDE:
 			WARN_ON((i & 1) || map[i + 1] != IDE);
 			pinfo[i / 2] = piix_port_info[ich_pata_100];
-			pinfo[i / 2].private_data = hpriv;
 			i++;
 			printk(" IDE IDE");
 			break;
@@ -1316,7 +1516,33 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
 		dev_printk(KERN_ERR, &pdev->dev,
 			   "invalid MAP value %u\n", map_value);
 
-	hpriv->map = map;
+	return map;
+}
+
+static void __devinit piix_init_sidpr(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct piix_host_priv *hpriv = host->private_data;
+	int i;
+
+	/* check for availability */
+	for (i = 0; i < 4; i++)
+		if (hpriv->map[i] == IDE)
+			return;
+
+	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
+		return;
+
+	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
+	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
+		return;
+
+	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
+		return;
+
+	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+	host->ports[0]->ops = &piix_sidpr_sata_ops;
+	host->ports[1]->ops = &piix_sidpr_sata_ops;
 }
 
 static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
@@ -1375,8 +1601,10 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct device *dev = &pdev->dev;
 	struct ata_port_info port_info[2];
 	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
-	struct piix_host_priv *hpriv;
 	unsigned long port_flags;
+	struct ata_host *host;
+	struct piix_host_priv *hpriv;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
@@ -1386,17 +1614,31 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!in_module_init)
 		return -ENODEV;
 
+	port_info[0] = piix_port_info[ent->driver_data];
+	port_info[1] = piix_port_info[ent->driver_data];
+
+	port_flags = port_info[0].flags;
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* SATA map init can change port_info, do it before prepping host */
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;
 
-	port_info[0] = piix_port_info[ent->driver_data];
-	port_info[1] = piix_port_info[ent->driver_data];
-	port_info[0].private_data = hpriv;
-	port_info[1].private_data = hpriv;
+	if (port_flags & ATA_FLAG_SATA)
+		hpriv->map = piix_init_sata_map(pdev, port_info,
+					piix_map_db_table[ent->driver_data]);
 
-	port_flags = port_info[0].flags;
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	host->private_data = hpriv;
 
+	/* initialize controller */
 	if (port_flags & PIIX_FLAG_AHCI) {
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
@@ -1407,12 +1649,9 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
-	/* Initialize SATA map */
 	if (port_flags & ATA_FLAG_SATA) {
-		piix_init_sata_map(pdev, port_info,
-				   piix_map_db_table[ent->driver_data]);
-		piix_init_pcs(pdev, port_info,
-			      piix_map_db_table[ent->driver_data]);
+		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
+		piix_init_sidpr(host);
 	}
 
 	/* apply IOCFG bit18 quirk */
@@ -1431,12 +1670,14 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		/* This writes into the master table but it does not
 		   really matter for this errata as we will apply it to
 		   all the PIIX devices on the board */
-		port_info[0].mwdma_mask = 0;
-		port_info[0].udma_mask = 0;
-		port_info[1].mwdma_mask = 0;
-		port_info[1].udma_mask = 0;
+		host->ports[0]->mwdma_mask = 0;
+		host->ports[0]->udma_mask = 0;
+		host->ports[1]->mwdma_mask = 0;
+		host->ports[1]->udma_mask = 0;
 	}
-	return ata_pci_init_one(pdev, ppi);
+
+	pci_set_master(pdev);
+	return ata_pci_activate_sff_host(host, ata_interrupt, &piix_sht);
 }
 
 static int __init piix_init(void)
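
The heart of the SIDPR support added above is the nibble-wise merge of the master and slave SCR values described in the block comment. A self-contained userspace sketch of the same algorithm (illustration only; the kernel version is piix_merge_scr() in the hunk above):

#include <stdint.h>

/* Merge two SCR values nibble by nibble.  Equal nibbles pass through;
 * differing nibbles are resolved by a -1 terminated precedence list
 * (first listed value matching either side wins; the final entry is
 * the fallback).  A NULL entry ends the per-nibble tables. */
static uint32_t merge_scr(uint32_t v0, uint32_t v1, const int *const *tbl)
{
        uint32_t val = 0;
        unsigned int i, mi = 0;

        for (i = 0; i < 8; i++) {
                uint8_t c0 = (v0 >> (i * 4)) & 0xf;
                uint8_t c1 = (v1 >> (i * 4)) & 0xf;
                uint8_t merged = c0;    /* default: first port's value */
                const int *cur = tbl[mi];

                if (cur) {
                        mi++;
                        if (c0 != c1) {
                                while (*cur != -1 && c0 != *cur && c1 != *cur)
                                        cur++;
                                if (*cur == -1)
                                        cur--;  /* nothing matched */
                                merged = (uint8_t)*cur;
                        }
                }
                val |= (uint32_t)merged << (i * 4);
        }
        return val;
}
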
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 7bf4befd96bc..9e8ec19260af 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -442,40 +442,77 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
 }
 
 /**
+ * ata_acpi_gtm_xfermode - determine xfermode from GTM parameter
+ * @dev: target device
+ * @gtm: GTM parameter to use
+ *
+ * Determine xfermask for @dev from @gtm.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Determined xfermask.
+ */
+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
+				    const struct ata_acpi_gtm *gtm)
+{
+	unsigned long xfer_mask = 0;
+	unsigned int type;
+	int unit;
+	u8 mode;
+
+	/* we always use the 0 slot for crap hardware */
+	unit = dev->devno;
+	if (!(gtm->flags & 0x10))
+		unit = 0;
+
+	/* PIO */
+	mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
+	xfer_mask |= ata_xfer_mode2mask(mode);
+
+	/* See if we have MWDMA or UDMA data. We don't bother with
+	 * MWDMA if UDMA is available as this means the BIOS set UDMA
+	 * and our error changedown if it works is UDMA to PIO anyway.
+	 */
+	if (!(gtm->flags & (1 << (2 * unit))))
+		type = ATA_SHIFT_MWDMA;
+	else
+		type = ATA_SHIFT_UDMA;
+
+	mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
+	xfer_mask |= ata_xfer_mode2mask(mode);
+
+	return xfer_mask;
+}
+EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
+
+/**
  * ata_acpi_cbl_80wire - Check for 80 wire cable
  * @ap: Port to check
+ * @gtm: GTM data to use
  *
- * Return 1 if the ACPI mode data for this port indicates the BIOS selected
- * an 80wire mode.
+ * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
  */
-
-int ata_acpi_cbl_80wire(struct ata_port *ap)
+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
 {
-	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
-	int valid = 0;
+	struct ata_device *dev;
 
-	if (!gtm)
-		return 0;
+	ata_link_for_each_dev(dev, &ap->link) {
+		unsigned long xfer_mask, udma_mask;
+
+		if (!ata_dev_enabled(dev))
+			continue;
+
+		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
+		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
+
+		if (udma_mask & ~ATA_UDMA_MASK_40C)
+			return 1;
+	}
 
-	/* Split timing, DMA enabled */
-	if ((gtm->flags & 0x11) == 0x11 && gtm->drive[0].dma < 55)
-		valid |= 1;
-	if ((gtm->flags & 0x14) == 0x14 && gtm->drive[1].dma < 55)
-		valid |= 2;
-	/* Shared timing, DMA enabled */
-	if ((gtm->flags & 0x11) == 0x01 && gtm->drive[0].dma < 55)
-		valid |= 1;
-	if ((gtm->flags & 0x14) == 0x04 && gtm->drive[0].dma < 55)
-		valid |= 2;
-
-	/* Drive check */
-	if ((valid & 1) && ata_dev_enabled(&ap->link.device[0]))
-		return 1;
-	if ((valid & 2) && ata_dev_enabled(&ap->link.device[1]))
-		return 1;
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
 
 static void ata_acpi_gtf_to_tf(struct ata_device *dev,
@@ -776,6 +813,36 @@ void ata_acpi_on_resume(struct ata_port *ap)
 }
 
 /**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function executes the _PS0/_PS3 ACPI method to set the power state.
+ * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+	struct ata_device *dev;
+
+	if (!ap->acpi_handle || (ap->flags & ATA_FLAG_ACPI_SATA))
+		return;
+
+	/* channel first and then drives for power on and vica versa
+	   for power off */
+	if (state.event == PM_EVENT_ON)
+		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0);
+
+	ata_link_for_each_dev(dev, &ap->link) {
+		if (dev->acpi_handle && ata_dev_enabled(dev))
+			acpi_bus_set_power(dev->acpi_handle,
+				state.event == PM_EVENT_ON ?
+					ACPI_STATE_D0 : ACPI_STATE_D3);
+	}
+	if (state.event != PM_EVENT_ON)
+		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D3);
+}
+
+/**
  * ata_acpi_on_devcfg - ATA ACPI hook called on device donfiguration
  * @dev: target ATA device
  *
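
The rewritten ata_acpi_cbl_80wire() above infers cable type from the timings the BIOS actually programmed: ATA_UDMA_MASK_40C covers the UDMA modes legal on a 40-wire cable, so any enabled mode outside that mask implies an 80-wire cable. A one-function sketch of that inference (illustration only; the mask value below is an assumption standing in for UDMA0-2):

#include <stdbool.h>

#define EXAMPLE_UDMA_MASK_40C 0x07      /* UDMA0 | UDMA1 | UDMA2 */

static bool udma_mask_implies_80wire(unsigned long udma_mask)
{
        return (udma_mask & ~EXAMPLE_UDMA_MASK_40C) != 0;
}
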
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6380726f7538..bdbd55af7022 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -119,6 +119,10 @@ int libata_noacpi = 0;
 module_param_named(noacpi, libata_noacpi, int, 0444);
 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
 
+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
@@ -450,9 +454,9 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
  * RETURNS:
  *	Packed xfer_mask.
  */
-static unsigned int ata_pack_xfermask(unsigned int pio_mask,
-				      unsigned int mwdma_mask,
-				      unsigned int udma_mask)
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+				unsigned long mwdma_mask,
+				unsigned long udma_mask)
 {
 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
@@ -469,10 +473,8 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
  * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
  * Any NULL distination masks will be ignored.
  */
-static void ata_unpack_xfermask(unsigned int xfer_mask,
-				unsigned int *pio_mask,
-				unsigned int *mwdma_mask,
-				unsigned int *udma_mask)
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
+			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 {
 	if (pio_mask)
 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
@@ -486,9 +488,9 @@ static const struct ata_xfer_ent {
 	int shift, bits;
 	u8 base;
 } ata_xfer_tbl[] = {
-	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
-	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
-	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
+	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 	{ -1, },
 };
 
@@ -503,9 +505,9 @@ static const struct ata_xfer_ent {
  *	None.
  *
  * RETURNS:
- *	Matching XFER_* value, 0 if no match found.
+ *	Matching XFER_* value, 0xff if no match found.
  */
-static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 {
 	int highbit = fls(xfer_mask) - 1;
 	const struct ata_xfer_ent *ent;
@@ -513,7 +515,7 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 			return ent->base + highbit - ent->shift;
-	return 0;
+	return 0xff;
 }
 
 /**
@@ -528,13 +530,14 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
  * RETURNS:
  *	Matching xfer_mask, 0 if no match found.
  */
-static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 {
 	const struct ata_xfer_ent *ent;
 
 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
-			return 1 << (ent->shift + xfer_mode - ent->base);
+			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+				& ~((1 << ent->shift) - 1);
 	return 0;
 }
 
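
The ata_xfer_mode2mask() change above is subtle: the old expression returned the bit for the requested mode alone, while the new one returns that mode plus every slower mode of the same transfer type. A worked example with assumed shift values (the real ATA_SHIFT_* constants may differ):

#include <assert.h>

int main(void)
{
        /* assume the UDMA bit field starts at bit 16 and mode 2 is asked for */
        unsigned int shift = 16, mode = 2;

        unsigned long old_mask = 1ul << (shift + mode);
        unsigned long new_mask = ((2ul << (shift + mode)) - 1)
                                 & ~((1ul << shift) - 1);

        assert(old_mask == 0x40000ul);  /* mode 2 only */
        assert(new_mask == 0x70000ul);  /* modes 0, 1 and 2 */
        return 0;
}
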
@@ -550,7 +553,7 @@ static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
  * RETURNS:
  *	Matching xfer_shift, -1 if no match found.
  */
-static int ata_xfer_mode2shift(unsigned int xfer_mode)
+int ata_xfer_mode2shift(unsigned long xfer_mode)
 {
 	const struct ata_xfer_ent *ent;
 
@@ -574,7 +577,7 @@ static int ata_xfer_mode2shift(unsigned int xfer_mode)
  *	Constant C string representing highest speed listed in
  *	@mode_mask, or the constant C string "<n/a>".
  */
-static const char *ata_mode_string(unsigned int xfer_mask)
+const char *ata_mode_string(unsigned long xfer_mask)
 {
 	static const char * const xfer_mode_str[] = {
 		"PIO0",
@@ -947,8 +950,8 @@ unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
 	if (r_err)
 		*r_err = err;
 
-	/* see if device passed diags: if master then continue and warn later */
-	if (err == 0 && dev->devno == 0)
+	/* see if device passed diags: continue and warn later */
+	if (err == 0)
 		/* diagnostic fail : do nothing _YET_ */
 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
 	else if (err == 1)
@@ -1286,48 +1289,6 @@ static int ata_hpa_resize(struct ata_device *dev)
 }
 
 /**
- *	ata_id_to_dma_mode	-	Identify DMA mode from id block
- *	@dev: device to identify
- *	@unknown: mode to assume if we cannot tell
- *
- *	Set up the timing values for the device based upon the identify
- *	reported values for the DMA mode. This function is used by drivers
- *	which rely upon firmware configured modes, but wish to report the
- *	mode correctly when possible.
- *
- *	In addition we emit similarly formatted messages to the default
- *	ata_dev_set_mode handler, in order to provide consistency of
- *	presentation.
- */
-
-void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
-{
-	unsigned int mask;
-	u8 mode;
-
-	/* Pack the DMA modes */
-	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
-	if (dev->id[53] & 0x04)
-		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
-
-	/* Select the mode in use */
-	mode = ata_xfer_mask2mode(mask);
-
-	if (mode != 0) {
-		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
-			       ata_mode_string(mask));
-	} else {
-		/* SWDMA perhaps ? */
-		mode = unknown;
-		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
-	}
-
-	/* Configure the device reporting */
-	dev->xfer_mode = mode;
-	dev->xfer_shift = ata_xfer_mode2shift(mode);
-}
-
-/**
  *	ata_noop_dev_select - Select device 0/1 on ATA bus
  *	@ap: ATA channel to manipulate
  *	@device: ATA device (numbered from zero) to select
@@ -1464,9 +1425,9 @@ static inline void ata_dump_id(const u16 *id)
  * RETURNS:
  *	Computed xfermask
  */
-static unsigned int ata_id_xfermask(const u16 *id)
+unsigned long ata_id_xfermask(const u16 *id)
 {
-	unsigned int pio_mask, mwdma_mask, udma_mask;
+	unsigned long pio_mask, mwdma_mask, udma_mask;
 
 	/* Usual case. Word 53 indicates word 64 is valid */
 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
@@ -1519,7 +1480,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
 }
 
 /**
- *	ata_port_queue_task - Queue port_task
+ *	ata_pio_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
  *	@fn: workqueue function to be scheduled
  *	@data: data for @fn to use
@@ -1531,16 +1492,15 @@ static unsigned int ata_id_xfermask(const u16 *id)
  *	one task is active at any given time.
  *
  *	libata core layer takes care of synchronization between
- *	port_task and EH.  ata_port_queue_task() may be ignored for EH
+ *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
  *	synchronization.
  *
  * LOCKING:
  *	Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
-			 unsigned long delay)
+static void ata_pio_queue_task(struct ata_port *ap, void *data,
+			       unsigned long delay)
 {
-	PREPARE_DELAYED_WORK(&ap->port_task, fn);
 	ap->port_task_data = data;
 
 	/* may fail if ata_port_flush_task() in progress */
@@ -2090,7 +2050,7 @@ int ata_dev_configure(struct ata_device *dev)
 	struct ata_eh_context *ehc = &dev->link->eh_context;
 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
 	const u16 *id = dev->id;
-	unsigned int xfer_mask;
+	unsigned long xfer_mask;
 	char revbuf[7];		/* XYZ-99\0 */
 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
 	char modelbuf[ATA_ID_PROD_LEN+1];
@@ -2161,8 +2121,14 @@ int ata_dev_configure(struct ata_device *dev)
 				       "supports DRM functions and may "
 				       "not be fully accessable.\n");
 			snprintf(revbuf, 7, "CFA");
-		} else
+		} else {
 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+			/* Warn the user if the device has TPM extensions */
+			if (ata_id_has_tpm(id))
+				ata_dev_printk(dev, KERN_WARNING,
+					       "supports DRM functions and may "
+					       "not be fully accessable.\n");
+		}
 
 		dev->n_sectors = ata_id_n_sectors(id);
 
@@ -2295,19 +2261,8 @@ int ata_dev_configure(struct ata_device *dev)
 			dev->flags |= ATA_DFLAG_DIPM;
 	}
 
-	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
-		/* Let the user know. We don't want to disallow opens for
-		   rescue purposes, or in case the vendor is just a blithering
-		   idiot */
-		if (print_info) {
-			ata_dev_printk(dev, KERN_WARNING,
-"Drive reports diagnostics failure. This may indicate a drive\n");
-			ata_dev_printk(dev, KERN_WARNING,
-"fault or invalid emulation. Contact drive vendor for information.\n");
-		}
-	}
-
-	/* limit bridge transfers to udma5, 200 sectors */
+	/* Limit PATA drive on SATA cable bridge transfers to udma5,
+	   200 sectors */
 	if (ata_dev_knobble(dev)) {
 		if (ata_msg_drv(ap) && print_info)
 			ata_dev_printk(dev, KERN_INFO,
@@ -2336,6 +2291,21 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ap->ops->dev_config)
 		ap->ops->dev_config(dev);
 
+	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+		/* Let the user know. We don't want to disallow opens for
+		   rescue purposes, or in case the vendor is just a blithering
+		   idiot. Do this after the dev_config call as some controllers
+		   with buggy firmware may want to avoid reporting false device
+		   bugs */
+
+		if (print_info) {
+			ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+			ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+		}
+	}
+
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
 			       __FUNCTION__, ata_chk_status(ap));
@@ -2387,6 +2357,18 @@ int ata_cable_unknown(struct ata_port *ap)
2387} 2357}
2388 2358
2389/** 2359/**
2360 * ata_cable_ignore - return ignored PATA cable.
2361 * @ap: port
2362 *
2363 * Helper method for drivers which don't use cable type to limit
2364 * transfer mode.
2365 */
2366int ata_cable_ignore(struct ata_port *ap)
2367{
2368 return ATA_CBL_PATA_IGN;
2369}
2370
2371/**
2390 * ata_cable_sata - return SATA cable type 2372 * ata_cable_sata - return SATA cable type
2391 * @ap: port 2373 * @ap: port
2392 * 2374 *
@@ -2781,38 +2763,33 @@ int sata_set_spd(struct ata_link *link)
2781 */ 2763 */
2782 2764
2783static const struct ata_timing ata_timing[] = { 2765static const struct ata_timing ata_timing[] = {
2766/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2767 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2768 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2769 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2770 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2771 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2772 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2773 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2784 2774
2785 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 2775 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2786 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 2776 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2787 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 2777 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2788 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2789 2778
2790 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, 2779 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2780 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2781 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2791 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, 2782 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2792 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 2783 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2793 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2794 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2795 2784
2796/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2785/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2797 2786 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2798 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 2787 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2799 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 2788 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2800 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 2789 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2801 2790 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2802 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 2791 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2803 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 2792 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2804 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2805
2806 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2807 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2808 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2809 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2810
2811 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2812 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2813 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2814
2815/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2816 2793
2817 { 0xFF } 2794 { 0xFF }
2818}; 2795};
@@ -2845,14 +2822,16 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2845 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2822 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2846} 2823}
2847 2824
2848static const struct ata_timing *ata_timing_find_mode(unsigned short speed) 2825const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2849{ 2826{
2850 const struct ata_timing *t; 2827 const struct ata_timing *t = ata_timing;
2828
2829 while (xfer_mode > t->mode)
2830 t++;
2851 2831
2852 for (t = ata_timing; t->mode != speed; t++) 2832 if (xfer_mode == t->mode)
2853 if (t->mode == 0xFF) 2833 return t;
2854 return NULL; 2834 return NULL;
2855 return t;
2856} 2835}
2857 2836
2858int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2837int ata_timing_compute(struct ata_device *adev, unsigned short speed,
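The table reorder above is what makes the rewritten lookup correct: ata_timing[] now ascends by XFER_* mode number, so the scan can stop at the first entry that is not below the requested mode. A usage sketch (setup_controller_clocks() is a hypothetical driver hook, not part of this patch):

	const struct ata_timing *t = ata_timing_find_mode(XFER_MW_DMA_2);

	if (!t)				/* mode absent from the table */
		return -EINVAL;
	setup_controller_clocks(t);	/* t->cycle is 120 ns per the table */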
@@ -2927,6 +2906,57 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2927} 2906}
2928 2907
2929/** 2908/**
2909 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2910 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2911 * @cycle: cycle duration in ns
2912 *
2913 * Return matching xfer mode for @cycle. The returned mode is of
2914 * the transfer type specified by @xfer_shift. If @cycle is too
2915 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
2916 * than the fastest known mode, the fastest mode is returned.
2917 *
2918 * LOCKING:
2919 * None.
2920 *
2921 * RETURNS:
2922 * Matching xfer_mode, 0xff if no match found.
2923 */
2924u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2925{
2926 u8 base_mode = 0xff, last_mode = 0xff;
2927 const struct ata_xfer_ent *ent;
2928 const struct ata_timing *t;
2929
2930 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2931 if (ent->shift == xfer_shift)
2932 base_mode = ent->base;
2933
2934 for (t = ata_timing_find_mode(base_mode);
2935 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
2936 unsigned short this_cycle;
2937
2938 switch (xfer_shift) {
2939 case ATA_SHIFT_PIO:
2940 case ATA_SHIFT_MWDMA:
2941 this_cycle = t->cycle;
2942 break;
2943 case ATA_SHIFT_UDMA:
2944 this_cycle = t->udma;
2945 break;
2946 default:
2947 return 0xff;
2948 }
2949
2950 if (cycle > this_cycle)
2951 break;
2952
2953 last_mode = t->mode;
2954 }
2955
2956 return last_mode;
2957}
2958
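A worked example of the new helper, traced through the reordered timing table above (the error handling is illustrative):

	/* fastest UDMA mode whose cycle is no shorter than 120 ns */
	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120);

	/* UDMA0 (120 ns) fits, UDMA1 (80 ns) is already too fast, so
	 * mode == XFER_UDMA_0; asking for 150 ns would fall off the
	 * slow end of the table and yield 0xff instead */
	if (mode == 0xff)
		return -EINVAL;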
2959/**
2930 * ata_down_xfermask_limit - adjust dev xfer masks downward 2960 * ata_down_xfermask_limit - adjust dev xfer masks downward
2931 * @dev: Device to adjust xfer masks 2961 * @dev: Device to adjust xfer masks
2932 * @sel: ATA_DNXFER_* selector 2962 * @sel: ATA_DNXFER_* selector
@@ -2944,8 +2974,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2944int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 2974int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2945{ 2975{
2946 char buf[32]; 2976 char buf[32];
2947 unsigned int orig_mask, xfer_mask; 2977 unsigned long orig_mask, xfer_mask;
2948 unsigned int pio_mask, mwdma_mask, udma_mask; 2978 unsigned long pio_mask, mwdma_mask, udma_mask;
2949 int quiet, highbit; 2979 int quiet, highbit;
2950 2980
2951 quiet = !!(sel & ATA_DNXFER_QUIET); 2981 quiet = !!(sel & ATA_DNXFER_QUIET);
@@ -3039,7 +3069,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
3039 3069
3040 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3070 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3041 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3071 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3042 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3072 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3043 dev->dma_mode == XFER_MW_DMA_0 && 3073 dev->dma_mode == XFER_MW_DMA_0 &&
3044 (dev->id[63] >> 8) & 1) 3074 (dev->id[63] >> 8) & 1)
3045 err_mask &= ~AC_ERR_DEV; 3075 err_mask &= ~AC_ERR_DEV;
@@ -3089,7 +3119,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3089 3119
3090 /* step 1: calculate xfer_mask */ 3120 /* step 1: calculate xfer_mask */
3091 ata_link_for_each_dev(dev, link) { 3121 ata_link_for_each_dev(dev, link) {
3092 unsigned int pio_mask, dma_mask; 3122 unsigned long pio_mask, dma_mask;
3093 unsigned int mode_mask; 3123 unsigned int mode_mask;
3094 3124
3095 if (!ata_dev_enabled(dev)) 3125 if (!ata_dev_enabled(dev))
@@ -3115,7 +3145,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3115 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3145 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3116 3146
3117 found = 1; 3147 found = 1;
3118 if (dev->dma_mode) 3148 if (dev->dma_mode != 0xff)
3119 used_dma = 1; 3149 used_dma = 1;
3120 } 3150 }
3121 if (!found) 3151 if (!found)
@@ -3126,7 +3156,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3126 if (!ata_dev_enabled(dev)) 3156 if (!ata_dev_enabled(dev))
3127 continue; 3157 continue;
3128 3158
3129 if (!dev->pio_mode) { 3159 if (dev->pio_mode == 0xff) {
3130 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 3160 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3131 rc = -EINVAL; 3161 rc = -EINVAL;
3132 goto out; 3162 goto out;
@@ -3140,7 +3170,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3140 3170
3141 /* step 3: set host DMA timings */ 3171 /* step 3: set host DMA timings */
3142 ata_link_for_each_dev(dev, link) { 3172 ata_link_for_each_dev(dev, link) {
3143 if (!ata_dev_enabled(dev) || !dev->dma_mode) 3173 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3144 continue; 3174 continue;
3145 3175
3146 dev->xfer_mode = dev->dma_mode; 3176 dev->xfer_mode = dev->dma_mode;
@@ -3173,31 +3203,6 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3173} 3203}
3174 3204
3175/** 3205/**
3176 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3177 * @link: link on which timings will be programmed
3178 * @r_failed_dev: out parameter for failed device
3179 *
3180 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3181 * ata_set_mode() fails, pointer to the failing device is
3182 * returned in @r_failed_dev.
3183 *
3184 * LOCKING:
3185 * PCI/etc. bus probe sem.
3186 *
3187 * RETURNS:
3188 * 0 on success, negative errno otherwise
3189 */
3190int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3191{
3192 struct ata_port *ap = link->ap;
3193
3194 /* has private set_mode? */
3195 if (ap->ops->set_mode)
3196 return ap->ops->set_mode(link, r_failed_dev);
3197 return ata_do_set_mode(link, r_failed_dev);
3198}
3199
3200/**
3201 * ata_tf_to_host - issue ATA taskfile to host controller 3206 * ata_tf_to_host - issue ATA taskfile to host controller
3202 * @ap: port to which command is being issued 3207 * @ap: port to which command is being issued
3203 * @tf: ATA taskfile register set 3208 * @tf: ATA taskfile register set
@@ -4363,7 +4368,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4363 tf.feature = SETFEATURES_XFER; 4368 tf.feature = SETFEATURES_XFER;
4364 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4369 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4365 tf.protocol = ATA_PROT_NODATA; 4370 tf.protocol = ATA_PROT_NODATA;
4366 tf.nsect = dev->xfer_mode; 4371 /* If we are using IORDY we must send the mode setting command */
4372 if (ata_pio_need_iordy(dev))
4373 tf.nsect = dev->xfer_mode;
4374 /* If the device has IORDY and the controller does not - turn it off */
4375 else if (ata_id_has_iordy(dev->id))
4376 tf.nsect = 0x01;
4377 else /* In the ancient relic department - skip all of this */
4378 return 0;
4367 4379
4368 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4380 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4369 4381
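Read as a decision table, the branch above follows the SET FEATURES - XFER convention; a hypothetical helper restating it (xfer_mode_nsect() is illustrative, not from this patch):

	static int xfer_mode_nsect(struct ata_device *dev, u8 *nsect)
	{
		if (ata_pio_need_iordy(dev)) {
			*nsect = dev->xfer_mode; /* program the mode */
			return 1;
		}
		if (ata_id_has_iordy(dev->id)) {
			*nsect = 0x01;	/* PIO default mode, IORDY disabled */
			return 1;
		}
		return 0;		/* ancient relic: skip the command */
	}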
@@ -4462,17 +4474,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
4462void ata_sg_clean(struct ata_queued_cmd *qc) 4474void ata_sg_clean(struct ata_queued_cmd *qc)
4463{ 4475{
4464 struct ata_port *ap = qc->ap; 4476 struct ata_port *ap = qc->ap;
4465 struct scatterlist *sg = qc->__sg; 4477 struct scatterlist *sg = qc->sg;
4466 int dir = qc->dma_dir; 4478 int dir = qc->dma_dir;
4467 void *pad_buf = NULL; 4479 void *pad_buf = NULL;
4468 4480
4469 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4470 WARN_ON(sg == NULL); 4481 WARN_ON(sg == NULL);
4471 4482
4472 if (qc->flags & ATA_QCFLAG_SINGLE) 4483 VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
4473 WARN_ON(qc->n_elem > 1);
4474
4475 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4476 4484
4477 /* if we padded the buffer out to 32-bit bound, and data 4485 /* if we padded the buffer out to 32-bit bound, and data
4478 * xfer direction is from-device, we must copy from the 4486 * xfer direction is from-device, we must copy from the
@@ -4481,31 +4489,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4481 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) 4489 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4482 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 4490 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4483 4491
4484 if (qc->flags & ATA_QCFLAG_SG) { 4492 if (qc->mapped_n_elem)
4485 if (qc->n_elem) 4493 dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
4486 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); 4494 /* restore last sg */
4487 /* restore last sg */ 4495 if (qc->last_sg)
4488 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len; 4496 *qc->last_sg = qc->saved_last_sg;
4489 if (pad_buf) { 4497 if (pad_buf) {
4490 struct scatterlist *psg = &qc->pad_sgent; 4498 struct scatterlist *psg = &qc->extra_sg[1];
4491 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); 4499 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4492 memcpy(addr + psg->offset, pad_buf, qc->pad_len); 4500 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4493 kunmap_atomic(addr, KM_IRQ0); 4501 kunmap_atomic(addr, KM_IRQ0);
4494 }
4495 } else {
4496 if (qc->n_elem)
4497 dma_unmap_single(ap->dev,
4498 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4499 dir);
4500 /* restore sg */
4501 sg->length += qc->pad_len;
4502 if (pad_buf)
4503 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4504 pad_buf, qc->pad_len);
4505 } 4502 }
4506 4503
4507 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4504 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4508 qc->__sg = NULL; 4505 qc->sg = NULL;
4509} 4506}
4510 4507
4511/** 4508/**
@@ -4523,13 +4520,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
4523{ 4520{
4524 struct ata_port *ap = qc->ap; 4521 struct ata_port *ap = qc->ap;
4525 struct scatterlist *sg; 4522 struct scatterlist *sg;
4526 unsigned int idx; 4523 unsigned int si, pi;
4527 4524
4528 WARN_ON(qc->__sg == NULL); 4525 pi = 0;
4529 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); 4526 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4530
4531 idx = 0;
4532 ata_for_each_sg(sg, qc) {
4533 u32 addr, offset; 4527 u32 addr, offset;
4534 u32 sg_len, len; 4528 u32 sg_len, len;
4535 4529
@@ -4546,18 +4540,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
4546 if ((offset + sg_len) > 0x10000) 4540 if ((offset + sg_len) > 0x10000)
4547 len = 0x10000 - offset; 4541 len = 0x10000 - offset;
4548 4542
4549 ap->prd[idx].addr = cpu_to_le32(addr); 4543 ap->prd[pi].addr = cpu_to_le32(addr);
4550 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 4544 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4551 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 4545 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4552 4546
4553 idx++; 4547 pi++;
4554 sg_len -= len; 4548 sg_len -= len;
4555 addr += len; 4549 addr += len;
4556 } 4550 }
4557 } 4551 }
4558 4552
4559 if (idx) 4553 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4560 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4561} 4554}
4562 4555
4563/** 4556/**
@@ -4577,13 +4570,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4577{ 4570{
4578 struct ata_port *ap = qc->ap; 4571 struct ata_port *ap = qc->ap;
4579 struct scatterlist *sg; 4572 struct scatterlist *sg;
4580 unsigned int idx; 4573 unsigned int si, pi;
4581
4582 WARN_ON(qc->__sg == NULL);
4583 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4584 4574
4585 idx = 0; 4575 pi = 0;
4586 ata_for_each_sg(sg, qc) { 4576 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4587 u32 addr, offset; 4577 u32 addr, offset;
4588 u32 sg_len, len, blen; 4578 u32 sg_len, len, blen;
4589 4579
@@ -4601,25 +4591,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4601 len = 0x10000 - offset; 4591 len = 0x10000 - offset;
4602 4592
4603 blen = len & 0xffff; 4593 blen = len & 0xffff;
4604 ap->prd[idx].addr = cpu_to_le32(addr); 4594 ap->prd[pi].addr = cpu_to_le32(addr);
4605 if (blen == 0) { 4595 if (blen == 0) {
4606 /* Some PATA chipsets like the CS5530 can't 4596 /* Some PATA chipsets like the CS5530 can't
4607 cope with 0x0000 meaning 64K as the spec says */ 4597 cope with 0x0000 meaning 64K as the spec says */
4608 ap->prd[idx].flags_len = cpu_to_le32(0x8000); 4598 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4609 blen = 0x8000; 4599 blen = 0x8000;
4610 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000); 4600 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4611 } 4601 }
4612 ap->prd[idx].flags_len = cpu_to_le32(blen); 4602 ap->prd[pi].flags_len = cpu_to_le32(blen);
4613 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 4603 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4614 4604
4615 idx++; 4605 pi++;
4616 sg_len -= len; 4606 sg_len -= len;
4617 addr += len; 4607 addr += len;
4618 } 4608 }
4619 } 4609 }
4620 4610
4621 if (idx) 4611 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4622 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4623} 4612}
4624 4613
4625/** 4614/**
@@ -4669,8 +4658,8 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4669 */ 4658 */
4670static int atapi_qc_may_overflow(struct ata_queued_cmd *qc) 4659static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4671{ 4660{
4672 if (qc->tf.protocol != ATA_PROT_ATAPI && 4661 if (qc->tf.protocol != ATAPI_PROT_PIO &&
4673 qc->tf.protocol != ATA_PROT_ATAPI_DMA) 4662 qc->tf.protocol != ATAPI_PROT_DMA)
4674 return 0; 4663 return 0;
4675 4664
4676 if (qc->tf.flags & ATA_TFLAG_WRITE) 4665 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -4756,33 +4745,6 @@ void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4756void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4745void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4757 4746
4758/** 4747/**
4759 * ata_sg_init_one - Associate command with memory buffer
4760 * @qc: Command to be associated
4761 * @buf: Memory buffer
4762 * @buflen: Length of memory buffer, in bytes.
4763 *
4764 * Initialize the data-related elements of queued_cmd @qc
4765 * to point to a single memory buffer, @buf of byte length @buflen.
4766 *
4767 * LOCKING:
4768 * spin_lock_irqsave(host lock)
4769 */
4770
4771void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4772{
4773 qc->flags |= ATA_QCFLAG_SINGLE;
4774
4775 qc->__sg = &qc->sgent;
4776 qc->n_elem = 1;
4777 qc->orig_n_elem = 1;
4778 qc->buf_virt = buf;
4779 qc->nbytes = buflen;
4780 qc->cursg = qc->__sg;
4781
4782 sg_init_one(&qc->sgent, buf, buflen);
4783}
4784
4785/**
4786 * ata_sg_init - Associate command with scatter-gather table. 4748 * ata_sg_init - Associate command with scatter-gather table.
4787 * @qc: Command to be associated 4749 * @qc: Command to be associated
4788 * @sg: Scatter-gather table. 4750 * @sg: Scatter-gather table.
@@ -4795,84 +4757,103 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4795 * LOCKING: 4757 * LOCKING:
4796 * spin_lock_irqsave(host lock) 4758 * spin_lock_irqsave(host lock)
4797 */ 4759 */
4798
4799void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4760void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4800 unsigned int n_elem) 4761 unsigned int n_elem)
4801{ 4762{
4802 qc->flags |= ATA_QCFLAG_SG; 4763 qc->sg = sg;
4803 qc->__sg = sg;
4804 qc->n_elem = n_elem; 4764 qc->n_elem = n_elem;
4805 qc->orig_n_elem = n_elem; 4765 qc->cursg = qc->sg;
4806 qc->cursg = qc->__sg;
4807} 4766}
4808 4767
4809/** 4768static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
4810 * ata_sg_setup_one - DMA-map the memory buffer associated with a command. 4769 unsigned int *n_elem_extra,
4811 * @qc: Command with memory buffer to be mapped. 4770 unsigned int *nbytes_extra)
4812 *
4813 * DMA-map the memory buffer associated with queued_cmd @qc.
4814 *
4815 * LOCKING:
4816 * spin_lock_irqsave(host lock)
4817 *
4818 * RETURNS:
4819 * Zero on success, negative on error.
4820 */
4821
4822static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4823{ 4771{
4824 struct ata_port *ap = qc->ap; 4772 struct ata_port *ap = qc->ap;
4825 int dir = qc->dma_dir; 4773 unsigned int n_elem = qc->n_elem;
4826 struct scatterlist *sg = qc->__sg; 4774 struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
4827 dma_addr_t dma_address; 4775
4828 int trim_sg = 0; 4776 *n_elem_extra = 0;
4777 *nbytes_extra = 0;
4778
4779 /* needs padding? */
4780 qc->pad_len = qc->nbytes & 3;
4781
4782 if (likely(!qc->pad_len))
4783 return n_elem;
4784
4785 /* locate last sg and save it */
4786 lsg = sg_last(qc->sg, n_elem);
4787 qc->last_sg = lsg;
4788 qc->saved_last_sg = *lsg;
4789
4790 sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
4829 4791
4830 /* we must lengthen transfers to end on a 32-bit boundary */
4831 qc->pad_len = sg->length & 3;
4832 if (qc->pad_len) { 4792 if (qc->pad_len) {
4793 struct scatterlist *psg = &qc->extra_sg[1];
4833 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 4794 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4834 struct scatterlist *psg = &qc->pad_sgent; 4795 unsigned int offset;
4835 4796
4836 WARN_ON(qc->dev->class != ATA_DEV_ATAPI); 4797 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4837 4798
4838 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 4799 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4839 4800
4840 if (qc->tf.flags & ATA_TFLAG_WRITE) 4801 /* psg->page/offset are used to copy to-be-written
4841 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, 4802 * data in this function or read data in ata_sg_clean.
4842 qc->pad_len); 4803 */
4804 offset = lsg->offset + lsg->length - qc->pad_len;
4805 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4806 qc->pad_len, offset_in_page(offset));
4807
4808 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4809 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4810 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4811 kunmap_atomic(addr, KM_IRQ0);
4812 }
4843 4813
4844 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 4814 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4845 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 4815 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4846 /* trim sg */
4847 sg->length -= qc->pad_len;
4848 if (sg->length == 0)
4849 trim_sg = 1;
4850 4816
4851 DPRINTK("padding done, sg->length=%u pad_len=%u\n", 4817 /* Trim the last sg entry and chain the original and
4852 sg->length, qc->pad_len); 4818 * padding sg lists.
4853 } 4819 *
4820 * Because chaining consumes one sg entry, one extra
4821 * sg entry is allocated and the last sg entry is
4822 * copied to it if the length isn't zero after the padded
4823 * amount is removed.
4824 *
4825 * If the last sg entry is completely replaced by
4826 * padding sg entry, the first sg entry is skipped
4827 * while chaining.
4828 */
4829 lsg->length -= qc->pad_len;
4830 if (lsg->length) {
4831 copy_lsg = &qc->extra_sg[0];
4832 tsg = &qc->extra_sg[0];
4833 } else {
4834 n_elem--;
4835 tsg = &qc->extra_sg[1];
4836 }
4854 4837
4855 if (trim_sg) { 4838 esg = &qc->extra_sg[1];
4856 qc->n_elem--;
4857 goto skip_map;
4858 }
4859 4839
4860 dma_address = dma_map_single(ap->dev, qc->buf_virt, 4840 (*n_elem_extra)++;
4861 sg->length, dir); 4841 (*nbytes_extra) += 4 - qc->pad_len;
4862 if (dma_mapping_error(dma_address)) {
4863 /* restore sg */
4864 sg->length += qc->pad_len;
4865 return -1;
4866 } 4842 }
4867 4843
4868 sg_dma_address(sg) = dma_address; 4844 if (copy_lsg)
4869 sg_dma_len(sg) = sg->length; 4845 sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
4870 4846
4871skip_map: 4847 sg_chain(lsg, 1, tsg);
4872 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), 4848 sg_mark_end(esg);
4873 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4874 4849
4875 return 0; 4850 /* sglist can't start with chaining sg entry, fast forward */
4851 if (qc->sg == lsg) {
4852 qc->sg = tsg;
4853 qc->cursg = tsg;
4854 }
4855
4856 return n_elem;
4876} 4857}
4877 4858
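The chaining dance above leans on the generic scatterlist API. A self-contained sketch of the idiom (data_page/pad_page/orig_last and the lengths are placeholders, not names from this patch):

	struct scatterlist extra[2];

	sg_init_table(extra, 2);
	/* copy of the trimmed original last entry, if it kept any data */
	sg_set_page(&extra[0], data_page, data_len, data_offset);
	/* the 4-byte-alignment padding entry */
	sg_set_page(&extra[1], pad_page, pad_len, 0);

	sg_chain(orig_last, 1, extra);	/* original list now links here */
	sg_mark_end(&extra[1]);		/* and terminates after the pad */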
4878/** 4859/**
@@ -4888,75 +4869,30 @@ skip_map:
4888 * Zero on success, negative on error. 4869 * Zero on success, negative on error.
4889 * 4870 *
4890 */ 4871 */
4891
4892static int ata_sg_setup(struct ata_queued_cmd *qc) 4872static int ata_sg_setup(struct ata_queued_cmd *qc)
4893{ 4873{
4894 struct ata_port *ap = qc->ap; 4874 struct ata_port *ap = qc->ap;
4895 struct scatterlist *sg = qc->__sg; 4875 unsigned int n_elem, n_elem_extra, nbytes_extra;
4896 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4897 int n_elem, pre_n_elem, dir, trim_sg = 0;
4898 4876
4899 VPRINTK("ENTER, ata%u\n", ap->print_id); 4877 VPRINTK("ENTER, ata%u\n", ap->print_id);
4900 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4901 4878
4902 /* we must lengthen transfers to end on a 32-bit boundary */ 4879 n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
4903 qc->pad_len = lsg->length & 3;
4904 if (qc->pad_len) {
4905 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4906 struct scatterlist *psg = &qc->pad_sgent;
4907 unsigned int offset;
4908
4909 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4910 4880
4911 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 4881 if (n_elem) {
4912 4882 n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
4913 /* 4883 if (n_elem < 1) {
4914 * psg->page/offset are used to copy to-be-written 4884 /* restore last sg */
4915 * data in this function or read data in ata_sg_clean. 4885 if (qc->last_sg)
4916 */ 4886 *qc->last_sg = qc->saved_last_sg;
4917 offset = lsg->offset + lsg->length - qc->pad_len; 4887 return -1;
4918 sg_init_table(psg, 1);
4919 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4920 qc->pad_len, offset_in_page(offset));
4921
4922 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4923 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4924 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4925 kunmap_atomic(addr, KM_IRQ0);
4926 } 4888 }
4927 4889 DPRINTK("%d sg elements mapped\n", n_elem);
4928 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4929 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4930 /* trim last sg */
4931 lsg->length -= qc->pad_len;
4932 if (lsg->length == 0)
4933 trim_sg = 1;
4934
4935 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4936 qc->n_elem - 1, lsg->length, qc->pad_len);
4937 }
4938
4939 pre_n_elem = qc->n_elem;
4940 if (trim_sg && pre_n_elem)
4941 pre_n_elem--;
4942
4943 if (!pre_n_elem) {
4944 n_elem = 0;
4945 goto skip_map;
4946 }
4947
4948 dir = qc->dma_dir;
4949 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4950 if (n_elem < 1) {
4951 /* restore last sg */
4952 lsg->length += qc->pad_len;
4953 return -1;
4954 } 4890 }
4955 4891
4956 DPRINTK("%d sg elements mapped\n", n_elem); 4892 qc->n_elem = qc->mapped_n_elem = n_elem;
4957 4893 qc->n_elem += n_elem_extra;
4958skip_map: 4894 qc->nbytes += nbytes_extra;
4959 qc->n_elem = n_elem; 4895 qc->flags |= ATA_QCFLAG_DMAMAP;
4960 4896
4961 return 0; 4897 return 0;
4962} 4898}
@@ -4985,63 +4921,77 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4985 4921
4986/** 4922/**
4987 * ata_data_xfer - Transfer data by PIO 4923 * ata_data_xfer - Transfer data by PIO
4988 * @adev: device to target 4924 * @dev: device to target
4989 * @buf: data buffer 4925 * @buf: data buffer
4990 * @buflen: buffer length 4926 * @buflen: buffer length
4991 * @write_data: read/write 4927 * @rw: read/write
4992 * 4928 *
4993 * Transfer data from/to the device data register by PIO. 4929 * Transfer data from/to the device data register by PIO.
4994 * 4930 *
4995 * LOCKING: 4931 * LOCKING:
4996 * Inherited from caller. 4932 * Inherited from caller.
4933 *
4934 * RETURNS:
4935 * Bytes consumed.
4997 */ 4936 */
4998void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 4937unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
4999 unsigned int buflen, int write_data) 4938 unsigned int buflen, int rw)
5000{ 4939{
5001 struct ata_port *ap = adev->link->ap; 4940 struct ata_port *ap = dev->link->ap;
4941 void __iomem *data_addr = ap->ioaddr.data_addr;
5002 unsigned int words = buflen >> 1; 4942 unsigned int words = buflen >> 1;
5003 4943
5004 /* Transfer multiple of 2 bytes */ 4944 /* Transfer multiple of 2 bytes */
5005 if (write_data) 4945 if (rw == READ)
5006 iowrite16_rep(ap->ioaddr.data_addr, buf, words); 4946 ioread16_rep(data_addr, buf, words);
5007 else 4947 else
5008 ioread16_rep(ap->ioaddr.data_addr, buf, words); 4948 iowrite16_rep(data_addr, buf, words);
5009 4949
5010 /* Transfer trailing 1 byte, if any. */ 4950 /* Transfer trailing 1 byte, if any. */
5011 if (unlikely(buflen & 0x01)) { 4951 if (unlikely(buflen & 0x01)) {
5012 u16 align_buf[1] = { 0 }; 4952 __le16 align_buf[1] = { 0 };
5013 unsigned char *trailing_buf = buf + buflen - 1; 4953 unsigned char *trailing_buf = buf + buflen - 1;
5014 4954
5015 if (write_data) { 4955 if (rw == READ) {
5016 memcpy(align_buf, trailing_buf, 1); 4956 align_buf[0] = cpu_to_le16(ioread16(data_addr));
5017 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
5018 } else {
5019 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
5020 memcpy(trailing_buf, align_buf, 1); 4957 memcpy(trailing_buf, align_buf, 1);
4958 } else {
4959 memcpy(align_buf, trailing_buf, 1);
4960 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
5021 } 4961 }
4962 words++;
5022 } 4963 }
4964
4965 return words << 1;
5023} 4966}
5024 4967
5025/** 4968/**
5026 * ata_data_xfer_noirq - Transfer data by PIO 4969 * ata_data_xfer_noirq - Transfer data by PIO
5027 * @adev: device to target 4970 * @dev: device to target
5028 * @buf: data buffer 4971 * @buf: data buffer
5029 * @buflen: buffer length 4972 * @buflen: buffer length
5030 * @write_data: read/write 4973 * @rw: read/write
5031 * 4974 *
5032 * Transfer data from/to the device data register by PIO. Do the 4975 * Transfer data from/to the device data register by PIO. Do the
5033 * transfer with interrupts disabled. 4976 * transfer with interrupts disabled.
5034 * 4977 *
5035 * LOCKING: 4978 * LOCKING:
5036 * Inherited from caller. 4979 * Inherited from caller.
4980 *
4981 * RETURNS:
4982 * Bytes consumed.
5037 */ 4983 */
5038void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, 4984unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
5039 unsigned int buflen, int write_data) 4985 unsigned int buflen, int rw)
5040{ 4986{
5041 unsigned long flags; 4987 unsigned long flags;
4988 unsigned int consumed;
4989
5042 local_irq_save(flags); 4990 local_irq_save(flags);
5043 ata_data_xfer(adev, buf, buflen, write_data); 4991 consumed = ata_data_xfer(dev, buf, buflen, rw);
5044 local_irq_restore(flags); 4992 local_irq_restore(flags);
4993
4994 return consumed;
5045} 4995}
5046 4996
5047 4997
@@ -5152,13 +5102,13 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5152 ata_altstatus(ap); /* flush */ 5102 ata_altstatus(ap); /* flush */
5153 5103
5154 switch (qc->tf.protocol) { 5104 switch (qc->tf.protocol) {
5155 case ATA_PROT_ATAPI: 5105 case ATAPI_PROT_PIO:
5156 ap->hsm_task_state = HSM_ST; 5106 ap->hsm_task_state = HSM_ST;
5157 break; 5107 break;
5158 case ATA_PROT_ATAPI_NODATA: 5108 case ATAPI_PROT_NODATA:
5159 ap->hsm_task_state = HSM_ST_LAST; 5109 ap->hsm_task_state = HSM_ST_LAST;
5160 break; 5110 break;
5161 case ATA_PROT_ATAPI_DMA: 5111 case ATAPI_PROT_DMA:
5162 ap->hsm_task_state = HSM_ST_LAST; 5112 ap->hsm_task_state = HSM_ST_LAST;
5163 /* initiate bmdma */ 5113 /* initiate bmdma */
5164 ap->ops->bmdma_start(qc); 5114 ap->ops->bmdma_start(qc);
@@ -5300,12 +5250,15 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5300 bytes = (bc_hi << 8) | bc_lo; 5250 bytes = (bc_hi << 8) | bc_lo;
5301 5251
5302 /* shall be cleared to zero, indicating xfer of data */ 5252 /* shall be cleared to zero, indicating xfer of data */
5303 if (ireason & (1 << 0)) 5253 if (unlikely(ireason & (1 << 0)))
5304 goto err_out; 5254 goto err_out;
5305 5255
5306 /* make sure transfer direction matches expected */ 5256 /* make sure transfer direction matches expected */
5307 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 5257 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5308 if (do_write != i_write) 5258 if (unlikely(do_write != i_write))
5259 goto err_out;
5260
5261 if (unlikely(!bytes))
5309 goto err_out; 5262 goto err_out;
5310 5263
5311 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 5264 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
@@ -5341,7 +5294,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
5341 (qc->tf.flags & ATA_TFLAG_WRITE)) 5294 (qc->tf.flags & ATA_TFLAG_WRITE))
5342 return 1; 5295 return 1;
5343 5296
5344 if (is_atapi_taskfile(&qc->tf) && 5297 if (ata_is_atapi(qc->tf.protocol) &&
5345 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 5298 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5346 return 1; 5299 return 1;
5347 } 5300 }
@@ -5506,7 +5459,7 @@ fsm_start:
5506 5459
5507 case HSM_ST: 5460 case HSM_ST:
5508 /* complete command or read/write the data register */ 5461 /* complete command or read/write the data register */
5509 if (qc->tf.protocol == ATA_PROT_ATAPI) { 5462 if (qc->tf.protocol == ATAPI_PROT_PIO) {
5510 /* ATAPI PIO protocol */ 5463 /* ATAPI PIO protocol */
5511 if ((status & ATA_DRQ) == 0) { 5464 if ((status & ATA_DRQ) == 0) {
5512 /* No more data to transfer or device error. 5465 /* No more data to transfer or device error.
@@ -5664,7 +5617,7 @@ fsm_start:
5664 msleep(2); 5617 msleep(2);
5665 status = ata_busy_wait(ap, ATA_BUSY, 10); 5618 status = ata_busy_wait(ap, ATA_BUSY, 10);
5666 if (status & ATA_BUSY) { 5619 if (status & ATA_BUSY) {
5667 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE); 5620 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5668 return; 5621 return;
5669 } 5622 }
5670 } 5623 }
@@ -5805,6 +5758,22 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
5805 ap->ops->tf_read(ap, &qc->result_tf); 5758 ap->ops->tf_read(ap, &qc->result_tf);
5806} 5759}
5807 5760
5761static void ata_verify_xfer(struct ata_queued_cmd *qc)
5762{
5763 struct ata_device *dev = qc->dev;
5764
5765 if (ata_tag_internal(qc->tag))
5766 return;
5767
5768 if (ata_is_nodata(qc->tf.protocol))
5769 return;
5770
5771 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5772 return;
5773
5774 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5775}
5776
5808/** 5777/**
5809 * ata_qc_complete - Complete an active ATA command 5778 * ata_qc_complete - Complete an active ATA command
5810 * @qc: Command to complete 5779 * @qc: Command to complete
@@ -5876,6 +5845,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
5876 break; 5845 break;
5877 } 5846 }
5878 5847
5848 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5849 ata_verify_xfer(qc);
5850
5879 __ata_qc_complete(qc); 5851 __ata_qc_complete(qc);
5880 } else { 5852 } else {
5881 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5853 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
@@ -5938,30 +5910,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5938 return nr_done; 5910 return nr_done;
5939} 5911}
5940 5912
5941static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5942{
5943 struct ata_port *ap = qc->ap;
5944
5945 switch (qc->tf.protocol) {
5946 case ATA_PROT_NCQ:
5947 case ATA_PROT_DMA:
5948 case ATA_PROT_ATAPI_DMA:
5949 return 1;
5950
5951 case ATA_PROT_ATAPI:
5952 case ATA_PROT_PIO:
5953 if (ap->flags & ATA_FLAG_PIO_DMA)
5954 return 1;
5955
5956 /* fall through */
5957
5958 default:
5959 return 0;
5960 }
5961
5962 /* never reached */
5963}
5964
5965/** 5913/**
5966 * ata_qc_issue - issue taskfile to device 5914 * ata_qc_issue - issue taskfile to device
5967 * @qc: command to issue to device 5915 * @qc: command to issue to device
@@ -5978,6 +5926,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5978{ 5926{
5979 struct ata_port *ap = qc->ap; 5927 struct ata_port *ap = qc->ap;
5980 struct ata_link *link = qc->dev->link; 5928 struct ata_link *link = qc->dev->link;
5929 u8 prot = qc->tf.protocol;
5981 5930
5982 /* Make sure only one non-NCQ command is outstanding. The 5931 /* Make sure only one non-NCQ command is outstanding. The
5983 * check is skipped for old EH because it reuses active qc to 5932 * check is skipped for old EH because it reuses active qc to
@@ -5985,7 +5934,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5985 */ 5934 */
5986 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5935 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5987 5936
5988 if (qc->tf.protocol == ATA_PROT_NCQ) { 5937 if (ata_is_ncq(prot)) {
5989 WARN_ON(link->sactive & (1 << qc->tag)); 5938 WARN_ON(link->sactive & (1 << qc->tag));
5990 5939
5991 if (!link->sactive) 5940 if (!link->sactive)
@@ -6001,17 +5950,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
6001 qc->flags |= ATA_QCFLAG_ACTIVE; 5950 qc->flags |= ATA_QCFLAG_ACTIVE;
6002 ap->qc_active |= 1 << qc->tag; 5951 ap->qc_active |= 1 << qc->tag;
6003 5952
6004 if (ata_should_dma_map(qc)) { 5953 /* We guarantee to LLDs that they will have at least one
6005 if (qc->flags & ATA_QCFLAG_SG) { 5954 * non-zero sg if the command is a data command.
6006 if (ata_sg_setup(qc)) 5955 */
6007 goto sg_err; 5956 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
6008 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 5957
6009 if (ata_sg_setup_one(qc)) 5958 /* ata_sg_setup() may update nbytes */
6010 goto sg_err; 5959 qc->raw_nbytes = qc->nbytes;
6011 } 5960
6012 } else { 5961 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
6013 qc->flags &= ~ATA_QCFLAG_DMAMAP; 5962 (ap->flags & ATA_FLAG_PIO_DMA)))
6014 } 5963 if (ata_sg_setup(qc))
5964 goto sg_err;
6015 5965
6016 /* if device is sleeping, schedule softreset and abort the link */ 5966 /* if device is sleeping, schedule softreset and abort the link */
6017 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5967 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
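The predicates used above replace the per-call-site switch of ata_should_dma_map() with a one-time classification of each protocol value. A sketch of the flag scheme they assume (the real definitions live in <linux/ata.h>; names here follow that layout but are reproduced from memory):

	static inline int ata_is_dma(u8 prot)
	{
		return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
	}

	static inline int ata_is_data(u8 prot)
	{
		return ata_prot_flags(prot) &
			(ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA);
	}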
@@ -6029,7 +5979,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
6029 return; 5979 return;
6030 5980
6031sg_err: 5981sg_err:
6032 qc->flags &= ~ATA_QCFLAG_DMAMAP;
6033 qc->err_mask |= AC_ERR_SYSTEM; 5982 qc->err_mask |= AC_ERR_SYSTEM;
6034err: 5983err:
6035 ata_qc_complete(qc); 5984 ata_qc_complete(qc);
@@ -6064,11 +6013,11 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6064 switch (qc->tf.protocol) { 6013 switch (qc->tf.protocol) {
6065 case ATA_PROT_PIO: 6014 case ATA_PROT_PIO:
6066 case ATA_PROT_NODATA: 6015 case ATA_PROT_NODATA:
6067 case ATA_PROT_ATAPI: 6016 case ATAPI_PROT_PIO:
6068 case ATA_PROT_ATAPI_NODATA: 6017 case ATAPI_PROT_NODATA:
6069 qc->tf.flags |= ATA_TFLAG_POLLING; 6018 qc->tf.flags |= ATA_TFLAG_POLLING;
6070 break; 6019 break;
6071 case ATA_PROT_ATAPI_DMA: 6020 case ATAPI_PROT_DMA:
6072 if (qc->dev->flags & ATA_DFLAG_CDB_INTR) 6021 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6073 /* see ata_dma_blacklisted() */ 6022 /* see ata_dma_blacklisted() */
6074 BUG(); 6023 BUG();
@@ -6091,7 +6040,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6091 ap->hsm_task_state = HSM_ST_LAST; 6040 ap->hsm_task_state = HSM_ST_LAST;
6092 6041
6093 if (qc->tf.flags & ATA_TFLAG_POLLING) 6042 if (qc->tf.flags & ATA_TFLAG_POLLING)
6094 ata_port_queue_task(ap, ata_pio_task, qc, 0); 6043 ata_pio_queue_task(ap, qc, 0);
6095 6044
6096 break; 6045 break;
6097 6046
@@ -6113,7 +6062,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6113 if (qc->tf.flags & ATA_TFLAG_WRITE) { 6062 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6114 /* PIO data out protocol */ 6063 /* PIO data out protocol */
6115 ap->hsm_task_state = HSM_ST_FIRST; 6064 ap->hsm_task_state = HSM_ST_FIRST;
6116 ata_port_queue_task(ap, ata_pio_task, qc, 0); 6065 ata_pio_queue_task(ap, qc, 0);
6117 6066
6118 /* always send first data block using 6067 /* always send first data block using
6119 * the ata_pio_task() codepath. 6068 * the ata_pio_task() codepath.
@@ -6123,7 +6072,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6123 ap->hsm_task_state = HSM_ST; 6072 ap->hsm_task_state = HSM_ST;
6124 6073
6125 if (qc->tf.flags & ATA_TFLAG_POLLING) 6074 if (qc->tf.flags & ATA_TFLAG_POLLING)
6126 ata_port_queue_task(ap, ata_pio_task, qc, 0); 6075 ata_pio_queue_task(ap, qc, 0);
6127 6076
6128 /* if polling, ata_pio_task() handles the rest. 6077 /* if polling, ata_pio_task() handles the rest.
6129 * otherwise, interrupt handler takes over from here. 6078 * otherwise, interrupt handler takes over from here.
@@ -6132,8 +6081,8 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6132 6081
6133 break; 6082 break;
6134 6083
6135 case ATA_PROT_ATAPI: 6084 case ATAPI_PROT_PIO:
6136 case ATA_PROT_ATAPI_NODATA: 6085 case ATAPI_PROT_NODATA:
6137 if (qc->tf.flags & ATA_TFLAG_POLLING) 6086 if (qc->tf.flags & ATA_TFLAG_POLLING)
6138 ata_qc_set_polling(qc); 6087 ata_qc_set_polling(qc);
6139 6088
@@ -6144,10 +6093,10 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6144 /* send cdb by polling if no cdb interrupt */ 6093 /* send cdb by polling if no cdb interrupt */
6145 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 6094 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6146 (qc->tf.flags & ATA_TFLAG_POLLING)) 6095 (qc->tf.flags & ATA_TFLAG_POLLING))
6147 ata_port_queue_task(ap, ata_pio_task, qc, 0); 6096 ata_pio_queue_task(ap, qc, 0);
6148 break; 6097 break;
6149 6098
6150 case ATA_PROT_ATAPI_DMA: 6099 case ATAPI_PROT_DMA:
6151 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 6100 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6152 6101
6153 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 6102 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
@@ -6156,7 +6105,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6156 6105
6157 /* send cdb by polling if no cdb interrupt */ 6106 /* send cdb by polling if no cdb interrupt */
6158 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 6107 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6159 ata_port_queue_task(ap, ata_pio_task, qc, 0); 6108 ata_pio_queue_task(ap, qc, 0);
6160 break; 6109 break;
6161 6110
6162 default: 6111 default:
@@ -6200,15 +6149,15 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
6200 */ 6149 */
6201 6150
6202 /* Check the ATA_DFLAG_CDB_INTR flag is enough here. 6151 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6203 * The flag was turned on only for atapi devices. 6152 * The flag was turned on only for atapi devices. No
6204 * No need to check is_atapi_taskfile(&qc->tf) again. 6153 * need to check ata_is_atapi(qc->tf.protocol) again.
6205 */ 6154 */
6206 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 6155 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6207 goto idle_irq; 6156 goto idle_irq;
6208 break; 6157 break;
6209 case HSM_ST_LAST: 6158 case HSM_ST_LAST:
6210 if (qc->tf.protocol == ATA_PROT_DMA || 6159 if (qc->tf.protocol == ATA_PROT_DMA ||
6211 qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 6160 qc->tf.protocol == ATAPI_PROT_DMA) {
6212 /* check status of DMA engine */ 6161 /* check status of DMA engine */
6213 host_stat = ap->ops->bmdma_status(ap); 6162 host_stat = ap->ops->bmdma_status(ap);
6214 VPRINTK("ata%u: host_stat 0x%X\n", 6163 VPRINTK("ata%u: host_stat 0x%X\n",
@@ -6250,7 +6199,7 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
6250 ata_hsm_move(ap, qc, status, 0); 6199 ata_hsm_move(ap, qc, status, 0);
6251 6200
6252 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 6201 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6253 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) 6202 qc->tf.protocol == ATAPI_PROT_DMA))
6254 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 6203 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6255 6204
6256 return 1; /* irq handled */ 6205 return 1; /* irq handled */
@@ -6772,7 +6721,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
6772 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6721 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6773#endif 6722#endif
6774 6723
6775 INIT_DELAYED_WORK(&ap->port_task, NULL); 6724 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
6776 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 6725 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6777 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6726 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6778 INIT_LIST_HEAD(&ap->eh_done_q); 6727 INIT_LIST_HEAD(&ap->eh_done_q);
@@ -7589,7 +7538,6 @@ EXPORT_SYMBOL_GPL(ata_host_register);
7589EXPORT_SYMBOL_GPL(ata_host_activate); 7538EXPORT_SYMBOL_GPL(ata_host_activate);
7590EXPORT_SYMBOL_GPL(ata_host_detach); 7539EXPORT_SYMBOL_GPL(ata_host_detach);
7591EXPORT_SYMBOL_GPL(ata_sg_init); 7540EXPORT_SYMBOL_GPL(ata_sg_init);
7592EXPORT_SYMBOL_GPL(ata_sg_init_one);
7593EXPORT_SYMBOL_GPL(ata_hsm_move); 7541EXPORT_SYMBOL_GPL(ata_hsm_move);
7594EXPORT_SYMBOL_GPL(ata_qc_complete); 7542EXPORT_SYMBOL_GPL(ata_qc_complete);
7595EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7543EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
@@ -7601,6 +7549,13 @@ EXPORT_SYMBOL_GPL(ata_std_dev_select);
7601EXPORT_SYMBOL_GPL(sata_print_link_status); 7549EXPORT_SYMBOL_GPL(sata_print_link_status);
7602EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7550EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7603EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7551EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7552EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7553EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7554EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7555EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7556EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7557EXPORT_SYMBOL_GPL(ata_mode_string);
7558EXPORT_SYMBOL_GPL(ata_id_xfermask);
7604EXPORT_SYMBOL_GPL(ata_check_status); 7559EXPORT_SYMBOL_GPL(ata_check_status);
7605EXPORT_SYMBOL_GPL(ata_altstatus); 7560EXPORT_SYMBOL_GPL(ata_altstatus);
7606EXPORT_SYMBOL_GPL(ata_exec_command); 7561EXPORT_SYMBOL_GPL(ata_exec_command);
@@ -7643,7 +7598,6 @@ EXPORT_SYMBOL_GPL(ata_wait_register);
7643EXPORT_SYMBOL_GPL(ata_busy_sleep); 7598EXPORT_SYMBOL_GPL(ata_busy_sleep);
7644EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7599EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7645EXPORT_SYMBOL_GPL(ata_wait_ready); 7600EXPORT_SYMBOL_GPL(ata_wait_ready);
7646EXPORT_SYMBOL_GPL(ata_port_queue_task);
7647EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 7601EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7648EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7602EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7649EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7603EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
@@ -7662,18 +7616,20 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
7662#endif /* CONFIG_PM */ 7616#endif /* CONFIG_PM */
7663EXPORT_SYMBOL_GPL(ata_id_string); 7617EXPORT_SYMBOL_GPL(ata_id_string);
7664EXPORT_SYMBOL_GPL(ata_id_c_string); 7618EXPORT_SYMBOL_GPL(ata_id_c_string);
7665EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7666EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7619EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7667 7620
7668EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7621EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7622EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7669EXPORT_SYMBOL_GPL(ata_timing_compute); 7623EXPORT_SYMBOL_GPL(ata_timing_compute);
7670EXPORT_SYMBOL_GPL(ata_timing_merge); 7624EXPORT_SYMBOL_GPL(ata_timing_merge);
7625EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7671 7626
7672#ifdef CONFIG_PCI 7627#ifdef CONFIG_PCI
7673EXPORT_SYMBOL_GPL(pci_test_config_bits); 7628EXPORT_SYMBOL_GPL(pci_test_config_bits);
7674EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); 7629EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7675EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); 7630EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7676EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); 7631EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7632EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
7677EXPORT_SYMBOL_GPL(ata_pci_init_one); 7633EXPORT_SYMBOL_GPL(ata_pci_init_one);
7678EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7634EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7679#ifdef CONFIG_PM 7635#ifdef CONFIG_PM
@@ -7715,4 +7671,5 @@ EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7715EXPORT_SYMBOL_GPL(ata_cable_40wire); 7671EXPORT_SYMBOL_GPL(ata_cable_40wire);
7716EXPORT_SYMBOL_GPL(ata_cable_80wire); 7672EXPORT_SYMBOL_GPL(ata_cable_80wire);
7717EXPORT_SYMBOL_GPL(ata_cable_unknown); 7673EXPORT_SYMBOL_GPL(ata_cable_unknown);
7674EXPORT_SYMBOL_GPL(ata_cable_ignore);
7718EXPORT_SYMBOL_GPL(ata_cable_sata); 7675EXPORT_SYMBOL_GPL(ata_cable_sata);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 21a81cd148e4..4e31071acc02 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -46,9 +46,26 @@
46#include "libata.h" 46#include "libata.h"
47 47
48enum { 48enum {
49 /* speed down verdicts */
49 ATA_EH_SPDN_NCQ_OFF = (1 << 0), 50 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
50 ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 51 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
51 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 52 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
53 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
54
55 /* error flags */
56 ATA_EFLAG_IS_IO = (1 << 0),
57 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
58
59 /* error categories */
60 ATA_ECAT_NONE = 0,
61 ATA_ECAT_ATA_BUS = 1,
62 ATA_ECAT_TOUT_HSM = 2,
63 ATA_ECAT_UNK_DEV = 3,
64 ATA_ECAT_DUBIOUS_NONE = 4,
65 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
66 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
67 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
68 ATA_ECAT_NR = 8,
52}; 69};
53 70
54/* Waiting in ->prereset can never be reliable. It's sometimes nice 71/* Waiting in ->prereset can never be reliable. It's sometimes nice
@@ -213,12 +230,13 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
213 if (offset < 0) 230 if (offset < 0)
214 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 231 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
215 else 232 else
216 ata_port_desc(ap, "%s 0x%llx", name, start + offset); 233 ata_port_desc(ap, "%s 0x%llx", name,
234 start + (unsigned long long)offset);
217} 235}
218 236
219#endif /* CONFIG_PCI */ 237#endif /* CONFIG_PCI */
220 238
221static void ata_ering_record(struct ata_ering *ering, int is_io, 239static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
222 unsigned int err_mask) 240 unsigned int err_mask)
223{ 241{
224 struct ata_ering_entry *ent; 242 struct ata_ering_entry *ent;
@@ -229,11 +247,20 @@ static void ata_ering_record(struct ata_ering *ering, int is_io,
229 ering->cursor %= ATA_ERING_SIZE; 247 ering->cursor %= ATA_ERING_SIZE;
230 248
231 ent = &ering->ring[ering->cursor]; 249 ent = &ering->ring[ering->cursor];
232 ent->is_io = is_io; 250 ent->eflags = eflags;
233 ent->err_mask = err_mask; 251 ent->err_mask = err_mask;
234 ent->timestamp = get_jiffies_64(); 252 ent->timestamp = get_jiffies_64();
235} 253}
236 254
255static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
256{
257 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
258
259 if (ent->err_mask)
260 return ent;
261 return NULL;
262}
263
237static void ata_ering_clear(struct ata_ering *ering) 264static void ata_ering_clear(struct ata_ering *ering)
238{ 265{
239 memset(ering, 0, sizeof(*ering)); 266 memset(ering, 0, sizeof(*ering));
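A short usage sketch for the ring helpers above (the DUBIOUS promotion is illustrative of how later EH code can amend the newest record, not a quote from this patch):

	struct ata_ering_entry *ent;

	ata_ering_record(&dev->ering, ATA_EFLAG_IS_IO, AC_ERR_TIMEOUT);
	ent = ata_ering_top(&dev->ering);	/* newest occupied slot */
	if (ent)
		ent->eflags |= ATA_EFLAG_DUBIOUS_XFER;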
@@ -445,9 +472,20 @@ void ata_scsi_error(struct Scsi_Host *host)
445 spin_lock_irqsave(ap->lock, flags); 472 spin_lock_irqsave(ap->lock, flags);
446 473
447 __ata_port_for_each_link(link, ap) { 474 __ata_port_for_each_link(link, ap) {
475 struct ata_eh_context *ehc = &link->eh_context;
476 struct ata_device *dev;
477
448 memset(&link->eh_context, 0, sizeof(link->eh_context)); 478 memset(&link->eh_context, 0, sizeof(link->eh_context));
449 link->eh_context.i = link->eh_info; 479 link->eh_context.i = link->eh_info;
450 memset(&link->eh_info, 0, sizeof(link->eh_info)); 480 memset(&link->eh_info, 0, sizeof(link->eh_info));
481
482 ata_link_for_each_dev(dev, link) {
483 int devno = dev->devno;
484
485 ehc->saved_xfer_mode[devno] = dev->xfer_mode;
486 if (ata_ncq_enabled(dev))
487 ehc->saved_ncq_enabled |= 1 << devno;
488 }
451 } 489 }
452 490
453 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 491 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -1260,10 +1298,10 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
1260 1298
1261 /* is it pointless to prefer PIO for "safety reasons"? */ 1299 /* is it pointless to prefer PIO for "safety reasons"? */
1262 if (ap->flags & ATA_FLAG_PIO_DMA) { 1300 if (ap->flags & ATA_FLAG_PIO_DMA) {
1263 tf.protocol = ATA_PROT_ATAPI_DMA; 1301 tf.protocol = ATAPI_PROT_DMA;
1264 tf.feature |= ATAPI_PKT_DMA; 1302 tf.feature |= ATAPI_PKT_DMA;
1265 } else { 1303 } else {
1266 tf.protocol = ATA_PROT_ATAPI; 1304 tf.protocol = ATAPI_PROT_PIO;
1267 tf.lbam = SCSI_SENSE_BUFFERSIZE; 1305 tf.lbam = SCSI_SENSE_BUFFERSIZE;
1268 tf.lbah = 0; 1306 tf.lbah = 0;
1269 } 1307 }
@@ -1451,20 +1489,29 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1451 return action; 1489 return action;
1452} 1490}
1453 1491
1454static int ata_eh_categorize_error(int is_io, unsigned int err_mask) 1492static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1493 int *xfer_ok)
1455{ 1494{
1495 int base = 0;
1496
1497 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1498 *xfer_ok = 1;
1499
1500 if (!*xfer_ok)
1501 base = ATA_ECAT_DUBIOUS_NONE;
1502
1456 if (err_mask & AC_ERR_ATA_BUS) 1503 if (err_mask & AC_ERR_ATA_BUS)
1457 return 1; 1504 return base + ATA_ECAT_ATA_BUS;
1458 1505
1459 if (err_mask & AC_ERR_TIMEOUT) 1506 if (err_mask & AC_ERR_TIMEOUT)
1460 return 2; 1507 return base + ATA_ECAT_TOUT_HSM;
1461 1508
1462 if (is_io) { 1509 if (eflags & ATA_EFLAG_IS_IO) {
1463 if (err_mask & AC_ERR_HSM) 1510 if (err_mask & AC_ERR_HSM)
1464 return 2; 1511 return base + ATA_ECAT_TOUT_HSM;
1465 if ((err_mask & 1512 if ((err_mask &
1466 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 1513 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1467 return 3; 1514 return base + ATA_ECAT_UNK_DEV;
1468 } 1515 }
1469 1516
1470 return 0; 1517 return 0;
@@ -1472,18 +1519,22 @@ static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
1472 1519
1473struct speed_down_verdict_arg { 1520struct speed_down_verdict_arg {
1474 u64 since; 1521 u64 since;
1475 int nr_errors[4]; 1522 int xfer_ok;
1523 int nr_errors[ATA_ECAT_NR];
1476}; 1524};
1477 1525
1478static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1526static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1479{ 1527{
1480 struct speed_down_verdict_arg *arg = void_arg; 1528 struct speed_down_verdict_arg *arg = void_arg;
1481 int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask); 1529 int cat;
1482 1530
1483 if (ent->timestamp < arg->since) 1531 if (ent->timestamp < arg->since)
1484 return -1; 1532 return -1;
1485 1533
1534 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1535 &arg->xfer_ok);
1486 arg->nr_errors[cat]++; 1536 arg->nr_errors[cat]++;
1537
1487 return 0; 1538 return 0;
1488} 1539}
1489 1540
@@ -1495,22 +1546,48 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1495 * whether NCQ needs to be turned off, transfer speed should be 1546 * whether NCQ needs to be turned off, transfer speed should be
1496 * stepped down, or falling back to PIO is necessary. 1547 * stepped down, or falling back to PIO is necessary.
1497 * 1548 *
1498 * Cat-1 is ATA_BUS error for any command. 1549 * ECAT_ATA_BUS : ATA_BUS error for any command
1550 *
1551 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
1552 * IO commands
1553 *
1554 * ECAT_UNK_DEV : Unknown DEV error for IO commands
1555 *
1556 * ECAT_DUBIOUS_* : Identical to the above three, but occurred while
1557 * the data transfer hasn't been verified.
1558 *
1559 * Verdicts are
1560 *
1561 * NCQ_OFF : Turn off NCQ.
1562 *
1563 * SPEED_DOWN : Speed down transfer speed but don't fall back
1564 * to PIO.
1565 *
1566 * FALLBACK_TO_PIO : Fall back to PIO.
1567 *
1568 * Even if multiple verdicts are returned, only one action is
1569 * taken per error. An action triggered by non-DUBIOUS errors
1570 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1571 * This is to expedite speed down decisions right after device is
1572 * initially configured.
1499 * 1573 *
1500 * Cat-2 is TIMEOUT for any command or HSM violation for known 1574 * The following are speed down rules. #1 and #2 deal with
1501 * supported commands. 1575 * DUBIOUS errors.
1502 * 1576 *
1503 * Cat-3 is is unclassified DEV error for known supported 1577 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1504 * command. 1578 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1505 * 1579 *
1506 * NCQ needs to be turned off if there have been more than 3 1580 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1507 * Cat-2 + Cat-3 errors during last 10 minutes. 1581 * occurred during last 5 mins, NCQ_OFF.
1508 * 1582 *
1509 * Speed down is necessary if there have been more than 3 Cat-1 + 1583 * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
1510 * Cat-2 errors or 10 Cat-3 errors during last 10 minutes. 1584 * occurred during last 5 mins, FALLBACK_TO_PIO.
1511 * 1585 *
1512 * Falling back to PIO mode is necessary if there have been more 1586 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1513 * than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes. 1587 * during last 10 mins, NCQ_OFF.
1588 *
1589 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1590 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1514 * 1591 *
1515 * LOCKING: 1592 * LOCKING:
1516 * Inherited from caller. 1593 * Inherited from caller.
@@ -1525,23 +1602,38 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1525 struct speed_down_verdict_arg arg; 1602 struct speed_down_verdict_arg arg;
1526 unsigned int verdict = 0; 1603 unsigned int verdict = 0;
1527 1604
1528 /* scan past 10 mins of error history */ 1605 /* scan past 5 mins of error history */
1529 memset(&arg, 0, sizeof(arg)); 1606 memset(&arg, 0, sizeof(arg));
1530 arg.since = j64 - min(j64, j10mins); 1607 arg.since = j64 - min(j64, j5mins);
1531 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1608 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1532 1609
1533 if (arg.nr_errors[2] + arg.nr_errors[3] > 3) 1610 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1534 verdict |= ATA_EH_SPDN_NCQ_OFF; 1611 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1535 if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10) 1612 verdict |= ATA_EH_SPDN_SPEED_DOWN |
1536 verdict |= ATA_EH_SPDN_SPEED_DOWN; 1613 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1537 1614
1538 /* scan past 3 mins of error history */ 1615 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1616 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1617 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1618
1619 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1620 arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1621 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1622 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1623
1624 /* scan past 10 mins of error history */
1539 memset(&arg, 0, sizeof(arg)); 1625 memset(&arg, 0, sizeof(arg));
1540 arg.since = j64 - min(j64, j5mins); 1626 arg.since = j64 - min(j64, j10mins);
1541 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1627 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1542 1628
1543 if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10) 1629 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1544 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 1630 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1631 verdict |= ATA_EH_SPDN_NCQ_OFF;
1632
1633 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1634 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1635 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1636 verdict |= ATA_EH_SPDN_SPEED_DOWN;
1545 1637
1546 return verdict; 1638 return verdict;
1547} 1639}
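
The five rules condense to threshold checks over two time windows. A runnable sketch of the decision table (the SPDN_* bit values here are stand-ins for the kernel's internal ATA_EH_SPDN_* flags, and the two count structs stand in for the two ata_ering_map() scans):

#include <stdio.h>

#define SPDN_NCQ_OFF            (1 << 0)
#define SPDN_SPEED_DOWN         (1 << 1)
#define SPDN_FALLBACK_TO_PIO    (1 << 2)
#define SPDN_KEEP_ERRORS        (1 << 3)

struct counts {
        int ata_bus, tout_hsm, unk_dev;
        int dub_ata_bus, dub_tout_hsm, dub_unk_dev;
};

/* c5 = error counts over the last 5 minutes, c10 = last 10 minutes */
static unsigned int verdict(struct counts c5, struct counts c10)
{
        unsigned int v = 0;

        if (c5.dub_ata_bus + c5.dub_tout_hsm > 1)               /* rule 1 */
                v |= SPDN_SPEED_DOWN | SPDN_FALLBACK_TO_PIO | SPDN_KEEP_ERRORS;
        if (c5.dub_tout_hsm + c5.dub_unk_dev > 1)               /* rule 2 */
                v |= SPDN_NCQ_OFF | SPDN_KEEP_ERRORS;
        if (c5.ata_bus + c5.tout_hsm + c5.unk_dev > 6)          /* rule 3 */
                v |= SPDN_FALLBACK_TO_PIO;
        if (c10.tout_hsm + c10.unk_dev > 3)                     /* rule 4 */
                v |= SPDN_NCQ_OFF;
        if (c10.ata_bus + c10.tout_hsm > 3 || c10.unk_dev > 6)  /* rule 5 */
                v |= SPDN_SPEED_DOWN;
        return v;
}

int main(void)
{
        struct counts five = { .unk_dev = 2 }, ten = { .unk_dev = 4 };

        printf("verdict = 0x%x\n", verdict(five, ten)); /* 0x1: NCQ_OFF, rule 4 */
        return 0;
}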
@@ -1549,7 +1641,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1549/** 1641/**
1550 * ata_eh_speed_down - record error and speed down if necessary 1642 * ata_eh_speed_down - record error and speed down if necessary
1551 * @dev: Failed device 1643 * @dev: Failed device
1552 * @is_io: Did the device fail during normal IO? 1644 * @eflags: mask of ATA_EFLAG_* flags
1553 * @err_mask: err_mask of the error 1645 * @err_mask: err_mask of the error
1554 * 1646 *
1555 * Record error and examine error history to determine whether 1647 * Record error and examine error history to determine whether
@@ -1563,18 +1655,20 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1563 * RETURNS: 1655 * RETURNS:
1564 * Determined recovery action. 1656 * Determined recovery action.
1565 */ 1657 */
1566static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io, 1658static unsigned int ata_eh_speed_down(struct ata_device *dev,
1567 unsigned int err_mask) 1659 unsigned int eflags, unsigned int err_mask)
1568{ 1660{
1661 struct ata_link *link = dev->link;
1662 int xfer_ok = 0;
1569 unsigned int verdict; 1663 unsigned int verdict;
1570 unsigned int action = 0; 1664 unsigned int action = 0;
1571 1665
1572 /* don't bother if Cat-0 error */ 1666 /* don't bother if Cat-0 error */
1573 if (ata_eh_categorize_error(is_io, err_mask) == 0) 1667 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1574 return 0; 1668 return 0;
1575 1669
1576 /* record error and determine whether speed down is necessary */ 1670 /* record error and determine whether speed down is necessary */
1577 ata_ering_record(&dev->ering, is_io, err_mask); 1671 ata_ering_record(&dev->ering, eflags, err_mask);
1578 verdict = ata_eh_speed_down_verdict(dev); 1672 verdict = ata_eh_speed_down_verdict(dev);
1579 1673
1580 /* turn off NCQ? */ 1674 /* turn off NCQ? */
@@ -1590,7 +1684,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1590 /* speed down? */ 1684 /* speed down? */
1591 if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1685 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1592 /* speed down SATA link speed if possible */ 1686 /* speed down SATA link speed if possible */
1593 if (sata_down_spd_limit(dev->link) == 0) { 1687 if (sata_down_spd_limit(link) == 0) {
1594 action |= ATA_EH_HARDRESET; 1688 action |= ATA_EH_HARDRESET;
1595 goto done; 1689 goto done;
1596 } 1690 }
@@ -1618,10 +1712,10 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1618 } 1712 }
1619 1713
1620 /* Fall back to PIO? Slowing down to PIO is meaningless for 1714 /* Fall back to PIO? Slowing down to PIO is meaningless for
1621 * SATA. Consider it only for PATA. 1715 * SATA ATA devices. Consider it only for PATA and SATAPI.
1622 */ 1716 */
1623 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1717 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1624 (dev->link->ap->cbl != ATA_CBL_SATA) && 1718 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
1625 (dev->xfer_shift != ATA_SHIFT_PIO)) { 1719 (dev->xfer_shift != ATA_SHIFT_PIO)) {
1626 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 1720 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1627 dev->spdn_cnt = 0; 1721 dev->spdn_cnt = 0;
@@ -1633,7 +1727,8 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1633 return 0; 1727 return 0;
1634 done: 1728 done:
1635 /* device has been slowed down, blow error history */ 1729 /* device has been slowed down, blow error history */
1636 ata_ering_clear(&dev->ering); 1730 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1731 ata_ering_clear(&dev->ering);
1637 return action; 1732 return action;
1638} 1733}
1639 1734
@@ -1653,8 +1748,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1653 struct ata_port *ap = link->ap; 1748 struct ata_port *ap = link->ap;
1654 struct ata_eh_context *ehc = &link->eh_context; 1749 struct ata_eh_context *ehc = &link->eh_context;
1655 struct ata_device *dev; 1750 struct ata_device *dev;
1656 unsigned int all_err_mask = 0; 1751 unsigned int all_err_mask = 0, eflags = 0;
1657 int tag, is_io = 0; 1752 int tag;
1658 u32 serror; 1753 u32 serror;
1659 int rc; 1754 int rc;
1660 1755
@@ -1713,15 +1808,15 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1713 ehc->i.dev = qc->dev; 1808 ehc->i.dev = qc->dev;
1714 all_err_mask |= qc->err_mask; 1809 all_err_mask |= qc->err_mask;
1715 if (qc->flags & ATA_QCFLAG_IO) 1810 if (qc->flags & ATA_QCFLAG_IO)
1716 is_io = 1; 1811 eflags |= ATA_EFLAG_IS_IO;
1717 } 1812 }
1718 1813
1719 /* enforce default EH actions */ 1814 /* enforce default EH actions */
1720 if (ap->pflags & ATA_PFLAG_FROZEN || 1815 if (ap->pflags & ATA_PFLAG_FROZEN ||
1721 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1816 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1722 ehc->i.action |= ATA_EH_SOFTRESET; 1817 ehc->i.action |= ATA_EH_SOFTRESET;
1723 else if ((is_io && all_err_mask) || 1818 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
1724 (!is_io && (all_err_mask & ~AC_ERR_DEV))) 1819 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
1725 ehc->i.action |= ATA_EH_REVALIDATE; 1820 ehc->i.action |= ATA_EH_REVALIDATE;
1726 1821
1727 /* If we have offending qcs and the associated failed device, 1822 /* If we have offending qcs and the associated failed device,
@@ -1743,8 +1838,11 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1743 ata_dev_enabled(link->device)))) 1838 ata_dev_enabled(link->device))))
1744 dev = link->device; 1839 dev = link->device;
1745 1840
1746 if (dev) 1841 if (dev) {
1747 ehc->i.action |= ata_eh_speed_down(dev, is_io, all_err_mask); 1842 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
1843 eflags |= ATA_EFLAG_DUBIOUS_XFER;
1844 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
1845 }
1748 1846
1749 DPRINTK("EXIT\n"); 1847 DPRINTK("EXIT\n");
1750} 1848}
@@ -1880,8 +1978,8 @@ static void ata_eh_link_report(struct ata_link *link)
1880 [ATA_PROT_PIO] = "pio", 1978 [ATA_PROT_PIO] = "pio",
1881 [ATA_PROT_DMA] = "dma", 1979 [ATA_PROT_DMA] = "dma",
1882 [ATA_PROT_NCQ] = "ncq", 1980 [ATA_PROT_NCQ] = "ncq",
1883 [ATA_PROT_ATAPI] = "pio", 1981 [ATAPI_PROT_PIO] = "pio",
1884 [ATA_PROT_ATAPI_DMA] = "dma", 1982 [ATAPI_PROT_DMA] = "dma",
1885 }; 1983 };
1886 1984
1887 snprintf(data_buf, sizeof(data_buf), " %s %u %s", 1985 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
@@ -1889,7 +1987,7 @@ static void ata_eh_link_report(struct ata_link *link)
1889 dma_str[qc->dma_dir]); 1987 dma_str[qc->dma_dir]);
1890 } 1988 }
1891 1989
1892 if (is_atapi_taskfile(&qc->tf)) 1990 if (ata_is_atapi(qc->tf.protocol))
1893 snprintf(cdb_buf, sizeof(cdb_buf), 1991 snprintf(cdb_buf, sizeof(cdb_buf),
1894 "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 1992 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
1895 "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 1993 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
@@ -2329,6 +2427,58 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2329 return rc; 2427 return rc;
2330} 2428}
2331 2429
2430/**
2431 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2432 * @link: link on which timings will be programmed
2434 * @r_failed_dev: out parameter for failed device
2434 *
2435 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2436 * ata_set_mode() fails, pointer to the failing device is
2437 * returned in @r_failed_dev.
2438 *
2439 * LOCKING:
2440 * PCI/etc. bus probe sem.
2441 *
2442 * RETURNS:
2443 * 0 on success, negative errno otherwise
2444 */
2445int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2446{
2447 struct ata_port *ap = link->ap;
2448 struct ata_device *dev;
2449 int rc;
2450
2451 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
2452 ata_link_for_each_dev(dev, link) {
2453 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
2454 struct ata_ering_entry *ent;
2455
2456 ent = ata_ering_top(&dev->ering);
2457 if (ent)
2458 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
2459 }
2460 }
2461
2462 /* has private set_mode? */
2463 if (ap->ops->set_mode)
2464 rc = ap->ops->set_mode(link, r_failed_dev);
2465 else
2466 rc = ata_do_set_mode(link, r_failed_dev);
2467
2468 /* if transfer mode has changed, set DUBIOUS_XFER on device */
2469 ata_link_for_each_dev(dev, link) {
2470 struct ata_eh_context *ehc = &link->eh_context;
2471 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
2472 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
2473
2474 if (dev->xfer_mode != saved_xfer_mode ||
2475 ata_ncq_enabled(dev) != saved_ncq)
2476 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
2477 }
2478
2479 return rc;
2480}
2481
2332static int ata_link_nr_enabled(struct ata_link *link) 2482static int ata_link_nr_enabled(struct ata_link *link)
2333{ 2483{
2334 struct ata_device *dev; 2484 struct ata_device *dev;
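
A toy model of the DUBIOUS_XFER lifecycle implemented above (simplified; the real flags are ATA_DFLAG_DUBIOUS_XFER on the device and ATA_EFLAG_DUBIOUS_XFER on ring entries): the flag is raised whenever the programmed transfer mode or NCQ state changes, copied into any error recorded meanwhile, and lifted from the newest ring entry once a transfer verifies.

#include <stdio.h>

#define DUBIOUS_XFER    (1 << 1)        /* stand-in flag value */

struct ering_ent { unsigned int eflags; };

int main(void)
{
        unsigned int dev_flags = 0;
        struct ering_ent top = { 0 };

        dev_flags |= DUBIOUS_XFER;      /* transfer mode just changed */
        top.eflags = dev_flags;         /* error recorded while still dubious */

        dev_flags &= ~DUBIOUS_XFER;     /* a transfer completed cleanly... */
        top.eflags &= ~DUBIOUS_XFER;    /* ...so the top entry is cleared too */

        printf("top still dubious? %d\n", !!(top.eflags & DUBIOUS_XFER));
        return 0;
}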
@@ -2375,6 +2525,24 @@ static int ata_eh_skip_recovery(struct ata_link *link)
2375 return 1; 2525 return 1;
2376} 2526}
2377 2527
2528static int ata_eh_schedule_probe(struct ata_device *dev)
2529{
2530 struct ata_eh_context *ehc = &dev->link->eh_context;
2531
2532 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
2533 (ehc->did_probe_mask & (1 << dev->devno)))
2534 return 0;
2535
2536 ata_eh_detach_dev(dev);
2537 ata_dev_init(dev);
2538 ehc->did_probe_mask |= (1 << dev->devno);
2539 ehc->i.action |= ATA_EH_SOFTRESET;
2540 ehc->saved_xfer_mode[dev->devno] = 0;
2541 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
2542
2543 return 1;
2544}
2545
2378static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 2546static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2379{ 2547{
2380 struct ata_eh_context *ehc = &dev->link->eh_context; 2548 struct ata_eh_context *ehc = &dev->link->eh_context;
@@ -2406,16 +2574,9 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2406 if (ata_link_offline(dev->link)) 2574 if (ata_link_offline(dev->link))
2407 ata_eh_detach_dev(dev); 2575 ata_eh_detach_dev(dev);
2408 2576
2409 /* probe if requested */ 2577 /* schedule probe if necessary */
2410 if ((ehc->i.probe_mask & (1 << dev->devno)) && 2578 if (ata_eh_schedule_probe(dev))
2411 !(ehc->did_probe_mask & (1 << dev->devno))) {
2412 ata_eh_detach_dev(dev);
2413 ata_dev_init(dev);
2414
2415 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2579 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2416 ehc->did_probe_mask |= (1 << dev->devno);
2417 ehc->i.action |= ATA_EH_SOFTRESET;
2418 }
2419 2580
2420 return 1; 2581 return 1;
2421 } else { 2582 } else {
@@ -2492,14 +2653,9 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2492 if (dev->flags & ATA_DFLAG_DETACH) 2653 if (dev->flags & ATA_DFLAG_DETACH)
2493 ata_eh_detach_dev(dev); 2654 ata_eh_detach_dev(dev);
2494 2655
2495 if (!ata_dev_enabled(dev) && 2656 /* schedule probe if necessary */
2496 ((ehc->i.probe_mask & (1 << dev->devno)) && 2657 if (!ata_dev_enabled(dev))
2497 !(ehc->did_probe_mask & (1 << dev->devno)))) { 2658 ata_eh_schedule_probe(dev);
2498 ata_eh_detach_dev(dev);
2499 ata_dev_init(dev);
2500 ehc->did_probe_mask |= (1 << dev->devno);
2501 ehc->i.action |= ATA_EH_SOFTRESET;
2502 }
2503 } 2659 }
2504 } 2660 }
2505 2661
@@ -2747,6 +2903,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
2747 if (ap->ops->port_suspend) 2903 if (ap->ops->port_suspend)
2748 rc = ap->ops->port_suspend(ap, ap->pm_mesg); 2904 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2749 2905
2906 ata_acpi_set_state(ap, PMSG_SUSPEND);
2750 out: 2907 out:
2751 /* report result */ 2908 /* report result */
2752 spin_lock_irqsave(ap->lock, flags); 2909 spin_lock_irqsave(ap->lock, flags);
@@ -2792,6 +2949,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
2792 2949
2793 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 2950 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
2794 2951
2952 ata_acpi_set_state(ap, PMSG_ON);
2953
2795 if (ap->ops->port_resume) 2954 if (ap->ops->port_resume)
2796 rc = ap->ops->port_resume(ap); 2955 rc = ap->ops->port_resume(ap);
2797 2956
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 14daf4848f09..c02c490122dc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -517,7 +517,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
517 qc->scsicmd = cmd; 517 qc->scsicmd = cmd;
518 qc->scsidone = done; 518 qc->scsidone = done;
519 519
520 qc->__sg = scsi_sglist(cmd); 520 qc->sg = scsi_sglist(cmd);
521 qc->n_elem = scsi_sg_count(cmd); 521 qc->n_elem = scsi_sg_count(cmd);
522 } else { 522 } else {
523 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); 523 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
@@ -839,7 +839,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
839 if (dev->class == ATA_DEV_ATAPI) { 839 if (dev->class == ATA_DEV_ATAPI) {
840 struct request_queue *q = sdev->request_queue; 840 struct request_queue *q = sdev->request_queue;
841 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 841 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
842 } 842
843 /* set the min alignment */
844 blk_queue_update_dma_alignment(sdev->request_queue,
845 ATA_DMA_PAD_SZ - 1);
846 } else
847 /* ATA devices must be sector aligned */
848 blk_queue_update_dma_alignment(sdev->request_queue,
849 ATA_SECT_SIZE - 1);
843 850
844 if (dev->class == ATA_DEV_ATA) 851 if (dev->class == ATA_DEV_ATA)
845 sdev->manage_start_stop = 1; 852 sdev->manage_start_stop = 1;
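
blk_queue_update_dma_alignment() takes a mask of the required alignment minus one, so the two calls above request dword alignment for ATAPI buffers and full sector alignment for ATA. A quick check (ATA_DMA_PAD_SZ and ATA_SECT_SIZE are assumed to be 4 and 512, their usual libata values):

#include <stdio.h>

#define ATA_DMA_PAD_SZ  4       /* assumed, as in include/linux/libata.h */
#define ATA_SECT_SIZE   512     /* assumed, as in include/linux/ata.h */

int main(void)
{
        /* masks as passed to blk_queue_update_dma_alignment() */
        printf("ATAPI mask: 0x%x\n", ATA_DMA_PAD_SZ - 1);       /* 0x3 */
        printf("ATA mask:   0x%x\n", ATA_SECT_SIZE - 1);        /* 0x1ff */
        return 0;
}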
@@ -878,7 +885,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
878 if (dev) 885 if (dev)
879 ata_scsi_dev_config(sdev, dev); 886 ata_scsi_dev_config(sdev, dev);
880 887
881 return 0; /* scsi layer doesn't check return value, sigh */ 888 return 0;
882} 889}
883 890
884/** 891/**
@@ -2210,7 +2217,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2210 2217
2211 /* sector size */ 2218 /* sector size */
2212 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); 2219 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
2213 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE); 2220 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff);
2214 } else { 2221 } else {
2215 /* sector count, 64-bit */ 2222 /* sector count, 64-bit */
2216 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); 2223 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
@@ -2224,7 +2231,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2224 2231
2225 /* sector size */ 2232 /* sector size */
2226 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); 2233 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
2227 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE); 2234 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff);
2228 } 2235 }
2229 2236
2230 return 0; 2237 return 0;
@@ -2331,7 +2338,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2331 DPRINTK("ATAPI request sense\n"); 2338 DPRINTK("ATAPI request sense\n");
2332 2339
2333 /* FIXME: is this needed? */ 2340 /* FIXME: is this needed? */
2334 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 2341 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2335 2342
2336 ap->ops->tf_read(ap, &qc->tf); 2343 ap->ops->tf_read(ap, &qc->tf);
2337 2344
@@ -2341,7 +2348,9 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2341 2348
2342 ata_qc_reinit(qc); 2349 ata_qc_reinit(qc);
2343 2350
2344 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2351 /* setup sg table and init transfer direction */
2352 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
2353 ata_sg_init(qc, &qc->sgent, 1);
2345 qc->dma_dir = DMA_FROM_DEVICE; 2354 qc->dma_dir = DMA_FROM_DEVICE;
2346 2355
2347 memset(&qc->cdb, 0, qc->dev->cdb_len); 2356 memset(&qc->cdb, 0, qc->dev->cdb_len);
@@ -2352,10 +2361,10 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2352 qc->tf.command = ATA_CMD_PACKET; 2361 qc->tf.command = ATA_CMD_PACKET;
2353 2362
2354 if (ata_pio_use_silly(ap)) { 2363 if (ata_pio_use_silly(ap)) {
2355 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2364 qc->tf.protocol = ATAPI_PROT_DMA;
2356 qc->tf.feature |= ATAPI_PKT_DMA; 2365 qc->tf.feature |= ATAPI_PKT_DMA;
2357 } else { 2366 } else {
2358 qc->tf.protocol = ATA_PROT_ATAPI; 2367 qc->tf.protocol = ATAPI_PROT_PIO;
2359 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; 2368 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
2360 qc->tf.lbah = 0; 2369 qc->tf.lbah = 0;
2361 } 2370 }
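
The sense-buffer setup now open-codes what ata_sg_init_one() used to do: build a one-entry scatterlist and hand it to the qc. A minimal sketch of the pattern, assuming the scatterlist API of this kernel generation and the libata internals already in scope here:

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static void setup_sense_sg(struct ata_queued_cmd *qc, struct scsi_cmnd *cmd)
{
        /* one DMA-able element covering the whole sense buffer */
        sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
        ata_sg_init(qc, &qc->sgent, 1);
        qc->dma_dir = DMA_FROM_DEVICE;  /* device to host */
}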
@@ -2526,12 +2535,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2526 if (using_pio || nodata) { 2535 if (using_pio || nodata) {
2527 /* no data, or PIO data xfer */ 2536 /* no data, or PIO data xfer */
2528 if (nodata) 2537 if (nodata)
2529 qc->tf.protocol = ATA_PROT_ATAPI_NODATA; 2538 qc->tf.protocol = ATAPI_PROT_NODATA;
2530 else 2539 else
2531 qc->tf.protocol = ATA_PROT_ATAPI; 2540 qc->tf.protocol = ATAPI_PROT_PIO;
2532 } else { 2541 } else {
2533 /* DMA data xfer */ 2542 /* DMA data xfer */
2534 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2543 qc->tf.protocol = ATAPI_PROT_DMA;
2535 qc->tf.feature |= ATAPI_PKT_DMA; 2544 qc->tf.feature |= ATAPI_PKT_DMA;
2536 2545
2537 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE)) 2546 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE))
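
The protocol choice above reduces to a two-input decision. Restated stand-alone (stand-in enum; the real ATAPI_PROT_* constants live in include/linux/ata.h):

enum atapi_prot { PROT_NODATA, PROT_PIO, PROT_DMA };

static enum atapi_prot pick_prot(int using_pio, int nodata)
{
        if (nodata)
                return PROT_NODATA;     /* no data phase at all */
        return using_pio ? PROT_PIO : PROT_DMA;
}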
@@ -2690,6 +2699,24 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2690 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) 2699 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
2691 goto invalid_fld; 2700 goto invalid_fld;
2692 2701
2702 /*
2703 * Filter TPM commands by default. These provide an
2704 * essentially uncontrolled encrypted "back door" between
2705 * applications and the disk. Set libata.allow_tpm=1 if you
2706 * have a real reason for wanting to use them. This ensures
2707 * that installed software cannot easily mess stuff up without
2708 * user intent. DVR type users will probably ship with this enabled
2709 * for movie content management.
2710 *
2711 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
2712 * for this and should do so in future, but that is not sufficient as
2713 * DCS is an optional feature set. Thus we also do the software filter
2714 * so that we comply with the TC consortium stated goal that the user
2715 * can turn off TC features of their system.
2716 */
2717 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
2718 goto invalid_fld;
2719
2693 /* We may not issue DMA commands if no DMA mode is set */ 2720 /* We may not issue DMA commands if no DMA mode is set */
2694 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) 2721 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2695 goto invalid_fld; 2722 goto invalid_fld;
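
The 0x5C-0x5F window spans the four Trusted Computing opcodes defined by ATA8-ACS (names as used in include/linux/ata.h; listed here for reference):

enum {
        ATA_CMD_TRUSTED_RCV     = 0x5C,
        ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
        ATA_CMD_TRUSTED_SND     = 0x5E,
        ATA_CMD_TRUSTED_SND_DMA = 0x5F,
};

Booting with libata.allow_tpm=1 (or allow_tpm=1 as a module parameter) lifts the filter, as the comment above notes.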
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b7ac80b4b1fb..60cd4b179766 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -147,7 +147,9 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
147 * @tf: ATA taskfile register set for storing input 147 * @tf: ATA taskfile register set for storing input
148 * 148 *
149 * Reads ATA taskfile registers for currently-selected device 149 * Reads ATA taskfile registers for currently-selected device
150 * into @tf. 150 * into @tf. Assumes the device has a fully SFF compliant task file
151 * layout and behaviour. If you device does not (eg has a different
152 * status method) then you will need to provide a replacement tf_read
151 * 153 *
152 * LOCKING: 154 * LOCKING:
153 * Inherited from caller. 155 * Inherited from caller.
@@ -156,7 +158,7 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
156{ 158{
157 struct ata_ioports *ioaddr = &ap->ioaddr; 159 struct ata_ioports *ioaddr = &ap->ioaddr;
158 160
159 tf->command = ata_chk_status(ap); 161 tf->command = ata_check_status(ap);
160 tf->feature = ioread8(ioaddr->error_addr); 162 tf->feature = ioread8(ioaddr->error_addr);
161 tf->nsect = ioread8(ioaddr->nsect_addr); 163 tf->nsect = ioread8(ioaddr->nsect_addr);
162 tf->lbal = ioread8(ioaddr->lbal_addr); 164 tf->lbal = ioread8(ioaddr->lbal_addr);
@@ -415,7 +417,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
415 ap->hsm_task_state = HSM_ST_IDLE; 417 ap->hsm_task_state = HSM_ST_IDLE;
416 418
417 if (qc && (qc->tf.protocol == ATA_PROT_DMA || 419 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
418 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) { 420 qc->tf.protocol == ATAPI_PROT_DMA)) {
419 u8 host_stat; 421 u8 host_stat;
420 422
421 host_stat = ap->ops->bmdma_status(ap); 423 host_stat = ap->ops->bmdma_status(ap);
@@ -549,7 +551,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
549 return rc; 551 return rc;
550 552
551 /* request and iomap DMA region */ 553 /* request and iomap DMA region */
552 rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME); 554 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
553 if (rc) { 555 if (rc) {
554 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n"); 556 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
555 return -ENOMEM; 557 return -ENOMEM;
@@ -619,7 +621,8 @@ int ata_pci_init_sff_host(struct ata_host *host)
619 continue; 621 continue;
620 } 622 }
621 623
622 rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME); 624 rc = pcim_iomap_regions(pdev, 0x3 << base,
625 dev_driver_string(gdev));
623 if (rc) { 626 if (rc) {
624 dev_printk(KERN_WARNING, gdev, 627 dev_printk(KERN_WARNING, gdev,
625 "failed to request/iomap BARs for port %d " 628 "failed to request/iomap BARs for port %d "
@@ -711,6 +714,99 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev,
711} 714}
712 715
713/** 716/**
717 * ata_pci_activate_sff_host - start SFF host, request IRQ and register it
718 * @host: target SFF ATA host
719 * @irq_handler: irq_handler used when requesting IRQ(s)
720 * @sht: scsi_host_template to use when registering the host
721 *
722 * This is the counterpart of ata_host_activate() for SFF ATA
723 * hosts. This separate helper is necessary because SFF hosts
724 * use two separate interrupts in legacy mode.
725 *
726 * LOCKING:
727 * Inherited from calling layer (may sleep).
728 *
729 * RETURNS:
730 * 0 on success, -errno otherwise.
731 */
732int ata_pci_activate_sff_host(struct ata_host *host,
733 irq_handler_t irq_handler,
734 struct scsi_host_template *sht)
735{
736 struct device *dev = host->dev;
737 struct pci_dev *pdev = to_pci_dev(dev);
738 const char *drv_name = dev_driver_string(host->dev);
739 int legacy_mode = 0, rc;
740
741 rc = ata_host_start(host);
742 if (rc)
743 return rc;
744
745 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
746 u8 tmp8, mask;
747
748 /* TODO: What if one channel is in native mode ... */
749 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
750 mask = (1 << 2) | (1 << 0);
751 if ((tmp8 & mask) != mask)
752 legacy_mode = 1;
753#if defined(CONFIG_NO_ATA_LEGACY)
754 /* Some platforms with PCI limits cannot address compat
755 port space. In that case we punt if their firmware has
756 left a device in compatibility mode */
757 if (legacy_mode) {
758 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
759 return -EOPNOTSUPP;
760 }
761#endif
762 }
763
764 if (!devres_open_group(dev, NULL, GFP_KERNEL))
765 return -ENOMEM;
766
767 if (!legacy_mode && pdev->irq) {
768 rc = devm_request_irq(dev, pdev->irq, irq_handler,
769 IRQF_SHARED, drv_name, host);
770 if (rc)
771 goto out;
772
773 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
774 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
775 } else if (legacy_mode) {
776 if (!ata_port_is_dummy(host->ports[0])) {
777 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
778 irq_handler, IRQF_SHARED,
779 drv_name, host);
780 if (rc)
781 goto out;
782
783 ata_port_desc(host->ports[0], "irq %d",
784 ATA_PRIMARY_IRQ(pdev));
785 }
786
787 if (!ata_port_is_dummy(host->ports[1])) {
788 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
789 irq_handler, IRQF_SHARED,
790 drv_name, host);
791 if (rc)
792 goto out;
793
794 ata_port_desc(host->ports[1], "irq %d",
795 ATA_SECONDARY_IRQ(pdev));
796 }
797 }
798
799 rc = ata_host_register(host, sht);
800 out:
801 if (rc == 0)
802 devres_remove_group(dev, NULL);
803 else
804 devres_release_group(dev, NULL);
805
806 return rc;
807}
808
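
A hypothetical driver probe showing how the prepare/activate pair is meant to be used (my_init_one, my_ppi, my_sht and my_irq_handler are placeholders, not libata symbols); this is exactly the shape ata_pci_init_one() below collapses to:

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ata_host *host;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* allocate ports, map BARs, set up ioaddr */
        rc = ata_pci_prepare_sff_host(pdev, my_ppi, &host);
        if (rc)
                return rc;

        pci_set_master(pdev);

        /* start, wire IRQs (native, or the two legacy ones), register */
        return ata_pci_activate_sff_host(host, my_irq_handler, &my_sht);
}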
809/**
714 * ata_pci_init_one - Initialize/register PCI IDE host controller 810 * ata_pci_init_one - Initialize/register PCI IDE host controller
715 * @pdev: Controller to be initialized 811 * @pdev: Controller to be initialized
716 * @ppi: array of port_info, must be enough for two ports 812 * @ppi: array of port_info, must be enough for two ports
@@ -739,8 +835,6 @@ int ata_pci_init_one(struct pci_dev *pdev,
739 struct device *dev = &pdev->dev; 835 struct device *dev = &pdev->dev;
740 const struct ata_port_info *pi = NULL; 836 const struct ata_port_info *pi = NULL;
741 struct ata_host *host = NULL; 837 struct ata_host *host = NULL;
742 u8 mask;
743 int legacy_mode = 0;
744 int i, rc; 838 int i, rc;
745 839
746 DPRINTK("ENTER\n"); 840 DPRINTK("ENTER\n");
@@ -762,95 +856,24 @@ int ata_pci_init_one(struct pci_dev *pdev,
762 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 856 if (!devres_open_group(dev, NULL, GFP_KERNEL))
763 return -ENOMEM; 857 return -ENOMEM;
764 858
765 /* FIXME: Really for ATA it isn't safe because the device may be
766 multi-purpose and we want to leave it alone if it was already
767 enabled. Secondly for shared use as Arjan says we want refcounting
768
769 Checking dev->is_enabled is insufficient as this is not set at
770 boot for the primary video which is BIOS enabled
771 */
772
773 rc = pcim_enable_device(pdev); 859 rc = pcim_enable_device(pdev);
774 if (rc) 860 if (rc)
775 goto err_out; 861 goto out;
776 862
777 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 863 /* prepare and activate SFF host */
778 u8 tmp8;
779
780 /* TODO: What if one channel is in native mode ... */
781 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
782 mask = (1 << 2) | (1 << 0);
783 if ((tmp8 & mask) != mask)
784 legacy_mode = 1;
785#if defined(CONFIG_NO_ATA_LEGACY)
786 /* Some platforms with PCI limits cannot address compat
787 port space. In that case we punt if their firmware has
788 left a device in compatibility mode */
789 if (legacy_mode) {
790 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
791 rc = -EOPNOTSUPP;
792 goto err_out;
793 }
794#endif
795 }
796
797 /* prepare host */
798 rc = ata_pci_prepare_sff_host(pdev, ppi, &host); 864 rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
799 if (rc) 865 if (rc)
800 goto err_out; 866 goto out;
801 867
802 pci_set_master(pdev); 868 pci_set_master(pdev);
869 rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
870 pi->sht);
871 out:
872 if (rc == 0)
873 devres_remove_group(&pdev->dev, NULL);
874 else
875 devres_release_group(&pdev->dev, NULL);
803 876
804 /* start host and request IRQ */
805 rc = ata_host_start(host);
806 if (rc)
807 goto err_out;
808
809 if (!legacy_mode && pdev->irq) {
810 /* We may have no IRQ assigned in which case we can poll. This
811 shouldn't happen on a sane system but robustness is cheap
812 in this case */
813 rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
814 IRQF_SHARED, DRV_NAME, host);
815 if (rc)
816 goto err_out;
817
818 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
819 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
820 } else if (legacy_mode) {
821 if (!ata_port_is_dummy(host->ports[0])) {
822 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
823 pi->port_ops->irq_handler,
824 IRQF_SHARED, DRV_NAME, host);
825 if (rc)
826 goto err_out;
827
828 ata_port_desc(host->ports[0], "irq %d",
829 ATA_PRIMARY_IRQ(pdev));
830 }
831
832 if (!ata_port_is_dummy(host->ports[1])) {
833 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
834 pi->port_ops->irq_handler,
835 IRQF_SHARED, DRV_NAME, host);
836 if (rc)
837 goto err_out;
838
839 ata_port_desc(host->ports[1], "irq %d",
840 ATA_SECONDARY_IRQ(pdev));
841 }
842 }
843
844 /* register */
845 rc = ata_host_register(host, pi->sht);
846 if (rc)
847 goto err_out;
848
849 devres_remove_group(dev, NULL);
850 return 0;
851
852err_out:
853 devres_release_group(dev, NULL);
854 return rc; 877 return rc;
855} 878}
856 879
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index bbe59c2fd1e2..409ffb9af163 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -60,6 +60,7 @@ extern int atapi_dmadir;
60extern int atapi_passthru16; 60extern int atapi_passthru16;
61extern int libata_fua; 61extern int libata_fua;
62extern int libata_noacpi; 62extern int libata_noacpi;
63extern int libata_allow_tpm;
63extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); 64extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
64extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 65extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
65 u64 block, u32 n_block, unsigned int tf_flags, 66 u64 block, u32 n_block, unsigned int tf_flags,
@@ -85,7 +86,6 @@ extern int ata_dev_configure(struct ata_device *dev);
85extern int sata_down_spd_limit(struct ata_link *link); 86extern int sata_down_spd_limit(struct ata_link *link);
86extern int sata_set_spd_needed(struct ata_link *link); 87extern int sata_set_spd_needed(struct ata_link *link);
87extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); 88extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
88extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
89extern void ata_sg_clean(struct ata_queued_cmd *qc); 89extern void ata_sg_clean(struct ata_queued_cmd *qc);
90extern void ata_qc_free(struct ata_queued_cmd *qc); 90extern void ata_qc_free(struct ata_queued_cmd *qc);
91extern void ata_qc_issue(struct ata_queued_cmd *qc); 91extern void ata_qc_issue(struct ata_queued_cmd *qc);
@@ -113,6 +113,7 @@ extern int ata_acpi_on_suspend(struct ata_port *ap);
113extern void ata_acpi_on_resume(struct ata_port *ap); 113extern void ata_acpi_on_resume(struct ata_port *ap);
114extern int ata_acpi_on_devcfg(struct ata_device *dev); 114extern int ata_acpi_on_devcfg(struct ata_device *dev);
115extern void ata_acpi_on_disable(struct ata_device *dev); 115extern void ata_acpi_on_disable(struct ata_device *dev);
116extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
116#else 117#else
117static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { } 118static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
118static inline void ata_acpi_associate(struct ata_host *host) { } 119static inline void ata_acpi_associate(struct ata_host *host) { }
@@ -121,6 +122,8 @@ static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
121static inline void ata_acpi_on_resume(struct ata_port *ap) { } 122static inline void ata_acpi_on_resume(struct ata_port *ap) { }
122static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; } 123static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
123static inline void ata_acpi_on_disable(struct ata_device *dev) { } 124static inline void ata_acpi_on_disable(struct ata_device *dev) { }
125static inline void ata_acpi_set_state(struct ata_port *ap,
126 pm_message_t state) { }
124#endif 127#endif
125 128
126/* libata-scsi.c */ 129/* libata-scsi.c */
@@ -183,6 +186,7 @@ extern void ata_eh_report(struct ata_port *ap);
183extern int ata_eh_reset(struct ata_link *link, int classify, 186extern int ata_eh_reset(struct ata_link *link, int classify,
184 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 187 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
185 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset); 188 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
189extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
186extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 190extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
187 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 191 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
188 ata_postreset_fn_t postreset, 192 ata_postreset_fn_t postreset,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index e4542ab9c7f8..244098a80ce4 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -81,17 +81,6 @@ static void pacpi_error_handler(struct ata_port *ap)
81 NULL, ata_std_postreset); 81 NULL, ata_std_postreset);
82} 82}
83 83
84/* Welcome to ACPI, bring a bucket */
85static const unsigned int pio_cycle[7] = {
86 600, 383, 240, 180, 120, 100, 80
87};
88static const unsigned int mwdma_cycle[5] = {
89 480, 150, 120, 100, 80
90};
91static const unsigned int udma_cycle[7] = {
92 120, 80, 60, 45, 30, 20, 15
93};
94
95/** 84/**
96 * pacpi_discover_modes - filter non ACPI modes 85 * pacpi_discover_modes - filter non ACPI modes
97 * @adev: ATA device 86 * @adev: ATA device
@@ -103,56 +92,20 @@ static const unsigned int udma_cycle[7] = {
103 92
104static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev) 93static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
105{ 94{
106 int unit = adev->devno;
107 struct pata_acpi *acpi = ap->private_data; 95 struct pata_acpi *acpi = ap->private_data;
108 int i;
109 u32 t;
110 unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO);
111
112 struct ata_acpi_gtm probe; 96 struct ata_acpi_gtm probe;
97 unsigned int xfer_mask;
113 98
114 probe = acpi->gtm; 99 probe = acpi->gtm;
115 100
116 /* We always use the 0 slot for crap hardware */
117 if (!(probe.flags & 0x10))
118 unit = 0;
119
120 ata_acpi_gtm(ap, &probe); 101 ata_acpi_gtm(ap, &probe);
121 102
122 /* Start by scanning for PIO modes */ 103 xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
123 for (i = 0; i < 7; i++) {
124 t = probe.drive[unit].pio;
125 if (t <= pio_cycle[i]) {
126 mask |= (2 << (ATA_SHIFT_PIO + i)) - 1;
127 break;
128 }
129 }
130 104
131 /* See if we have MWDMA or UDMA data. We don't bother with MWDMA 105 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
132 if UDMA is availabe as this means the BIOS set UDMA and our
133 error changedown if it works is UDMA to PIO anyway */
134 if (probe.flags & (1 << (2 * unit))) {
135 /* MWDMA */
136 for (i = 0; i < 5; i++) {
137 t = probe.drive[unit].dma;
138 if (t <= mwdma_cycle[i]) {
139 mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1;
140 break;
141 }
142 }
143 } else {
144 /* UDMA */
145 for (i = 0; i < 7; i++) {
146 t = probe.drive[unit].dma;
147 if (t <= udma_cycle[i]) {
148 mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1;
149 break;
150 }
151 }
152 }
153 if (mask & (0xF8 << ATA_SHIFT_UDMA))
154 ap->cbl = ATA_CBL_PATA80; 106 ap->cbl = ATA_CBL_PATA80;
155 return mask; 107
108 return xfer_mask;
156} 109}
157 110
158/** 111/**
@@ -180,12 +133,14 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
180{ 133{
181 int unit = adev->devno; 134 int unit = adev->devno;
182 struct pata_acpi *acpi = ap->private_data; 135 struct pata_acpi *acpi = ap->private_data;
136 const struct ata_timing *t;
183 137
184 if (!(acpi->gtm.flags & 0x10)) 138 if (!(acpi->gtm.flags & 0x10))
185 unit = 0; 139 unit = 0;
186 140
187 /* Now stuff the nS values into the structure */ 141 /* Now stuff the nS values into the structure */
188 acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0]; 142 t = ata_timing_find_mode(adev->pio_mode);
143 acpi->gtm.drive[unit].pio = t->cycle;
189 ata_acpi_stm(ap, &acpi->gtm); 144 ata_acpi_stm(ap, &acpi->gtm);
190 /* See what mode we actually got */ 145 /* See what mode we actually got */
191 ata_acpi_gtm(ap, &acpi->gtm); 146 ata_acpi_gtm(ap, &acpi->gtm);
@@ -201,16 +156,18 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
201{ 156{
202 int unit = adev->devno; 157 int unit = adev->devno;
203 struct pata_acpi *acpi = ap->private_data; 158 struct pata_acpi *acpi = ap->private_data;
159 const struct ata_timing *t;
204 160
205 if (!(acpi->gtm.flags & 0x10)) 161 if (!(acpi->gtm.flags & 0x10))
206 unit = 0; 162 unit = 0;
207 163
208 /* Now stuff the nS values into the structure */ 164 /* Now stuff the nS values into the structure */
165 t = ata_timing_find_mode(adev->dma_mode);
209 if (adev->dma_mode >= XFER_UDMA_0) { 166 if (adev->dma_mode >= XFER_UDMA_0) {
210 acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0]; 167 acpi->gtm.drive[unit].dma = t->udma;
211 acpi->gtm.flags |= (1 << (2 * unit)); 168 acpi->gtm.flags |= (1 << (2 * unit));
212 } else { 169 } else {
213 acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0]; 170 acpi->gtm.drive[unit].dma = t->cycle;
214 acpi->gtm.flags &= ~(1 << (2 * unit)); 171 acpi->gtm.flags &= ~(1 << (2 * unit));
215 } 172 }
216 ata_acpi_stm(ap, &acpi->gtm); 173 ata_acpi_stm(ap, &acpi->gtm);
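
Both setters now pull their nanosecond values from the shared libata timing table instead of private arrays. A hypothetical helper showing the lookup (my_acpi_cycle() is illustrative only; ata_timing_find_mode() returns NULL for a mode it does not know):

static u32 my_acpi_cycle(struct ata_device *adev, u8 mode)
{
        const struct ata_timing *t = ata_timing_find_mode(mode);

        if (!t)
                return 0;
        /* UDMA modes carry their period in ->udma, the rest in ->cycle */
        return mode >= XFER_UDMA_0 ? t->udma : t->cycle;
}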
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 8caf9afc8b90..7e68edf3c0f3 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -64,7 +64,7 @@ static int ali_cable_override(struct pci_dev *pdev)
64 if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF) 64 if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
65 return 1; 65 return 1;
66 /* Mitac 8317 (Winbook-A) and relatives */ 66 /* Mitac 8317 (Winbook-A) and relatives */
67 if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317) 67 if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
68 return 1; 68 return 1;
69 /* Systems by DMI */ 69 /* Systems by DMI */
70 if (dmi_check_system(cable_dmi_table)) 70 if (dmi_check_system(cable_dmi_table))
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 3cc27b514654..761a66608d7b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -220,6 +220,62 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
220 timing_setup(ap, adev, 0x40, adev->dma_mode, 4); 220 timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
221} 221}
222 222
223/* Both host-side and drive-side detection results are worthless on NV
224 * PATAs. Ignore them and just follow what BIOS configured. Both the
225 * current configuration in PCI config reg and ACPI GTM result are
226 * cached during driver attach and are consulted to select transfer
227 * mode.
228 */
229static unsigned long nv_mode_filter(struct ata_device *dev,
230 unsigned long xfer_mask)
231{
232 static const unsigned int udma_mask_map[] =
233 { ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
234 ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
235 struct ata_port *ap = dev->link->ap;
236 char acpi_str[32] = "";
237 u32 saved_udma, udma;
238 const struct ata_acpi_gtm *gtm;
239 unsigned long bios_limit = 0, acpi_limit = 0, limit;
240
241 /* find out what BIOS configured */
242 udma = saved_udma = (unsigned long)ap->host->private_data;
243
244 if (ap->port_no == 0)
245 udma >>= 16;
246 if (dev->devno == 0)
247 udma >>= 8;
248
249 if ((udma & 0xc0) == 0xc0)
250 bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
251
252 /* consult ACPI GTM too */
253 gtm = ata_acpi_init_gtm(ap);
254 if (gtm) {
255 acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
256
257 snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
258 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
259 }
260
261 /* be optimistic, EH can take care of things if something goes wrong */
262 limit = bios_limit | acpi_limit;
263
264 /* If PIO or DMA isn't configured at all, don't limit. Let EH
265 * handle it.
266 */
267 if (!(limit & ATA_MASK_PIO))
268 limit |= ATA_MASK_PIO;
269 if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
270 limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
271
272 ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
273 "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
274 xfer_mask, limit, xfer_mask & limit, bios_limit,
275 saved_udma, acpi_limit, acpi_str);
276
277 return xfer_mask & limit;
278}
223 279
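
The cached PCI register 0x60 value holds one configuration byte per drive, and the shifts select it: port 0 occupies the high 16 bits, device 0 the high byte of its half, and bits 7:6 set means the BIOS enabled UDMA with the mode index in the low 3 bits. A runnable model of the decode:

#include <stdio.h>

static unsigned int drive_byte(unsigned int reg, int port_no, int devno)
{
        if (port_no == 0)
                reg >>= 16;
        if (devno == 0)
                reg >>= 8;
        return reg & 0xff;
}

int main(void)
{
        unsigned int reg = 0xC5C4C3C2;  /* made-up BIOS programming */

        printf("port0/dev0: 0x%02x\n", drive_byte(reg, 0, 0));  /* 0xc5 */
        printf("port1/dev1: 0x%02x\n", drive_byte(reg, 1, 1));  /* 0xc2 */
        return 0;
}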
224/** 280/**
225 * nv_probe_init - cable detection 281 * nv_probe_init - cable detection
@@ -252,31 +308,6 @@ static void nv_error_handler(struct ata_port *ap)
252 ata_std_postreset); 308 ata_std_postreset);
253} 309}
254 310
255static int nv_cable_detect(struct ata_port *ap)
256{
257 static const u8 bitmask[2] = {0x03, 0x0C};
258 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
259 u8 ata66;
260 u16 udma;
261 int cbl;
262
263 pci_read_config_byte(pdev, 0x52, &ata66);
264 if (ata66 & bitmask[ap->port_no])
265 cbl = ATA_CBL_PATA80;
266 else
267 cbl = ATA_CBL_PATA40;
268
269 /* We now have to double check because the Nvidia boxes BIOS
270 doesn't always set the cable bits but does set mode bits */
271 pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
272 if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
273 cbl = ATA_CBL_PATA80;
274 /* And a triple check across suspend/resume with ACPI around */
275 if (ata_acpi_cbl_80wire(ap))
276 cbl = ATA_CBL_PATA80;
277 return cbl;
278}
279
280/** 311/**
281 * nv100_set_piomode - set initial PIO mode data 312 * nv100_set_piomode - set initial PIO mode data
282 * @ap: ATA interface 313 * @ap: ATA interface
@@ -314,6 +345,14 @@ static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
314 timing_setup(ap, adev, 0x50, adev->dma_mode, 4); 345 timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
315} 346}
316 347
348static void nv_host_stop(struct ata_host *host)
349{
350 u32 udma = (unsigned long)host->private_data;
351
352 /* restore PCI config register 0x60 */
353 pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
354}
355
317static struct scsi_host_template amd_sht = { 356static struct scsi_host_template amd_sht = {
318 .module = THIS_MODULE, 357 .module = THIS_MODULE,
319 .name = DRV_NAME, 358 .name = DRV_NAME,
@@ -478,7 +517,8 @@ static struct ata_port_operations nv100_port_ops = {
478 .thaw = ata_bmdma_thaw, 517 .thaw = ata_bmdma_thaw,
479 .error_handler = nv_error_handler, 518 .error_handler = nv_error_handler,
480 .post_internal_cmd = ata_bmdma_post_internal_cmd, 519 .post_internal_cmd = ata_bmdma_post_internal_cmd,
481 .cable_detect = nv_cable_detect, 520 .cable_detect = ata_cable_ignore,
521 .mode_filter = nv_mode_filter,
482 522
483 .bmdma_setup = ata_bmdma_setup, 523 .bmdma_setup = ata_bmdma_setup,
484 .bmdma_start = ata_bmdma_start, 524 .bmdma_start = ata_bmdma_start,
@@ -495,6 +535,7 @@ static struct ata_port_operations nv100_port_ops = {
495 .irq_on = ata_irq_on, 535 .irq_on = ata_irq_on,
496 536
497 .port_start = ata_sff_port_start, 537 .port_start = ata_sff_port_start,
538 .host_stop = nv_host_stop,
498}; 539};
499 540
500static struct ata_port_operations nv133_port_ops = { 541static struct ata_port_operations nv133_port_ops = {
@@ -511,7 +552,8 @@ static struct ata_port_operations nv133_port_ops = {
511 .thaw = ata_bmdma_thaw, 552 .thaw = ata_bmdma_thaw,
512 .error_handler = nv_error_handler, 553 .error_handler = nv_error_handler,
513 .post_internal_cmd = ata_bmdma_post_internal_cmd, 554 .post_internal_cmd = ata_bmdma_post_internal_cmd,
514 .cable_detect = nv_cable_detect, 555 .cable_detect = ata_cable_ignore,
556 .mode_filter = nv_mode_filter,
515 557
516 .bmdma_setup = ata_bmdma_setup, 558 .bmdma_setup = ata_bmdma_setup,
517 .bmdma_start = ata_bmdma_start, 559 .bmdma_start = ata_bmdma_start,
@@ -528,6 +570,7 @@ static struct ata_port_operations nv133_port_ops = {
528 .irq_on = ata_irq_on, 570 .irq_on = ata_irq_on,
529 571
530 .port_start = ata_sff_port_start, 572 .port_start = ata_sff_port_start,
573 .host_stop = nv_host_stop,
531}; 574};
532 575
533static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 576static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -614,7 +657,8 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
614 .port_ops = &amd100_port_ops 657 .port_ops = &amd100_port_ops
615 } 658 }
616 }; 659 };
617 const struct ata_port_info *ppi[] = { NULL, NULL }; 660 struct ata_port_info pi;
661 const struct ata_port_info *ppi[] = { &pi, NULL };
618 static int printed_version; 662 static int printed_version;
619 int type = id->driver_data; 663 int type = id->driver_data;
620 u8 fifo; 664 u8 fifo;
@@ -628,6 +672,19 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
628 if (type == 1 && pdev->revision > 0x7) 672 if (type == 1 && pdev->revision > 0x7)
629 type = 2; 673 type = 2;
630 674
675 /* Serenade ? */
676 if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
677 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
678 type = 6; /* UDMA 100 only */
679
680 /*
681 * Okay, type is determined now. Apply type-specific workarounds.
682 */
683 pi = info[type];
684
685 if (type < 3)
686 ata_pci_clear_simplex(pdev);
687
631 /* Check for AMD7411 */ 688 /* Check for AMD7411 */
632 if (type == 3) 689 if (type == 3)
633 /* FIFO is broken */ 690 /* FIFO is broken */
@@ -635,16 +692,17 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
635 else 692 else
636 pci_write_config_byte(pdev, 0x41, fifo | 0xF0); 693 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
637 694
638 /* Serenade ? */ 695 /* Cable detection on Nvidia chips doesn't work too well,
639 if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD && 696 * cache BIOS programmed UDMA mode.
640 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE) 697 */
641 type = 6; /* UDMA 100 only */ 698 if (type == 7 || type == 8) {
699 u32 udma;
642 700
643 if (type < 3) 701 pci_read_config_dword(pdev, 0x60, &udma);
644 ata_pci_clear_simplex(pdev); 702 pi.private_data = (void *)(unsigned long)udma;
703 }
645 704
646 /* And fire it up */ 705 /* And fire it up */
647 ppi[0] = &info[type];
648 return ata_pci_init_one(pdev, ppi); 706 return ata_pci_init_one(pdev, ppi);
649} 707}
650 708
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 7842cc487359..a32e3c44a606 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -832,6 +832,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
832{ 832{
833 unsigned short config = WDSIZE_16; 833 unsigned short config = WDSIZE_16;
834 struct scatterlist *sg; 834 struct scatterlist *sg;
835 unsigned int si;
835 836
836 pr_debug("in atapi dma setup\n"); 837 pr_debug("in atapi dma setup\n");
837 /* Program the ATA_CTRL register with dir */ 838 /* Program the ATA_CTRL register with dir */
@@ -839,7 +840,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
839 /* fill the ATAPI DMA controller */ 840 /* fill the ATAPI DMA controller */
840 set_dma_config(CH_ATAPI_TX, config); 841 set_dma_config(CH_ATAPI_TX, config);
841 set_dma_x_modify(CH_ATAPI_TX, 2); 842 set_dma_x_modify(CH_ATAPI_TX, 2);
842 ata_for_each_sg(sg, qc) { 843 for_each_sg(qc->sg, sg, qc->n_elem, si) {
843 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg)); 844 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
844 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1); 845 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
845 } 846 }
@@ -848,7 +849,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
848 /* fill the ATAPI DMA controller */ 849 /* fill the ATAPI DMA controller */
849 set_dma_config(CH_ATAPI_RX, config); 850 set_dma_config(CH_ATAPI_RX, config);
850 set_dma_x_modify(CH_ATAPI_RX, 2); 851 set_dma_x_modify(CH_ATAPI_RX, 2);
851 ata_for_each_sg(sg, qc) { 852 for_each_sg(qc->sg, sg, qc->n_elem, si) {
852 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg)); 853 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
853 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1); 854 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
854 } 855 }
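
All the driver conversions in this patch follow the same shape: qc->sg and qc->n_elem replace the old private list walked by ata_for_each_sg(), and the generic for_each_sg() iterator from linux/scatterlist.h needs an explicit index. The common pattern, sketched:

#include <linux/scatterlist.h>

static void walk_qc_sg(struct ata_queued_cmd *qc)
{
        struct scatterlist *sg;
        unsigned int si;

        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                /* per segment: sg_dma_address(sg) / sg_dma_len(sg) */
                (void)sg_dma_len(sg);
        }
}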
@@ -867,6 +868,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
867 struct ata_port *ap = qc->ap; 868 struct ata_port *ap = qc->ap;
868 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 869 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
869 struct scatterlist *sg; 870 struct scatterlist *sg;
871 unsigned int si;
870 872
871 pr_debug("in atapi dma start\n"); 873 pr_debug("in atapi dma start\n");
872 if (!(ap->udma_mask || ap->mwdma_mask)) 874 if (!(ap->udma_mask || ap->mwdma_mask))
@@ -881,7 +883,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
881 * data cache is enabled. Otherwise, this loop 883 * data cache is enabled. Otherwise, this loop
882 * is an empty loop and optimized out. 884 * is an empty loop and optimized out.
883 */ 885 */
884 ata_for_each_sg(sg, qc) { 886 for_each_sg(qc->sg, sg, qc->n_elem, si) {
885 flush_dcache_range(sg_dma_address(sg), 887 flush_dcache_range(sg_dma_address(sg),
886 sg_dma_address(sg) + sg_dma_len(sg)); 888 sg_dma_address(sg) + sg_dma_len(sg));
887 } 889 }
@@ -910,7 +912,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
910 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST); 912 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
911 913
912 /* Set transfer length to buffer len */ 914 /* Set transfer length to buffer len */
913 ata_for_each_sg(sg, qc) { 915 for_each_sg(qc->sg, sg, qc->n_elem, si) {
914 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); 916 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
915 } 917 }
916 918
@@ -932,6 +934,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
932{ 934{
933 struct ata_port *ap = qc->ap; 935 struct ata_port *ap = qc->ap;
934 struct scatterlist *sg; 936 struct scatterlist *sg;
937 unsigned int si;
935 938
936 pr_debug("in atapi dma stop\n"); 939 pr_debug("in atapi dma stop\n");
937 if (!(ap->udma_mask || ap->mwdma_mask)) 940 if (!(ap->udma_mask || ap->mwdma_mask))
@@ -950,7 +953,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
950 * data cache is enabled. Otherwise, this loop 953 * data cache is enabled. Otherwise, this loop
951 * is an empty loop and optimized out. 954 * is an empty loop and optimized out.
952 */ 955 */
953 ata_for_each_sg(sg, qc) { 956 for_each_sg(qc->sg, sg, qc->n_elem, si) {
954 invalidate_dcache_range( 957 invalidate_dcache_range(
955 sg_dma_address(sg), 958 sg_dma_address(sg),
956 sg_dma_address(sg) 959 sg_dma_address(sg)
@@ -1167,34 +1170,36 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
1167 * Note: Original code is ata_data_xfer(). 1170 * Note: Original code is ata_data_xfer().
1168 */ 1171 */
1169 1172
1170static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf, 1173static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
1171 unsigned int buflen, int write_data) 1174 unsigned int buflen, int rw)
1172{ 1175{
1173 struct ata_port *ap = adev->link->ap; 1176 struct ata_port *ap = dev->link->ap;
1174 unsigned int words = buflen >> 1;
1175 unsigned short *buf16 = (u16 *) buf;
1176 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1177 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1178 unsigned int words = buflen >> 1;
1179 unsigned short *buf16 = (u16 *)buf;
1177 1180
1178 /* Transfer multiple of 2 bytes */ 1181 /* Transfer multiple of 2 bytes */
1179 if (write_data) { 1182 if (rw == READ)
1180 write_atapi_data(base, words, buf16);
1181 } else {
1182 read_atapi_data(base, words, buf16); 1183 read_atapi_data(base, words, buf16);
1183 } 1184 else
1185 write_atapi_data(base, words, buf16);
1184 1186
1185 /* Transfer trailing 1 byte, if any. */ 1187 /* Transfer trailing 1 byte, if any. */
1186 if (unlikely(buflen & 0x01)) { 1188 if (unlikely(buflen & 0x01)) {
1187 unsigned short align_buf[1] = { 0 }; 1189 unsigned short align_buf[1] = { 0 };
1188 unsigned char *trailing_buf = buf + buflen - 1; 1190 unsigned char *trailing_buf = buf + buflen - 1;
1189 1191
1190 if (write_data) { 1192 if (rw == READ) {
1191 memcpy(align_buf, trailing_buf, 1);
1192 write_atapi_data(base, 1, align_buf);
1193 } else {
1194 read_atapi_data(base, 1, align_buf); 1193 read_atapi_data(base, 1, align_buf);
1195 memcpy(trailing_buf, align_buf, 1); 1194 memcpy(trailing_buf, align_buf, 1);
1195 } else {
1196 memcpy(align_buf, trailing_buf, 1);
1197 write_atapi_data(base, 1, align_buf);
1196 } 1198 }
1199 words++;
1197 } 1200 }
1201
1202 return words << 1;
1198} 1203}
1199 1204
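
The reworked ->data_xfer also reports the number of bytes actually moved, which for this 16-bit-word interface means odd lengths round up by one padded byte. The arithmetic in isolation (a user-space model):

#include <stdio.h>

static unsigned int bytes_consumed(unsigned int buflen)
{
        unsigned int words = buflen >> 1;

        if (buflen & 0x01)
                words++;        /* trailing byte goes out as a padded word */
        return words << 1;
}

int main(void)
{
        printf("%u\n", bytes_consumed(7));      /* prints 8 */
        return 0;
}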
1200/** 1205/**
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 33f7f0843f4f..d4590f546c49 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -198,7 +198,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
198 }; 198 };
199 const struct ata_port_info *ppi[2]; 199 const struct ata_port_info *ppi[2];
200 u8 pcicfg; 200 u8 pcicfg;
201 void *iomap[5]; 201 void __iomem *iomap[5];
202 struct ata_host *host; 202 struct ata_host *host;
203 struct ata_ioports *ioaddr; 203 struct ata_ioports *ioaddr;
204 int i, rc; 204 int i, rc;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index c79f066c2bc9..68eb34929cec 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -847,15 +847,16 @@ static u32 hpt374_read_freq(struct pci_dev *pdev)
847 u32 freq; 847 u32 freq;
848 unsigned long io_base = pci_resource_start(pdev, 4); 848 unsigned long io_base = pci_resource_start(pdev, 4);
849 if (PCI_FUNC(pdev->devfn) & 1) { 849 if (PCI_FUNC(pdev->devfn) & 1) {
850 struct pci_dev *pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1); 850 struct pci_dev *pdev_0;
851
852 pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
851 /* Someone hot plugged the controller on us ? */ 853 /* Someone hot plugged the controller on us ? */
852 if (pdev_0 == NULL) 854 if (pdev_0 == NULL)
853 return 0; 855 return 0;
854 io_base = pci_resource_start(pdev_0, 4); 856 io_base = pci_resource_start(pdev_0, 4);
855 freq = inl(io_base + 0x90); 857 freq = inl(io_base + 0x90);
856 pci_dev_put(pdev_0); 858 pci_dev_put(pdev_0);
857 } 859 } else
858 else
859 freq = inl(io_base + 0x90); 860 freq = inl(io_base + 0x90);
860 return freq; 861 return freq;
861} 862}
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 842fe08a3c13..5b8586dac63b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -224,6 +224,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
224 struct pata_icside_state *state = ap->host->private_data; 224 struct pata_icside_state *state = ap->host->private_data;
225 struct scatterlist *sg, *rsg = state->sg; 225 struct scatterlist *sg, *rsg = state->sg;
226 unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE; 226 unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
227 unsigned int si;
227 228
228 /* 229 /*
229 * We are simplex; BUG if we try to fiddle with DMA 230 * We are simplex; BUG if we try to fiddle with DMA
@@ -234,7 +235,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
234 /* 235 /*
235 * Copy ATAs scattered sg list into a contiguous array of sg 236 * Copy ATAs scattered sg list into a contiguous array of sg
236 */ 237 */
237 ata_for_each_sg(sg, qc) { 238 for_each_sg(qc->sg, sg, qc->n_elem, si) {
238 memcpy(rsg, sg, sizeof(*sg)); 239 memcpy(rsg, sg, sizeof(*sg));
239 rsg++; 240 rsg++;
240 } 241 }
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index ca9aae09daed..109ddd42c266 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
430 return ata_qc_issue_prot(qc); 430 return ata_qc_issue_prot(qc);
431 } 431 }
432 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); 432 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
433 return AC_ERR_INVALID; 433 return AC_ERR_DEV;
434} 434}
435 435
436/** 436/**
@@ -516,6 +516,37 @@ static void it821x_dev_config(struct ata_device *adev)
516 printk("(%dK stripe)", adev->id[146]); 516 printk("(%dK stripe)", adev->id[146]);
517 printk(".\n"); 517 printk(".\n");
518 } 518 }
519 /* This is a controller firmware triggered funny, don't
520 report the drive faulty! */
521 adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
522}
523
524/**
525 * it821x_ident_hack - Hack identify data up
526 * @ap: Port
527 *
528 * Walk the devices on this firmware driven port and slightly
529 * mash the identify data to stop us and common tools trying to
 530 * use features the firmware does not support. The firmware itself does
531 * some masking (eg SMART) but not enough.
532 *
533 * This is a bit of an abuse of the cable method, but it is the
534 * only method called at the right time. We could modify the libata
535 * core specifically for ident hacking but while we have one offender
536 * it seems better to keep the fallout localised.
537 */
538
539static int it821x_ident_hack(struct ata_port *ap)
540{
541 struct ata_device *adev;
542 ata_link_for_each_dev(adev, &ap->link) {
543 if (ata_dev_enabled(adev)) {
544 adev->id[84] &= ~(1 << 6); /* No FUA */
545 adev->id[85] &= ~(1 << 10); /* No HPA */
546 adev->id[76] = 0; /* No NCQ/AN etc */
547 }
548 }
549 return ata_cable_unknown(ap);
519} 550}
520 551
521 552
@@ -634,7 +665,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
634 .thaw = ata_bmdma_thaw, 665 .thaw = ata_bmdma_thaw,
635 .error_handler = ata_bmdma_error_handler, 666 .error_handler = ata_bmdma_error_handler,
636 .post_internal_cmd = ata_bmdma_post_internal_cmd, 667 .post_internal_cmd = ata_bmdma_post_internal_cmd,
637 .cable_detect = ata_cable_unknown, 668 .cable_detect = it821x_ident_hack,
638 669
639 .bmdma_setup = ata_bmdma_setup, 670 .bmdma_setup = ata_bmdma_setup,
640 .bmdma_start = ata_bmdma_start, 671 .bmdma_start = ata_bmdma_start,
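The ident hack above clears individual IDENTIFY words so that generic tools stop enabling features the RAID firmware cannot pass through: per the driver's comments, bit 6 of word 84 advertises FUA, bit 10 of word 85 advertises HPA, and word 76 holds the SATA capability bits (NCQ among them). A rough userspace sketch that inspects the same words in a 256-word identify block (for example one fetched with the HDIO_GET_IDENTITY ioctl):

#include <stdio.h>

/* id points at the 256 x 16-bit IDENTIFY DEVICE data block */
static void show_masked_features(const unsigned short *id)
{
	printf("FUA (word 84, bit 6):  %s\n",
	       (id[84] & (1 << 6)) ? "advertised" : "masked");
	printf("HPA (word 85, bit 10): %s\n",
	       (id[85] & (1 << 10)) ? "advertised" : "masked");
	printf("SATA caps (word 76):   0x%04x\n", id[76]);
}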
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 120b5bfa7ce6..030878fedeb5 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -42,13 +42,13 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
42 return 0; 42 return 0;
43} 43}
44 44
45static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, 45static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
46 unsigned int buflen, int write_data) 46 unsigned char *buf, unsigned int buflen, int rw)
47{ 47{
48 unsigned int i; 48 unsigned int i;
49 unsigned int words = buflen >> 1; 49 unsigned int words = buflen >> 1;
50 u16 *buf16 = (u16 *) buf; 50 u16 *buf16 = (u16 *) buf;
51 struct ata_port *ap = adev->link->ap; 51 struct ata_port *ap = dev->link->ap;
52 void __iomem *mmio = ap->ioaddr.data_addr; 52 void __iomem *mmio = ap->ioaddr.data_addr;
53 struct ixp4xx_pata_data *data = ap->host->dev->platform_data; 53 struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
54 54
@@ -59,30 +59,32 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
59 udelay(100); 59 udelay(100);
60 60
61 /* Transfer multiple of 2 bytes */ 61 /* Transfer multiple of 2 bytes */
62 if (write_data) { 62 if (rw == READ)
63 for (i = 0; i < words; i++)
64 writew(buf16[i], mmio);
65 } else {
66 for (i = 0; i < words; i++) 63 for (i = 0; i < words; i++)
67 buf16[i] = readw(mmio); 64 buf16[i] = readw(mmio);
68 } 65 else
66 for (i = 0; i < words; i++)
67 writew(buf16[i], mmio);
69 68
70 /* Transfer trailing 1 byte, if any. */ 69 /* Transfer trailing 1 byte, if any. */
71 if (unlikely(buflen & 0x01)) { 70 if (unlikely(buflen & 0x01)) {
72 u16 align_buf[1] = { 0 }; 71 u16 align_buf[1] = { 0 };
73 unsigned char *trailing_buf = buf + buflen - 1; 72 unsigned char *trailing_buf = buf + buflen - 1;
74 73
75 if (write_data) { 74 if (rw == READ) {
76 memcpy(align_buf, trailing_buf, 1);
77 writew(align_buf[0], mmio);
78 } else {
79 align_buf[0] = readw(mmio); 75 align_buf[0] = readw(mmio);
80 memcpy(trailing_buf, align_buf, 1); 76 memcpy(trailing_buf, align_buf, 1);
77 } else {
78 memcpy(align_buf, trailing_buf, 1);
79 writew(align_buf[0], mmio);
81 } 80 }
81 words++;
82 } 82 }
83 83
84 udelay(100); 84 udelay(100);
85 *data->cs0_cfg |= 0x01; 85 *data->cs0_cfg |= 0x01;
86
87 return words << 1;
86} 88}
87 89
88static struct scsi_host_template ixp4xx_sht = { 90static struct scsi_host_template ixp4xx_sht = {
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 17159b5e1e43..333dc15f8ccf 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -28,7 +28,6 @@
28 * 28 *
29 * Unsupported but docs exist: 29 * Unsupported but docs exist:
30 * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220 30 * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
31 * Winbond W83759A
32 * 31 *
33 * This driver handles legacy (that is "ISA/VLB side") IDE ports found 32 * This driver handles legacy (that is "ISA/VLB side") IDE ports found
34 * on PC class systems. There are three hybrid devices that are exceptions 33 * on PC class systems. There are three hybrid devices that are exceptions
@@ -36,7 +35,7 @@
36 * the MPIIX where the tuning is PCI side but the IDE is "ISA side". 35 * the MPIIX where the tuning is PCI side but the IDE is "ISA side".
37 * 36 *
38 * Specific support is included for the ht6560a/ht6560b/opti82c611a/ 37 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
39 * opti82c465mv/promise 20230c/20630 38 * opti82c465mv/promise 20230c/20630/winbond83759A
40 * 39 *
41 * Use the autospeed and pio_mask options with: 40 * Use the autospeed and pio_mask options with:
42 * Appian ADI/2 aka CLPD7220 or AIC25VL01. 41 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
@@ -47,9 +46,6 @@
47 * For now use autospeed and pio_mask as above with the W83759A. This may 46 * For now use autospeed and pio_mask as above with the W83759A. This may
48 * change. 47 * change.
49 * 48 *
50 * TODO
51 * Merge existing pata_qdi driver
52 *
53 */ 49 */
54 50
55#include <linux/kernel.h> 51#include <linux/kernel.h>
@@ -64,12 +60,13 @@
64#include <linux/platform_device.h> 60#include <linux/platform_device.h>
65 61
66#define DRV_NAME "pata_legacy" 62#define DRV_NAME "pata_legacy"
67#define DRV_VERSION "0.5.5" 63#define DRV_VERSION "0.6.5"
68 64
69#define NR_HOST 6 65#define NR_HOST 6
70 66
71static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 }; 67static int all;
72static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 }; 68module_param(all, int, 0444);
69MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI (0=off, 1=on)");
73 70
74struct legacy_data { 71struct legacy_data {
75 unsigned long timing; 72 unsigned long timing;
@@ -80,21 +77,107 @@ struct legacy_data {
80 77
81}; 78};
82 79
80enum controller {
81 BIOS = 0,
82 SNOOP = 1,
83 PDC20230 = 2,
84 HT6560A = 3,
85 HT6560B = 4,
86 OPTI611A = 5,
87 OPTI46X = 6,
88 QDI6500 = 7,
89 QDI6580 = 8,
90 QDI6580DP = 9, /* Dual channel mode is different */
91 W83759A = 10,
92
93 UNKNOWN = -1
94};
95
96
97struct legacy_probe {
98 unsigned char *name;
99 unsigned long port;
100 unsigned int irq;
101 unsigned int slot;
102 enum controller type;
103 unsigned long private;
104};
105
106struct legacy_controller {
107 const char *name;
108 struct ata_port_operations *ops;
109 unsigned int pio_mask;
110 unsigned int flags;
111 int (*setup)(struct platform_device *, struct legacy_probe *probe,
112 struct legacy_data *data);
113};
114
115static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
116
117static struct legacy_probe probe_list[NR_HOST];
83static struct legacy_data legacy_data[NR_HOST]; 118static struct legacy_data legacy_data[NR_HOST];
84static struct ata_host *legacy_host[NR_HOST]; 119static struct ata_host *legacy_host[NR_HOST];
85static int nr_legacy_host; 120static int nr_legacy_host;
86 121
87 122
88static int probe_all; /* Set to check all ISA port ranges */ 123static int probe_all; /* Set to check all ISA port ranges */
89static int ht6560a; /* HT 6560A on primary 1, secondary 2, both 3 */ 124static int ht6560a; /* HT 6560A on primary 1, second 2, both 3 */
90static int ht6560b; /* HT 6560A on primary 1, secondary 2, both 3 */ 125static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
91static int opti82c611a; /* Opti82c611A on primary 1, secondary 2, both 3 */ 126static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
92static int opti82c46x; /* Opti 82c465MV present (pri/sec autodetect) */ 127static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
93static int autospeed; /* Chip present which snoops speed changes */ 128static int qdi; /* Set to probe QDI controllers */
94static int pio_mask = 0x1F; /* PIO range for autospeed devices */ 129static int winbond; /* Set to probe Winbond controllers,
 130 give I/O port if non standard */
131static int autospeed; /* Chip present which snoops speed changes */
132static int pio_mask = 0x1F; /* PIO range for autospeed devices */
95static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 133static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
96 134
97/** 135/**
136 * legacy_probe_add - Add interface to probe list
137 * @port: Controller port
138 * @irq: IRQ number
139 * @type: Controller type
140 * @private: Controller specific info
141 *
142 * Add an entry into the probe list for ATA controllers. This is used
143 * to add the default ISA slots and then to build up the table
144 * further according to other ISA/VLB/Weird device scans
145 *
146 * An I/O port list is used to keep ordering stable and sane, as we
147 * don't have any good way to talk about ordering otherwise
148 */
149
150static int legacy_probe_add(unsigned long port, unsigned int irq,
151 enum controller type, unsigned long private)
152{
153 struct legacy_probe *lp = &probe_list[0];
154 int i;
155 struct legacy_probe *free = NULL;
156
157 for (i = 0; i < NR_HOST; i++) {
158 if (lp->port == 0 && free == NULL)
159 free = lp;
160 /* Matching port, or the correct slot for ordering */
161 if (lp->port == port || legacy_port[i] == port) {
162 free = lp;
163 break;
164 }
165 lp++;
166 }
167 if (free == NULL) {
168 printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
169 return -1;
170 }
171 /* Fill in the entry for later probing */
172 free->port = port;
173 free->irq = irq;
174 free->type = type;
175 free->private = private;
176 return 0;
177}
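A usage sketch of legacy_probe_add(): the default ISA slots are seeded first, and a later chipset scan can refine a slot in place, since a port matching an existing entry (or its canonical position in legacy_port[]) overwrites rather than appends. That overwrite rule is what keeps probe ordering stable:

/* Default seeds, as legacy_init() does later in this patch */
legacy_probe_add(0x1F0, 14, UNKNOWN, 0);	/* primary */
legacy_probe_add(0x170, 15, UNKNOWN, 0);	/* secondary */

/* A VLB scan may then retype slot 1 in place; the private value here
   (0x32, a QDI timing port) is only an illustrative example */
legacy_probe_add(0x170, 15, QDI6580DP, 0x32);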
178
179
180/**
98 * legacy_set_mode - mode setting 181 * legacy_set_mode - mode setting
99 * @link: IDE link 182 * @link: IDE link
100 * @unused: Device that failed when error is returned 183 * @unused: Device that failed when error is returned
@@ -113,7 +196,8 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
113 196
114 ata_link_for_each_dev(dev, link) { 197 ata_link_for_each_dev(dev, link) {
115 if (ata_dev_enabled(dev)) { 198 if (ata_dev_enabled(dev)) {
116 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); 199 ata_dev_printk(dev, KERN_INFO,
200 "configured for PIO\n");
117 dev->pio_mode = XFER_PIO_0; 201 dev->pio_mode = XFER_PIO_0;
118 dev->xfer_mode = XFER_PIO_0; 202 dev->xfer_mode = XFER_PIO_0;
119 dev->xfer_shift = ATA_SHIFT_PIO; 203 dev->xfer_shift = ATA_SHIFT_PIO;
@@ -171,7 +255,7 @@ static struct ata_port_operations simple_port_ops = {
171 .irq_clear = ata_bmdma_irq_clear, 255 .irq_clear = ata_bmdma_irq_clear,
172 .irq_on = ata_irq_on, 256 .irq_on = ata_irq_on,
173 257
174 .port_start = ata_port_start, 258 .port_start = ata_sff_port_start,
175}; 259};
176 260
177static struct ata_port_operations legacy_port_ops = { 261static struct ata_port_operations legacy_port_ops = {
@@ -198,15 +282,16 @@ static struct ata_port_operations legacy_port_ops = {
198 .irq_clear = ata_bmdma_irq_clear, 282 .irq_clear = ata_bmdma_irq_clear,
199 .irq_on = ata_irq_on, 283 .irq_on = ata_irq_on,
200 284
201 .port_start = ata_port_start, 285 .port_start = ata_sff_port_start,
202}; 286};
203 287
204/* 288/*
205 * Promise 20230C and 20620 support 289 * Promise 20230C and 20620 support
206 * 290 *
207 * This controller supports PIO0 to PIO2. We set PIO timings conservatively to 291 * This controller supports PIO0 to PIO2. We set PIO timings
208 * allow for 50MHz Vesa Local Bus. The 20620 DMA support is weird being DMA to 292 * conservatively to allow for 50MHz Vesa Local Bus. The 20620 DMA
209 * controller and PIO'd to the host and not supported. 293 * support is weird being DMA to controller and PIO'd to the host
294 * and not supported.
210 */ 295 */
211 296
212static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) 297static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -221,8 +306,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
221 local_irq_save(flags); 306 local_irq_save(flags);
222 307
223 /* Unlock the control interface */ 308 /* Unlock the control interface */
224 do 309 do {
225 {
226 inb(0x1F5); 310 inb(0x1F5);
227 outb(inb(0x1F2) | 0x80, 0x1F2); 311 outb(inb(0x1F2) | 0x80, 0x1F2);
228 inb(0x1F2); 312 inb(0x1F2);
@@ -231,7 +315,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
231 inb(0x1F2); 315 inb(0x1F2);
232 inb(0x1F2); 316 inb(0x1F2);
233 } 317 }
234 while((inb(0x1F2) & 0x80) && --tries); 318 while ((inb(0x1F2) & 0x80) && --tries);
235 319
236 local_irq_restore(flags); 320 local_irq_restore(flags);
237 321
@@ -249,13 +333,14 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
249 333
250} 334}
251 335
252static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 336static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
337 unsigned char *buf, unsigned int buflen, int rw)
253{ 338{
254 struct ata_port *ap = adev->link->ap; 339 if (ata_id_has_dword_io(dev->id)) {
255 int slop = buflen & 3; 340 struct ata_port *ap = dev->link->ap;
256 unsigned long flags; 341 int slop = buflen & 3;
342 unsigned long flags;
257 343
258 if (ata_id_has_dword_io(adev->id)) {
259 local_irq_save(flags); 344 local_irq_save(flags);
260 345
261 /* Perform the 32bit I/O synchronization sequence */ 346 /* Perform the 32bit I/O synchronization sequence */
@@ -264,26 +349,27 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
264 ioread8(ap->ioaddr.nsect_addr); 349 ioread8(ap->ioaddr.nsect_addr);
265 350
266 /* Now the data */ 351 /* Now the data */
267 352 if (rw == READ)
268 if (write_data)
269 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
270 else
271 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 353 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
354 else
355 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
272 356
273 if (unlikely(slop)) { 357 if (unlikely(slop)) {
274 __le32 pad = 0; 358 u32 pad;
275 if (write_data) { 359 if (rw == READ) {
276 memcpy(&pad, buf + buflen - slop, slop);
277 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
278 } else {
279 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 360 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
280 memcpy(buf + buflen - slop, &pad, slop); 361 memcpy(buf + buflen - slop, &pad, slop);
362 } else {
363 memcpy(&pad, buf + buflen - slop, slop);
364 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
281 } 365 }
366 buflen += 4 - slop;
282 } 367 }
283 local_irq_restore(flags); 368 local_irq_restore(flags);
284 } 369 } else
285 else 370 buflen = ata_data_xfer_noirq(dev, buf, buflen, rw);
286 ata_data_xfer_noirq(adev, buf, buflen, write_data); 371
372 return buflen;
287} 373}
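The 32-bit path above also rounds its return value up: any 1-3 leftover bytes ride in one zero-padded dword through the data register, and the reported length grows to match. A worked example of the slop arithmetic:

/* buflen = 510: 510 >> 2 = 127 full dwords (508 bytes) move via
   ioread32_rep()/iowrite32_rep(); slop = 510 & 3 = 2, so the last two
   bytes are bounced through one padded u32 and the function reports
   buflen + 4 - slop = 512 bytes transferred. */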
288 374
289static struct ata_port_operations pdc20230_port_ops = { 375static struct ata_port_operations pdc20230_port_ops = {
@@ -310,14 +396,14 @@ static struct ata_port_operations pdc20230_port_ops = {
310 .irq_clear = ata_bmdma_irq_clear, 396 .irq_clear = ata_bmdma_irq_clear,
311 .irq_on = ata_irq_on, 397 .irq_on = ata_irq_on,
312 398
313 .port_start = ata_port_start, 399 .port_start = ata_sff_port_start,
314}; 400};
315 401
316/* 402/*
317 * Holtek 6560A support 403 * Holtek 6560A support
318 * 404 *
319 * This controller supports PIO0 to PIO2 (no IORDY even though higher timings 405 * This controller supports PIO0 to PIO2 (no IORDY even though higher
320 * can be loaded). 406 * timings can be loaded).
321 */ 407 */
322 408
323static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) 409static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -364,14 +450,14 @@ static struct ata_port_operations ht6560a_port_ops = {
364 .irq_clear = ata_bmdma_irq_clear, 450 .irq_clear = ata_bmdma_irq_clear,
365 .irq_on = ata_irq_on, 451 .irq_on = ata_irq_on,
366 452
367 .port_start = ata_port_start, 453 .port_start = ata_sff_port_start,
368}; 454};
369 455
370/* 456/*
371 * Holtek 6560B support 457 * Holtek 6560B support
372 * 458 *
373 * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting 459 * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
374 * unless we see an ATAPI device in which case we force it off. 460 * setting unless we see an ATAPI device in which case we force it off.
375 * 461 *
376 * FIXME: need to implement 2nd channel support. 462 * FIXME: need to implement 2nd channel support.
377 */ 463 */
@@ -398,7 +484,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
398 if (adev->class != ATA_DEV_ATA) { 484 if (adev->class != ATA_DEV_ATA) {
399 u8 rconf = inb(0x3E6); 485 u8 rconf = inb(0x3E6);
400 if (rconf & 0x24) { 486 if (rconf & 0x24) {
401 rconf &= ~ 0x24; 487 rconf &= ~0x24;
402 outb(rconf, 0x3E6); 488 outb(rconf, 0x3E6);
403 } 489 }
404 } 490 }
@@ -423,13 +509,13 @@ static struct ata_port_operations ht6560b_port_ops = {
423 .qc_prep = ata_qc_prep, 509 .qc_prep = ata_qc_prep,
424 .qc_issue = ata_qc_issue_prot, 510 .qc_issue = ata_qc_issue_prot,
425 511
426 .data_xfer = ata_data_xfer, /* FIXME: Check 32bit and noirq */ 512 .data_xfer = ata_data_xfer, /* FIXME: Check 32bit and noirq */
427 513
428 .irq_handler = ata_interrupt, 514 .irq_handler = ata_interrupt,
429 .irq_clear = ata_bmdma_irq_clear, 515 .irq_clear = ata_bmdma_irq_clear,
430 .irq_on = ata_irq_on, 516 .irq_on = ata_irq_on,
431 517
432 .port_start = ata_port_start, 518 .port_start = ata_sff_port_start,
433}; 519};
434 520
435/* 521/*
@@ -462,7 +548,8 @@ static u8 opti_syscfg(u8 reg)
462 * This controller supports PIO0 to PIO3. 548 * This controller supports PIO0 to PIO3.
463 */ 549 */
464 550
465static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev) 551static void opti82c611a_set_piomode(struct ata_port *ap,
552 struct ata_device *adev)
466{ 553{
467 u8 active, recover, setup; 554 u8 active, recover, setup;
468 struct ata_timing t; 555 struct ata_timing t;
@@ -549,7 +636,7 @@ static struct ata_port_operations opti82c611a_port_ops = {
549 .irq_clear = ata_bmdma_irq_clear, 636 .irq_clear = ata_bmdma_irq_clear,
550 .irq_on = ata_irq_on, 637 .irq_on = ata_irq_on,
551 638
552 .port_start = ata_port_start, 639 .port_start = ata_sff_port_start,
553}; 640};
554 641
555/* 642/*
@@ -681,77 +768,398 @@ static struct ata_port_operations opti82c46x_port_ops = {
681 .irq_clear = ata_bmdma_irq_clear, 768 .irq_clear = ata_bmdma_irq_clear,
682 .irq_on = ata_irq_on, 769 .irq_on = ata_irq_on,
683 770
684 .port_start = ata_port_start, 771 .port_start = ata_sff_port_start,
685}; 772};
686 773
774static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
775{
776 struct ata_timing t;
777 struct legacy_data *qdi = ap->host->private_data;
778 int active, recovery;
779 u8 timing;
780
781 /* Get the timing data in cycles */
782 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
783
784 if (qdi->fast) {
785 active = 8 - FIT(t.active, 1, 8);
786 recovery = 18 - FIT(t.recover, 3, 18);
787 } else {
788 active = 9 - FIT(t.active, 2, 9);
789 recovery = 15 - FIT(t.recover, 0, 15);
790 }
791 timing = (recovery << 4) | active | 0x08;
792
793 qdi->clock[adev->devno] = timing;
794
795 outb(timing, qdi->timing);
796}
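qdi6500_set_piomode() above, and the 6580 variants that follow, share one timing encoding: active and recovery counts derived from the computed ATA timing, clamped with FIT(), and packed as (recovery << 4) | active | 0x08. A small self-contained sketch of the arithmetic (FIT() is redefined here for illustration and the cycle values are hypothetical):

#include <stdio.h>

#define FIT(v, vmin, vmax) \
	((v) < (vmin) ? (vmin) : ((v) > (vmax) ? (vmax) : (v)))

int main(void)
{
	/* Hypothetical fast-clock card: t.active = 6, t.recover = 10 */
	int active   = 8 - FIT(6, 1, 8);	/* = 2 */
	int recovery = 18 - FIT(10, 3, 18);	/* = 8 */

	printf("timing byte = 0x%02x\n",
	       (recovery << 4) | active | 0x08);	/* prints 0x8a */
	return 0;
}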
687 797
688/** 798/**
689 * legacy_init_one - attach a legacy interface 799 * qdi6580dp_set_piomode - PIO setup for dual channel
690 * @port: port number 800 * @ap: Port
691 * @io: I/O port start 801 * @adev: Device
692 * @ctrl: control port
693 * @irq: interrupt line 802 * @irq: interrupt line
694 * 803 *
695 * Register an ISA bus IDE interface. Such interfaces are PIO and we 804 * In dual channel mode the 6580 has one clock per channel and we have
696 * assume they do not support IRQ sharing. 805 * to software clockswitch in qc_issue_prot.
697 */ 806 */
698 807
699static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq) 808static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
700{ 809{
701 struct legacy_data *ld = &legacy_data[nr_legacy_host]; 810 struct ata_timing t;
702 struct ata_host *host; 811 struct legacy_data *qdi = ap->host->private_data;
703 struct ata_port *ap; 812 int active, recovery;
704 struct platform_device *pdev; 813 u8 timing;
705 struct ata_port_operations *ops = &legacy_port_ops;
706 void __iomem *io_addr, *ctrl_addr;
707 int pio_modes = pio_mask;
708 u32 mask = (1 << port);
709 u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
710 int ret;
711 814
712 pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0); 815 /* Get the timing data in cycles */
713 if (IS_ERR(pdev)) 816 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
714 return PTR_ERR(pdev); 817
818 if (qdi->fast) {
819 active = 8 - FIT(t.active, 1, 8);
820 recovery = 18 - FIT(t.recover, 3, 18);
821 } else {
822 active = 9 - FIT(t.active, 2, 9);
823 recovery = 15 - FIT(t.recover, 0, 15);
824 }
825 timing = (recovery << 4) | active | 0x08;
715 826
716 ret = -EBUSY; 827 qdi->clock[adev->devno] = timing;
717 if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
718 devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
719 goto fail;
720 828
721 ret = -ENOMEM; 829 outb(timing, qdi->timing + 2 * ap->port_no);
722 io_addr = devm_ioport_map(&pdev->dev, io, 8); 830 /* Clear the FIFO */
723 ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1); 831 if (adev->class != ATA_DEV_ATA)
724 if (!io_addr || !ctrl_addr) 832 outb(0x5F, qdi->timing + 3);
725 goto fail; 833}
726 834
727 if (ht6560a & mask) { 835/**
728 ops = &ht6560a_port_ops; 836 * qdi6580_set_piomode - PIO setup for single channel
729 pio_modes = 0x07; 837 * @ap: Port
730 iordy = ATA_FLAG_NO_IORDY; 838 * @adev: Device
731 } 839 *
732 if (ht6560b & mask) { 840 * In single channel mode the 6580 has one clock per device and we can
733 ops = &ht6560b_port_ops; 841 * avoid the requirement to clock switch. We also have to load the timing
734 pio_modes = 0x1F; 842 * into the right clock according to whether we are master or slave.
735 } 843 */
736 if (opti82c611a & mask) { 844
737 ops = &opti82c611a_port_ops; 845static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
738 pio_modes = 0x0F; 846{
847 struct ata_timing t;
848 struct legacy_data *qdi = ap->host->private_data;
849 int active, recovery;
850 u8 timing;
851
852 /* Get the timing data in cycles */
853 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
854
855 if (qdi->fast) {
856 active = 8 - FIT(t.active, 1, 8);
857 recovery = 18 - FIT(t.recover, 3, 18);
858 } else {
859 active = 9 - FIT(t.active, 2, 9);
860 recovery = 15 - FIT(t.recover, 0, 15);
739 } 861 }
740 if (opti82c46x & mask) { 862 timing = (recovery << 4) | active | 0x08;
741 ops = &opti82c46x_port_ops; 863 qdi->clock[adev->devno] = timing;
742 pio_modes = 0x0F; 864 outb(timing, qdi->timing + 2 * adev->devno);
865 /* Clear the FIFO */
866 if (adev->class != ATA_DEV_ATA)
867 outb(0x5F, qdi->timing + 3);
868}
869
870/**
871 * qdi_qc_issue_prot - command issue
872 * @qc: command pending
873 *
874 * Called when the libata layer is about to issue a command. We wrap
875 * this interface so that we can load the correct ATA timings.
876 */
877
878static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
879{
880 struct ata_port *ap = qc->ap;
881 struct ata_device *adev = qc->dev;
882 struct legacy_data *qdi = ap->host->private_data;
883
884 if (qdi->clock[adev->devno] != qdi->last) {
885 if (adev->pio_mode) {
886 qdi->last = qdi->clock[adev->devno];
887 outb(qdi->clock[adev->devno], qdi->timing +
888 2 * ap->port_no);
889 }
743 } 890 }
891 return ata_qc_issue_prot(qc);
892}
744 893
745 /* Probe for automatically detectable controllers */ 894static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
895 unsigned int buflen, int rw)
896{
897 struct ata_port *ap = adev->link->ap;
898 int slop = buflen & 3;
746 899
747 if (io == 0x1F0 && ops == &legacy_port_ops) { 900 if (ata_id_has_dword_io(adev->id)) {
748 unsigned long flags; 901 if (rw == WRITE)
902 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
903 else
904 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
749 905
750 local_irq_save(flags); 906 if (unlikely(slop)) {
907 u32 pad;
908 if (rw == WRITE) {
909 memcpy(&pad, buf + buflen - slop, slop);
910 pad = le32_to_cpu(pad);
911 iowrite32(pad, ap->ioaddr.data_addr);
912 } else {
913 pad = ioread32(ap->ioaddr.data_addr);
914 pad = cpu_to_le32(pad);
915 memcpy(buf + buflen - slop, &pad, slop);
916 }
917 }
918 return (buflen + 3) & ~3;
919 } else
920 return ata_data_xfer(adev, buf, buflen, rw);
921}
922
923static int qdi_port(struct platform_device *dev,
924 struct legacy_probe *lp, struct legacy_data *ld)
925{
926 if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
927 return -EBUSY;
928 ld->timing = lp->private;
929 return 0;
930}
931
932static struct ata_port_operations qdi6500_port_ops = {
933 .set_piomode = qdi6500_set_piomode,
934
935 .tf_load = ata_tf_load,
936 .tf_read = ata_tf_read,
937 .check_status = ata_check_status,
938 .exec_command = ata_exec_command,
939 .dev_select = ata_std_dev_select,
940
941 .freeze = ata_bmdma_freeze,
942 .thaw = ata_bmdma_thaw,
943 .error_handler = ata_bmdma_error_handler,
944 .post_internal_cmd = ata_bmdma_post_internal_cmd,
945 .cable_detect = ata_cable_40wire,
946
947 .qc_prep = ata_qc_prep,
948 .qc_issue = qdi_qc_issue_prot,
949
950 .data_xfer = vlb32_data_xfer,
951
952 .irq_handler = ata_interrupt,
953 .irq_clear = ata_bmdma_irq_clear,
954 .irq_on = ata_irq_on,
955
956 .port_start = ata_sff_port_start,
957};
958
959static struct ata_port_operations qdi6580_port_ops = {
960 .set_piomode = qdi6580_set_piomode,
961
962 .tf_load = ata_tf_load,
963 .tf_read = ata_tf_read,
964 .check_status = ata_check_status,
965 .exec_command = ata_exec_command,
966 .dev_select = ata_std_dev_select,
967
968 .freeze = ata_bmdma_freeze,
969 .thaw = ata_bmdma_thaw,
970 .error_handler = ata_bmdma_error_handler,
971 .post_internal_cmd = ata_bmdma_post_internal_cmd,
972 .cable_detect = ata_cable_40wire,
973
974 .qc_prep = ata_qc_prep,
975 .qc_issue = ata_qc_issue_prot,
976
977 .data_xfer = vlb32_data_xfer,
978
979 .irq_handler = ata_interrupt,
980 .irq_clear = ata_bmdma_irq_clear,
981 .irq_on = ata_irq_on,
982
983 .port_start = ata_sff_port_start,
984};
985
986static struct ata_port_operations qdi6580dp_port_ops = {
987 .set_piomode = qdi6580dp_set_piomode,
988
989 .tf_load = ata_tf_load,
990 .tf_read = ata_tf_read,
991 .check_status = ata_check_status,
992 .exec_command = ata_exec_command,
993 .dev_select = ata_std_dev_select,
994
995 .freeze = ata_bmdma_freeze,
996 .thaw = ata_bmdma_thaw,
997 .error_handler = ata_bmdma_error_handler,
998 .post_internal_cmd = ata_bmdma_post_internal_cmd,
999 .cable_detect = ata_cable_40wire,
1000
1001 .qc_prep = ata_qc_prep,
1002 .qc_issue = qdi_qc_issue_prot,
1003
1004 .data_xfer = vlb32_data_xfer,
1005
1006 .irq_handler = ata_interrupt,
1007 .irq_clear = ata_bmdma_irq_clear,
1008 .irq_on = ata_irq_on,
1009
1010 .port_start = ata_sff_port_start,
1011};
1012
1013static DEFINE_SPINLOCK(winbond_lock);
1014
1015static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
1016{
1017 unsigned long flags;
1018 spin_lock_irqsave(&winbond_lock, flags);
1019 outb(reg, port + 0x01);
1020 outb(val, port + 0x02);
1021 spin_unlock_irqrestore(&winbond_lock, flags);
1022}
1023
1024static u8 winbond_readcfg(unsigned long port, u8 reg)
1025{
1026 u8 val;
1027
1028 unsigned long flags;
1029 spin_lock_irqsave(&winbond_lock, flags);
1030 outb(reg, port + 0x01);
1031 val = inb(port + 0x02);
1032 spin_unlock_irqrestore(&winbond_lock, flags);
1033
1034 return val;
1035}
1036
1037static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
1038{
1039 struct ata_timing t;
1040 struct legacy_data *winbond = ap->host->private_data;
1041 int active, recovery;
1042 u8 reg;
1043 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
1044
1045 reg = winbond_readcfg(winbond->timing, 0x81);
1046
1047 /* Get the timing data in cycles */
1048 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
1049 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
1050 else
1051 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
1052
1053 active = (FIT(t.active, 3, 17) - 1) & 0x0F;
1054 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
1055 timing = (active << 4) | recovery;
1056 winbond_writecfg(winbond->timing, timing, reg);
1057
1058 /* Load the setup timing */
1059
1060 reg = 0x35;
1061 if (adev->class != ATA_DEV_ATA)
1062 reg |= 0x08; /* FIFO off */
1063 if (!ata_pio_need_iordy(adev))
1064 reg |= 0x02; /* IORDY off */
1065 reg |= (FIT(t.setup, 0, 3) << 6);
1066 winbond_writecfg(winbond->timing, timing + 1, reg);
1067}
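For reference, the register addressing used by winbond_set_piomode() above works out as follows (a rough reading of the code, not a datasheet quote):

/* timing register index = 0x88 + 4 * port_no + 2 * devno
   byte 0: active count in bits 7:4, recovery count in bits 3:0
           (as packed above)
   byte 1: setup clocks in bits 7:6; 0x08 = FIFO off (ATAPI),
           0x02 = IORDY off, low bits from the 0x35 base value
   Example: port 0, slave device -> registers 0x8a and 0x8b */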
1068
1069static int winbond_port(struct platform_device *dev,
1070 struct legacy_probe *lp, struct legacy_data *ld)
1071{
1072 if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
1073 return -EBUSY;
1074 ld->timing = lp->private;
1075 return 0;
1076}
1077
1078static struct ata_port_operations winbond_port_ops = {
1079 .set_piomode = winbond_set_piomode,
1080
1081 .tf_load = ata_tf_load,
1082 .tf_read = ata_tf_read,
1083 .check_status = ata_check_status,
1084 .exec_command = ata_exec_command,
1085 .dev_select = ata_std_dev_select,
1086
1087 .freeze = ata_bmdma_freeze,
1088 .thaw = ata_bmdma_thaw,
1089 .error_handler = ata_bmdma_error_handler,
1090 .post_internal_cmd = ata_bmdma_post_internal_cmd,
1091 .cable_detect = ata_cable_40wire,
1092
1093 .qc_prep = ata_qc_prep,
1094 .qc_issue = ata_qc_issue_prot,
1095
1096 .data_xfer = vlb32_data_xfer,
1097
1098 .irq_clear = ata_bmdma_irq_clear,
1099 .irq_on = ata_irq_on,
751 1100
1101 .port_start = ata_sff_port_start,
1102};
1103
1104static struct legacy_controller controllers[] = {
1105 {"BIOS", &legacy_port_ops, 0x1F,
1106 ATA_FLAG_NO_IORDY, NULL },
1107 {"Snooping", &simple_port_ops, 0x1F,
1108 0 , NULL },
1109 {"PDC20230", &pdc20230_port_ops, 0x7,
1110 ATA_FLAG_NO_IORDY, NULL },
1111 {"HT6560A", &ht6560a_port_ops, 0x07,
1112 ATA_FLAG_NO_IORDY, NULL },
1113 {"HT6560B", &ht6560b_port_ops, 0x1F,
1114 ATA_FLAG_NO_IORDY, NULL },
1115 {"OPTI82C611A", &opti82c611a_port_ops, 0x0F,
1116 0 , NULL },
1117 {"OPTI82C46X", &opti82c46x_port_ops, 0x0F,
1118 0 , NULL },
1119 {"QDI6500", &qdi6500_port_ops, 0x07,
1120 ATA_FLAG_NO_IORDY, qdi_port },
1121 {"QDI6580", &qdi6580_port_ops, 0x1F,
1122 0 , qdi_port },
1123 {"QDI6580DP", &qdi6580dp_port_ops, 0x1F,
1124 0 , qdi_port },
1125 {"W83759A", &winbond_port_ops, 0x1F,
1126 0 , winbond_port }
1127};
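This table turns the rest of the rewrite into data-driven dispatch: legacy_init_one() indexes controllers[] by the probe's enum controller value and picks up the ops, PIO mask, extra flags and optional setup hook in one step. A sketch of that lookup, mirroring the function later in this hunk:

struct legacy_controller *c = &controllers[probe->type];
struct ata_port_operations *ops = c->ops;
int pio_modes = c->pio_mask;

iordy |= c->flags;	/* e.g. ATA_FLAG_NO_IORDY for the PDC20230 */
if (c->setup && c->setup(pdev, probe, ld) < 0)
	goto fail;	/* QDI/Winbond timing I/O region was busy */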
1128
1129/**
1130 * probe_chip_type - Discover controller
1131 * @probe: Probe entry to check
1132 *
1133 * Probe an ATA port and identify the type of controller. We don't
1134 * check if the controller appears to be driveless at this point.
1135 */
1136
1137static __init int probe_chip_type(struct legacy_probe *probe)
1138{
1139 int mask = 1 << probe->slot;
1140
1141 if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
1142 u8 reg = winbond_readcfg(winbond, 0x81);
1143 reg |= 0x80; /* jumpered mode off */
1144 winbond_writecfg(winbond, 0x81, reg);
1145 reg = winbond_readcfg(winbond, 0x83);
1146 reg |= 0xF0; /* local control */
1147 winbond_writecfg(winbond, 0x83, reg);
1148 reg = winbond_readcfg(winbond, 0x85);
1149 reg |= 0xF0; /* programmable timing */
1150 winbond_writecfg(winbond, 0x85, reg);
1151
1152 reg = winbond_readcfg(winbond, 0x81);
1153
1154 if (reg & mask)
1155 return W83759A;
1156 }
1157 if (probe->port == 0x1F0) {
1158 unsigned long flags;
1159 local_irq_save(flags);
752 /* Probes */ 1160 /* Probes */
753 inb(0x1F5);
754 outb(inb(0x1F2) | 0x80, 0x1F2); 1161 outb(inb(0x1F2) | 0x80, 0x1F2);
1162 inb(0x1F5);
755 inb(0x1F2); 1163 inb(0x1F2);
756 inb(0x3F6); 1164 inb(0x3F6);
757 inb(0x3F6); 1165 inb(0x3F6);
@@ -760,29 +1168,83 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
760 1168
761 if ((inb(0x1F2) & 0x80) == 0) { 1169 if ((inb(0x1F2) & 0x80) == 0) {
762 /* PDC20230c or 20630 ? */ 1170 /* PDC20230c or 20630 ? */
763 printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n"); 1171 printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller"
764 pio_modes = 0x07; 1172 " detected.\n");
765 ops = &pdc20230_port_ops;
766 iordy = ATA_FLAG_NO_IORDY;
767 udelay(100); 1173 udelay(100);
768 inb(0x1F5); 1174 inb(0x1F5);
1175 local_irq_restore(flags);
1176 return PDC20230;
769 } else { 1177 } else {
770 outb(0x55, 0x1F2); 1178 outb(0x55, 0x1F2);
771 inb(0x1F2); 1179 inb(0x1F2);
772 inb(0x1F2); 1180 inb(0x1F2);
773 if (inb(0x1F2) == 0x00) { 1181 if (inb(0x1F2) == 0x00)
774 printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n"); 1182 printk(KERN_INFO "PDC20230-B VLB ATA "
775 } 1183 "controller detected.\n");
1184 local_irq_restore(flags);
1185 return BIOS;
776 } 1186 }
777 local_irq_restore(flags); 1187 local_irq_restore(flags);
778 } 1188 }
779 1189
1190 if (ht6560a & mask)
1191 return HT6560A;
1192 if (ht6560b & mask)
1193 return HT6560B;
1194 if (opti82c611a & mask)
1195 return OPTI611A;
1196 if (opti82c46x & mask)
1197 return OPTI46X;
1198 if (autospeed & mask)
1199 return SNOOP;
1200 return BIOS;
1201}
1202
1203
1204/**
1205 * legacy_init_one - attach a legacy interface
1206 * @pl: probe record
1207 *
1208 * Register an ISA bus IDE interface. Such interfaces are PIO and we
 1209 * assume they do not support IRQ sharing.
1210 */
1211
1212static __init int legacy_init_one(struct legacy_probe *probe)
1213{
1214 struct legacy_controller *controller = &controllers[probe->type];
1215 int pio_modes = controller->pio_mask;
1216 unsigned long io = probe->port;
1217 u32 mask = (1 << probe->slot);
1218 struct ata_port_operations *ops = controller->ops;
1219 struct legacy_data *ld = &legacy_data[probe->slot];
1220 struct ata_host *host = NULL;
1221 struct ata_port *ap;
1222 struct platform_device *pdev;
1223 struct ata_device *dev;
1224 void __iomem *io_addr, *ctrl_addr;
1225 u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
1226 int ret;
780 1227
781 /* Chip does mode setting by command snooping */ 1228 iordy |= controller->flags;
782 if (ops == &legacy_port_ops && (autospeed & mask)) 1229
783 ops = &simple_port_ops; 1230 pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
1231 if (IS_ERR(pdev))
1232 return PTR_ERR(pdev);
1233
1234 ret = -EBUSY;
1235 if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
1236 devm_request_region(&pdev->dev, io + 0x0206, 1,
1237 "pata_legacy") == NULL)
1238 goto fail;
784 1239
785 ret = -ENOMEM; 1240 ret = -ENOMEM;
1241 io_addr = devm_ioport_map(&pdev->dev, io, 8);
1242 ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
1243 if (!io_addr || !ctrl_addr)
1244 goto fail;
1245 if (controller->setup)
1246 if (controller->setup(pdev, probe, ld) < 0)
1247 goto fail;
786 host = ata_host_alloc(&pdev->dev, 1); 1248 host = ata_host_alloc(&pdev->dev, 1);
787 if (!host) 1249 if (!host)
788 goto fail; 1250 goto fail;
@@ -795,19 +1257,29 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
795 ap->ioaddr.altstatus_addr = ctrl_addr; 1257 ap->ioaddr.altstatus_addr = ctrl_addr;
796 ap->ioaddr.ctl_addr = ctrl_addr; 1258 ap->ioaddr.ctl_addr = ctrl_addr;
797 ata_std_ports(&ap->ioaddr); 1259 ata_std_ports(&ap->ioaddr);
798 ap->private_data = ld; 1260 ap->host->private_data = ld;
799 1261
800 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl); 1262 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
801 1263
802 ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht); 1264 ret = ata_host_activate(host, probe->irq, ata_interrupt, 0,
1265 &legacy_sht);
803 if (ret) 1266 if (ret)
804 goto fail; 1267 goto fail;
805
806 legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
807 ld->platform_dev = pdev; 1268 ld->platform_dev = pdev;
808 return 0;
809 1269
 1270 /* Nothing found means we drop the port as it's probably not there */
1271
1272 ret = -ENODEV;
1273 ata_link_for_each_dev(dev, &ap->link) {
1274 if (!ata_dev_absent(dev)) {
1275 legacy_host[probe->slot] = host;
1276 ld->platform_dev = pdev;
1277 return 0;
1278 }
1279 }
810fail: 1280fail:
1281 if (host)
1282 ata_host_detach(host);
811 platform_device_unregister(pdev); 1283 platform_device_unregister(pdev);
812 return ret; 1284 return ret;
813} 1285}
@@ -818,13 +1290,15 @@ fail:
818 * @master: set this if we find an ATA master 1290 * @master: set this if we find an ATA master
819 * @secondary: set this if we find an ATA secondary 1291 * @secondary: set this if we find an ATA secondary
820 * 1292 *
821 * A small number of vendors implemented early PCI ATA interfaces on bridge logic 1293 * A small number of vendors implemented early PCI ATA interfaces
822 * without the ATA interface being PCI visible. Where we have a matching PCI driver 1294 * on bridge logic without the ATA interface being PCI visible.
823 * we must skip the relevant device here. If we don't know about it then the legacy 1295 * Where we have a matching PCI driver we must skip the relevant
824 * driver is the right driver anyway. 1296 * device here. If we don't know about it then the legacy driver
1297 * is the right driver anyway.
825 */ 1298 */
826 1299
827static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary) 1300static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
1301 int *secondary)
828{ 1302{
829 /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */ 1303 /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
830 if (p->vendor == 0x1078 && p->device == 0x0000) { 1304 if (p->vendor == 0x1078 && p->device == 0x0000) {
@@ -840,7 +1314,8 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
840 if (p->vendor == 0x8086 && p->device == 0x1234) { 1314 if (p->vendor == 0x8086 && p->device == 0x1234) {
841 u16 r; 1315 u16 r;
842 pci_read_config_word(p, 0x6C, &r); 1316 pci_read_config_word(p, 0x6C, &r);
843 if (r & 0x8000) { /* ATA port enabled */ 1317 if (r & 0x8000) {
1318 /* ATA port enabled */
844 if (r & 0x4000) 1319 if (r & 0x4000)
845 *secondary = 1; 1320 *secondary = 1;
846 else 1321 else
@@ -850,6 +1325,114 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
850 } 1325 }
851} 1326}
852 1327
1328static __init void probe_opti_vlb(void)
1329{
1330 /* If an OPTI 82C46X is present find out where the channels are */
1331 static const char *optis[4] = {
1332 "3/463MV", "5MV",
1333 "5MVA", "5MVB"
1334 };
1335 u8 chans = 1;
1336 u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
1337
1338 opti82c46x = 3; /* Assume master and slave first */
1339 printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
1340 optis[ctrl]);
1341 if (ctrl == 3)
1342 chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
1343 ctrl = opti_syscfg(0xAC);
1344 /* Check enabled and this port is the 465MV port. On the
1345 MVB we may have two channels */
1346 if (ctrl & 8) {
1347 if (chans == 2) {
1348 legacy_probe_add(0x1F0, 14, OPTI46X, 0);
1349 legacy_probe_add(0x170, 15, OPTI46X, 0);
1350 }
1351 if (ctrl & 4)
1352 legacy_probe_add(0x170, 15, OPTI46X, 0);
1353 else
1354 legacy_probe_add(0x1F0, 14, OPTI46X, 0);
1355 } else
1356 legacy_probe_add(0x1F0, 14, OPTI46X, 0);
1357}
1358
1359static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
1360{
1361 static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
1362 /* Check card type */
1363 if ((r & 0xF0) == 0xC0) {
1364 /* QD6500: single channel */
1365 if (r & 8)
1366 /* Disabled ? */
1367 return;
1368 legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
1369 QDI6500, port);
1370 }
1371 if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
1372 /* QD6580: dual channel */
 1373 if (!request_region(port + 2, 2, "pata_qdi")) {
1374 release_region(port, 2);
1375 return;
1376 }
1377 res = inb(port + 3);
1378 /* Single channel mode ? */
1379 if (res & 1)
1380 legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
1381 QDI6580, port);
1382 else { /* Dual channel mode */
1383 legacy_probe_add(0x1F0, 14, QDI6580DP, port);
1384 /* port + 0x02, r & 0x04 */
1385 legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
1386 }
1387 release_region(port + 2, 2);
1388 }
1389}
1390
1391static __init void probe_qdi_vlb(void)
1392{
1393 unsigned long flags;
1394 static const unsigned long qd_port[2] = { 0x30, 0xB0 };
1395 int i;
1396
1397 /*
1398 * Check each possible QD65xx base address
1399 */
1400
1401 for (i = 0; i < 2; i++) {
1402 unsigned long port = qd_port[i];
1403 u8 r, res;
1404
1405
1406 if (request_region(port, 2, "pata_qdi")) {
1407 /* Check for a card */
1408 local_irq_save(flags);
1409 /* I have no h/w that needs this delay but it
1410 is present in the historic code */
1411 r = inb(port);
1412 udelay(1);
1413 outb(0x19, port);
1414 udelay(1);
1415 res = inb(port);
1416 udelay(1);
1417 outb(r, port);
1418 udelay(1);
1419 local_irq_restore(flags);
1420
1421 /* Fail */
1422 if (res == 0x19) {
1423 release_region(port, 2);
1424 continue;
1425 }
1426 /* Passes the presence test */
1427 r = inb(port + 1);
1428 udelay(1);
1429 /* Check port agrees with port set */
1430 if ((r & 2) >> 1 == i)
1431 qdi65_identify_port(r, res, port);
1432 release_region(port, 2);
1433 }
1434 }
1435}
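A rough reading of the detection sequence above: the QD65xx configuration port does not simply latch writes, so reading back exactly the 0x19 probe pattern means nothing chip-specific responded and the base is treated as empty. Summarised:

/* r   = inb(port);      save the original value
   outb(0x19, port);     write a probe pattern
   res = inb(port);      a real QD65xx never echoes it back exactly
   outb(r, port);        restore
   then (inb(port + 1) & 2) >> 1 must equal the config-base index,
   i.e. 0 for base 0x30 and 1 for base 0xB0, before the port is
   handed to qdi65_identify_port() */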
853 1436
854/** 1437/**
855 * legacy_init - attach legacy interfaces 1438 * legacy_init - attach legacy interfaces
@@ -867,15 +1450,17 @@ static __init int legacy_init(void)
867 int ct = 0; 1450 int ct = 0;
868 int primary = 0; 1451 int primary = 0;
869 int secondary = 0; 1452 int secondary = 0;
870 int last_port = NR_HOST; 1453 int pci_present = 0;
1454 struct legacy_probe *pl = &probe_list[0];
1455 int slot = 0;
871 1456
872 struct pci_dev *p = NULL; 1457 struct pci_dev *p = NULL;
873 1458
874 for_each_pci_dev(p) { 1459 for_each_pci_dev(p) {
875 int r; 1460 int r;
876 /* Check for any overlap of the system ATA mappings. Native mode controllers 1461 /* Check for any overlap of the system ATA mappings. Native
877 stuck on these addresses or some devices in 'raid' mode won't be found by 1462 mode controllers stuck on these addresses or some devices
878 the storage class test */ 1463 in 'raid' mode won't be found by the storage class test */
879 for (r = 0; r < 6; r++) { 1464 for (r = 0; r < 6; r++) {
880 if (pci_resource_start(p, r) == 0x1f0) 1465 if (pci_resource_start(p, r) == 0x1f0)
881 primary = 1; 1466 primary = 1;
@@ -885,49 +1470,39 @@ static __init int legacy_init(void)
885 /* Check for special cases */ 1470 /* Check for special cases */
886 legacy_check_special_cases(p, &primary, &secondary); 1471 legacy_check_special_cases(p, &primary, &secondary);
887 1472
888 /* If PCI bus is present then don't probe for tertiary legacy ports */ 1473 /* If PCI bus is present then don't probe for tertiary
889 if (probe_all == 0) 1474 legacy ports */
890 last_port = 2; 1475 pci_present = 1;
891 } 1476 }
892 1477
893 /* If an OPTI 82C46X is present find out where the channels are */ 1478 if (winbond == 1)
894 if (opti82c46x) { 1479 winbond = 0x130; /* Default port, alt is 1B0 */
895 static const char *optis[4] = { 1480
896 "3/463MV", "5MV", 1481 if (primary == 0 || all)
897 "5MVA", "5MVB" 1482 legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
898 }; 1483 if (secondary == 0 || all)
899 u8 chans = 1; 1484 legacy_probe_add(0x170, 15, UNKNOWN, 0);
900 u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6; 1485
901 1486 if (probe_all || !pci_present) {
902 opti82c46x = 3; /* Assume master and slave first */ 1487 /* ISA/VLB extra ports */
903 printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]); 1488 legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
904 if (ctrl == 3) 1489 legacy_probe_add(0x168, 10, UNKNOWN, 0);
905 chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1; 1490 legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
906 ctrl = opti_syscfg(0xAC); 1491 legacy_probe_add(0x160, 12, UNKNOWN, 0);
907 /* Check enabled and this port is the 465MV port. On the
908 MVB we may have two channels */
909 if (ctrl & 8) {
910 if (ctrl & 4)
911 opti82c46x = 2; /* Slave */
912 else
913 opti82c46x = 1; /* Master */
914 if (chans == 2)
915 opti82c46x = 3; /* Master and Slave */
916 } /* Slave only */
917 else if (chans == 1)
918 opti82c46x = 1;
919 } 1492 }
920 1493
921 for (i = 0; i < last_port; i++) { 1494 if (opti82c46x)
922 /* Skip primary if we have seen a PCI one */ 1495 probe_opti_vlb();
923 if (i == 0 && primary == 1) 1496 if (qdi)
924 continue; 1497 probe_qdi_vlb();
925 /* Skip secondary if we have seen a PCI one */ 1498
926 if (i == 1 && secondary == 1) 1499 for (i = 0; i < NR_HOST; i++, pl++) {
1500 if (pl->port == 0)
927 continue; 1501 continue;
928 if (legacy_init_one(i, legacy_port[i], 1502 if (pl->type == UNKNOWN)
929 legacy_port[i] + 0x0206, 1503 pl->type = probe_chip_type(pl);
930 legacy_irq[i]) == 0) 1504 pl->slot = slot++;
1505 if (legacy_init_one(pl) == 0)
931 ct++; 1506 ct++;
932 } 1507 }
933 if (ct != 0) 1508 if (ct != 0)
@@ -941,11 +1516,8 @@ static __exit void legacy_exit(void)
941 1516
942 for (i = 0; i < nr_legacy_host; i++) { 1517 for (i = 0; i < nr_legacy_host; i++) {
943 struct legacy_data *ld = &legacy_data[i]; 1518 struct legacy_data *ld = &legacy_data[i];
944
945 ata_host_detach(legacy_host[i]); 1519 ata_host_detach(legacy_host[i]);
946 platform_device_unregister(ld->platform_dev); 1520 platform_device_unregister(ld->platform_dev);
947 if (ld->timing)
948 release_region(ld->timing, 2);
949 } 1521 }
950} 1522}
951 1523
@@ -960,9 +1532,9 @@ module_param(ht6560a, int, 0);
960module_param(ht6560b, int, 0); 1532module_param(ht6560b, int, 0);
961module_param(opti82c611a, int, 0); 1533module_param(opti82c611a, int, 0);
962module_param(opti82c46x, int, 0); 1534module_param(opti82c46x, int, 0);
1535module_param(qdi, int, 0);
963module_param(pio_mask, int, 0); 1536module_param(pio_mask, int, 0);
964module_param(iordy_mask, int, 0); 1537module_param(iordy_mask, int, 0);
965 1538
966module_init(legacy_init); 1539module_init(legacy_init);
967module_exit(legacy_exit); 1540module_exit(legacy_exit);
968
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 1a7ca37168b2..5413ebfa72e5 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -364,7 +364,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
364{ 364{
365 unsigned int ipb_freq; 365 unsigned int ipb_freq;
366 struct resource res_mem; 366 struct resource res_mem;
367 int ata_irq = NO_IRQ; 367 int ata_irq;
368 struct mpc52xx_ata __iomem *ata_regs; 368 struct mpc52xx_ata __iomem *ata_regs;
369 struct mpc52xx_ata_priv *priv; 369 struct mpc52xx_ata_priv *priv;
370 int rv; 370 int rv;
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
new file mode 100644
index 000000000000..1c1b83541d13
--- /dev/null
+++ b/drivers/ata/pata_ninja32.c
@@ -0,0 +1,214 @@
1/*
2 * pata_ninja32.c - Ninja32 PATA for new ATA layer
3 * (C) 2007 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
 6 * Note: The controller, like many controllers, has shared timings for
7 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
8 * in the dma_stop function. Thus we actually don't need a set_dmamode
9 * method as the PIO method is always called and will set the right PIO
10 * timing parameters.
11 *
12 * The Ninja32 Cardbus is not a generic SFF controller. Instead it is
13 * laid out as follows off BAR 0. This is based upon Mark Lord's delkin
14 * driver and the extensive analysis done by the BSD developers, notably
15 * ITOH Yasufumi.
16 *
17 * Base + 0x00 IRQ Status
18 * Base + 0x01 IRQ control
19 * Base + 0x02 Chipset control
20 * Base + 0x04 VDMA and reset control + wait bits
21 * Base + 0x08 BMIMBA
22 * Base + 0x0C DMA Length
23 * Base + 0x10 Taskfile
24 * Base + 0x18 BMDMA Status ?
25 * Base + 0x1C
26 * Base + 0x1D Bus master control
27 * bit 0 = enable
28 * bit 1 = 0 write/1 read
29 * bit 2 = 1 sgtable
30 * bit 3 = go
31 * bit 4-6 wait bits
32 * bit 7 = done
33 * Base + 0x1E AltStatus
34 * Base + 0x1F timing register
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <scsi/scsi_host.h>
44#include <linux/libata.h>
45
46#define DRV_NAME "pata_ninja32"
47#define DRV_VERSION "0.0.1"
48
49
50/**
51 * ninja32_set_piomode - set initial PIO mode data
52 * @ap: ATA interface
53 * @adev: ATA device
54 *
55 * Called to do the PIO mode setup. Our timing registers are shared
56 * but we want to set the PIO timing by default.
57 */
58
59static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
60{
61 static u16 pio_timing[5] = {
62 0xd6, 0x85, 0x44, 0x33, 0x13
63 };
64 iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
65 ap->ioaddr.bmdma_addr + 0x1f);
66 ap->private_data = adev;
67}
68
69
70static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
71{
72 struct ata_device *adev = &ap->link.device[device];
73 if (ap->private_data != adev) {
74 iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
75 ata_std_dev_select(ap, device);
76 ninja32_set_piomode(ap, adev);
77 }
78}
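ninja32_set_piomode() indexes a five-entry table by the negotiated PIO mode and writes the result to the shared PIO/DMA timing register at BAR0 + 0x1f described in the header comment. A worked example:

/* A device negotiated to PIO4 has adev->pio_mode == XFER_PIO_0 + 4,
   so pio_timing[4] == 0x13 is written to base + 0x1f. Because both
   devices share this one register, dev_select() above first drops
   back to the safe PIO0 value 0xd6, then reloads the incoming
   device's timing via ninja32_set_piomode(). */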
79
80static struct scsi_host_template ninja32_sht = {
81 .module = THIS_MODULE,
82 .name = DRV_NAME,
83 .ioctl = ata_scsi_ioctl,
84 .queuecommand = ata_scsi_queuecmd,
85 .can_queue = ATA_DEF_QUEUE,
86 .this_id = ATA_SHT_THIS_ID,
87 .sg_tablesize = LIBATA_MAX_PRD,
88 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
89 .emulated = ATA_SHT_EMULATED,
90 .use_clustering = ATA_SHT_USE_CLUSTERING,
91 .proc_name = DRV_NAME,
92 .dma_boundary = ATA_DMA_BOUNDARY,
93 .slave_configure = ata_scsi_slave_config,
94 .slave_destroy = ata_scsi_slave_destroy,
95 .bios_param = ata_std_bios_param,
96};
97
98static struct ata_port_operations ninja32_port_ops = {
99 .set_piomode = ninja32_set_piomode,
100 .mode_filter = ata_pci_default_filter,
101
102 .tf_load = ata_tf_load,
103 .tf_read = ata_tf_read,
104 .check_status = ata_check_status,
105 .exec_command = ata_exec_command,
106 .dev_select = ninja32_dev_select,
107
108 .freeze = ata_bmdma_freeze,
109 .thaw = ata_bmdma_thaw,
110 .error_handler = ata_bmdma_error_handler,
111 .post_internal_cmd = ata_bmdma_post_internal_cmd,
112 .cable_detect = ata_cable_40wire,
113
114 .bmdma_setup = ata_bmdma_setup,
115 .bmdma_start = ata_bmdma_start,
116 .bmdma_stop = ata_bmdma_stop,
117 .bmdma_status = ata_bmdma_status,
118
119 .qc_prep = ata_qc_prep,
120 .qc_issue = ata_qc_issue_prot,
121
122 .data_xfer = ata_data_xfer,
123
124 .irq_handler = ata_interrupt,
125 .irq_clear = ata_bmdma_irq_clear,
126 .irq_on = ata_irq_on,
127
128 .port_start = ata_sff_port_start,
129};
130
131static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
132{
133 struct ata_host *host;
134 struct ata_port *ap;
135 void __iomem *base;
136 int rc;
137
138 host = ata_host_alloc(&dev->dev, 1);
139 if (!host)
140 return -ENOMEM;
141 ap = host->ports[0];
142
143 /* Set up the PCI device */
144 rc = pcim_enable_device(dev);
145 if (rc)
146 return rc;
147 rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
148 if (rc == -EBUSY)
149 pcim_pin_device(dev);
150 if (rc)
151 return rc;
152
153 host->iomap = pcim_iomap_table(dev);
154 rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
155 if (rc)
156 return rc;
157 rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
158 if (rc)
159 return rc;
160 pci_set_master(dev);
161
162 /* Set up the register mappings */
163 base = host->iomap[0];
164 if (!base)
165 return -ENOMEM;
166 ap->ops = &ninja32_port_ops;
167 ap->pio_mask = 0x1F;
168 ap->flags |= ATA_FLAG_SLAVE_POSS;
169
170 ap->ioaddr.cmd_addr = base + 0x10;
171 ap->ioaddr.ctl_addr = base + 0x1E;
172 ap->ioaddr.altstatus_addr = base + 0x1E;
173 ap->ioaddr.bmdma_addr = base;
174 ata_std_ports(&ap->ioaddr);
175
176 iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
177 iowrite8(0xB3, base + 0x02); /* Burst, ?? setup */
178 iowrite8(0x00, base + 0x04); /* WAIT0 ? */
179 /* FIXME: Should we disable them at remove ? */
180 return ata_host_activate(host, dev->irq, ata_interrupt,
181 IRQF_SHARED, &ninja32_sht);
182}
183
184static const struct pci_device_id ninja32[] = {
185 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
186 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
187 { },
188};
189
190static struct pci_driver ninja32_pci_driver = {
191 .name = DRV_NAME,
192 .id_table = ninja32,
193 .probe = ninja32_init_one,
194 .remove = ata_pci_remove_one
195};
196
197static int __init ninja32_init(void)
198{
199 return pci_register_driver(&ninja32_pci_driver);
200}
201
202static void __exit ninja32_exit(void)
203{
204 pci_unregister_driver(&ninja32_pci_driver);
205}
206
207MODULE_AUTHOR("Alan Cox");
208MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
209MODULE_LICENSE("GPL");
210MODULE_DEVICE_TABLE(pci, ninja32);
211MODULE_VERSION(DRV_VERSION);
212
213module_init(ninja32_init);
214module_exit(ninja32_exit);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index fd36099428a4..3e7f6a9da28b 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -42,7 +42,7 @@
42 42
43 43
44#define DRV_NAME "pata_pcmcia" 44#define DRV_NAME "pata_pcmcia"
45#define DRV_VERSION "0.3.2" 45#define DRV_VERSION "0.3.3"
46 46
47/* 47/*
48 * Private data structure to glue stuff together 48 * Private data structure to glue stuff together
@@ -86,6 +86,47 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
86 return ata_do_set_mode(link, r_failed_dev); 86 return ata_do_set_mode(link, r_failed_dev);
87} 87}
88 88
89/**
90 * pcmcia_set_mode_8bit - PCMCIA specific mode setup
91 * @link: link
92 * @r_failed_dev: Return pointer for failed device
93 *
94 * For the simple emulated 8bit stuff the less we do the better.
95 */
96
97static int pcmcia_set_mode_8bit(struct ata_link *link,
98 struct ata_device **r_failed_dev)
99{
100 return 0;
101}
102
103/**
104 * ata_data_xfer_8bit - Transfer data by 8bit PIO
105 * @dev: device to target
106 * @buf: data buffer
107 * @buflen: buffer length
108 * @rw: read/write
109 *
110 * Transfer data from/to the device data register by 8 bit PIO.
111 *
112 * LOCKING:
113 * Inherited from caller.
114 */
115
116static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
117 unsigned char *buf, unsigned int buflen, int rw)
118{
119 struct ata_port *ap = dev->link->ap;
120
121 if (rw == READ)
122 ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
123 else
124 iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
125
126 return buflen;
127}
128
129
89static struct scsi_host_template pcmcia_sht = { 130static struct scsi_host_template pcmcia_sht = {
90 .module = THIS_MODULE, 131 .module = THIS_MODULE,
91 .name = DRV_NAME, 132 .name = DRV_NAME,
@@ -129,6 +170,31 @@ static struct ata_port_operations pcmcia_port_ops = {
129 .port_start = ata_sff_port_start, 170 .port_start = ata_sff_port_start,
130}; 171};
131 172
173static struct ata_port_operations pcmcia_8bit_port_ops = {
174 .set_mode = pcmcia_set_mode_8bit,
175 .tf_load = ata_tf_load,
176 .tf_read = ata_tf_read,
177 .check_status = ata_check_status,
178 .exec_command = ata_exec_command,
179 .dev_select = ata_std_dev_select,
180
181 .freeze = ata_bmdma_freeze,
182 .thaw = ata_bmdma_thaw,
183 .error_handler = ata_bmdma_error_handler,
184 .post_internal_cmd = ata_bmdma_post_internal_cmd,
185 .cable_detect = ata_cable_40wire,
186
187 .qc_prep = ata_qc_prep,
188 .qc_issue = ata_qc_issue_prot,
189
190 .data_xfer = ata_data_xfer_8bit,
191
192 .irq_clear = ata_bmdma_irq_clear,
193 .irq_on = ata_irq_on,
194
195 .port_start = ata_sff_port_start,
196};
197
132#define CS_CHECK(fn, ret) \ 198#define CS_CHECK(fn, ret) \
133do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 199do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
134 200
@@ -153,9 +219,12 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
153 cistpl_cftable_entry_t dflt; 219 cistpl_cftable_entry_t dflt;
154 } *stk = NULL; 220 } *stk = NULL;
155 cistpl_cftable_entry_t *cfg; 221 cistpl_cftable_entry_t *cfg;
156 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM; 222 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p;
157 unsigned long io_base, ctl_base; 223 unsigned long io_base, ctl_base;
158 void __iomem *io_addr, *ctl_addr; 224 void __iomem *io_addr, *ctl_addr;
225 int n_ports = 1;
226
227 struct ata_port_operations *ops = &pcmcia_port_ops;
159 228
160 info = kzalloc(sizeof(*info), GFP_KERNEL); 229 info = kzalloc(sizeof(*info), GFP_KERNEL);
161 if (info == NULL) 230 if (info == NULL)
@@ -282,27 +351,32 @@ next_entry:
282 /* FIXME: Could be more ports at base + 0x10 but we only deal with 351 /* FIXME: Could be more ports at base + 0x10 but we only deal with
283 one right now */ 352 one right now */
284 if (pdev->io.NumPorts1 >= 0x20) 353 if (pdev->io.NumPorts1 >= 0x20)
285 printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n"); 354 n_ports = 2;
286 355
356 if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
357 ops = &pcmcia_8bit_port_ops;
287 /* 358 /*
288 * Having done the PCMCIA plumbing the ATA side is relatively 359 * Having done the PCMCIA plumbing the ATA side is relatively
289 * sane. 360 * sane.
290 */ 361 */
291 ret = -ENOMEM; 362 ret = -ENOMEM;
292 host = ata_host_alloc(&pdev->dev, 1); 363 host = ata_host_alloc(&pdev->dev, n_ports);
293 if (!host) 364 if (!host)
294 goto failed; 365 goto failed;
295 ap = host->ports[0];
296 366
297 ap->ops = &pcmcia_port_ops; 367 for (p = 0; p < n_ports; p++) {
298 ap->pio_mask = 1; /* ISA so PIO 0 cycles */ 368 ap = host->ports[p];
299 ap->flags |= ATA_FLAG_SLAVE_POSS;
300 ap->ioaddr.cmd_addr = io_addr;
301 ap->ioaddr.altstatus_addr = ctl_addr;
302 ap->ioaddr.ctl_addr = ctl_addr;
303 ata_std_ports(&ap->ioaddr);
304 369
305 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); 370 ap->ops = ops;
371 ap->pio_mask = 1; /* ISA so PIO 0 cycles */
372 ap->flags |= ATA_FLAG_SLAVE_POSS;
373 ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
374 ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
375 ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
376 ata_std_ports(&ap->ioaddr);
377
378 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
379 }
306 380
307 /* activate */ 381 /* activate */
308 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt, 382 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
@@ -360,6 +434,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
360 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), 434 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
361 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), 435 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
362 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ 436 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
437 PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */
363 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ 438 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
364 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), 439 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
365 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ 440 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
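
Two things happen in the pata_pcmcia changes above: cards whose I/O window decodes at least 0x20 bytes now get a real second channel 0x10 above the first, and the TI-emulated CompactFlash (manf 0x0097, card 0x1620) is steered to pcmcia_8bit_port_ops, whose data_xfer moves bytes with ioread8_rep()/iowrite8_rep(). The channel layout works out as in this worked example (io_base of 0x100 is hypothetical, purely for illustration):

	/* Hypothetical card decoding 0x20 bytes at io_base = 0x100:
	 *   port 0: cmd at 0x100..0x107, ctl at ctl_base
	 *   port 1: cmd at 0x110..0x117, ctl at ctl_base + 0x10
	 */
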
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 2622577521a1..028af5dbeed6 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
348 ata_id_c_string(pair->id, model_num, ATA_ID_PROD, 348 ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
349 ATA_ID_PROD_LEN + 1); 349 ATA_ID_PROD_LEN + 1);
350 /* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */ 350 /* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
351 if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6) 351 if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
352 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); 352 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
353 353
354 return ata_pci_default_filter(adev, mask); 354 return ata_pci_default_filter(adev, mask);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 6c9689b59b06..3ed866723e0c 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -168,8 +168,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
168 pdc202xx_set_dmamode(ap, qc->dev); 168 pdc202xx_set_dmamode(ap, qc->dev);
169 169
170 /* Cases the state machine will not complete correctly without help */ 170 /* Cases the state machine will not complete correctly without help */
171 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) 171 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATAPI_PROT_DMA) {
172 {
173 len = qc->nbytes / 2; 172 len = qc->nbytes / 2;
174 173
175 if (tf->flags & ATA_TFLAG_WRITE) 174 if (tf->flags & ATA_TFLAG_WRITE)
@@ -208,7 +207,7 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
208 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no); 207 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
209 208
210 /* Cases the state machine will not complete correctly */ 209 /* Cases the state machine will not complete correctly */
211 if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) { 210 if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
212 iowrite32(0, atapi_reg); 211 iowrite32(0, atapi_reg);
213 iowrite8(ioread8(clock) & ~sel66, clock); 212 iowrite8(ioread8(clock) & ~sel66, clock);
214 } 213 }
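
The ATA_PROT_ATAPI_DMA to ATAPI_PROT_DMA edits here belong to a tree-wide rename of the ATAPI protocol constants; the same substitution recurs below in pdc_adma, sata_promise, sata_qstor, sata_sil24 and sata_sx4. Where a driver only cares about the protocol class, the series switches it to predicate helpers instead (see sata_fsl, sata_sil and sata_sil24 below). Behaviourally the predicates amount to this sketch; it is not the in-tree implementation, whose real definitions live in include/linux/ata.h:

	/* Behavioural sketch only -- not the in-tree implementation. */
	static inline int ata_is_atapi(u8 prot)
	{
		return prot == ATAPI_PROT_PIO || prot == ATAPI_PROT_DMA ||
		       prot == ATAPI_PROT_NODATA;
	}

	static inline int ata_is_dma(u8 prot)
	{
		return prot == ATA_PROT_DMA || prot == ATA_PROT_NCQ ||
		       prot == ATAPI_PROT_DMA;
	}
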
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index a4c0e502cb42..9f308ed76cc8 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -124,29 +124,33 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
124 return ata_qc_issue_prot(qc); 124 return ata_qc_issue_prot(qc);
125} 125}
126 126
127static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 127static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
128 unsigned int buflen, int rw)
128{ 129{
129 struct ata_port *ap = adev->link->ap; 130 if (ata_id_has_dword_io(dev->id)) {
130 int slop = buflen & 3; 131 struct ata_port *ap = dev->link->ap;
132 int slop = buflen & 3;
131 133
132 if (ata_id_has_dword_io(adev->id)) { 134 if (rw == READ)
133 if (write_data)
134 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
135 else
136 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 135 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
136 else
137 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
137 138
138 if (unlikely(slop)) { 139 if (unlikely(slop)) {
139 __le32 pad = 0; 140 u32 pad;
140 if (write_data) { 141 if (rw == READ) {
141 memcpy(&pad, buf + buflen - slop, slop);
142 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
143 } else {
144 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 142 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
145 memcpy(buf + buflen - slop, &pad, slop); 143 memcpy(buf + buflen - slop, &pad, slop);
144 } else {
145 memcpy(&pad, buf + buflen - slop, slop);
146 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
146 } 147 }
148 buflen += 4 - slop;
147 } 149 }
148 } else 150 } else
149 ata_data_xfer(adev, buf, buflen, write_data); 151 buflen = ata_data_xfer(dev, buf, buflen, rw);
152
153 return buflen;
150} 154}
151 155
152static struct scsi_host_template qdi_sht = { 156static struct scsi_host_template qdi_sht = {
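
The reworked qdi_data_xfer() moves whole 32-bit words when the device advertises DWORD I/O, bounces the trailing 1-3 bytes through a pad word, and now reports back how many bytes actually crossed the bus: the count rounded up to a word boundary. pata_winbond.c below receives the identical treatment. A condensed sketch of the read path; dword_pio_read is an illustrative name, not a kernel function, and the endianness conversion of the pad word is omitted for clarity:

	#include <linux/io.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static unsigned int dword_pio_read(void __iomem *data, u8 *buf,
					   unsigned int buflen)
	{
		unsigned int slop = buflen & 3;	/* 0..3 trailing bytes */

		ioread32_rep(data, buf, buflen >> 2);	/* whole words */

		if (slop) {
			u32 pad = ioread32(data);	/* drains a full word */
			memcpy(buf + buflen - slop, &pad, slop);
			buflen += 4 - slop;	/* report the rounded count */
		}
		return buflen;	/* e.g. 510 bytes in -> returns 512 */
	}
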
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index ea2ef9fc15be..55055b27524c 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -768,45 +768,47 @@ static u8 scc_bmdma_status (struct ata_port *ap)
768 768
769/** 769/**
770 * scc_data_xfer - Transfer data by PIO 770 * scc_data_xfer - Transfer data by PIO
771 * @adev: device for this I/O 771 * @dev: device for this I/O
772 * @buf: data buffer 772 * @buf: data buffer
773 * @buflen: buffer length 773 * @buflen: buffer length
774 * @write_data: read/write 774 * @rw: read/write
775 * 775 *
776 * Note: Original code is ata_data_xfer(). 776 * Note: Original code is ata_data_xfer().
777 */ 777 */
778 778
779static void scc_data_xfer (struct ata_device *adev, unsigned char *buf, 779static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
780 unsigned int buflen, int write_data) 780 unsigned int buflen, int rw)
781{ 781{
782 struct ata_port *ap = adev->link->ap; 782 struct ata_port *ap = dev->link->ap;
783 unsigned int words = buflen >> 1; 783 unsigned int words = buflen >> 1;
784 unsigned int i; 784 unsigned int i;
785 u16 *buf16 = (u16 *) buf; 785 u16 *buf16 = (u16 *) buf;
786 void __iomem *mmio = ap->ioaddr.data_addr; 786 void __iomem *mmio = ap->ioaddr.data_addr;
787 787
788 /* Transfer multiple of 2 bytes */ 788 /* Transfer multiple of 2 bytes */
789 if (write_data) { 789 if (rw == READ)
790 for (i = 0; i < words; i++)
791 out_be32(mmio, cpu_to_le16(buf16[i]));
792 } else {
793 for (i = 0; i < words; i++) 790 for (i = 0; i < words; i++)
794 buf16[i] = le16_to_cpu(in_be32(mmio)); 791 buf16[i] = le16_to_cpu(in_be32(mmio));
795 } 792 else
793 for (i = 0; i < words; i++)
794 out_be32(mmio, cpu_to_le16(buf16[i]));
796 795
797 /* Transfer trailing 1 byte, if any. */ 796 /* Transfer trailing 1 byte, if any. */
798 if (unlikely(buflen & 0x01)) { 797 if (unlikely(buflen & 0x01)) {
799 u16 align_buf[1] = { 0 }; 798 u16 align_buf[1] = { 0 };
800 unsigned char *trailing_buf = buf + buflen - 1; 799 unsigned char *trailing_buf = buf + buflen - 1;
801 800
802 if (write_data) { 801 if (rw == READ) {
803 memcpy(align_buf, trailing_buf, 1);
804 out_be32(mmio, cpu_to_le16(align_buf[0]));
805 } else {
806 align_buf[0] = le16_to_cpu(in_be32(mmio)); 802 align_buf[0] = le16_to_cpu(in_be32(mmio));
807 memcpy(trailing_buf, align_buf, 1); 803 memcpy(trailing_buf, align_buf, 1);
804 } else {
805 memcpy(align_buf, trailing_buf, 1);
806 out_be32(mmio, cpu_to_le16(align_buf[0]));
808 } 807 }
808 words++;
809 } 809 }
810
811 return words << 1;
810} 812}
811 813
812/** 814/**
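
scc_data_xfer() now reports the number of bytes actually clocked through the data register, which for an odd-length buffer is the length rounded up to a full 16-bit word:

	/* Worked example for the return value above:
	 *   buflen = 7  ->  words = 7 >> 1 = 3, i.e. 6 bytes in the loop;
	 *   the trailing byte goes through align_buf and words becomes 4;
	 *   the function returns words << 1 = 8.
	 */
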
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 8bed88873720..9c523fbf529e 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_serverworks" 43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.4.2" 44#define DRV_VERSION "0.4.3"
45 45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ 46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ 47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -102,7 +102,7 @@ static int osb4_cable(struct ata_port *ap) {
102} 102}
103 103
104/** 104/**
105 * csb4_cable - CSB5/6 cable detect 105 * csb_cable - CSB5/6 cable detect
106 * @ap: ATA port to check 106 * @ap: ATA port to check
107 * 107 *
108 * Serverworks default arrangement is to use the drive side detection 108 * Serverworks default arrangement is to use the drive side detection
@@ -110,7 +110,7 @@ static int osb4_cable(struct ata_port *ap) {
110 */ 110 */
111 111
112static int csb_cable(struct ata_port *ap) { 112static int csb_cable(struct ata_port *ap) {
113 return ATA_CBL_PATA80; 113 return ATA_CBL_PATA_UNK;
114} 114}
115 115
116struct sv_cable_table { 116struct sv_cable_table {
@@ -231,7 +231,6 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
231 return ata_pci_default_filter(adev, mask); 231 return ata_pci_default_filter(adev, mask);
232} 232}
233 233
234
235/** 234/**
236 * serverworks_set_piomode - set initial PIO mode data 235 * serverworks_set_piomode - set initial PIO mode data
237 * @ap: ATA interface 236 * @ap: ATA interface
@@ -243,7 +242,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
243static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev) 242static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
244{ 243{
245 static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 }; 244 static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
246 int offset = 1 + (2 * ap->port_no) - adev->devno; 245 int offset = 1 + 2 * ap->port_no - adev->devno;
247 int devbits = (2 * ap->port_no + adev->devno) * 4; 246 int devbits = (2 * ap->port_no + adev->devno) * 4;
248 u16 csb5_pio; 247 u16 csb5_pio;
249 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 248 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
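
Besides the cable-detect change (csb_cable() now reports ATA_CBL_PATA_UNK so drive-side detection decides, instead of unconditionally claiming an 80-wire cable), the timing-register index in serverworks_set_piomode() merely loses a redundant pair of parentheses. Its value per (port, device) is unchanged:

	/* offset = 1 + 2 * port_no - devno:
	 *   port 0, dev 0 -> 1    port 0, dev 1 -> 0
	 *   port 1, dev 0 -> 3    port 1, dev 1 -> 2
	 * within each channel the master's timing byte sits one above
	 * the slave's. */
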
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 453d72bf2598..39627ab684bf 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -185,7 +185,8 @@ static int via_cable_detect(struct ata_port *ap) {
185 if (ata66 & (0x10100000 >> (16 * ap->port_no))) 185 if (ata66 & (0x10100000 >> (16 * ap->port_no)))
186 return ATA_CBL_PATA80; 186 return ATA_CBL_PATA80;
187 /* Check with ACPI so we can spot BIOS reported SATA bridges */ 187 /* Check with ACPI so we can spot BIOS reported SATA bridges */
188 if (ata_acpi_cbl_80wire(ap)) 188 if (ata_acpi_init_gtm(ap) &&
189 ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
189 return ATA_CBL_PATA80; 190 return ATA_CBL_PATA80;
190 return ATA_CBL_PATA40; 191 return ATA_CBL_PATA40;
191} 192}
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 7116a9e7a8b2..99c92eda217b 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -92,29 +92,33 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
92} 92}
93 93
94 94
95static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 95static unsigned int winbond_data_xfer(struct ata_device *dev,
96 unsigned char *buf, unsigned int buflen, int rw)
96{ 97{
97 struct ata_port *ap = adev->link->ap; 98 struct ata_port *ap = dev->link->ap;
98 int slop = buflen & 3; 99 int slop = buflen & 3;
99 100
100 if (ata_id_has_dword_io(adev->id)) { 101 if (ata_id_has_dword_io(dev->id)) {
101 if (write_data) 102 if (rw == READ)
102 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
103 else
104 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 103 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
105 106
106 if (unlikely(slop)) { 107 if (unlikely(slop)) {
107 __le32 pad = 0; 108 u32 pad;
108 if (write_data) { 109 if (rw == READ) {
109 memcpy(&pad, buf + buflen - slop, slop);
110 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
111 } else {
112 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); 110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
113 memcpy(buf + buflen - slop, &pad, slop); 111 memcpy(buf + buflen - slop, &pad, slop);
112 } else {
113 memcpy(&pad, buf + buflen - slop, slop);
114 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
114 } 115 }
116 buflen += 4 - slop;
115 } 117 }
116 } else 118 } else
117 ata_data_xfer(adev, buf, buflen, write_data); 119 buflen = ata_data_xfer(dev, buf, buflen, rw);
120
121 return buflen;
118} 122}
119 123
120static struct scsi_host_template winbond_sht = { 124static struct scsi_host_template winbond_sht = {
@@ -191,7 +195,7 @@ static __init int winbond_init_one(unsigned long port)
191 reg = winbond_readcfg(port, 0x81); 195 reg = winbond_readcfg(port, 0x81);
192 196
193 if (!(reg & 0x03)) /* Disabled */ 197 if (!(reg & 0x03)) /* Disabled */
194 return 0; 198 return -ENODEV;
195 199
196 for (i = 0; i < 2 ; i ++) { 200 for (i = 0; i < 2 ; i ++) {
197 unsigned long cmd_port = 0x1F0 - (0x80 * i); 201 unsigned long cmd_port = 0x1F0 - (0x80 * i);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index bd4c2a3c88d7..8e1b7e9c0ae4 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -321,8 +321,9 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
321 u8 *buf = pp->pkt, *last_buf = NULL; 321 u8 *buf = pp->pkt, *last_buf = NULL;
322 int i = (2 + buf[3]) * 8; 322 int i = (2 + buf[3]) * 8;
323 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); 323 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
324 unsigned int si;
324 325
325 ata_for_each_sg(sg, qc) { 326 for_each_sg(qc->sg, sg, qc->n_elem, si) {
326 u32 addr; 327 u32 addr;
327 u32 len; 328 u32 len;
328 329
@@ -455,7 +456,7 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
455 adma_packet_start(qc); 456 adma_packet_start(qc);
456 return 0; 457 return 0;
457 458
458 case ATA_PROT_ATAPI_DMA: 459 case ATAPI_PROT_DMA:
459 BUG(); 460 BUG();
460 break; 461 break;
461 462
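
adma_fill_sg() is the first of several conversions in this series (sata_fsl, sata_mv, sata_nv, sata_promise, sata_qstor, sata_sil24 and sata_sx4 follow) from libata's private ata_for_each_sg() to the generic chained-scatterlist iterator, which also retires the qc->__sg / qc->pad_len bookkeeping seen in the removed WARN_ONs. The converted loops all share this shape; fill_sg_sketch is an illustrative name:

	#include <linux/libata.h>
	#include <linux/scatterlist.h>

	/* for_each_sg() visits qc->n_elem entries starting at qc->sg,
	 * following any sg chaining, and leaves the element index in si. */
	static void fill_sg_sketch(struct ata_queued_cmd *qc)
	{
		struct scatterlist *sg;
		unsigned int si;

		for_each_sg(qc->sg, sg, qc->n_elem, si) {
			dma_addr_t addr = sg_dma_address(sg);
			u32 len = sg_dma_len(sg);

			/* emit one PRD/SGE entry for (addr, len) */
			(void)addr; (void)len;
		}
	}
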
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d015b4adcfe0..922d7b2efba8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -333,13 +333,14 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
333 struct prde *prd_ptr_to_indirect_ext = NULL; 333 struct prde *prd_ptr_to_indirect_ext = NULL;
334 unsigned indirect_ext_segment_sz = 0; 334 unsigned indirect_ext_segment_sz = 0;
335 dma_addr_t indirect_ext_segment_paddr; 335 dma_addr_t indirect_ext_segment_paddr;
336 unsigned int si;
336 337
337 VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd); 338 VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd);
338 339
339 indirect_ext_segment_paddr = cmd_desc_paddr + 340 indirect_ext_segment_paddr = cmd_desc_paddr +
340 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16; 341 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
341 342
342 ata_for_each_sg(sg, qc) { 343 for_each_sg(qc->sg, sg, qc->n_elem, si) {
343 dma_addr_t sg_addr = sg_dma_address(sg); 344 dma_addr_t sg_addr = sg_dma_address(sg);
344 u32 sg_len = sg_dma_len(sg); 345 u32 sg_len = sg_dma_len(sg);
345 346
@@ -417,7 +418,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
417 } 418 }
418 419
419 /* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */ 420 /* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
420 if (is_atapi_taskfile(&qc->tf)) { 421 if (ata_is_atapi(qc->tf.protocol)) {
421 desc_info |= ATAPI_CMD; 422 desc_info |= ATAPI_CMD;
422 memset((void *)&cd->acmd, 0, 32); 423 memset((void *)&cd->acmd, 0, 32);
423 memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len); 424 memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 323c087e8cc1..96e614a1c169 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -585,7 +585,7 @@ static struct ata_port_operations inic_port_ops = {
585}; 585};
586 586
587static struct ata_port_info inic_port_info = { 587static struct ata_port_info inic_port_info = {
588 /* For some reason, ATA_PROT_ATAPI is broken on this 588 /* For some reason, ATAPI_PROT_PIO is broken on this
589 * controller, and no, PIO_POLLING doesn't fix it. It somehow 589 * controller, and no, PIO_POLLING doesn't fix it. It somehow
590 * manages to report the wrong ireason and ignoring ireason 590 * manages to report the wrong ireason and ignoring ireason
591 * results in machine lock up. Tell libata to always prefer 591 * results in machine lock up. Tell libata to always prefer
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 37b850ae0845..7e72463a90eb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1136,9 +1136,10 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
1136 struct mv_port_priv *pp = qc->ap->private_data; 1136 struct mv_port_priv *pp = qc->ap->private_data;
1137 struct scatterlist *sg; 1137 struct scatterlist *sg;
1138 struct mv_sg *mv_sg, *last_sg = NULL; 1138 struct mv_sg *mv_sg, *last_sg = NULL;
1139 unsigned int si;
1139 1140
1140 mv_sg = pp->sg_tbl; 1141 mv_sg = pp->sg_tbl;
1141 ata_for_each_sg(sg, qc) { 1142 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1142 dma_addr_t addr = sg_dma_address(sg); 1143 dma_addr_t addr = sg_dma_address(sg);
1143 u32 sg_len = sg_dma_len(sg); 1144 u32 sg_len = sg_dma_len(sg);
1144 1145
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index ed5dc7cb50cd..a0f98fdab7a0 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1336,21 +1336,18 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1336static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) 1336static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1337{ 1337{
1338 struct nv_adma_port_priv *pp = qc->ap->private_data; 1338 struct nv_adma_port_priv *pp = qc->ap->private_data;
1339 unsigned int idx;
1340 struct nv_adma_prd *aprd; 1339 struct nv_adma_prd *aprd;
1341 struct scatterlist *sg; 1340 struct scatterlist *sg;
1341 unsigned int si;
1342 1342
1343 VPRINTK("ENTER\n"); 1343 VPRINTK("ENTER\n");
1344 1344
1345 idx = 0; 1345 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1346 1346 aprd = (si < 5) ? &cpb->aprd[si] :
1347 ata_for_each_sg(sg, qc) { 1347 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1348 aprd = (idx < 5) ? &cpb->aprd[idx] : 1348 nv_adma_fill_aprd(qc, sg, si, aprd);
1349 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1350 nv_adma_fill_aprd(qc, sg, idx, aprd);
1351 idx++;
1352 } 1349 }
1353 if (idx > 5) 1350 if (si > 5)
1354 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); 1351 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1355 else 1352 else
1356 cpb->next_aprd = cpu_to_le64(0); 1353 cpb->next_aprd = cpu_to_le64(0);
@@ -1995,17 +1992,14 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1995{ 1992{
1996 struct ata_port *ap = qc->ap; 1993 struct ata_port *ap = qc->ap;
1997 struct scatterlist *sg; 1994 struct scatterlist *sg;
1998 unsigned int idx;
1999 struct nv_swncq_port_priv *pp = ap->private_data; 1995 struct nv_swncq_port_priv *pp = ap->private_data;
2000 struct ata_prd *prd; 1996 struct ata_prd *prd;
2001 1997 unsigned int si, idx;
2002 WARN_ON(qc->__sg == NULL);
2003 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2004 1998
2005 prd = pp->prd + ATA_MAX_PRD * qc->tag; 1999 prd = pp->prd + ATA_MAX_PRD * qc->tag;
2006 2000
2007 idx = 0; 2001 idx = 0;
2008 ata_for_each_sg(sg, qc) { 2002 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2009 u32 addr, offset; 2003 u32 addr, offset;
2010 u32 sg_len, len; 2004 u32 sg_len, len;
2011 2005
@@ -2027,8 +2021,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2027 } 2021 }
2028 } 2022 }
2029 2023
2030 if (idx) 2024 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2031 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2032} 2025}
2033 2026
2034static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, 2027static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
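
In nv_adma_fill_sg() the iterator index si now doubles as the APRD slot number: the first five entries land inline in the CPB and the rest spill into the per-tag table, with next_aprd pointed at that table only when a spill occurred. Since for_each_sg() leaves si equal to qc->n_elem after the loop, the si > 5 test means exactly "more than five elements were emitted":

	/* e.g. qc->n_elem = 7: si runs 0..6 inside the loop
	 *   si 0..4 -> cpb->aprd[si]                        (inline slots)
	 *   si 5..6 -> pp->aprd[NV_ADMA_SGTBL_LEN * tag + (si - 5)]
	 * after the loop si == 7 > 5, so next_aprd is set to the table. */
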
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 7914def54fa3..a07d319f6e8c 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -450,19 +450,19 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
450 struct pdc_port_priv *pp = ap->private_data; 450 struct pdc_port_priv *pp = ap->private_data;
451 u8 *buf = pp->pkt; 451 u8 *buf = pp->pkt;
452 u32 *buf32 = (u32 *) buf; 452 u32 *buf32 = (u32 *) buf;
453 unsigned int dev_sel, feature, nbytes; 453 unsigned int dev_sel, feature;
454 454
455 /* set control bits (byte 0), zero delay seq id (byte 3), 455 /* set control bits (byte 0), zero delay seq id (byte 3),
456 * and seq id (byte 2) 456 * and seq id (byte 2)
457 */ 457 */
458 switch (qc->tf.protocol) { 458 switch (qc->tf.protocol) {
459 case ATA_PROT_ATAPI_DMA: 459 case ATAPI_PROT_DMA:
460 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 460 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
461 buf32[0] = cpu_to_le32(PDC_PKT_READ); 461 buf32[0] = cpu_to_le32(PDC_PKT_READ);
462 else 462 else
463 buf32[0] = 0; 463 buf32[0] = 0;
464 break; 464 break;
465 case ATA_PROT_ATAPI_NODATA: 465 case ATAPI_PROT_NODATA:
466 buf32[0] = cpu_to_le32(PDC_PKT_NODATA); 466 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
467 break; 467 break;
468 default: 468 default:
@@ -473,45 +473,37 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
473 buf32[2] = 0; /* no next-packet */ 473 buf32[2] = 0; /* no next-packet */
474 474
475 /* select drive */ 475 /* select drive */
476 if (sata_scr_valid(&ap->link)) { 476 if (sata_scr_valid(&ap->link))
477 dev_sel = PDC_DEVICE_SATA; 477 dev_sel = PDC_DEVICE_SATA;
478 } else { 478 else
479 dev_sel = ATA_DEVICE_OBS; 479 dev_sel = qc->tf.device;
480 if (qc->dev->devno != 0) 480
481 dev_sel |= ATA_DEV1;
482 }
483 buf[12] = (1 << 5) | ATA_REG_DEVICE; 481 buf[12] = (1 << 5) | ATA_REG_DEVICE;
484 buf[13] = dev_sel; 482 buf[13] = dev_sel;
485 buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY; 483 buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
486 buf[15] = dev_sel; /* once more, waiting for BSY to clear */ 484 buf[15] = dev_sel; /* once more, waiting for BSY to clear */
487 485
488 buf[16] = (1 << 5) | ATA_REG_NSECT; 486 buf[16] = (1 << 5) | ATA_REG_NSECT;
489 buf[17] = 0x00; 487 buf[17] = qc->tf.nsect;
490 buf[18] = (1 << 5) | ATA_REG_LBAL; 488 buf[18] = (1 << 5) | ATA_REG_LBAL;
491 buf[19] = 0x00; 489 buf[19] = qc->tf.lbal;
492 490
493 /* set feature and byte counter registers */ 491 /* set feature and byte counter registers */
494 if (qc->tf.protocol != ATA_PROT_ATAPI_DMA) { 492 if (qc->tf.protocol != ATAPI_PROT_DMA)
495 feature = PDC_FEATURE_ATAPI_PIO; 493 feature = PDC_FEATURE_ATAPI_PIO;
496 /* set byte counter register to real transfer byte count */ 494 else
497 nbytes = qc->nbytes;
498 if (nbytes > 0xffff)
499 nbytes = 0xffff;
500 } else {
501 feature = PDC_FEATURE_ATAPI_DMA; 495 feature = PDC_FEATURE_ATAPI_DMA;
502 /* set byte counter register to 0 */ 496
503 nbytes = 0;
504 }
505 buf[20] = (1 << 5) | ATA_REG_FEATURE; 497 buf[20] = (1 << 5) | ATA_REG_FEATURE;
506 buf[21] = feature; 498 buf[21] = feature;
507 buf[22] = (1 << 5) | ATA_REG_BYTEL; 499 buf[22] = (1 << 5) | ATA_REG_BYTEL;
508 buf[23] = nbytes & 0xFF; 500 buf[23] = qc->tf.lbam;
509 buf[24] = (1 << 5) | ATA_REG_BYTEH; 501 buf[24] = (1 << 5) | ATA_REG_BYTEH;
510 buf[25] = (nbytes >> 8) & 0xFF; 502 buf[25] = qc->tf.lbah;
511 503
512 /* send ATAPI packet command 0xA0 */ 504 /* send ATAPI packet command 0xA0 */
513 buf[26] = (1 << 5) | ATA_REG_CMD; 505 buf[26] = (1 << 5) | ATA_REG_CMD;
514 buf[27] = ATA_CMD_PACKET; 506 buf[27] = qc->tf.command;
515 507
516 /* select drive and check DRQ */ 508 /* select drive and check DRQ */
517 buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY; 509 buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
@@ -541,17 +533,15 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
541{ 533{
542 struct ata_port *ap = qc->ap; 534 struct ata_port *ap = qc->ap;
543 struct scatterlist *sg; 535 struct scatterlist *sg;
544 unsigned int idx;
545 const u32 SG_COUNT_ASIC_BUG = 41*4; 536 const u32 SG_COUNT_ASIC_BUG = 41*4;
537 unsigned int si, idx;
538 u32 len;
546 539
547 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 540 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
548 return; 541 return;
549 542
550 WARN_ON(qc->__sg == NULL);
551 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
552
553 idx = 0; 543 idx = 0;
554 ata_for_each_sg(sg, qc) { 544 for_each_sg(qc->sg, sg, qc->n_elem, si) {
555 u32 addr, offset; 545 u32 addr, offset;
556 u32 sg_len, len; 546 u32 sg_len, len;
557 547
@@ -578,29 +568,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
578 } 568 }
579 } 569 }
580 570
581 if (idx) { 571 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
582 u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
583 572
584 if (len > SG_COUNT_ASIC_BUG) { 573 if (len > SG_COUNT_ASIC_BUG) {
585 u32 addr; 574 u32 addr;
586 575
587 VPRINTK("Splitting last PRD.\n"); 576 VPRINTK("Splitting last PRD.\n");
588 577
589 addr = le32_to_cpu(ap->prd[idx - 1].addr); 578 addr = le32_to_cpu(ap->prd[idx - 1].addr);
590 ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); 579 ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
591 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); 580 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
592 581
593 addr = addr + len - SG_COUNT_ASIC_BUG; 582 addr = addr + len - SG_COUNT_ASIC_BUG;
594 len = SG_COUNT_ASIC_BUG; 583 len = SG_COUNT_ASIC_BUG;
595 ap->prd[idx].addr = cpu_to_le32(addr); 584 ap->prd[idx].addr = cpu_to_le32(addr);
596 ap->prd[idx].flags_len = cpu_to_le32(len); 585 ap->prd[idx].flags_len = cpu_to_le32(len);
597 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 586 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
598 587
599 idx++; 588 idx++;
600 }
601
602 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
603 } 589 }
590
591 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
604} 592}
605 593
606static void pdc_qc_prep(struct ata_queued_cmd *qc) 594static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -627,14 +615,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
627 pdc_pkt_footer(&qc->tf, pp->pkt, i); 615 pdc_pkt_footer(&qc->tf, pp->pkt, i);
628 break; 616 break;
629 617
630 case ATA_PROT_ATAPI: 618 case ATAPI_PROT_PIO:
631 pdc_fill_sg(qc); 619 pdc_fill_sg(qc);
632 break; 620 break;
633 621
634 case ATA_PROT_ATAPI_DMA: 622 case ATAPI_PROT_DMA:
635 pdc_fill_sg(qc); 623 pdc_fill_sg(qc);
636 /*FALLTHROUGH*/ 624 /*FALLTHROUGH*/
637 case ATA_PROT_ATAPI_NODATA: 625 case ATAPI_PROT_NODATA:
638 pdc_atapi_pkt(qc); 626 pdc_atapi_pkt(qc);
639 break; 627 break;
640 628
@@ -754,8 +742,8 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
754 switch (qc->tf.protocol) { 742 switch (qc->tf.protocol) {
755 case ATA_PROT_DMA: 743 case ATA_PROT_DMA:
756 case ATA_PROT_NODATA: 744 case ATA_PROT_NODATA:
757 case ATA_PROT_ATAPI_DMA: 745 case ATAPI_PROT_DMA:
758 case ATA_PROT_ATAPI_NODATA: 746 case ATAPI_PROT_NODATA:
759 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 747 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
760 ata_qc_complete(qc); 748 ata_qc_complete(qc);
761 handled = 1; 749 handled = 1;
@@ -900,7 +888,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
900static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 888static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
901{ 889{
902 switch (qc->tf.protocol) { 890 switch (qc->tf.protocol) {
903 case ATA_PROT_ATAPI_NODATA: 891 case ATAPI_PROT_NODATA:
904 if (qc->dev->flags & ATA_DFLAG_CDB_INTR) 892 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
905 break; 893 break;
906 /*FALLTHROUGH*/ 894 /*FALLTHROUGH*/
@@ -908,7 +896,7 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
908 if (qc->tf.flags & ATA_TFLAG_POLLING) 896 if (qc->tf.flags & ATA_TFLAG_POLLING)
909 break; 897 break;
910 /*FALLTHROUGH*/ 898 /*FALLTHROUGH*/
911 case ATA_PROT_ATAPI_DMA: 899 case ATAPI_PROT_DMA:
912 case ATA_PROT_DMA: 900 case ATA_PROT_DMA:
913 pdc_packet_start(qc); 901 pdc_packet_start(qc);
914 return 0; 902 return 0;
@@ -922,16 +910,14 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
922 910
923static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 911static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
924{ 912{
925 WARN_ON(tf->protocol == ATA_PROT_DMA || 913 WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
926 tf->protocol == ATA_PROT_ATAPI_DMA);
927 ata_tf_load(ap, tf); 914 ata_tf_load(ap, tf);
928} 915}
929 916
930static void pdc_exec_command_mmio(struct ata_port *ap, 917static void pdc_exec_command_mmio(struct ata_port *ap,
931 const struct ata_taskfile *tf) 918 const struct ata_taskfile *tf)
932{ 919{
933 WARN_ON(tf->protocol == ATA_PROT_DMA || 920 WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
934 tf->protocol == ATA_PROT_ATAPI_DMA);
935 ata_exec_command(ap, tf); 921 ata_exec_command(ap, tf);
936} 922}
937 923
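
pdc_atapi_pkt() now takes the device select, sector/LBA bytes, byte count and command straight from the taskfile instead of recomputing or hardcoding them. For ATAPI, the per-DRQ byte-count limit travels in the LBA mid/high (legacy cylinder) registers, which libata populates before the packet is built. A sketch of how those fields get filled in; the 63 * 1024 cap follows this era's libata ATAPI translation path and should be treated as approximate:

	#include <linux/kernel.h>

	u32 nbytes = min(qc->nbytes, 63 * 1024U);	/* per-DRQ cap */

	qc->tf.lbam = nbytes & 0xff;		/* byte count low  -> buf[23] */
	qc->tf.lbah = (nbytes >> 8) & 0xff;	/* byte count high -> buf[25] */
	qc->tf.command = ATA_CMD_PACKET;	/* -> buf[27] */
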
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
index 6ee5e190262d..00d6000e546f 100644
--- a/drivers/ata/sata_promise.h
+++ b/drivers/ata/sata_promise.h
@@ -46,7 +46,7 @@ static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
46 unsigned int devno, u8 *buf) 46 unsigned int devno, u8 *buf)
47{ 47{
48 u8 dev_reg; 48 u8 dev_reg;
49 u32 *buf32 = (u32 *) buf; 49 __le32 *buf32 = (__le32 *) buf;
50 50
51 /* set control bits (byte 0), zero delay seq id (byte 3), 51 /* set control bits (byte 0), zero delay seq id (byte 3),
52 * and seq id (byte 2) 52 * and seq id (byte 2)
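
The u32 * to __le32 * change here (and the matching ones in sata_sx4.c below) is an endianness-annotation fix rather than a behaviour change: the packet buffer holds little-endian words, and with the pointer typed __le32 *, sparse (make C=1) can verify that every store goes through cpu_to_le32():

	__le32 *buf32 = (__le32 *) buf;

	buf32[0] = cpu_to_le32(PDC_PKT_READ);	/* well-typed */
	/* buf32[0] = PDC_PKT_READ; would now draw a sparse warning */
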
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c68b241805fd..91cc12c82040 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -287,14 +287,10 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
287 struct scatterlist *sg; 287 struct scatterlist *sg;
288 struct ata_port *ap = qc->ap; 288 struct ata_port *ap = qc->ap;
289 struct qs_port_priv *pp = ap->private_data; 289 struct qs_port_priv *pp = ap->private_data;
290 unsigned int nelem;
291 u8 *prd = pp->pkt + QS_CPB_BYTES; 290 u8 *prd = pp->pkt + QS_CPB_BYTES;
291 unsigned int si;
292 292
293 WARN_ON(qc->__sg == NULL); 293 for_each_sg(qc->sg, sg, qc->n_elem, si) {
294 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
295
296 nelem = 0;
297 ata_for_each_sg(sg, qc) {
298 u64 addr; 294 u64 addr;
299 u32 len; 295 u32 len;
300 296
@@ -306,12 +302,11 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
306 *(__le32 *)prd = cpu_to_le32(len); 302 *(__le32 *)prd = cpu_to_le32(len);
307 prd += sizeof(u64); 303 prd += sizeof(u64);
308 304
309 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem, 305 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
310 (unsigned long long)addr, len); 306 (unsigned long long)addr, len);
311 nelem++;
312 } 307 }
313 308
314 return nelem; 309 return si;
315} 310}
316 311
317static void qs_qc_prep(struct ata_queued_cmd *qc) 312static void qs_qc_prep(struct ata_queued_cmd *qc)
@@ -376,7 +371,7 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
376 qs_packet_start(qc); 371 qs_packet_start(qc);
377 return 0; 372 return 0;
378 373
379 case ATA_PROT_ATAPI_DMA: 374 case ATAPI_PROT_DMA:
380 BUG(); 375 BUG();
381 break; 376 break;
382 377
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index f5119bf40c24..0b8191b52f97 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -416,15 +416,14 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
416 */ 416 */
417 417
418 /* Check the ATA_DFLAG_CDB_INTR flag is enough here. 418 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
419 * The flag was turned on only for atapi devices. 419 * The flag was turned on only for atapi devices. No
420 * No need to check is_atapi_taskfile(&qc->tf) again. 420 * need to check ata_is_atapi(qc->tf.protocol) again.
421 */ 421 */
422 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 422 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
423 goto err_hsm; 423 goto err_hsm;
424 break; 424 break;
425 case HSM_ST_LAST: 425 case HSM_ST_LAST:
426 if (qc->tf.protocol == ATA_PROT_DMA || 426 if (ata_is_dma(qc->tf.protocol)) {
427 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
428 /* clear DMA-Start bit */ 427 /* clear DMA-Start bit */
429 ap->ops->bmdma_stop(qc); 428 ap->ops->bmdma_stop(qc);
430 429
@@ -451,8 +450,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
451 /* kick HSM in the ass */ 450 /* kick HSM in the ass */
452 ata_hsm_move(ap, qc, status, 0); 451 ata_hsm_move(ap, qc, status, 0);
453 452
454 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 453 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
455 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
456 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2); 454 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
457 455
458 return; 456 return;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 864c1c1b8511..b4b1f91ea693 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -813,8 +813,9 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
813{ 813{
814 struct scatterlist *sg; 814 struct scatterlist *sg;
815 struct sil24_sge *last_sge = NULL; 815 struct sil24_sge *last_sge = NULL;
816 unsigned int si;
816 817
817 ata_for_each_sg(sg, qc) { 818 for_each_sg(qc->sg, sg, qc->n_elem, si) {
818 sge->addr = cpu_to_le64(sg_dma_address(sg)); 819 sge->addr = cpu_to_le64(sg_dma_address(sg));
819 sge->cnt = cpu_to_le32(sg_dma_len(sg)); 820 sge->cnt = cpu_to_le32(sg_dma_len(sg));
820 sge->flags = 0; 821 sge->flags = 0;
@@ -823,8 +824,7 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
823 sge++; 824 sge++;
824 } 825 }
825 826
826 if (likely(last_sge)) 827 last_sge->flags = cpu_to_le32(SGE_TRM);
827 last_sge->flags = cpu_to_le32(SGE_TRM);
828} 828}
829 829
830static int sil24_qc_defer(struct ata_queued_cmd *qc) 830static int sil24_qc_defer(struct ata_queued_cmd *qc)
@@ -852,9 +852,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
852 * set. 852 * set.
853 * 853 *
854 */ 854 */
855 int is_excl = (prot == ATA_PROT_ATAPI || 855 int is_excl = (ata_is_atapi(prot) ||
856 prot == ATA_PROT_ATAPI_NODATA ||
857 prot == ATA_PROT_ATAPI_DMA ||
858 (qc->flags & ATA_QCFLAG_RESULT_TF)); 856 (qc->flags & ATA_QCFLAG_RESULT_TF));
859 857
860 if (unlikely(ap->excl_link)) { 858 if (unlikely(ap->excl_link)) {
@@ -885,35 +883,21 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
885 883
886 cb = &pp->cmd_block[sil24_tag(qc->tag)]; 884 cb = &pp->cmd_block[sil24_tag(qc->tag)];
887 885
888 switch (qc->tf.protocol) { 886 if (!ata_is_atapi(qc->tf.protocol)) {
889 case ATA_PROT_PIO:
890 case ATA_PROT_DMA:
891 case ATA_PROT_NCQ:
892 case ATA_PROT_NODATA:
893 prb = &cb->ata.prb; 887 prb = &cb->ata.prb;
894 sge = cb->ata.sge; 888 sge = cb->ata.sge;
895 break; 889 } else {
896
897 case ATA_PROT_ATAPI:
898 case ATA_PROT_ATAPI_DMA:
899 case ATA_PROT_ATAPI_NODATA:
900 prb = &cb->atapi.prb; 890 prb = &cb->atapi.prb;
901 sge = cb->atapi.sge; 891 sge = cb->atapi.sge;
902 memset(cb->atapi.cdb, 0, 32); 892 memset(cb->atapi.cdb, 0, 32);
903 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len); 893 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
904 894
905 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 895 if (ata_is_data(qc->tf.protocol)) {
906 if (qc->tf.flags & ATA_TFLAG_WRITE) 896 if (qc->tf.flags & ATA_TFLAG_WRITE)
907 ctrl = PRB_CTRL_PACKET_WRITE; 897 ctrl = PRB_CTRL_PACKET_WRITE;
908 else 898 else
909 ctrl = PRB_CTRL_PACKET_READ; 899 ctrl = PRB_CTRL_PACKET_READ;
910 } 900 }
911 break;
912
913 default:
914 prb = NULL; /* shut up, gcc */
915 sge = NULL;
916 BUG();
917 } 901 }
918 902
919 prb->ctrl = cpu_to_le16(ctrl); 903 prb->ctrl = cpu_to_le16(ctrl);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 4d857185f33b..e3d56bc6726d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -334,7 +334,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
334{ 334{
335 u32 addr; 335 u32 addr;
336 unsigned int dw = PDC_DIMM_APKT_PRD >> 2; 336 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
337 u32 *buf32 = (u32 *) buf; 337 __le32 *buf32 = (__le32 *) buf;
338 338
339 /* output ATA packet S/G table */ 339 /* output ATA packet S/G table */
340 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + 340 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
@@ -356,7 +356,7 @@ static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
356{ 356{
357 u32 addr; 357 u32 addr;
358 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; 358 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
359 u32 *buf32 = (u32 *) buf; 359 __le32 *buf32 = (__le32 *) buf;
360 360
361 /* output Host DMA packet S/G table */ 361 /* output Host DMA packet S/G table */
362 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + 362 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
@@ -377,7 +377,7 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
377 unsigned int portno) 377 unsigned int portno)
378{ 378{
379 unsigned int i, dw; 379 unsigned int i, dw;
380 u32 *buf32 = (u32 *) buf; 380 __le32 *buf32 = (__le32 *) buf;
381 u8 dev_reg; 381 u8 dev_reg;
382 382
383 unsigned int dimm_sg = PDC_20621_DIMM_BASE + 383 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
@@ -429,7 +429,8 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
429 unsigned int portno) 429 unsigned int portno)
430{ 430{
431 unsigned int dw; 431 unsigned int dw;
432 u32 tmp, *buf32 = (u32 *) buf; 432 u32 tmp;
433 __le32 *buf32 = (__le32 *) buf;
433 434
434 unsigned int host_sg = PDC_20621_DIMM_BASE + 435 unsigned int host_sg = PDC_20621_DIMM_BASE +
435 (PDC_DIMM_WINDOW_STEP * portno) + 436 (PDC_DIMM_WINDOW_STEP * portno) +
@@ -473,7 +474,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
473 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; 474 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
474 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; 475 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
475 unsigned int portno = ap->port_no; 476 unsigned int portno = ap->port_no;
476 unsigned int i, idx, total_len = 0, sgt_len; 477 unsigned int i, si, idx, total_len = 0, sgt_len;
477 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 478 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
478 479
479 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); 480 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
@@ -487,7 +488,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
487 * Build S/G table 488 * Build S/G table
488 */ 489 */
489 idx = 0; 490 idx = 0;
490 ata_for_each_sg(sg, qc) { 491 for_each_sg(qc->sg, sg, qc->n_elem, si) {
491 buf[idx++] = cpu_to_le32(sg_dma_address(sg)); 492 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
492 buf[idx++] = cpu_to_le32(sg_dma_len(sg)); 493 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
493 total_len += sg_dma_len(sg); 494 total_len += sg_dma_len(sg);
@@ -700,7 +701,7 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
700 pdc20621_packet_start(qc); 701 pdc20621_packet_start(qc);
701 return 0; 702 return 0;
702 703
703 case ATA_PROT_ATAPI_DMA: 704 case ATAPI_PROT_DMA:
704 BUG(); 705 BUG();
705 break; 706 break;
706 707