aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata
diff options
context:
space:
mode:
authorGrant Likely <grant.likely@secretlab.ca>2010-05-22 02:36:56 -0400
committerGrant Likely <grant.likely@secretlab.ca>2010-05-22 02:36:56 -0400
commitcf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/ata
parent44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parentf4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and build failures in vio.c after merge. Conflicts: drivers/i2c/busses/i2c-cpm.c drivers/i2c/busses/i2c-mpc.c drivers/net/gianfar.c Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct node pointer. Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/ata')
-rw-r--r--drivers/ata/Kconfig20
-rw-r--r--drivers/ata/Makefile3
-rw-r--r--drivers/ata/ahci.c2544
-rw-r--r--drivers/ata/ahci.h343
-rw-r--r--drivers/ata/ahci_platform.c192
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libahci.c2216
-rw-r--r--drivers/ata/libata-core.c216
-rw-r--r--drivers/ata/libata-eh.c11
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c1387
-rw-r--r--drivers/ata/libata.h29
-rw-r--r--drivers/ata/pata_acpi.c8
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_bf54x.c87
-rw-r--r--drivers/ata/pata_cmd640.c13
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5530.c4
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c4
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_icside.c5
-rw-r--r--drivers/ata/pata_it821x.c6
-rw-r--r--drivers/ata/pata_macio.c5
-rw-r--r--drivers/ata/pata_mpc52xx.c78
-rw-r--r--drivers/ata/pata_ns87415.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c12
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_pcmcia.c53
-rw-r--r--drivers/ata/pata_pdc2027x.c4
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_sc1200.c4
-rw-r--r--drivers/ata/pata_scc.c80
-rw-r--r--drivers/ata/pata_sch.c12
-rw-r--r--drivers/ata/pata_serverworks.c6
-rw-r--r--drivers/ata/pata_sil680.c30
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/pdc_adma.c74
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c47
-rw-r--r--drivers/ata/sata_nv.c263
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c90
-rw-r--r--drivers/ata/sata_sil.c9
-rw-r--r--drivers/ata/sata_sil24.c9
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c4
-rw-r--r--drivers/ata/sata_vsc.c10
54 files changed, 4144 insertions, 3866 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bdc..e68541f662b9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -65,6 +65,14 @@ config SATA_AHCI
65 65
66 If unsure, say N. 66 If unsure, say N.
67 67
68config SATA_AHCI_PLATFORM
69 tristate "Platform AHCI SATA support"
70 help
71 This option enables support for Platform AHCI Serial ATA
72 controllers.
73
74 If unsure, say N.
75
68config SATA_SIL24 76config SATA_SIL24
69 tristate "Silicon Image 3124/3132 SATA support" 77 tristate "Silicon Image 3124/3132 SATA support"
70 depends on PCI 78 depends on PCI
@@ -73,6 +81,12 @@ config SATA_SIL24
73 81
74 If unsure, say N. 82 If unsure, say N.
75 83
84config SATA_INIC162X
85 tristate "Initio 162x SATA support"
86 depends on PCI
87 help
88 This option enables support for Initio 162x Serial ATA.
89
76config SATA_FSL 90config SATA_FSL
77 tristate "Freescale 3.0Gbps SATA support" 91 tristate "Freescale 3.0Gbps SATA support"
78 depends on FSL_SOC 92 depends on FSL_SOC
@@ -213,12 +227,6 @@ config SATA_VITESSE
213 227
214 If unsure, say N. 228 If unsure, say N.
215 229
216config SATA_INIC162X
217 tristate "Initio 162x SATA support"
218 depends on PCI
219 help
220 This option enables support for Initio 162x Serial ATA.
221
222config PATA_ACPI 230config PATA_ACPI
223 tristate "ACPI firmware driver for PATA" 231 tristate "ACPI firmware driver for PATA"
224 depends on ATA_ACPI 232 depends on ATA_ACPI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d6..d0a93c4ad3ec 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,7 +1,8 @@
1 1
2obj-$(CONFIG_ATA) += libata.o 2obj-$(CONFIG_ATA) += libata.o
3 3
4obj-$(CONFIG_SATA_AHCI) += ahci.o 4obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
5obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
5obj-$(CONFIG_SATA_SVW) += sata_svw.o 6obj-$(CONFIG_SATA_SVW) += sata_svw.o
6obj-$(CONFIG_ATA_PIIX) += ata_piix.o 7obj-$(CONFIG_ATA_PIIX) += ata_piix.o
7obj-$(CONFIG_SATA_PROMISE) += sata_promise.o 8obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a410..8ca16f54e1ed 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,403 +46,48 @@
46#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
47#include <scsi/scsi_cmnd.h> 47#include <scsi/scsi_cmnd.h>
48#include <linux/libata.h> 48#include <linux/libata.h>
49#include "ahci.h"
49 50
50#define DRV_NAME "ahci" 51#define DRV_NAME "ahci"
51#define DRV_VERSION "3.0" 52#define DRV_VERSION "3.0"
52 53
53/* Enclosure Management Control */
54#define EM_CTRL_MSG_TYPE 0x000f0000
55
56/* Enclosure Management LED Message Type */
57#define EM_MSG_LED_HBA_PORT 0x0000000f
58#define EM_MSG_LED_PMP_SLOT 0x0000ff00
59#define EM_MSG_LED_VALUE 0xffff0000
60#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61#define EM_MSG_LED_VALUE_OFF 0xfff80000
62#define EM_MSG_LED_VALUE_ON 0x00010000
63
64static int ahci_skip_host_reset;
65static int ahci_ignore_sss;
66
67module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
69
70module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
72
73static int ahci_enable_alpm(struct ata_port *ap,
74 enum link_pm policy);
75static void ahci_disable_alpm(struct ata_port *ap);
76static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 size_t size);
79static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
80 ssize_t size);
81
82enum { 54enum {
83 AHCI_PCI_BAR = 5, 55 AHCI_PCI_BAR = 5,
84 AHCI_MAX_PORTS = 32,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
87 AHCI_MAX_CMDS = 32,
88 AHCI_CMD_SZ = 32,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_RX_FIS_SZ = 256,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_RX_FIS_SZ,
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
98 AHCI_CMD_TBL_AR_SZ +
99 (AHCI_RX_FIS_SZ * 16),
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
106
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
110
111 board_ahci = 0,
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
115 board_ahci_mv = 4,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
121
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
131
132 /* HOST_CTL bits */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
136
137 /* HOST_CAP bits */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
155
156 /* HOST_CAP2 bits */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
160
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
178
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
188
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
198
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_IF_ERR |
201 PORT_IRQ_CONNECT |
202 PORT_IRQ_PHYRDY |
203 PORT_IRQ_UNK_FIS |
204 PORT_IRQ_BAD_PMP,
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
206 PORT_IRQ_TF_ERR |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
211
212 /* PORT_CMD bits */
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
225
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
230
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
238
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
252 link offline */
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
254
255 /* ap->flags bits */
256
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
260 ATA_FLAG_IPM,
261
262 ICH_MAP = 0x90, /* ICH MAP register */
263
264 /* em constants */
265 EM_MAX_SLOTS = 8,
266 EM_MAX_RETRY = 5,
267
268 /* em_ctl bits */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
272};
273
274struct ahci_cmd_hdr {
275 __le32 opts;
276 __le32 status;
277 __le32 tbl_addr;
278 __le32 tbl_addr_hi;
279 __le32 reserved[4];
280};
281
282struct ahci_sg {
283 __le32 addr;
284 __le32 addr_hi;
285 __le32 reserved;
286 __le32 flags_size;
287};
288
289struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
295};
296
297struct ahci_host_priv {
298 unsigned int flags; /* AHCI_HFLAG_* */
299 u32 cap; /* cap to use */
300 u32 cap2; /* cap2 to use */
301 u32 port_map; /* port map to use */
302 u32 saved_cap; /* saved initial cap */
303 u32 saved_cap2; /* saved initial cap2 */
304 u32 saved_port_map; /* saved initial port_map */
305 u32 em_loc; /* enclosure management location */
306}; 56};
307 57
308struct ahci_port_priv { 58enum board_ids {
309 struct ata_link *active_link; 59 /* board IDs by feature in alphabetical order */
310 struct ahci_cmd_hdr *cmd_slot; 60 board_ahci,
311 dma_addr_t cmd_slot_dma; 61 board_ahci_ign_iferr,
312 void *cmd_tbl; 62 board_ahci_nosntf,
313 dma_addr_t cmd_tbl_dma; 63
314 void *rx_fis; 64 /* board IDs for specific chipsets in alphabetical order */
315 dma_addr_t rx_fis_dma; 65 board_ahci_mcp65,
316 /* for NCQ spurious interrupt analysis */ 66 board_ahci_mcp77,
317 unsigned int ncq_saw_d2h:1; 67 board_ahci_mcp89,
318 unsigned int ncq_saw_dmas:1; 68 board_ahci_mv,
319 unsigned int ncq_saw_sdb:1; 69 board_ahci_sb600,
320 u32 intr_mask; /* interrupts to enable */ 70 board_ahci_sb700, /* for SB700 and SB800 */
321 bool fbs_supported; /* set iff FBS is supported */ 71 board_ahci_vt8251,
322 bool fbs_enabled; /* set iff FBS is enabled */ 72
323 int fbs_last_dev; /* save FBS.DEV of last FIS */ 73 /* aliases */
324 /* enclosure management info per PM slot */ 74 board_ahci_mcp_linux = board_ahci_mcp65,
325 struct ahci_em_priv em_priv[EM_MAX_SLOTS]; 75 board_ahci_mcp67 = board_ahci_mcp65,
76 board_ahci_mcp73 = board_ahci_mcp65,
77 board_ahci_mcp79 = board_ahci_mcp77,
326}; 78};
327 79
328static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
329static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
330static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 80static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
331static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
332static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
333static int ahci_port_start(struct ata_port *ap);
334static void ahci_port_stop(struct ata_port *ap);
335static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
336static void ahci_qc_prep(struct ata_queued_cmd *qc);
337static void ahci_freeze(struct ata_port *ap);
338static void ahci_thaw(struct ata_port *ap);
339static void ahci_enable_fbs(struct ata_port *ap);
340static void ahci_disable_fbs(struct ata_port *ap);
341static void ahci_pmp_attach(struct ata_port *ap);
342static void ahci_pmp_detach(struct ata_port *ap);
343static int ahci_softreset(struct ata_link *link, unsigned int *class,
344 unsigned long deadline);
345static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, 81static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
346 unsigned long deadline); 82 unsigned long deadline);
347static int ahci_hardreset(struct ata_link *link, unsigned int *class,
348 unsigned long deadline);
349static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 83static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
350 unsigned long deadline); 84 unsigned long deadline);
351static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 85static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
352 unsigned long deadline); 86 unsigned long deadline);
353static void ahci_postreset(struct ata_link *link, unsigned int *class);
354static void ahci_error_handler(struct ata_port *ap);
355static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
356static int ahci_port_resume(struct ata_port *ap);
357static void ahci_dev_config(struct ata_device *dev);
358static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
359 u32 opts);
360#ifdef CONFIG_PM 87#ifdef CONFIG_PM
361static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
362static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 88static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
363static int ahci_pci_device_resume(struct pci_dev *pdev); 89static int ahci_pci_device_resume(struct pci_dev *pdev);
364#endif 90#endif
365static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
366static ssize_t ahci_activity_store(struct ata_device *dev,
367 enum sw_activity val);
368static void ahci_init_sw_activity(struct ata_link *link);
369
370static ssize_t ahci_show_host_caps(struct device *dev,
371 struct device_attribute *attr, char *buf);
372static ssize_t ahci_show_host_cap2(struct device *dev,
373 struct device_attribute *attr, char *buf);
374static ssize_t ahci_show_host_version(struct device *dev,
375 struct device_attribute *attr, char *buf);
376static ssize_t ahci_show_port_cmd(struct device *dev,
377 struct device_attribute *attr, char *buf);
378
379static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
380static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
381static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
382static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
383
384static struct device_attribute *ahci_shost_attrs[] = {
385 &dev_attr_link_power_management_policy,
386 &dev_attr_em_message_type,
387 &dev_attr_em_message,
388 &dev_attr_ahci_host_caps,
389 &dev_attr_ahci_host_cap2,
390 &dev_attr_ahci_host_version,
391 &dev_attr_ahci_port_cmd,
392 NULL
393};
394
395static struct device_attribute *ahci_sdev_attrs[] = {
396 &dev_attr_sw_activity,
397 &dev_attr_unload_heads,
398 NULL
399};
400
401static struct scsi_host_template ahci_sht = {
402 ATA_NCQ_SHT(DRV_NAME),
403 .can_queue = AHCI_MAX_CMDS - 1,
404 .sg_tablesize = AHCI_MAX_SG,
405 .dma_boundary = AHCI_DMA_BOUNDARY,
406 .shost_attrs = ahci_shost_attrs,
407 .sdev_attrs = ahci_sdev_attrs,
408};
409
410static struct ata_port_operations ahci_ops = {
411 .inherits = &sata_pmp_port_ops,
412
413 .qc_defer = ahci_pmp_qc_defer,
414 .qc_prep = ahci_qc_prep,
415 .qc_issue = ahci_qc_issue,
416 .qc_fill_rtf = ahci_qc_fill_rtf,
417
418 .freeze = ahci_freeze,
419 .thaw = ahci_thaw,
420 .softreset = ahci_softreset,
421 .hardreset = ahci_hardreset,
422 .postreset = ahci_postreset,
423 .pmp_softreset = ahci_softreset,
424 .error_handler = ahci_error_handler,
425 .post_internal_cmd = ahci_post_internal_cmd,
426 .dev_config = ahci_dev_config,
427
428 .scr_read = ahci_scr_read,
429 .scr_write = ahci_scr_write,
430 .pmp_attach = ahci_pmp_attach,
431 .pmp_detach = ahci_pmp_detach,
432
433 .enable_pm = ahci_enable_alpm,
434 .disable_pm = ahci_disable_alpm,
435 .em_show = ahci_led_show,
436 .em_store = ahci_led_store,
437 .sw_activity_show = ahci_activity_show,
438 .sw_activity_store = ahci_activity_store,
439#ifdef CONFIG_PM
440 .port_suspend = ahci_port_suspend,
441 .port_resume = ahci_port_resume,
442#endif
443 .port_start = ahci_port_start,
444 .port_stop = ahci_port_stop,
445};
446 91
447static struct ata_port_operations ahci_vt8251_ops = { 92static struct ata_port_operations ahci_vt8251_ops = {
448 .inherits = &ahci_ops, 93 .inherits = &ahci_ops,
@@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = {
463#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) 108#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
464 109
465static const struct ata_port_info ahci_port_info[] = { 110static const struct ata_port_info ahci_port_info[] = {
111 /* by features */
466 [board_ahci] = 112 [board_ahci] =
467 { 113 {
468 .flags = AHCI_FLAG_COMMON, 114 .flags = AHCI_FLAG_COMMON,
@@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = {
470 .udma_mask = ATA_UDMA6, 116 .udma_mask = ATA_UDMA6,
471 .port_ops = &ahci_ops, 117 .port_ops = &ahci_ops,
472 }, 118 },
473 [board_ahci_vt8251] = 119 [board_ahci_ign_iferr] =
474 { 120 {
475 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), 121 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
476 .flags = AHCI_FLAG_COMMON, 122 .flags = AHCI_FLAG_COMMON,
477 .pio_mask = ATA_PIO4, 123 .pio_mask = ATA_PIO4,
478 .udma_mask = ATA_UDMA6, 124 .udma_mask = ATA_UDMA6,
479 .port_ops = &ahci_vt8251_ops, 125 .port_ops = &ahci_ops,
480 }, 126 },
481 [board_ahci_ign_iferr] = 127 [board_ahci_nosntf] =
482 { 128 {
483 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 129 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
484 .flags = AHCI_FLAG_COMMON, 130 .flags = AHCI_FLAG_COMMON,
485 .pio_mask = ATA_PIO4, 131 .pio_mask = ATA_PIO4,
486 .udma_mask = ATA_UDMA6, 132 .udma_mask = ATA_UDMA6,
487 .port_ops = &ahci_ops, 133 .port_ops = &ahci_ops,
488 }, 134 },
489 [board_ahci_sb600] = 135 /* by chipsets */
136 [board_ahci_mcp65] =
490 { 137 {
491 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 138 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
492 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 | 139 AHCI_HFLAG_YES_NCQ),
493 AHCI_HFLAG_32BIT_ONLY),
494 .flags = AHCI_FLAG_COMMON, 140 .flags = AHCI_FLAG_COMMON,
495 .pio_mask = ATA_PIO4, 141 .pio_mask = ATA_PIO4,
496 .udma_mask = ATA_UDMA6, 142 .udma_mask = ATA_UDMA6,
497 .port_ops = &ahci_sb600_ops, 143 .port_ops = &ahci_ops,
498 }, 144 },
499 [board_ahci_mv] = 145 [board_ahci_mcp77] =
500 { 146 {
501 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 147 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
502 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), 148 .flags = AHCI_FLAG_COMMON,
503 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
504 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
505 .pio_mask = ATA_PIO4, 149 .pio_mask = ATA_PIO4,
506 .udma_mask = ATA_UDMA6, 150 .udma_mask = ATA_UDMA6,
507 .port_ops = &ahci_ops, 151 .port_ops = &ahci_ops,
508 }, 152 },
509 [board_ahci_sb700] = /* for SB700 and SB800 */ 153 [board_ahci_mcp89] =
510 { 154 {
511 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), 155 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
512 .flags = AHCI_FLAG_COMMON, 156 .flags = AHCI_FLAG_COMMON,
513 .pio_mask = ATA_PIO4, 157 .pio_mask = ATA_PIO4,
514 .udma_mask = ATA_UDMA6, 158 .udma_mask = ATA_UDMA6,
515 .port_ops = &ahci_sb600_ops, 159 .port_ops = &ahci_ops,
516 }, 160 },
517 [board_ahci_mcp65] = 161 [board_ahci_mv] =
518 { 162 {
519 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), 163 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
520 .flags = AHCI_FLAG_COMMON, 164 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
165 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
166 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
521 .pio_mask = ATA_PIO4, 167 .pio_mask = ATA_PIO4,
522 .udma_mask = ATA_UDMA6, 168 .udma_mask = ATA_UDMA6,
523 .port_ops = &ahci_ops, 169 .port_ops = &ahci_ops,
524 }, 170 },
525 [board_ahci_nopmp] = 171 [board_ahci_sb600] =
526 { 172 {
527 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP), 173 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
174 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
175 AHCI_HFLAG_32BIT_ONLY),
528 .flags = AHCI_FLAG_COMMON, 176 .flags = AHCI_FLAG_COMMON,
529 .pio_mask = ATA_PIO4, 177 .pio_mask = ATA_PIO4,
530 .udma_mask = ATA_UDMA6, 178 .udma_mask = ATA_UDMA6,
531 .port_ops = &ahci_ops, 179 .port_ops = &ahci_sb600_ops,
532 }, 180 },
533 [board_ahci_yesncq] = 181 [board_ahci_sb700] = /* for SB700 and SB800 */
534 { 182 {
535 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), 183 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
536 .flags = AHCI_FLAG_COMMON, 184 .flags = AHCI_FLAG_COMMON,
537 .pio_mask = ATA_PIO4, 185 .pio_mask = ATA_PIO4,
538 .udma_mask = ATA_UDMA6, 186 .udma_mask = ATA_UDMA6,
539 .port_ops = &ahci_ops, 187 .port_ops = &ahci_sb600_ops,
540 }, 188 },
541 [board_ahci_nosntf] = 189 [board_ahci_vt8251] =
542 { 190 {
543 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), 191 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
544 .flags = AHCI_FLAG_COMMON, 192 .flags = AHCI_FLAG_COMMON,
545 .pio_mask = ATA_PIO4, 193 .pio_mask = ATA_PIO4,
546 .udma_mask = ATA_UDMA6, 194 .udma_mask = ATA_UDMA6,
547 .port_ops = &ahci_ops, 195 .port_ops = &ahci_vt8251_ops,
548 }, 196 },
549}; 197};
550 198
@@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
629 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ 277 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ 278 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ 279 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */ 280 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
633 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */ 281 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */ 282 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */ 283 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */ 284 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */ 285 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */ 286 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */ 287 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */ 288 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ 289 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ 290 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ 291 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ 292 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
645 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */ 293 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */ 294 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */ 295 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */ 296 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */ 297 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */ 298 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */ 299 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */ 300 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */ 301 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */ 302 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */ 303 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */ 304 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */ 305 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */ 306 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */ 307 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ 308 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
661 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ 309 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ 310 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */ 311 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */ 312 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */ 313 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */ 314 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */ 315 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */ 316 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */ 317 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */ 318 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */ 319 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ 320 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ 321 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ 322 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ 323 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ 324 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ 325 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ 326 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ 327 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ 328 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ 329 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ 330 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ 331 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ 332 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ 333 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ 334 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ 335 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ 336 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ 337 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ 338 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ 339 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ 340 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ 341 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ 342 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ 343 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */ 344 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
697 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */ 345 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */ 346 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */ 347 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */ 348 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */ 349 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */ 350 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */ 351 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */ 352 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */ 353 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */ 354 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */ 355 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
708 356
709 /* SiS */ 357 /* SiS */
710 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 358 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = {
737#endif 385#endif
738}; 386};
739 387
740static int ahci_em_messages = 1;
741module_param(ahci_em_messages, int, 0444);
742/* add other LED protocol types when they become supported */
743MODULE_PARM_DESC(ahci_em_messages,
744 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
745
746#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) 388#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
747static int marvell_enable; 389static int marvell_enable;
748#else 390#else
@@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644);
752MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); 394MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
753 395
754 396
755static inline int ahci_nr_ports(u32 cap) 397static void ahci_pci_save_initial_config(struct pci_dev *pdev,
756{ 398 struct ahci_host_priv *hpriv)
757 return (cap & 0x1f) + 1;
758}
759
760static inline void __iomem *__ahci_port_base(struct ata_host *host,
761 unsigned int port_no)
762{
763 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
764
765 return mmio + 0x100 + (port_no * 0x80);
766}
767
768static inline void __iomem *ahci_port_base(struct ata_port *ap)
769{
770 return __ahci_port_base(ap->host, ap->port_no);
771}
772
773static void ahci_enable_ahci(void __iomem *mmio)
774{
775 int i;
776 u32 tmp;
777
778 /* turn on AHCI_EN */
779 tmp = readl(mmio + HOST_CTL);
780 if (tmp & HOST_AHCI_EN)
781 return;
782
783 /* Some controllers need AHCI_EN to be written multiple times.
784 * Try a few times before giving up.
785 */
786 for (i = 0; i < 5; i++) {
787 tmp |= HOST_AHCI_EN;
788 writel(tmp, mmio + HOST_CTL);
789 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
790 if (tmp & HOST_AHCI_EN)
791 return;
792 msleep(10);
793 }
794
795 WARN_ON(1);
796}
797
798static ssize_t ahci_show_host_caps(struct device *dev,
799 struct device_attribute *attr, char *buf)
800{
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct ata_port *ap = ata_shost_to_port(shost);
803 struct ahci_host_priv *hpriv = ap->host->private_data;
804
805 return sprintf(buf, "%x\n", hpriv->cap);
806}
807
808static ssize_t ahci_show_host_cap2(struct device *dev,
809 struct device_attribute *attr, char *buf)
810{
811 struct Scsi_Host *shost = class_to_shost(dev);
812 struct ata_port *ap = ata_shost_to_port(shost);
813 struct ahci_host_priv *hpriv = ap->host->private_data;
814
815 return sprintf(buf, "%x\n", hpriv->cap2);
816}
817
818static ssize_t ahci_show_host_version(struct device *dev,
819 struct device_attribute *attr, char *buf)
820{
821 struct Scsi_Host *shost = class_to_shost(dev);
822 struct ata_port *ap = ata_shost_to_port(shost);
823 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
824
825 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
826}
827
828static ssize_t ahci_show_port_cmd(struct device *dev,
829 struct device_attribute *attr, char *buf)
830{
831 struct Scsi_Host *shost = class_to_shost(dev);
832 struct ata_port *ap = ata_shost_to_port(shost);
833 void __iomem *port_mmio = ahci_port_base(ap);
834
835 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
836}
837
838/**
839 * ahci_save_initial_config - Save and fixup initial config values
840 * @pdev: target PCI device
841 * @hpriv: host private area to store config values
842 *
843 * Some registers containing configuration info might be setup by
844 * BIOS and might be cleared on reset. This function saves the
845 * initial values of those registers into @hpriv such that they
846 * can be restored after controller reset.
847 *
848 * If inconsistent, config values are fixed up by this function.
849 *
850 * LOCKING:
851 * None.
852 */
853static void ahci_save_initial_config(struct pci_dev *pdev,
854 struct ahci_host_priv *hpriv)
855{ 399{
856 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 400 unsigned int force_port_map = 0;
857 u32 cap, cap2, vers, port_map; 401 unsigned int mask_port_map = 0;
858 int i;
859 int mv;
860
861 /* make sure AHCI mode is enabled before accessing CAP */
862 ahci_enable_ahci(mmio);
863 402
864 /* Values prefixed with saved_ are written back to host after 403 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
865 * reset. Values without are used for driver operation. 404 dev_info(&pdev->dev, "JMB361 has only one port\n");
866 */ 405 force_port_map = 1;
867 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
868 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
869
870 /* CAP2 register is only defined for AHCI 1.2 and later */
871 vers = readl(mmio + HOST_VERSION);
872 if ((vers >> 16) > 1 ||
873 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
874 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
875 else
876 hpriv->saved_cap2 = cap2 = 0;
877
878 /* some chips have errata preventing 64bit use */
879 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
880 dev_printk(KERN_INFO, &pdev->dev,
881 "controller can't do 64bit DMA, forcing 32bit\n");
882 cap &= ~HOST_CAP_64;
883 }
884
885 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
886 dev_printk(KERN_INFO, &pdev->dev,
887 "controller can't do NCQ, turning off CAP_NCQ\n");
888 cap &= ~HOST_CAP_NCQ;
889 }
890
891 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
892 dev_printk(KERN_INFO, &pdev->dev,
893 "controller can do NCQ, turning on CAP_NCQ\n");
894 cap |= HOST_CAP_NCQ;
895 }
896
897 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
898 dev_printk(KERN_INFO, &pdev->dev,
899 "controller can't do PMP, turning off CAP_PMP\n");
900 cap &= ~HOST_CAP_PMP;
901 }
902
903 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
904 dev_printk(KERN_INFO, &pdev->dev,
905 "controller can't do SNTF, turning off CAP_SNTF\n");
906 cap &= ~HOST_CAP_SNTF;
907 }
908
909 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
910 port_map != 1) {
911 dev_printk(KERN_INFO, &pdev->dev,
912 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
913 port_map, 1);
914 port_map = 1;
915 } 406 }
916 407
917 /* 408 /*
@@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
921 */ 412 */
922 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 413 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
923 if (pdev->device == 0x6121) 414 if (pdev->device == 0x6121)
924 mv = 0x3; 415 mask_port_map = 0x3;
925 else 416 else
926 mv = 0xf; 417 mask_port_map = 0xf;
927 dev_printk(KERN_ERR, &pdev->dev, 418 dev_info(&pdev->dev,
928 "MV_AHCI HACK: port_map %x -> %x\n",
929 port_map,
930 port_map & mv);
931 dev_printk(KERN_ERR, &pdev->dev,
932 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); 419 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
933
934 port_map &= mv;
935 } 420 }
936 421
937 /* cross check port_map and cap.n_ports */ 422 ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
938 if (port_map) { 423 mask_port_map);
939 int map_ports = 0;
940
941 for (i = 0; i < AHCI_MAX_PORTS; i++)
942 if (port_map & (1 << i))
943 map_ports++;
944
945 /* If PI has more ports than n_ports, whine, clear
946 * port_map and let it be generated from n_ports.
947 */
948 if (map_ports > ahci_nr_ports(cap)) {
949 dev_printk(KERN_WARNING, &pdev->dev,
950 "implemented port map (0x%x) contains more "
951 "ports than nr_ports (%u), using nr_ports\n",
952 port_map, ahci_nr_ports(cap));
953 port_map = 0;
954 }
955 }
956
957 /* fabricate port_map from cap.nr_ports */
958 if (!port_map) {
959 port_map = (1 << ahci_nr_ports(cap)) - 1;
960 dev_printk(KERN_WARNING, &pdev->dev,
961 "forcing PORTS_IMPL to 0x%x\n", port_map);
962
963 /* write the fixed up value to the PI register */
964 hpriv->saved_port_map = port_map;
965 }
966
967 /* record values to use during operation */
968 hpriv->cap = cap;
969 hpriv->cap2 = cap2;
970 hpriv->port_map = port_map;
971}
972
973/**
974 * ahci_restore_initial_config - Restore initial config
975 * @host: target ATA host
976 *
977 * Restore initial config stored by ahci_save_initial_config().
978 *
979 * LOCKING:
980 * None.
981 */
982static void ahci_restore_initial_config(struct ata_host *host)
983{
984 struct ahci_host_priv *hpriv = host->private_data;
985 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
986
987 writel(hpriv->saved_cap, mmio + HOST_CAP);
988 if (hpriv->saved_cap2)
989 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
990 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
991 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
992}
993
994static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
995{
996 static const int offset[] = {
997 [SCR_STATUS] = PORT_SCR_STAT,
998 [SCR_CONTROL] = PORT_SCR_CTL,
999 [SCR_ERROR] = PORT_SCR_ERR,
1000 [SCR_ACTIVE] = PORT_SCR_ACT,
1001 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1002 };
1003 struct ahci_host_priv *hpriv = ap->host->private_data;
1004
1005 if (sc_reg < ARRAY_SIZE(offset) &&
1006 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1007 return offset[sc_reg];
1008 return 0;
1009}
1010
1011static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1012{
1013 void __iomem *port_mmio = ahci_port_base(link->ap);
1014 int offset = ahci_scr_offset(link->ap, sc_reg);
1015
1016 if (offset) {
1017 *val = readl(port_mmio + offset);
1018 return 0;
1019 }
1020 return -EINVAL;
1021}
1022
1023static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1024{
1025 void __iomem *port_mmio = ahci_port_base(link->ap);
1026 int offset = ahci_scr_offset(link->ap, sc_reg);
1027
1028 if (offset) {
1029 writel(val, port_mmio + offset);
1030 return 0;
1031 }
1032 return -EINVAL;
1033}
1034
1035static void ahci_start_engine(struct ata_port *ap)
1036{
1037 void __iomem *port_mmio = ahci_port_base(ap);
1038 u32 tmp;
1039
1040 /* start DMA */
1041 tmp = readl(port_mmio + PORT_CMD);
1042 tmp |= PORT_CMD_START;
1043 writel(tmp, port_mmio + PORT_CMD);
1044 readl(port_mmio + PORT_CMD); /* flush */
1045}
1046
1047static int ahci_stop_engine(struct ata_port *ap)
1048{
1049 void __iomem *port_mmio = ahci_port_base(ap);
1050 u32 tmp;
1051
1052 tmp = readl(port_mmio + PORT_CMD);
1053
1054 /* check if the HBA is idle */
1055 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1056 return 0;
1057
1058 /* setting HBA to idle */
1059 tmp &= ~PORT_CMD_START;
1060 writel(tmp, port_mmio + PORT_CMD);
1061
1062 /* wait for engine to stop. This could be as long as 500 msec */
1063 tmp = ata_wait_register(port_mmio + PORT_CMD,
1064 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1065 if (tmp & PORT_CMD_LIST_ON)
1066 return -EIO;
1067
1068 return 0;
1069}
1070
1071static void ahci_start_fis_rx(struct ata_port *ap)
1072{
1073 void __iomem *port_mmio = ahci_port_base(ap);
1074 struct ahci_host_priv *hpriv = ap->host->private_data;
1075 struct ahci_port_priv *pp = ap->private_data;
1076 u32 tmp;
1077
1078 /* set FIS registers */
1079 if (hpriv->cap & HOST_CAP_64)
1080 writel((pp->cmd_slot_dma >> 16) >> 16,
1081 port_mmio + PORT_LST_ADDR_HI);
1082 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1083
1084 if (hpriv->cap & HOST_CAP_64)
1085 writel((pp->rx_fis_dma >> 16) >> 16,
1086 port_mmio + PORT_FIS_ADDR_HI);
1087 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1088
1089 /* enable FIS reception */
1090 tmp = readl(port_mmio + PORT_CMD);
1091 tmp |= PORT_CMD_FIS_RX;
1092 writel(tmp, port_mmio + PORT_CMD);
1093
1094 /* flush */
1095 readl(port_mmio + PORT_CMD);
1096} 424}
1097 425
1098static int ahci_stop_fis_rx(struct ata_port *ap) 426static int ahci_pci_reset_controller(struct ata_host *host)
1099{
1100 void __iomem *port_mmio = ahci_port_base(ap);
1101 u32 tmp;
1102
1103 /* disable FIS reception */
1104 tmp = readl(port_mmio + PORT_CMD);
1105 tmp &= ~PORT_CMD_FIS_RX;
1106 writel(tmp, port_mmio + PORT_CMD);
1107
1108 /* wait for completion, spec says 500ms, give it 1000 */
1109 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1110 PORT_CMD_FIS_ON, 10, 1000);
1111 if (tmp & PORT_CMD_FIS_ON)
1112 return -EBUSY;
1113
1114 return 0;
1115}
1116
1117static void ahci_power_up(struct ata_port *ap)
1118{
1119 struct ahci_host_priv *hpriv = ap->host->private_data;
1120 void __iomem *port_mmio = ahci_port_base(ap);
1121 u32 cmd;
1122
1123 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1124
1125 /* spin up device */
1126 if (hpriv->cap & HOST_CAP_SSS) {
1127 cmd |= PORT_CMD_SPIN_UP;
1128 writel(cmd, port_mmio + PORT_CMD);
1129 }
1130
1131 /* wake up link */
1132 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1133}
1134
1135static void ahci_disable_alpm(struct ata_port *ap)
1136{
1137 struct ahci_host_priv *hpriv = ap->host->private_data;
1138 void __iomem *port_mmio = ahci_port_base(ap);
1139 u32 cmd;
1140 struct ahci_port_priv *pp = ap->private_data;
1141
1142 /* IPM bits should be disabled by libata-core */
1143 /* get the existing command bits */
1144 cmd = readl(port_mmio + PORT_CMD);
1145
1146 /* disable ALPM and ASP */
1147 cmd &= ~PORT_CMD_ASP;
1148 cmd &= ~PORT_CMD_ALPE;
1149
1150 /* force the interface back to active */
1151 cmd |= PORT_CMD_ICC_ACTIVE;
1152
1153 /* write out new cmd value */
1154 writel(cmd, port_mmio + PORT_CMD);
1155 cmd = readl(port_mmio + PORT_CMD);
1156
1157 /* wait 10ms to be sure we've come out of any low power state */
1158 msleep(10);
1159
1160 /* clear out any PhyRdy stuff from interrupt status */
1161 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1162
1163 /* go ahead and clean out PhyRdy Change from Serror too */
1164 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1165
1166 /*
1167 * Clear flag to indicate that we should ignore all PhyRdy
1168 * state changes
1169 */
1170 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1171
1172 /*
1173 * Enable interrupts on Phy Ready.
1174 */
1175 pp->intr_mask |= PORT_IRQ_PHYRDY;
1176 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1177
1178 /*
1179 * don't change the link pm policy - we can be called
1180 * just to turn of link pm temporarily
1181 */
1182}
1183
1184static int ahci_enable_alpm(struct ata_port *ap,
1185 enum link_pm policy)
1186{
1187 struct ahci_host_priv *hpriv = ap->host->private_data;
1188 void __iomem *port_mmio = ahci_port_base(ap);
1189 u32 cmd;
1190 struct ahci_port_priv *pp = ap->private_data;
1191 u32 asp;
1192
1193 /* Make sure the host is capable of link power management */
1194 if (!(hpriv->cap & HOST_CAP_ALPM))
1195 return -EINVAL;
1196
1197 switch (policy) {
1198 case MAX_PERFORMANCE:
1199 case NOT_AVAILABLE:
1200 /*
1201 * if we came here with NOT_AVAILABLE,
1202 * it just means this is the first time we
1203 * have tried to enable - default to max performance,
1204 * and let the user go to lower power modes on request.
1205 */
1206 ahci_disable_alpm(ap);
1207 return 0;
1208 case MIN_POWER:
1209 /* configure HBA to enter SLUMBER */
1210 asp = PORT_CMD_ASP;
1211 break;
1212 case MEDIUM_POWER:
1213 /* configure HBA to enter PARTIAL */
1214 asp = 0;
1215 break;
1216 default:
1217 return -EINVAL;
1218 }
1219
1220 /*
1221 * Disable interrupts on Phy Ready. This keeps us from
1222 * getting woken up due to spurious phy ready interrupts
1223 * TBD - Hot plug should be done via polling now, is
1224 * that even supported?
1225 */
1226 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1227 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1228
1229 /*
1230 * Set a flag to indicate that we should ignore all PhyRdy
1231 * state changes since these can happen now whenever we
1232 * change link state
1233 */
1234 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1235
1236 /* get the existing command bits */
1237 cmd = readl(port_mmio + PORT_CMD);
1238
1239 /*
1240 * Set ASP based on Policy
1241 */
1242 cmd |= asp;
1243
1244 /*
1245 * Setting this bit will instruct the HBA to aggressively
1246 * enter a lower power link state when it's appropriate and
1247 * based on the value set above for ASP
1248 */
1249 cmd |= PORT_CMD_ALPE;
1250
1251 /* write out new cmd value */
1252 writel(cmd, port_mmio + PORT_CMD);
1253 cmd = readl(port_mmio + PORT_CMD);
1254
1255 /* IPM bits should be set by libata-core */
1256 return 0;
1257}
1258
1259#ifdef CONFIG_PM
1260static void ahci_power_down(struct ata_port *ap)
1261{
1262 struct ahci_host_priv *hpriv = ap->host->private_data;
1263 void __iomem *port_mmio = ahci_port_base(ap);
1264 u32 cmd, scontrol;
1265
1266 if (!(hpriv->cap & HOST_CAP_SSS))
1267 return;
1268
1269 /* put device into listen mode, first set PxSCTL.DET to 0 */
1270 scontrol = readl(port_mmio + PORT_SCR_CTL);
1271 scontrol &= ~0xf;
1272 writel(scontrol, port_mmio + PORT_SCR_CTL);
1273
1274 /* then set PxCMD.SUD to 0 */
1275 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1276 cmd &= ~PORT_CMD_SPIN_UP;
1277 writel(cmd, port_mmio + PORT_CMD);
1278}
1279#endif
1280
1281static void ahci_start_port(struct ata_port *ap)
1282{
1283 struct ahci_port_priv *pp = ap->private_data;
1284 struct ata_link *link;
1285 struct ahci_em_priv *emp;
1286 ssize_t rc;
1287 int i;
1288
1289 /* enable FIS reception */
1290 ahci_start_fis_rx(ap);
1291
1292 /* enable DMA */
1293 ahci_start_engine(ap);
1294
1295 /* turn on LEDs */
1296 if (ap->flags & ATA_FLAG_EM) {
1297 ata_for_each_link(link, ap, EDGE) {
1298 emp = &pp->em_priv[link->pmp];
1299
1300 /* EM Transmit bit maybe busy during init */
1301 for (i = 0; i < EM_MAX_RETRY; i++) {
1302 rc = ahci_transmit_led_message(ap,
1303 emp->led_state,
1304 4);
1305 if (rc == -EBUSY)
1306 msleep(1);
1307 else
1308 break;
1309 }
1310 }
1311 }
1312
1313 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1314 ata_for_each_link(link, ap, EDGE)
1315 ahci_init_sw_activity(link);
1316
1317}
1318
1319static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1320{
1321 int rc;
1322
1323 /* disable DMA */
1324 rc = ahci_stop_engine(ap);
1325 if (rc) {
1326 *emsg = "failed to stop engine";
1327 return rc;
1328 }
1329
1330 /* disable FIS reception */
1331 rc = ahci_stop_fis_rx(ap);
1332 if (rc) {
1333 *emsg = "failed stop FIS RX";
1334 return rc;
1335 }
1336
1337 return 0;
1338}
1339
1340static int ahci_reset_controller(struct ata_host *host)
1341{ 427{
1342 struct pci_dev *pdev = to_pci_dev(host->dev); 428 struct pci_dev *pdev = to_pci_dev(host->dev);
1343 struct ahci_host_priv *hpriv = host->private_data;
1344 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1345 u32 tmp;
1346 429
1347 /* we must be in AHCI mode, before using anything 430 ahci_reset_controller(host);
1348 * AHCI-specific, such as HOST_RESET.
1349 */
1350 ahci_enable_ahci(mmio);
1351
1352 /* global controller reset */
1353 if (!ahci_skip_host_reset) {
1354 tmp = readl(mmio + HOST_CTL);
1355 if ((tmp & HOST_RESET) == 0) {
1356 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1357 readl(mmio + HOST_CTL); /* flush */
1358 }
1359
1360 /*
1361 * to perform host reset, OS should set HOST_RESET
1362 * and poll until this bit is read to be "0".
1363 * reset must complete within 1 second, or
1364 * the hardware should be considered fried.
1365 */
1366 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1367 HOST_RESET, 10, 1000);
1368
1369 if (tmp & HOST_RESET) {
1370 dev_printk(KERN_ERR, host->dev,
1371 "controller reset failed (0x%x)\n", tmp);
1372 return -EIO;
1373 }
1374
1375 /* turn on AHCI mode */
1376 ahci_enable_ahci(mmio);
1377
1378 /* Some registers might be cleared on reset. Restore
1379 * initial values.
1380 */
1381 ahci_restore_initial_config(host);
1382 } else
1383 dev_printk(KERN_INFO, host->dev,
1384 "skipping global host reset\n");
1385 431
1386 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 432 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
433 struct ahci_host_priv *hpriv = host->private_data;
1387 u16 tmp16; 434 u16 tmp16;
1388 435
1389 /* configure PCS */ 436 /* configure PCS */
@@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host)
1397 return 0; 444 return 0;
1398} 445}
1399 446
1400static void ahci_sw_activity(struct ata_link *link) 447static void ahci_pci_init_controller(struct ata_host *host)
1401{
1402 struct ata_port *ap = link->ap;
1403 struct ahci_port_priv *pp = ap->private_data;
1404 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1405
1406 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1407 return;
1408
1409 emp->activity++;
1410 if (!timer_pending(&emp->timer))
1411 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1412}
1413
/*
 * Timer callback driving software activity blinking for one link.
 * While activity keeps arriving the LED is toggled and the timer
 * re-armed; once activity stops the LED is parked in its idle state
 * per the link's blink policy.
 */
static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	/* keep only the LED value bits, address the message at this
	 * port / PMP slot */
	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ahci_transmit_led_message(ap, led_message, 4);
}
1457
1458static void ahci_init_sw_activity(struct ata_link *link)
1459{
1460 struct ata_port *ap = link->ap;
1461 struct ahci_port_priv *pp = ap->private_data;
1462 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1463
1464 /* init activity stats, setup timer */
1465 emp->saved_activity = emp->activity = 0;
1466 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1467
1468 /* check our blink policy and set flag for link if it's enabled */
1469 if (emp->blink_policy)
1470 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1471}
1472
1473static int ahci_reset_em(struct ata_host *host)
1474{
1475 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1476 u32 em_ctl;
1477
1478 em_ctl = readl(mmio + HOST_EM_CTL);
1479 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1480 return -EINVAL;
1481
1482 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1483 return 0;
1484}
1485
/*
 * Send one 4-byte enclosure-management LED message for @ap.  @state
 * carries the PMP slot number (bits 8+) and the LED value bits.
 * Returns @size on success, -EINVAL for an out-of-range slot, or
 * -EBUSY when a previous transmission is still pending.
 */
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	/*
	 * create message header - this is all zero except for
	 * the message size, which is 4 bytes.
	 */
	message[0] |= (4 << 8);

	/* ignore 0:4 of byte zero, fill in port info yourself */
	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

	/* write message to EM_LOC */
	writel(message[0], mmio + hpriv->em_loc);
	writel(message[1], mmio + hpriv->em_loc+4);

	/* save off new led state for port/slot */
	emp->led_state = state;

	/*
	 * tell hardware to transmit the message
	 */
	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}
1541
1542static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1543{
1544 struct ahci_port_priv *pp = ap->private_data;
1545 struct ata_link *link;
1546 struct ahci_em_priv *emp;
1547 int rc = 0;
1548
1549 ata_for_each_link(link, ap, EDGE) {
1550 emp = &pp->em_priv[link->pmp];
1551 rc += sprintf(buf, "%lx\n", emp->led_state);
1552 }
1553 return rc;
1554}
1555
1556static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1557 size_t size)
1558{
1559 int state;
1560 int pmp;
1561 struct ahci_port_priv *pp = ap->private_data;
1562 struct ahci_em_priv *emp;
1563
1564 state = simple_strtoul(buf, NULL, 0);
1565
1566 /* get the slot number from the message */
1567 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1568 if (pmp < EM_MAX_SLOTS)
1569 emp = &pp->em_priv[pmp];
1570 else
1571 return -EINVAL;
1572
1573 /* mask off the activity bits if we are in sw_activity
1574 * mode, user should turn off sw_activity before setting
1575 * activity led through em_message
1576 */
1577 if (emp->blink_policy)
1578 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1579
1580 return ahci_transmit_led_message(ap, state, size);
1581}
1582
1583static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1584{
1585 struct ata_link *link = dev->link;
1586 struct ata_port *ap = link->ap;
1587 struct ahci_port_priv *pp = ap->private_data;
1588 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1589 u32 port_led_state = emp->led_state;
1590
1591 /* save the desired Activity LED behavior */
1592 if (val == OFF) {
1593 /* clear LFLAG */
1594 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1595
1596 /* set the LED to OFF */
1597 port_led_state &= EM_MSG_LED_VALUE_OFF;
1598 port_led_state |= (ap->port_no | (link->pmp << 8));
1599 ahci_transmit_led_message(ap, port_led_state, 4);
1600 } else {
1601 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1602 if (val == BLINK_OFF) {
1603 /* set LED to ON for idle */
1604 port_led_state &= EM_MSG_LED_VALUE_OFF;
1605 port_led_state |= (ap->port_no | (link->pmp << 8));
1606 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1607 ahci_transmit_led_message(ap, port_led_state, 4);
1608 }
1609 }
1610 emp->blink_policy = val;
1611 return 0;
1612}
1613
1614static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1615{
1616 struct ata_link *link = dev->link;
1617 struct ata_port *ap = link->ap;
1618 struct ahci_port_priv *pp = ap->private_data;
1619 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1620
1621 /* display the saved value of activity behavior for this
1622 * disk.
1623 */
1624 return sprintf(buf, "%d\n", emp->blink_policy);
1625}
1626
/*
 * Bring port @ap to a known quiescent state during controller init:
 * deinit the port (stop engine / FIS RX), clear SError, then ack any
 * pending port-level and host-level interrupt status.
 */
static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	/* ack this port's bit in the host-wide interrupt status register */
	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}
1654
1655static void ahci_init_controller(struct ata_host *host)
1656{ 448{
1657 struct ahci_host_priv *hpriv = host->private_data; 449 struct ahci_host_priv *hpriv = host->private_data;
1658 struct pci_dev *pdev = to_pci_dev(host->dev); 450 struct pci_dev *pdev = to_pci_dev(host->dev);
1659 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1660 int i;
1661 void __iomem *port_mmio; 451 void __iomem *port_mmio;
1662 u32 tmp; 452 u32 tmp;
1663 int mv; 453 int mv;
@@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host)
1678 writel(tmp, port_mmio + PORT_IRQ_STAT); 468 writel(tmp, port_mmio + PORT_IRQ_STAT);
1679 } 469 }
1680 470
1681 for (i = 0; i < host->n_ports; i++) { 471 ahci_init_controller(host);
1682 struct ata_port *ap = host->ports[i];
1683
1684 port_mmio = ahci_port_base(ap);
1685 if (ata_port_is_dummy(ap))
1686 continue;
1687
1688 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1689 }
1690
1691 tmp = readl(mmio + HOST_CTL);
1692 VPRINTK("HOST_CTL 0x%x\n", tmp);
1693 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1694 tmp = readl(mmio + HOST_CTL);
1695 VPRINTK("HOST_CTL 0x%x\n", tmp);
1696}
1697
1698static void ahci_dev_config(struct ata_device *dev)
1699{
1700 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1701
1702 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1703 dev->max_sectors = 255;
1704 ata_dev_printk(dev, KERN_INFO,
1705 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1706 }
1707}
1708
1709static unsigned int ahci_dev_classify(struct ata_port *ap)
1710{
1711 void __iomem *port_mmio = ahci_port_base(ap);
1712 struct ata_taskfile tf;
1713 u32 tmp;
1714
1715 tmp = readl(port_mmio + PORT_SIG);
1716 tf.lbah = (tmp >> 24) & 0xff;
1717 tf.lbam = (tmp >> 16) & 0xff;
1718 tf.lbal = (tmp >> 8) & 0xff;
1719 tf.nsect = (tmp) & 0xff;
1720
1721 return ata_dev_classify(&tf);
1722}
1723
1724static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1725 u32 opts)
1726{
1727 dma_addr_t cmd_tbl_dma;
1728
1729 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1730
1731 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1732 pp->cmd_slot[tag].status = 0;
1733 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1734 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1735}
1736
/*
 * Kick the port's command engine out of a stuck state: stop the DMA
 * engine, perform a Command List Override (CLO) when the device is
 * busy or a PMP is attached, then restart the engine.  Returns 0 on
 * success, -EOPNOTSUPP when CLO is required but not supported, -EIO
 * when CLO fails to clear.
 */
static int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	/* hardware clears PORT_CMD_CLO when the override completes */
	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
1780
/*
 * Build a register FIS from @tf in command slot 0, issue it, and --
 * when @timeout_msec is non-zero -- poll PORT_CMD_ISSUE until the
 * command completes.  Returns 0 on success, -EBUSY on timeout (the
 * engine is kicked before returning).
 */
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			/* still pending -- unwedge the engine */
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
1810
/*
 * Perform an ATA software reset on @link via two D2H register FISes
 * (SRST set, then cleared) per AHCI-1.1 10.4.1, then classify the
 * attached device.  @check_ready supplies the controller-specific
 * readiness test.  Returns 0 on success or a negative errno.
 */
static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
			     int pmp, unsigned long deadline,
			     int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(now, deadline))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted.  Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_printk(link, KERN_INFO,
				"device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}
1878
1879static int ahci_check_ready(struct ata_link *link)
1880{
1881 void __iomem *port_mmio = ahci_port_base(link->ap);
1882 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1883
1884 return ata_check_ready(status);
1885}
1886
1887static int ahci_softreset(struct ata_link *link, unsigned int *class,
1888 unsigned long deadline)
1889{
1890 int pmp = sata_srst_pmp(link);
1891
1892 DPRINTK("ENTER\n");
1893
1894 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1895} 472}
1896 473
1897static int ahci_sb600_check_ready(struct ata_link *link) 474static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1943 return rc; 520 return rc;
1944} 521}
1945 522
/*
 * COMRESET-based hardreset: stop the engine, clear the D2H RX area
 * (so we can reliably wait for the post-reset D2H FIS), drive the
 * link hardreset, restart the engine, and classify the device when
 * the link came back online.
 */
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;	/* poison status so a real FIS is detectable */
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
1977
1978static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 523static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1979 unsigned long deadline) 524 unsigned long deadline)
1980{ 525{
@@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2043 return rc; 588 return rc;
2044} 589}
2045 590
2046static void ahci_postreset(struct ata_link *link, unsigned int *class)
2047{
2048 struct ata_port *ap = link->ap;
2049 void __iomem *port_mmio = ahci_port_base(ap);
2050 u32 new_tmp, tmp;
2051
2052 ata_std_postreset(link, class);
2053
2054 /* Make sure port's ATAPI bit is set appropriately */
2055 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2056 if (*class == ATA_DEV_ATAPI)
2057 new_tmp |= PORT_CMD_ATAPI;
2058 else
2059 new_tmp &= ~PORT_CMD_ATAPI;
2060 if (new_tmp != tmp) {
2061 writel(new_tmp, port_mmio + PORT_CMD);
2062 readl(port_mmio + PORT_CMD); /* flush */
2063 }
2064}
2065
2066static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2067{
2068 struct scatterlist *sg;
2069 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2070 unsigned int si;
2071
2072 VPRINTK("ENTER\n");
2073
2074 /*
2075 * Next, the S/G list.
2076 */
2077 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2078 dma_addr_t addr = sg_dma_address(sg);
2079 u32 sg_len = sg_dma_len(sg);
2080
2081 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2082 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2083 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2084 }
2085
2086 return si;
2087}
2088
2089static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2090{
2091 struct ata_port *ap = qc->ap;
2092 struct ahci_port_priv *pp = ap->private_data;
2093
2094 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2095 return ata_std_qc_defer(qc);
2096 else
2097 return sata_pmp_qc_defer_cmd_switch(qc);
2098}
2099
/*
 * Prepare @qc for issue: build the H2D command FIS (plus CDB for
 * ATAPI) in the per-tag command table, append the scatter/gather
 * PRD entries, and fill the matching command-list slot.
 */
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		/* zero-pad then copy the CDB into its fixed slot */
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
2137
/*
 * Clear a device error on an FBS-enabled port by setting the
 * Device Error Clear (DEC) bit and waiting for hardware to ack it.
 * Only valid while FBS is enabled.
 */
static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_printk(KERN_ERR, ap->host->dev,
			   "failed to clear device error\n");
}
2162
/*
 * Error interrupt handler: determine which link the error belongs
 * to (via PORT_FBS when FBS is enabled, otherwise the active link),
 * translate the IRQ status bits into libata error masks/actions,
 * and hand the port or link over to EH.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		/* single-device error reported via Device With Error field */
		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		/* single-device FBS error: abort only that link and
		 * clear the error so other devices keep running */
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
2274
/*
 * Per-port interrupt handler: ack PORT_IRQ_STAT, route errors to
 * ahci_error_intr(), handle SDB-FIS async notification, then
 * complete finished commands based on PORT_SCR_ACT/PORT_CMD_ISSUE.
 */
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	/* read and ack the port interrupt status */
	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
		(status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some constrollers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
2366
/*
 * Top-level (host) interrupt handler: dispatch the per-port handler
 * for every port flagged in HOST_IRQ_STAT, then ack the host-level
 * status after all port events have been cleared.
 */
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	/* only service ports we actually own */
	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					"interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
2426
/*
 * Issue @qc to the hardware: record the active link, set PORT_SCR_ACT
 * for NCQ commands, switch the FBS device field when needed, and
 * finally write the tag into PORT_CMD_ISSUE.  Always returns 0.
 */
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	/* NCQ commands must set their bit in SActive before issue */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);

	/* re-target FBS at the command's PMP device if it changed */
	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}
2456
2457static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2458{
2459 struct ahci_port_priv *pp = qc->ap->private_data;
2460 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2461
2462 if (pp->fbs_enabled)
2463 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2464
2465 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2466 return true;
2467}
2468
2469static void ahci_freeze(struct ata_port *ap)
2470{
2471 void __iomem *port_mmio = ahci_port_base(ap);
2472
2473 /* turn IRQ off */
2474 writel(0, port_mmio + PORT_IRQ_MASK);
2475}
2476
/*
 * Thaw the port: clear any pending port and host interrupt status
 * first, then restore the saved interrupt mask.  Clearing before
 * unmasking avoids re-raising stale events.
 */
static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
2492
2493static void ahci_error_handler(struct ata_port *ap)
2494{
2495 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2496 /* restart engine */
2497 ahci_stop_engine(ap);
2498 ahci_start_engine(ap);
2499 }
2500
2501 sata_pmp_error_handler(ap);
2502}
2503
2504static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2505{
2506 struct ata_port *ap = qc->ap;
2507
2508 /* make DMA engine forget about the failed command */
2509 if (qc->flags & ATA_QCFLAG_FAILED)
2510 ahci_kick_engine(ap);
2511}
2512
/*
 * Enable FIS-based switching on @ap if the port supports it.  The
 * engine must be stopped while PORT_FBS_EN is flipped; the write is
 * read back to confirm the hardware accepted it.
 */
static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		/* already enabled -- just sync software state */
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");

	ahci_start_engine(ap);
}
2545
/*
 * Disable FIS-based switching on @ap.  Mirror image of
 * ahci_enable_fbs(): stop the engine, clear PORT_FBS_EN, verify by
 * read-back, then restart the engine.
 */
static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		/* already disabled -- just sync software state */
		pp->fbs_enabled = false;
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
		pp->fbs_enabled = false;
	}

	ahci_start_engine(ap);
}
2577
2578static void ahci_pmp_attach(struct ata_port *ap)
2579{
2580 void __iomem *port_mmio = ahci_port_base(ap);
2581 struct ahci_port_priv *pp = ap->private_data;
2582 u32 cmd;
2583
2584 cmd = readl(port_mmio + PORT_CMD);
2585 cmd |= PORT_CMD_PMP;
2586 writel(cmd, port_mmio + PORT_CMD);
2587
2588 ahci_enable_fbs(ap);
2589
2590 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2591 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2592}
2593
2594static void ahci_pmp_detach(struct ata_port *ap)
2595{
2596 void __iomem *port_mmio = ahci_port_base(ap);
2597 struct ahci_port_priv *pp = ap->private_data;
2598 u32 cmd;
2599
2600 ahci_disable_fbs(ap);
2601
2602 cmd = readl(port_mmio + PORT_CMD);
2603 cmd &= ~PORT_CMD_PMP;
2604 writel(cmd, port_mmio + PORT_CMD);
2605
2606 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2607 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2608}
2609
/*
 * Resume the port: power it up, restart it, and re-sync the PMP
 * state with what is actually attached.  Always returns 0.
 */
static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
2622
2623#ifdef CONFIG_PM 591#ifdef CONFIG_PM
/*
 * Suspend the port: deinit it and power it down.  On failure the
 * port is restarted so it stays usable, and the errno is returned.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		/* couldn't quiesce -- log and bring the port back up */
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}
2639
2640static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 592static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2641{ 593{
2642 struct ata_host *host = dev_get_drvdata(&pdev->dev); 594 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2643 struct ahci_host_priv *hpriv = host->private_data; 595 struct ahci_host_priv *hpriv = host->private_data;
2644 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 596 void __iomem *mmio = hpriv->mmio;
2645 u32 ctl; 597 u32 ctl;
2646 598
2647 if (mesg.event & PM_EVENT_SUSPEND && 599 if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
2675 return rc; 627 return rc;
2676 628
2677 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 629 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2678 rc = ahci_reset_controller(host); 630 rc = ahci_pci_reset_controller(host);
2679 if (rc) 631 if (rc)
2680 return rc; 632 return rc;
2681 633
2682 ahci_init_controller(host); 634 ahci_pci_init_controller(host);
2683 } 635 }
2684 636
2685 ata_host_resume(host); 637 ata_host_resume(host);
@@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
2688} 640}
2689#endif 641#endif
2690 642
/*
 * ahci_port_start - per-port init: allocate and lay out the port's DMA
 * memory (command list, received-FIS area, command table), then bring
 * the port up via ahci_port_resume().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or whatever
 * ahci_port_resume() returns.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability: needs both the host CAP bit and the
	 * per-port FBSCP bit */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else
			dev_printk(KERN_WARNING, dev,
				   "The port is not capable of FBS\n");
	}

	/* FBS needs 16 RX FIS areas instead of one -- presumably one per
	 * PMP device; confirm against the FBS handling in libahci.c */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2765
/*
 * ahci_port_stop - per-port teardown.  Deinitializes the port; a
 * failure at this point can only be logged, not recovered from.
 * DMA memory is devm-managed and freed automatically.
 */
static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
2776
2777static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 643static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2778{ 644{
2779 int rc; 645 int rc;
@@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2806 return 0; 672 return 0;
2807} 673}
2808 674
2809static void ahci_print_info(struct ata_host *host) 675static void ahci_pci_print_info(struct ata_host *host)
2810{ 676{
2811 struct ahci_host_priv *hpriv = host->private_data;
2812 struct pci_dev *pdev = to_pci_dev(host->dev); 677 struct pci_dev *pdev = to_pci_dev(host->dev);
2813 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2814 u32 vers, cap, cap2, impl, speed;
2815 const char *speed_s;
2816 u16 cc; 678 u16 cc;
2817 const char *scc_s; 679 const char *scc_s;
2818 680
2819 vers = readl(mmio + HOST_VERSION);
2820 cap = hpriv->cap;
2821 cap2 = hpriv->cap2;
2822 impl = hpriv->port_map;
2823
2824 speed = (cap >> 20) & 0xf;
2825 if (speed == 1)
2826 speed_s = "1.5";
2827 else if (speed == 2)
2828 speed_s = "3";
2829 else if (speed == 3)
2830 speed_s = "6";
2831 else
2832 speed_s = "?";
2833
2834 pci_read_config_word(pdev, 0x0a, &cc); 681 pci_read_config_word(pdev, 0x0a, &cc);
2835 if (cc == PCI_CLASS_STORAGE_IDE) 682 if (cc == PCI_CLASS_STORAGE_IDE)
2836 scc_s = "IDE"; 683 scc_s = "IDE";
@@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host)
2841 else 688 else
2842 scc_s = "unknown"; 689 scc_s = "unknown";
2843 690
2844 dev_printk(KERN_INFO, &pdev->dev, 691 ahci_print_info(host, scc_s);
2845 "AHCI %02x%02x.%02x%02x "
2846 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2847 ,
2848
2849 (vers >> 24) & 0xff,
2850 (vers >> 16) & 0xff,
2851 (vers >> 8) & 0xff,
2852 vers & 0xff,
2853
2854 ((cap >> 8) & 0x1f) + 1,
2855 (cap & 0x1f) + 1,
2856 speed_s,
2857 impl,
2858 scc_s);
2859
2860 dev_printk(KERN_INFO, &pdev->dev,
2861 "flags: "
2862 "%s%s%s%s%s%s%s"
2863 "%s%s%s%s%s%s%s"
2864 "%s%s%s%s%s%s\n"
2865 ,
2866
2867 cap & HOST_CAP_64 ? "64bit " : "",
2868 cap & HOST_CAP_NCQ ? "ncq " : "",
2869 cap & HOST_CAP_SNTF ? "sntf " : "",
2870 cap & HOST_CAP_MPS ? "ilck " : "",
2871 cap & HOST_CAP_SSS ? "stag " : "",
2872 cap & HOST_CAP_ALPM ? "pm " : "",
2873 cap & HOST_CAP_LED ? "led " : "",
2874 cap & HOST_CAP_CLO ? "clo " : "",
2875 cap & HOST_CAP_ONLY ? "only " : "",
2876 cap & HOST_CAP_PMP ? "pmp " : "",
2877 cap & HOST_CAP_FBS ? "fbs " : "",
2878 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2879 cap & HOST_CAP_SSC ? "slum " : "",
2880 cap & HOST_CAP_PART ? "part " : "",
2881 cap & HOST_CAP_CCC ? "ccc " : "",
2882 cap & HOST_CAP_EMS ? "ems " : "",
2883 cap & HOST_CAP_SXS ? "sxs " : "",
2884 cap2 & HOST_CAP2_APST ? "apst " : "",
2885 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2886 cap2 & HOST_CAP2_BOH ? "boh " : ""
2887 );
2888} 692}
2889 693
2890/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is 694/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3308,41 +1112,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3308 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 1112 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3309 pci_intx(pdev, 1); 1113 pci_intx(pdev, 1);
3310 1114
1115 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
1116
3311 /* save initial config */ 1117 /* save initial config */
3312 ahci_save_initial_config(pdev, hpriv); 1118 ahci_pci_save_initial_config(pdev, hpriv);
3313 1119
3314 /* prepare host */ 1120 /* prepare host */
3315 if (hpriv->cap & HOST_CAP_NCQ) { 1121 if (hpriv->cap & HOST_CAP_NCQ) {
3316 pi.flags |= ATA_FLAG_NCQ; 1122 pi.flags |= ATA_FLAG_NCQ;
3317 /* Auto-activate optimization is supposed to be supported on 1123 /*
3318 all AHCI controllers indicating NCQ support, but it seems 1124 * Auto-activate optimization is supposed to be
3319 to be broken at least on some NVIDIA MCP79 chipsets. 1125 * supported on all AHCI controllers indicating NCQ
3320 Until we get info on which NVIDIA chipsets don't have this 1126 * capability, but it seems to be broken on some
3321 issue, if any, disable AA on all NVIDIA AHCIs. */ 1127 * chipsets including NVIDIAs.
3322 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) 1128 */
1129 if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
3323 pi.flags |= ATA_FLAG_FPDMA_AA; 1130 pi.flags |= ATA_FLAG_FPDMA_AA;
3324 } 1131 }
3325 1132
3326 if (hpriv->cap & HOST_CAP_PMP) 1133 if (hpriv->cap & HOST_CAP_PMP)
3327 pi.flags |= ATA_FLAG_PMP; 1134 pi.flags |= ATA_FLAG_PMP;
3328 1135
3329 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { 1136 ahci_set_em_messages(hpriv, &pi);
3330 u8 messages;
3331 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3332 u32 em_loc = readl(mmio + HOST_EM_LOC);
3333 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3334
3335 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3336
3337 /* we only support LED message type right now */
3338 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3339 /* store em_loc */
3340 hpriv->em_loc = ((em_loc >> 16) * 4);
3341 pi.flags |= ATA_FLAG_EM;
3342 if (!(em_ctl & EM_CTL_ALHD))
3343 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3344 }
3345 }
3346 1137
3347 if (ahci_broken_system_poweroff(pdev)) { 1138 if (ahci_broken_system_poweroff(pdev)) {
3348 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; 1139 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1163,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3372 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 1163 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3373 if (!host) 1164 if (!host)
3374 return -ENOMEM; 1165 return -ENOMEM;
3375 host->iomap = pcim_iomap_table(pdev);
3376 host->private_data = hpriv; 1166 host->private_data = hpriv;
3377 1167
3378 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1168 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3395,7 +1185,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3395 1185
3396 /* set enclosure management message type */ 1186 /* set enclosure management message type */
3397 if (ap->flags & ATA_FLAG_EM) 1187 if (ap->flags & ATA_FLAG_EM)
3398 ap->em_message_type = ahci_em_messages; 1188 ap->em_message_type = hpriv->em_msg_type;
3399 1189
3400 1190
3401 /* disabled/not-implemented port */ 1191 /* disabled/not-implemented port */
@@ -3414,12 +1204,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3414 if (rc) 1204 if (rc)
3415 return rc; 1205 return rc;
3416 1206
3417 rc = ahci_reset_controller(host); 1207 rc = ahci_pci_reset_controller(host);
3418 if (rc) 1208 if (rc)
3419 return rc; 1209 return rc;
3420 1210
3421 ahci_init_controller(host); 1211 ahci_pci_init_controller(host);
3422 ahci_print_info(host); 1212 ahci_pci_print_info(host);
3423 1213
3424 pci_set_master(pdev); 1214 pci_set_master(pdev);
3425 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 1215 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 000000000000..7113c5724471
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,343 @@
1/*
2 * ahci.h - Common AHCI SATA definitions and declarations
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#ifndef _AHCI_H
36#define _AHCI_H
37
38#include <linux/libata.h>
39
40/* Enclosure Management Control */
41#define EM_CTRL_MSG_TYPE 0x000f0000
42
43/* Enclosure Management LED Message Type */
44#define EM_MSG_LED_HBA_PORT 0x0000000f
45#define EM_MSG_LED_PMP_SLOT 0x0000ff00
46#define EM_MSG_LED_VALUE 0xffff0000
47#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
48#define EM_MSG_LED_VALUE_OFF 0xfff80000
49#define EM_MSG_LED_VALUE_ON 0x00010000
50
/* Sizes, register offsets and bit definitions for AHCI controllers. */
enum {
	AHCI_MAX_PORTS		= 32,
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_MAX_CMDS		= 32,
	AHCI_CMD_SZ		= 32,
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,
	AHCI_PORT_PRIV_FBS_DMA_SZ	= AHCI_CMD_SLOT_SZ +
					  AHCI_CMD_TBL_AR_SZ +
					  (AHCI_RX_FIS_SZ * 16),
	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
	HOST_CAP2		= 0x24, /* host capabilities, extended */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SXS		= (1 << 5),  /* Supports External SATA */
	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
	HOST_CAP_CCC		= (1 << 7),  /* Command Completion Coalescing */
	HOST_CAP_PART		= (1 << 13), /* Partial state capable */
	HOST_CAP_SSC		= (1 << 14), /* Slumber state capable */
	HOST_CAP_PIO_MULTI	= (1 << 15), /* PIO multiple DRQ support */
	HOST_CAP_FBS		= (1 << 16), /* FIS-based switching support */
	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
	HOST_CAP_ONLY		= (1 << 18), /* Supports AHCI mode only */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_LED		= (1 << 25), /* Supports activity LED */
	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_MPS		= (1 << 28), /* Mechanical presence switch */
	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* HOST_CAP2 bits */
	HOST_CAP2_BOH		= (1 << 0),  /* BIOS/OS handoff supported */
	HOST_CAP2_NVMHCI	= (1 << 1),  /* NVMHCI supported */
	HOST_CAP2_APST		= (1 << 2),  /* Automatic partial to slumber */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20,	/* taskfile data */
	PORT_SIG		= 0x24, /* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
	PORT_FBS		= 0x40, /* FIS-based Switching */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7),  /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6),  /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5),  /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4),  /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3),  /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2),  /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1),  /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0),  /* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS |
				  PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_FBSCP		= (1 << 22), /* FBS Capable Port */
	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4),  /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3),  /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2),  /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1),  /* Spin up device */
	PORT_CMD_START		= (1 << 0),  /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	PORT_FBS_DWE_OFFSET	= 16, /* FBS device with error offset */
	PORT_FBS_ADO_OFFSET	= 12, /* FBS active dev optimization offset */
	PORT_FBS_DEV_OFFSET	= 8,  /* FBS device to issue offset */
	PORT_FBS_DEV_MASK	= (0xf << PORT_FBS_DEV_OFFSET),  /* FBS.DEV */
	PORT_FBS_SDE		= (1 << 2), /* FBS single device error */
	PORT_FBS_DEC		= (1 << 1), /* FBS device error clear */
	PORT_FBS_EN		= (1 << 0), /* Enable FBS */

	/* hpriv->flags bits */
	AHCI_HFLAG_NO_NCQ		= (1 << 0),
	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
							link offline */
	AHCI_HFLAG_NO_SNTF		= (1 << 12), /* no sntf */
	AHCI_HFLAG_NO_FPDMA_AA		= (1 << 13), /* no FPDMA AA */

	/* ap->flags bits */

	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
					  ATA_FLAG_IPM,

	ICH_MAP				= 0x90, /* ICH MAP register */

	/* em constants */
	EM_MAX_SLOTS			= 8,
	EM_MAX_RETRY			= 5,

	/* em_ctl bits */
	EM_CTL_RST		= (1 << 9), /* Reset */
	EM_CTL_TM		= (1 << 8), /* Transmit Message */
	EM_CTL_MR		= (1 << 0), /* Message Received */
	EM_CTL_ALHD		= (1 << 26), /* Activity LED */
	EM_CTL_XMT		= (1 << 25), /* Transmit Only */
	EM_CTL_SMB		= (1 << 24), /* Single Message Buffer */

	/* em message type */
	EM_MSG_TYPE_LED		= (1 << 0), /* LED */
	EM_MSG_TYPE_SAFTE	= (1 << 1), /* SAF-TE */
	EM_MSG_TYPE_SES2	= (1 << 2), /* SES-2 */
	EM_MSG_TYPE_SGPIO	= (1 << 3), /* SGPIO */
};
240
/* 32-byte command header; one per slot in the per-port command list. */
struct ahci_cmd_hdr {
	__le32			opts;		/* option flags / descriptor info */
	__le32			status;
	__le32			tbl_addr;	/* command table base, low 32 bits */
	__le32			tbl_addr_hi;	/* command table base, high 32 bits */
	__le32			reserved[4];
};
248
/* Scatter/gather (PRDT) entry inside a command table. */
struct ahci_sg {
	__le32			addr;		/* data buffer address, low 32 bits */
	__le32			addr_hi;	/* data buffer address, high 32 bits */
	__le32			reserved;
	__le32			flags_size;	/* flags + transfer byte count */
};
255
/*
 * Per-slot enclosure-management LED state (one per PMP slot,
 * see ahci_port_priv::em_priv).
 */
struct ahci_em_priv {
	enum sw_activity blink_policy;
	struct timer_list timer;	/* presumably drives LED blinking -- confirm in libahci.c */
	unsigned long saved_activity;
	unsigned long activity;
	unsigned long led_state;
};
263
/*
 * Per-port private state, allocated in ahci_port_start() together
 * with the port's coherent DMA areas.
 */
struct ahci_port_priv {
	struct ata_link		*active_link;
	struct ahci_cmd_hdr	*cmd_slot;	/* 32-slot command list (DMA) */
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;	/* command table area (DMA) */
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;	/* received-FIS area (DMA) */
	dma_addr_t		rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int		ncq_saw_d2h:1;
	unsigned int		ncq_saw_dmas:1;
	unsigned int		ncq_saw_sdb:1;
	u32			intr_mask;	/* interrupts to enable */
	bool			fbs_supported;	/* set iff FBS is supported */
	bool			fbs_enabled;	/* set iff FBS is enabled */
	int			fbs_last_dev;	/* save FBS.DEV of last FIS */
	/* enclosure management info per PM slot */
	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
};
283
/* Host-controller private data shared by the PCI and platform drivers. */
struct ahci_host_priv {
	void __iomem *		mmio;		/* bus-independent mem map */
	unsigned int		flags;		/* AHCI_HFLAG_* */
	u32			cap;		/* cap to use */
	u32			cap2;		/* cap2 to use */
	u32			port_map;	/* port map to use */
	u32			saved_cap;	/* saved initial cap */
	u32			saved_cap2;	/* saved initial cap2 */
	u32			saved_port_map;	/* saved initial port_map */
	u32 			em_loc;		/* enclosure management location */
	u32			em_buf_sz;	/* EM buffer size in byte */
	u32			em_msg_type;	/* EM message type */
};
297
298extern int ahci_ignore_sss;
299
300extern struct scsi_host_template ahci_sht;
301extern struct ata_port_operations ahci_ops;
302
303void ahci_save_initial_config(struct device *dev,
304 struct ahci_host_priv *hpriv,
305 unsigned int force_port_map,
306 unsigned int mask_port_map);
307void ahci_init_controller(struct ata_host *host);
308int ahci_reset_controller(struct ata_host *host);
309
310int ahci_do_softreset(struct ata_link *link, unsigned int *class,
311 int pmp, unsigned long deadline,
312 int (*check_ready)(struct ata_link *link));
313
314int ahci_stop_engine(struct ata_port *ap);
315void ahci_start_engine(struct ata_port *ap);
316int ahci_check_ready(struct ata_link *link);
317int ahci_kick_engine(struct ata_port *ap);
318void ahci_set_em_messages(struct ahci_host_priv *hpriv,
319 struct ata_port_info *pi);
320int ahci_reset_em(struct ata_host *host);
321irqreturn_t ahci_interrupt(int irq, void *dev_instance);
322void ahci_print_info(struct ata_host *host, const char *scc_s);
323
324static inline void __iomem *__ahci_port_base(struct ata_host *host,
325 unsigned int port_no)
326{
327 struct ahci_host_priv *hpriv = host->private_data;
328 void __iomem *mmio = hpriv->mmio;
329
330 return mmio + 0x100 + (port_no * 0x80);
331}
332
/* Per-port register base for @ap on its host. */
static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}
337
338static inline int ahci_nr_ports(u32 cap)
339{
340 return (cap & 0x1f) + 1;
341}
342
343#endif /* _AHCI_H */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 000000000000..5e11b160f247
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,192 @@
1/*
2 * AHCI SATA platform driver
3 *
4 * Copyright 2004-2005 Red Hat, Inc.
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Copyright 2010 MontaVista Software, LLC.
7 * Anton Vorontsov <avorontsov@ru.mvista.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/gfp.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/libata.h>
23#include <linux/ahci_platform.h>
24#include "ahci.h"
25
/*
 * ahci_probe - platform-device probe: map the MMIO resource, read the
 * controller's capabilities, allocate and configure the libata host
 * and activate it.
 *
 * On any failure after the optional pdata->init() hook has run, the
 * pdata->exit() hook is invoked before returning the error.
 */
static int __init ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev->platform_data;
	struct ata_port_info pi = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* board-specific init hook, runs before the hardware is touched */
	if (pdata && pdata->init) {
		rc = pdata->init(dev);
		if (rc)
			return rc;
	}

	/* the platform may replace the default port info wholesale */
	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err0;
	}

	/* platform hflags are smuggled in via pi.private_data */
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		rc = -ENOMEM;
		goto err0;
	}

	ahci_save_initial_config(dev, hpriv,
				 pdata ? pdata->force_port_map : 0,
				 pdata ? pdata->mask_port_map : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	/* pdata->init() has always run by the first goto, so the
	 * init/exit pairing holds on every error path */
	if (pdata && pdata->exit)
		pdata->exit(dev);
	return rc;
}
153
154static int __devexit ahci_remove(struct platform_device *pdev)
155{
156 struct device *dev = &pdev->dev;
157 struct ahci_platform_data *pdata = dev->platform_data;
158 struct ata_host *host = dev_get_drvdata(dev);
159
160 ata_host_detach(host);
161
162 if (pdata && pdata->exit)
163 pdata->exit(dev);
164
165 return 0;
166}
167
static struct platform_driver ahci_driver = {
	/*
	 * NOTE(review): the driver is registered via
	 * platform_driver_probe(&ahci_driver, ahci_probe), which supplies
	 * the probe routine itself, and ahci_probe is __init (discarded
	 * after boot).  Keeping .probe pointing at it here looks redundant
	 * and potentially dangerous on a late rebind -- confirm.
	 */
	.probe = ahci_probe,
	.remove = __devexit_p(ahci_remove),
	.driver = {
		.name = "ahci",
		.owner = THIS_MODULE,
	},
};
176
/* Module entry point; platform_driver_probe() binds any already
 * registered "ahci" platform devices using ahci_probe. */
static int __init ahci_init(void)
{
	return platform_driver_probe(&ahci_driver, ahci_probe);
}
module_init(ahci_init);
182
/* Module exit point; unregisters the platform driver. */
static void __exit ahci_exit(void)
{
	platform_driver_unregister(&ahci_driver);
}
module_exit(ahci_exit);
188
189MODULE_DESCRIPTION("AHCI SATA platform driver");
190MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
191MODULE_LICENSE("GPL");
192MODULE_ALIAS("platform:ahci");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9bb..ec52fc618763 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
43 * driver the list of errata that are relevant is below, going back to 43 * driver the list of errata that are relevant is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find. 44 * PIIX4. Older device documentation is now a bit tricky to find.
45 * 45 *
46 * The chipsets all follow very much the same design. The orginal Triton 46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independant device timings, but this 47 * series chipsets do _not_ support independant device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then 48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This 49 * change little except in gaining more modes until SATA arrives. This
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 000000000000..1984a6e89e84
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2216 @@
1/*
2 * libahci.c - Common AHCI SATA low-level routines
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/gfp.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/dma-mapping.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_cmnd.h>
46#include <linux/libata.h>
47#include "ahci.h"
48
/* Load-time tunables; perms 0444 = visible but not writable via sysfs. */
static int ahci_skip_host_reset;
int ahci_ignore_sss;
/* exported: consulted by the PCI/platform front-ends as well */
EXPORT_SYMBOL_GPL(ahci_ignore_sss);

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
58
/* Forward declarations for the ahci_ops table and sysfs attrs below. */
static int ahci_enable_alpm(struct ata_port *ap,
		enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size);



static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_enable_fbs(struct ata_port *ap);
static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
				   enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static ssize_t ahci_show_host_caps(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_cap2(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_version(struct device *dev,
				      struct device_attribute *attr, char *buf);
static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf);
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size);

/* Read-only register dumps plus the read/write enclosure-management buffer. */
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
		   ahci_read_em_buffer, ahci_store_em_buffer);
123
/* sysfs attributes hung off each SCSI host: generic libata LPM/EM attrs
 * plus the AHCI-specific register dumps defined above.  NULL-terminated. */
static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	&dev_attr_ahci_host_caps,
	&dev_attr_ahci_host_cap2,
	&dev_attr_ahci_host_version,
	&dev_attr_ahci_port_cmd,
	&dev_attr_em_buffer,
	NULL	/* sentinel */
};
135
/* Per-SCSI-device sysfs attributes.  NULL-terminated. */
static struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL	/* sentinel */
};
141
/*
 * SCSI host template shared by all AHCI front-ends (hence exported).
 * Starts from the libata NCQ template and overrides queue depth and
 * S/G limits with the AHCI hardware maxima from ahci.h.
 */
struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT("ahci"),
	.can_queue		= AHCI_MAX_CMDS - 1,
	.sg_tablesize		= AHCI_MAX_SG,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.shost_attrs		= ahci_shost_attrs,
	.sdev_attrs		= ahci_sdev_attrs,
};
EXPORT_SYMBOL_GPL(ahci_sht);
151
/*
 * libata port operations common to all AHCI HBAs, inheriting the
 * port-multiplier-aware defaults and overriding everything that touches
 * AHCI registers.  Exported so PCI/platform front-ends can inherit it.
 */
struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= ahci_pmp_qc_defer,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	.enable_pm		= ahci_enable_alpm,
	.disable_pm		= ahci_disable_alpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);
189
/* Enclosure-management LED messaging, on by default; read-only param. */
int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
196
/*
 * ahci_enable_ahci - make sure HOST_CTL.AHCI_EN is set
 * @mmio: HBA MMIO base
 *
 * Nothing AHCI-specific (CAP, HOST_RESET, ...) may be accessed until
 * this bit is on.  Warns if the bit refuses to stick after 5 attempts.
 */
static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}
221
222static ssize_t ahci_show_host_caps(struct device *dev,
223 struct device_attribute *attr, char *buf)
224{
225 struct Scsi_Host *shost = class_to_shost(dev);
226 struct ata_port *ap = ata_shost_to_port(shost);
227 struct ahci_host_priv *hpriv = ap->host->private_data;
228
229 return sprintf(buf, "%x\n", hpriv->cap);
230}
231
232static ssize_t ahci_show_host_cap2(struct device *dev,
233 struct device_attribute *attr, char *buf)
234{
235 struct Scsi_Host *shost = class_to_shost(dev);
236 struct ata_port *ap = ata_shost_to_port(shost);
237 struct ahci_host_priv *hpriv = ap->host->private_data;
238
239 return sprintf(buf, "%x\n", hpriv->cap2);
240}
241
242static ssize_t ahci_show_host_version(struct device *dev,
243 struct device_attribute *attr, char *buf)
244{
245 struct Scsi_Host *shost = class_to_shost(dev);
246 struct ata_port *ap = ata_shost_to_port(shost);
247 struct ahci_host_priv *hpriv = ap->host->private_data;
248 void __iomem *mmio = hpriv->mmio;
249
250 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
251}
252
253static ssize_t ahci_show_port_cmd(struct device *dev,
254 struct device_attribute *attr, char *buf)
255{
256 struct Scsi_Host *shost = class_to_shost(dev);
257 struct ata_port *ap = ata_shost_to_port(shost);
258 void __iomem *port_mmio = ahci_port_base(ap);
259
260 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
261}
262
/*
 * ahci_read_em_buffer - sysfs read of the enclosure-management buffer
 *
 * Only valid for SGPIO-type EM; returns -EINVAL otherwise or while a
 * transmit is pending, -EAGAIN until the hardware flags a message as
 * received (EM_CTL_MR).  Copies the EM message area out 32 bits at a
 * time under ap->lock.  Returns the number of bytes placed in @buf.
 */
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	/* EM must be present, idle (no transmit in flight) and SGPIO */
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	/* no message received yet - caller should retry */
	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EAGAIN;
	}

	/* NOTE(review): with separate tx/rx areas (SMB clear) the receive
	 * buffer apparently follows the transmit buffer - confirm vs spec */
	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_printk(ap, KERN_WARNING,
					"EM read buffer size too large: "
					"buffer size %u, page size %lu\n",
					hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	/* unpack each 32-bit MMIO word into four bytes, LSB first */
	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return i;
}
317
318static ssize_t ahci_store_em_buffer(struct device *dev,
319 struct device_attribute *attr,
320 const char *buf, size_t size)
321{
322 struct Scsi_Host *shost = class_to_shost(dev);
323 struct ata_port *ap = ata_shost_to_port(shost);
324 struct ahci_host_priv *hpriv = ap->host->private_data;
325 void __iomem *mmio = hpriv->mmio;
326 void __iomem *em_mmio = mmio + hpriv->em_loc;
327 u32 em_ctl, msg;
328 unsigned long flags;
329 int i;
330
331 /* check size validity */
332 if (!(ap->flags & ATA_FLAG_EM) ||
333 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
334 size % 4 || size > hpriv->em_buf_sz)
335 return -EINVAL;
336
337 spin_lock_irqsave(ap->lock, flags);
338
339 em_ctl = readl(mmio + HOST_EM_CTL);
340 if (em_ctl & EM_CTL_TM) {
341 spin_unlock_irqrestore(ap->lock, flags);
342 return -EBUSY;
343 }
344
345 for (i = 0; i < size; i += 4) {
346 msg = buf[i] | buf[i + 1] << 8 |
347 buf[i + 2] << 16 | buf[i + 3] << 24;
348 writel(msg, em_mmio + i);
349 }
350
351 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
352
353 spin_unlock_irqrestore(ap->lock, flags);
354
355 return size;
356}
357
358/**
359 * ahci_save_initial_config - Save and fixup initial config values
360 * @dev: target AHCI device
361 * @hpriv: host private area to store config values
362 * @force_port_map: force port map to a specified value
363 * @mask_port_map: mask out particular bits from port map
364 *
365 * Some registers containing configuration info might be setup by
366 * BIOS and might be cleared on reset. This function saves the
367 * initial values of those registers into @hpriv such that they
368 * can be restored after controller reset.
369 *
370 * If inconsistent, config values are fixed up by this function.
371 *
372 * LOCKING:
373 * None.
374 */
375void ahci_save_initial_config(struct device *dev,
376 struct ahci_host_priv *hpriv,
377 unsigned int force_port_map,
378 unsigned int mask_port_map)
379{
380 void __iomem *mmio = hpriv->mmio;
381 u32 cap, cap2, vers, port_map;
382 int i;
383
384 /* make sure AHCI mode is enabled before accessing CAP */
385 ahci_enable_ahci(mmio);
386
387 /* Values prefixed with saved_ are written back to host after
388 * reset. Values without are used for driver operation.
389 */
390 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
391 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
392
393 /* CAP2 register is only defined for AHCI 1.2 and later */
394 vers = readl(mmio + HOST_VERSION);
395 if ((vers >> 16) > 1 ||
396 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
397 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
398 else
399 hpriv->saved_cap2 = cap2 = 0;
400
401 /* some chips have errata preventing 64bit use */
402 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
403 dev_printk(KERN_INFO, dev,
404 "controller can't do 64bit DMA, forcing 32bit\n");
405 cap &= ~HOST_CAP_64;
406 }
407
408 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
409 dev_printk(KERN_INFO, dev,
410 "controller can't do NCQ, turning off CAP_NCQ\n");
411 cap &= ~HOST_CAP_NCQ;
412 }
413
414 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
415 dev_printk(KERN_INFO, dev,
416 "controller can do NCQ, turning on CAP_NCQ\n");
417 cap |= HOST_CAP_NCQ;
418 }
419
420 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
421 dev_printk(KERN_INFO, dev,
422 "controller can't do PMP, turning off CAP_PMP\n");
423 cap &= ~HOST_CAP_PMP;
424 }
425
426 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
427 dev_printk(KERN_INFO, dev,
428 "controller can't do SNTF, turning off CAP_SNTF\n");
429 cap &= ~HOST_CAP_SNTF;
430 }
431
432 if (force_port_map && port_map != force_port_map) {
433 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
434 port_map, force_port_map);
435 port_map = force_port_map;
436 }
437
438 if (mask_port_map) {
439 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
440 port_map,
441 port_map & mask_port_map);
442 port_map &= mask_port_map;
443 }
444
445 /* cross check port_map and cap.n_ports */
446 if (port_map) {
447 int map_ports = 0;
448
449 for (i = 0; i < AHCI_MAX_PORTS; i++)
450 if (port_map & (1 << i))
451 map_ports++;
452
453 /* If PI has more ports than n_ports, whine, clear
454 * port_map and let it be generated from n_ports.
455 */
456 if (map_ports > ahci_nr_ports(cap)) {
457 dev_printk(KERN_WARNING, dev,
458 "implemented port map (0x%x) contains more "
459 "ports than nr_ports (%u), using nr_ports\n",
460 port_map, ahci_nr_ports(cap));
461 port_map = 0;
462 }
463 }
464
465 /* fabricate port_map from cap.nr_ports */
466 if (!port_map) {
467 port_map = (1 << ahci_nr_ports(cap)) - 1;
468 dev_printk(KERN_WARNING, dev,
469 "forcing PORTS_IMPL to 0x%x\n", port_map);
470
471 /* write the fixed up value to the PI register */
472 hpriv->saved_port_map = port_map;
473 }
474
475 /* record values to use during operation */
476 hpriv->cap = cap;
477 hpriv->cap2 = cap2;
478 hpriv->port_map = port_map;
479}
480EXPORT_SYMBOL_GPL(ahci_save_initial_config);
481
482/**
483 * ahci_restore_initial_config - Restore initial config
484 * @host: target ATA host
485 *
486 * Restore initial config stored by ahci_save_initial_config().
487 *
488 * LOCKING:
489 * None.
490 */
491static void ahci_restore_initial_config(struct ata_host *host)
492{
493 struct ahci_host_priv *hpriv = host->private_data;
494 void __iomem *mmio = hpriv->mmio;
495
496 writel(hpriv->saved_cap, mmio + HOST_CAP);
497 if (hpriv->saved_cap2)
498 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
499 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
500 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
501}
502
503static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
504{
505 static const int offset[] = {
506 [SCR_STATUS] = PORT_SCR_STAT,
507 [SCR_CONTROL] = PORT_SCR_CTL,
508 [SCR_ERROR] = PORT_SCR_ERR,
509 [SCR_ACTIVE] = PORT_SCR_ACT,
510 [SCR_NOTIFICATION] = PORT_SCR_NTF,
511 };
512 struct ahci_host_priv *hpriv = ap->host->private_data;
513
514 if (sc_reg < ARRAY_SIZE(offset) &&
515 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
516 return offset[sc_reg];
517 return 0;
518}
519
520static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
521{
522 void __iomem *port_mmio = ahci_port_base(link->ap);
523 int offset = ahci_scr_offset(link->ap, sc_reg);
524
525 if (offset) {
526 *val = readl(port_mmio + offset);
527 return 0;
528 }
529 return -EINVAL;
530}
531
532static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
533{
534 void __iomem *port_mmio = ahci_port_base(link->ap);
535 int offset = ahci_scr_offset(link->ap, sc_reg);
536
537 if (offset) {
538 writel(val, port_mmio + offset);
539 return 0;
540 }
541 return -EINVAL;
542}
543
544static int ahci_is_device_present(void __iomem *port_mmio)
545{
546 u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
547
548 /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
549 if (status & (ATA_BUSY | ATA_DRQ))
550 return 0;
551
552 /* Make sure PxSSTS.DET is 3h */
553 status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
554 if (status != 3)
555 return 0;
556 return 1;
557}
558
/*
 * ahci_start_engine - start command-list DMA processing (PxCMD.ST)
 * @ap: port to start
 *
 * No-op when no ready device is attached (BSY/DRQ set or no link).
 */
void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	if (!ahci_is_device_present(port_mmio))
		return;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}
EXPORT_SYMBOL_GPL(ahci_start_engine);
574
/*
 * ahci_stop_engine - stop command-list DMA processing
 * @ap: port to stop
 *
 * Clears PxCMD.ST and waits for PxCMD.CR (LIST_ON) to drop.
 * Returns 0 on success or if already idle, -EIO if the engine
 * fails to stop within 500ms.
 */
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
599
/*
 * ahci_start_fis_rx - program DMA addresses and enable FIS reception
 * @ap: port to set up
 *
 * Writes the command-list and received-FIS DMA addresses (high dwords
 * only on 64-bit capable HBAs), then sets PxCMD.FRE.
 */
static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
626
/*
 * ahci_stop_fis_rx - disable FIS reception on a port
 * @ap: port to stop
 *
 * Clears PxCMD.FRE and waits for PxCMD.FR (FIS_ON) to drop.
 * Returns 0 on success, -EBUSY if it does not stop within 1s.
 */
static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
645
/*
 * ahci_power_up - spin up the device (if staggered spinup is supported)
 * and force the interface into the active state via PxCMD.ICC.
 */
static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
663
/*
 * ahci_disable_alpm - turn off aggressive link power management
 * @ap: port to configure
 *
 * Clears ALPE/ASP, forces the link active, clears any stale PhyRdy
 * interrupt/SError state accumulated during LPM transitions, and
 * re-enables PhyRdy (hotplug) interrupts.
 */
static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);	/* posted-write flush */

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn of link pm temporarily
	 */
}
712
/*
 * ahci_enable_alpm - configure aggressive link power management
 * @ap: port to configure
 * @policy: desired link PM policy
 *
 * MIN_POWER selects SLUMBER (ASP set), MEDIUM_POWER selects PARTIAL;
 * MAX_PERFORMANCE/NOT_AVAILABLE simply disable ALPM.  PhyRdy
 * interrupts are masked while ALPM is on, since LPM transitions
 * generate spurious PhyRdy changes.  Returns 0 or -EINVAL.
 */
static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);	/* posted-write flush */

	/* IPM bits should be set by libata-core */
	return 0;
}
787
#ifdef CONFIG_PM
/*
 * ahci_power_down - put the port into listen mode for suspend
 * @ap: port to power down
 *
 * Only meaningful with staggered-spinup support; clears PxSCTL.DET
 * and then PxCMD.SUD per the AHCI listen-mode sequence.
 */
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif
809
/*
 * ahci_start_port - bring a port into operating state
 * @ap: port to start
 *
 * Enables FIS reception and the command DMA engine, then restores the
 * enclosure-management LED state per link (retrying while the EM
 * transmit hardware is busy) and arms software activity blinking.
 */
static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit maybe busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ahci_transmit_led_message(ap,
							       emp->led_state,
							       4);
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);

}
847
/*
 * Quiesce a port: stop the command DMA engine, then FIS reception.
 * On failure *emsg is pointed at a description and the error returned.
 */
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int ret;

	/* disable DMA first */
	ret = ahci_stop_engine(ap);
	if (ret) {
		*emsg = "failed to stop engine";
		return ret;
	}

	/* then disable FIS reception */
	ret = ahci_stop_fis_rx(ap);
	if (ret)
		*emsg = "failed stop FIS RX";

	return ret;
}
868
/*
 * ahci_reset_controller - perform a global HBA reset
 * @host: target ATA host
 *
 * Sets HOST_CTL.HR and polls for it to self-clear (1s budget), then
 * re-enables AHCI mode and restores BIOS-programmed config registers.
 * Honors the skip_host_reset module parameter.  Returns 0 or -EIO.
 */
int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * to perform host reset, OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);
917
918static void ahci_sw_activity(struct ata_link *link)
919{
920 struct ata_port *ap = link->ap;
921 struct ahci_port_priv *pp = ap->private_data;
922 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
923
924 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
925 return;
926
927 emp->activity++;
928 if (!timer_pending(&emp->timer))
929 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
930}
931
/*
 * Timer callback driving the software activity LED: toggles the LED
 * while activity is seen (re-arming itself every 100ms), otherwise
 * parks the LED in the policy's idle state.  @arg is the ata_link
 * registered in ahci_init_sw_activity().
 */
static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	/* address the message to this port/PMP slot */
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	/* transmit outside the lock; it takes ap->lock itself */
	ahci_transmit_led_message(ap, led_message, 4);
}
975
/*
 * Arm software-activity LED support for a link: reset the activity
 * counters, bind the blink timer, and flag the link if its blink
 * policy is enabled.
 */
static void ahci_init_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* init activity stats, setup timer */
	emp->saved_activity = emp->activity = 0;
	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

	/* check our blink policy and set flag for link if it's enabled */
	if (emp->blink_policy)
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
}
990
991int ahci_reset_em(struct ata_host *host)
992{
993 struct ahci_host_priv *hpriv = host->private_data;
994 void __iomem *mmio = hpriv->mmio;
995 u32 em_ctl;
996
997 em_ctl = readl(mmio + HOST_EM_CTL);
998 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
999 return -EINVAL;
1000
1001 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1002 return 0;
1003}
1004EXPORT_SYMBOL_GPL(ahci_reset_em);
1005
/*
 * ahci_transmit_led_message - send an LED-protocol EM message
 * @ap: target port
 * @state: encoded LED state (slot in bits 8-15, value bits above)
 * @size: byte count to report back on success
 *
 * Caches @state in the per-slot em_priv and, for LED-type EM, writes
 * the 2-dword message to the EM buffer and kicks a transmit.  Returns
 * @size, -EINVAL for a bad slot, or -EBUSY if a transmit is pending.
 */
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc+4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}
1063
1064static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1065{
1066 struct ahci_port_priv *pp = ap->private_data;
1067 struct ata_link *link;
1068 struct ahci_em_priv *emp;
1069 int rc = 0;
1070
1071 ata_for_each_link(link, ap, EDGE) {
1072 emp = &pp->em_priv[link->pmp];
1073 rc += sprintf(buf, "%lx\n", emp->led_state);
1074 }
1075 return rc;
1076}
1077
1078static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1079 size_t size)
1080{
1081 int state;
1082 int pmp;
1083 struct ahci_port_priv *pp = ap->private_data;
1084 struct ahci_em_priv *emp;
1085
1086 state = simple_strtoul(buf, NULL, 0);
1087
1088 /* get the slot number from the message */
1089 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1090 if (pmp < EM_MAX_SLOTS)
1091 emp = &pp->em_priv[pmp];
1092 else
1093 return -EINVAL;
1094
1095 /* mask off the activity bits if we are in sw_activity
1096 * mode, user should turn off sw_activity before setting
1097 * activity led through em_message
1098 */
1099 if (emp->blink_policy)
1100 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1101
1102 return ahci_transmit_led_message(ap, state, size);
1103}
1104
1105static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1106{
1107 struct ata_link *link = dev->link;
1108 struct ata_port *ap = link->ap;
1109 struct ahci_port_priv *pp = ap->private_data;
1110 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1111 u32 port_led_state = emp->led_state;
1112
1113 /* save the desired Activity LED behavior */
1114 if (val == OFF) {
1115 /* clear LFLAG */
1116 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1117
1118 /* set the LED to OFF */
1119 port_led_state &= EM_MSG_LED_VALUE_OFF;
1120 port_led_state |= (ap->port_no | (link->pmp << 8));
1121 ahci_transmit_led_message(ap, port_led_state, 4);
1122 } else {
1123 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1124 if (val == BLINK_OFF) {
1125 /* set LED to ON for idle */
1126 port_led_state &= EM_MSG_LED_VALUE_OFF;
1127 port_led_state |= (ap->port_no | (link->pmp << 8));
1128 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1129 ahci_transmit_led_message(ap, port_led_state, 4);
1130 }
1131 }
1132 emp->blink_policy = val;
1133 return 0;
1134}
1135
1136static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1137{
1138 struct ata_link *link = dev->link;
1139 struct ata_port *ap = link->ap;
1140 struct ahci_port_priv *pp = ap->private_data;
1141 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1142
1143 /* display the saved value of activity behavior for this
1144 * disk.
1145 */
1146 return sprintf(buf, "%d\n", emp->blink_policy);
1147}
1148
/*
 * Bring one port into a known, quiescent state during controller init:
 * stop it, then acknowledge any stale error/interrupt status at both the
 * port and host level so nothing spurious fires once IRQs are enabled.
 */
static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_warn(dev, "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	/* ack this port's pending bit in the host-level status register */
	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}
1175
1176void ahci_init_controller(struct ata_host *host)
1177{
1178 struct ahci_host_priv *hpriv = host->private_data;
1179 void __iomem *mmio = hpriv->mmio;
1180 int i;
1181 void __iomem *port_mmio;
1182 u32 tmp;
1183
1184 for (i = 0; i < host->n_ports; i++) {
1185 struct ata_port *ap = host->ports[i];
1186
1187 port_mmio = ahci_port_base(ap);
1188 if (ata_port_is_dummy(ap))
1189 continue;
1190
1191 ahci_port_init(host->dev, ap, i, mmio, port_mmio);
1192 }
1193
1194 tmp = readl(mmio + HOST_CTL);
1195 VPRINTK("HOST_CTL 0x%x\n", tmp);
1196 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1197 tmp = readl(mmio + HOST_CTL);
1198 VPRINTK("HOST_CTL 0x%x\n", tmp);
1199}
1200EXPORT_SYMBOL_GPL(ahci_init_controller);
1201
1202static void ahci_dev_config(struct ata_device *dev)
1203{
1204 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1205
1206 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1207 dev->max_sectors = 255;
1208 ata_dev_printk(dev, KERN_INFO,
1209 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1210 }
1211}
1212
1213static unsigned int ahci_dev_classify(struct ata_port *ap)
1214{
1215 void __iomem *port_mmio = ahci_port_base(ap);
1216 struct ata_taskfile tf;
1217 u32 tmp;
1218
1219 tmp = readl(port_mmio + PORT_SIG);
1220 tf.lbah = (tmp >> 24) & 0xff;
1221 tf.lbam = (tmp >> 16) & 0xff;
1222 tf.lbal = (tmp >> 8) & 0xff;
1223 tf.nsect = (tmp) & 0xff;
1224
1225 return ata_dev_classify(&tf);
1226}
1227
1228static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1229 u32 opts)
1230{
1231 dma_addr_t cmd_tbl_dma;
1232
1233 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1234
1235 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1236 pp->cmd_slot[tag].status = 0;
1237 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1238 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1239}
1240
/*
 * Kick a wedged port: stop the DMA engine, issue a Command List
 * Override (CLO) if the device still looks busy or a PMP is attached
 * (AHCI-1.3 9.2), then restart the engine on every exit path.
 *
 * Returns 0 on success, -EOPNOTSUPP when CLO is required but the HBA
 * lacks HOST_CAP_CLO, or -EIO when CLO fails to self-clear within 500ms.
 */
int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	/* CLO self-clears when the override completes */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
1285
/*
 * Build a command FIS in slot 0 and issue it.  With a nonzero
 * @timeout_msec the PORT_CMD_ISSUE bit is polled for completion and the
 * engine is kicked on timeout; with zero timeout the issue write is only
 * posted and flushed, not waited on.
 *
 * Returns 0 on success (or fire-and-forget issue), -EBUSY on timeout.
 */
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		/* wait for the issue bit to self-clear */
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
1315
1316int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1317 int pmp, unsigned long deadline,
1318 int (*check_ready)(struct ata_link *link))
1319{
1320 struct ata_port *ap = link->ap;
1321 struct ahci_host_priv *hpriv = ap->host->private_data;
1322 const char *reason = NULL;
1323 unsigned long now, msecs;
1324 struct ata_taskfile tf;
1325 int rc;
1326
1327 DPRINTK("ENTER\n");
1328
1329 /* prepare for SRST (AHCI-1.1 10.4.1) */
1330 rc = ahci_kick_engine(ap);
1331 if (rc && rc != -EOPNOTSUPP)
1332 ata_link_printk(link, KERN_WARNING,
1333 "failed to reset engine (errno=%d)\n", rc);
1334
1335 ata_tf_init(link->device, &tf);
1336
1337 /* issue the first D2H Register FIS */
1338 msecs = 0;
1339 now = jiffies;
1340 if (time_after(now, deadline))
1341 msecs = jiffies_to_msecs(deadline - now);
1342
1343 tf.ctl |= ATA_SRST;
1344 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1345 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1346 rc = -EIO;
1347 reason = "1st FIS failed";
1348 goto fail;
1349 }
1350
1351 /* spec says at least 5us, but be generous and sleep for 1ms */
1352 msleep(1);
1353
1354 /* issue the second D2H Register FIS */
1355 tf.ctl &= ~ATA_SRST;
1356 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1357
1358 /* wait for link to become ready */
1359 rc = ata_wait_after_reset(link, deadline, check_ready);
1360 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1361 /*
1362 * Workaround for cases where link online status can't
1363 * be trusted. Treat device readiness timeout as link
1364 * offline.
1365 */
1366 ata_link_printk(link, KERN_INFO,
1367 "device not ready, treating as offline\n");
1368 *class = ATA_DEV_NONE;
1369 } else if (rc) {
1370 /* link occupied, -ENODEV too is an error */
1371 reason = "device not ready";
1372 goto fail;
1373 } else
1374 *class = ahci_dev_classify(ap);
1375
1376 DPRINTK("EXIT, class=%u\n", *class);
1377 return 0;
1378
1379 fail:
1380 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1381 return rc;
1382}
1383
1384int ahci_check_ready(struct ata_link *link)
1385{
1386 void __iomem *port_mmio = ahci_port_base(link->ap);
1387 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1388
1389 return ata_check_ready(status);
1390}
1391EXPORT_SYMBOL_GPL(ahci_check_ready);
1392
1393static int ahci_softreset(struct ata_link *link, unsigned int *class,
1394 unsigned long deadline)
1395{
1396 int pmp = sata_srst_pmp(link);
1397
1398 DPRINTK("ENTER\n");
1399
1400 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1401}
1402EXPORT_SYMBOL_GPL(ahci_do_softreset);
1403
/*
 * AHCI hardreset: stop the engine, pre-load the D2H RX area with a
 * BSY-looking taskfile so readiness isn't declared on stale data, drive
 * the COMRESET via sata_link_hardreset(), then restart the engine and
 * classify the device if the link came up.
 */
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;	/* BSY set so ahci_check_ready sees "not ready" */
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
1435
1436static void ahci_postreset(struct ata_link *link, unsigned int *class)
1437{
1438 struct ata_port *ap = link->ap;
1439 void __iomem *port_mmio = ahci_port_base(ap);
1440 u32 new_tmp, tmp;
1441
1442 ata_std_postreset(link, class);
1443
1444 /* Make sure port's ATAPI bit is set appropriately */
1445 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1446 if (*class == ATA_DEV_ATAPI)
1447 new_tmp |= PORT_CMD_ATAPI;
1448 else
1449 new_tmp &= ~PORT_CMD_ATAPI;
1450 if (new_tmp != tmp) {
1451 writel(new_tmp, port_mmio + PORT_CMD);
1452 readl(port_mmio + PORT_CMD); /* flush */
1453 }
1454}
1455
1456static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1457{
1458 struct scatterlist *sg;
1459 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1460 unsigned int si;
1461
1462 VPRINTK("ENTER\n");
1463
1464 /*
1465 * Next, the S/G list.
1466 */
1467 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1468 dma_addr_t addr = sg_dma_address(sg);
1469 u32 sg_len = sg_dma_len(sg);
1470
1471 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1472 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1473 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1474 }
1475
1476 return si;
1477}
1478
1479static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
1480{
1481 struct ata_port *ap = qc->ap;
1482 struct ahci_port_priv *pp = ap->private_data;
1483
1484 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
1485 return ata_std_qc_defer(qc);
1486 else
1487 return sata_pmp_qc_defer_cmd_switch(qc);
1488}
1489
1490static void ahci_qc_prep(struct ata_queued_cmd *qc)
1491{
1492 struct ata_port *ap = qc->ap;
1493 struct ahci_port_priv *pp = ap->private_data;
1494 int is_atapi = ata_is_atapi(qc->tf.protocol);
1495 void *cmd_tbl;
1496 u32 opts;
1497 const u32 cmd_fis_len = 5; /* five dwords */
1498 unsigned int n_elem;
1499
1500 /*
1501 * Fill in command table information. First, the header,
1502 * a SATA Register - Host to Device command FIS.
1503 */
1504 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1505
1506 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1507 if (is_atapi) {
1508 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1509 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1510 }
1511
1512 n_elem = 0;
1513 if (qc->flags & ATA_QCFLAG_DMAMAP)
1514 n_elem = ahci_fill_sg(qc, cmd_tbl);
1515
1516 /*
1517 * Fill in command slot information.
1518 */
1519 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1520 if (qc->tf.flags & ATA_TFLAG_WRITE)
1521 opts |= AHCI_CMD_WRITE;
1522 if (is_atapi)
1523 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1524
1525 ahci_fill_cmd_slot(pp, qc->tag, opts);
1526}
1527
1528static void ahci_fbs_dec_intr(struct ata_port *ap)
1529{
1530 struct ahci_port_priv *pp = ap->private_data;
1531 void __iomem *port_mmio = ahci_port_base(ap);
1532 u32 fbs = readl(port_mmio + PORT_FBS);
1533 int retries = 3;
1534
1535 DPRINTK("ENTER\n");
1536 BUG_ON(!pp->fbs_enabled);
1537
1538 /* time to wait for DEC is not specified by AHCI spec,
1539 * add a retry loop for safety.
1540 */
1541 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
1542 fbs = readl(port_mmio + PORT_FBS);
1543 while ((fbs & PORT_FBS_DEC) && retries--) {
1544 udelay(1);
1545 fbs = readl(port_mmio + PORT_FBS);
1546 }
1547
1548 if (fbs & PORT_FBS_DEC)
1549 dev_printk(KERN_ERR, ap->host->dev,
1550 "failed to clear device error\n");
1551}
1552
/*
 * Handle an error interrupt for @ap: determine which link the error
 * belongs to (via the FBS device-with-error field when FBS is on,
 * otherwise the active link), translate the PORT_IRQ_* bits into libata
 * err_mask/EH actions, and hand off to EH by freezing or aborting.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		/* single-device error: attribute it to that PMP link and
		 * remember that PORT_FBS_DEC must be issued afterwards */
		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link. There's no active qc on NCQ errors. It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		/* single-device FBS error: abort only that link and
		 * clear the error so other devices can keep going */
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
1664
/*
 * Per-port interrupt service: ack the port's IRQ status, route errors
 * to ahci_error_intr(), emulate SNotification from the SDB FIS RX area
 * when the HBA lacks HOST_CAP_SNTF, and complete finished commands by
 * comparing hardware issue/active registers against libata's view.
 */
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
		(status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some constrollers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}


	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
1757
/*
 * Top-level AHCI interrupt handler: read the host-level pending mask,
 * service every implemented port with a pending bit, then ack the
 * host-level latch.  Returns IRQ_HANDLED if any port was serviced.
 */
irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	/* only service ports that are actually implemented */
	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					"interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(ahci_interrupt);
1818
/*
 * Issue a prepared command: for NCQ, set the tag's SActive bit before
 * the issue write (ordering required by AHCI); for FBS, retarget the
 * port's device field when the destination PMP device changed; finally
 * ring the issue register and update the sw-activity LED.
 *
 * Always returns 0 (the command is accepted by hardware).
 */
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		/* point the port at the PMP device this command targets */
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}
1848
1849static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1850{
1851 struct ahci_port_priv *pp = qc->ap->private_data;
1852 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1853
1854 if (pp->fbs_enabled)
1855 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
1856
1857 ata_tf_from_fis(d2h_fis, &qc->result_tf);
1858 return true;
1859}
1860
1861static void ahci_freeze(struct ata_port *ap)
1862{
1863 void __iomem *port_mmio = ahci_port_base(ap);
1864
1865 /* turn IRQ off */
1866 writel(0, port_mmio + PORT_IRQ_MASK);
1867}
1868
1869static void ahci_thaw(struct ata_port *ap)
1870{
1871 struct ahci_host_priv *hpriv = ap->host->private_data;
1872 void __iomem *mmio = hpriv->mmio;
1873 void __iomem *port_mmio = ahci_port_base(ap);
1874 u32 tmp;
1875 struct ahci_port_priv *pp = ap->private_data;
1876
1877 /* clear IRQ */
1878 tmp = readl(port_mmio + PORT_IRQ_STAT);
1879 writel(tmp, port_mmio + PORT_IRQ_STAT);
1880 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1881
1882 /* turn IRQ back on */
1883 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1884}
1885
1886static void ahci_error_handler(struct ata_port *ap)
1887{
1888 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1889 /* restart engine */
1890 ahci_stop_engine(ap);
1891 ahci_start_engine(ap);
1892 }
1893
1894 sata_pmp_error_handler(ap);
1895}
1896
1897static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1898{
1899 struct ata_port *ap = qc->ap;
1900
1901 /* make DMA engine forget about the failed command */
1902 if (qc->flags & ATA_QCFLAG_FAILED)
1903 ahci_kick_engine(ap);
1904}
1905
1906static void ahci_enable_fbs(struct ata_port *ap)
1907{
1908 struct ahci_port_priv *pp = ap->private_data;
1909 void __iomem *port_mmio = ahci_port_base(ap);
1910 u32 fbs;
1911 int rc;
1912
1913 if (!pp->fbs_supported)
1914 return;
1915
1916 fbs = readl(port_mmio + PORT_FBS);
1917 if (fbs & PORT_FBS_EN) {
1918 pp->fbs_enabled = true;
1919 pp->fbs_last_dev = -1; /* initialization */
1920 return;
1921 }
1922
1923 rc = ahci_stop_engine(ap);
1924 if (rc)
1925 return;
1926
1927 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
1928 fbs = readl(port_mmio + PORT_FBS);
1929 if (fbs & PORT_FBS_EN) {
1930 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
1931 pp->fbs_enabled = true;
1932 pp->fbs_last_dev = -1; /* initialization */
1933 } else
1934 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
1935
1936 ahci_start_engine(ap);
1937}
1938
1939static void ahci_disable_fbs(struct ata_port *ap)
1940{
1941 struct ahci_port_priv *pp = ap->private_data;
1942 void __iomem *port_mmio = ahci_port_base(ap);
1943 u32 fbs;
1944 int rc;
1945
1946 if (!pp->fbs_supported)
1947 return;
1948
1949 fbs = readl(port_mmio + PORT_FBS);
1950 if ((fbs & PORT_FBS_EN) == 0) {
1951 pp->fbs_enabled = false;
1952 return;
1953 }
1954
1955 rc = ahci_stop_engine(ap);
1956 if (rc)
1957 return;
1958
1959 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
1960 fbs = readl(port_mmio + PORT_FBS);
1961 if (fbs & PORT_FBS_EN)
1962 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
1963 else {
1964 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
1965 pp->fbs_enabled = false;
1966 }
1967
1968 ahci_start_engine(ap);
1969}
1970
1971static void ahci_pmp_attach(struct ata_port *ap)
1972{
1973 void __iomem *port_mmio = ahci_port_base(ap);
1974 struct ahci_port_priv *pp = ap->private_data;
1975 u32 cmd;
1976
1977 cmd = readl(port_mmio + PORT_CMD);
1978 cmd |= PORT_CMD_PMP;
1979 writel(cmd, port_mmio + PORT_CMD);
1980
1981 ahci_enable_fbs(ap);
1982
1983 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1984 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1985}
1986
1987static void ahci_pmp_detach(struct ata_port *ap)
1988{
1989 void __iomem *port_mmio = ahci_port_base(ap);
1990 struct ahci_port_priv *pp = ap->private_data;
1991 u32 cmd;
1992
1993 ahci_disable_fbs(ap);
1994
1995 cmd = readl(port_mmio + PORT_CMD);
1996 cmd &= ~PORT_CMD_PMP;
1997 writel(cmd, port_mmio + PORT_CMD);
1998
1999 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2000 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2001}
2002
/*
 * Resume a port: power it up, start it, and (re)configure the PMP bit
 * to match what is actually attached.  Always returns 0.
 */
static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
2015
#ifdef CONFIG_PM
/*
 * Suspend a port: deinit it and power it down.  If deinit fails the
 * port is restarted so it stays usable, and the error is returned.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
		return rc;
	}

	ahci_power_down(ap);
	return rc;
}
#endif
2033
/*
 * Allocate and lay out per-port DMA memory, probe FBS capability, and
 * bring the port online.  The single coherent allocation is carved into
 * three consecutive regions: the 32-slot command list, the received-FIS
 * area (16x larger with FBS), and one command table with its SG list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from ahci_port_resume().
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else
			dev_printk(KERN_WARNING, dev,
				   "The port is not capable of FBS\n");
	}

	/* FBS needs a separate RX FIS area per PMP device (16 of them) */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2108
2109static void ahci_port_stop(struct ata_port *ap)
2110{
2111 const char *emsg = NULL;
2112 int rc;
2113
2114 /* de-initialize port */
2115 rc = ahci_deinit_port(ap, &emsg);
2116 if (rc)
2117 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2118}
2119
/*
 * Log a human-readable summary of the controller: AHCI version, slot
 * and port counts, max link speed, implemented-port mask, controller
 * class (@scc_s), and a decoded list of capability flags.
 */
void ahci_print_info(struct ata_host *host, const char *scc_s)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 vers, cap, cap2, impl, speed;
	const char *speed_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	cap2 = hpriv->cap2;
	impl = hpriv->port_map;

	/* CAP.ISS (bits 23:20) encodes the max interface speed */
	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else if (speed == 3)
		speed_s = "6";
	else
		speed_s = "?";

	dev_info(host->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
		,

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_info(host->dev,
		"flags: "
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s\n"
		,

		cap & HOST_CAP_64 ? "64bit " : "",
		cap & HOST_CAP_NCQ ? "ncq " : "",
		cap & HOST_CAP_SNTF ? "sntf " : "",
		cap & HOST_CAP_MPS ? "ilck " : "",
		cap & HOST_CAP_SSS ? "stag " : "",
		cap & HOST_CAP_ALPM ? "pm " : "",
		cap & HOST_CAP_LED ? "led " : "",
		cap & HOST_CAP_CLO ? "clo " : "",
		cap & HOST_CAP_ONLY ? "only " : "",
		cap & HOST_CAP_PMP ? "pmp " : "",
		cap & HOST_CAP_FBS ? "fbs " : "",
		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
		cap & HOST_CAP_SSC ? "slum " : "",
		cap & HOST_CAP_PART ? "part " : "",
		cap & HOST_CAP_CCC ? "ccc " : "",
		cap & HOST_CAP_EMS ? "ems " : "",
		cap & HOST_CAP_SXS ? "sxs " : "",
		cap2 & HOST_CAP2_APST ? "apst " : "",
		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
		cap2 & HOST_CAP2_BOH ? "boh " : ""
		);
}
EXPORT_SYMBOL_GPL(ahci_print_info);
2188
2189void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2190 struct ata_port_info *pi)
2191{
2192 u8 messages;
2193 void __iomem *mmio = hpriv->mmio;
2194 u32 em_loc = readl(mmio + HOST_EM_LOC);
2195 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2196
2197 if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2198 return;
2199
2200 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2201
2202 if (messages) {
2203 /* store em_loc */
2204 hpriv->em_loc = ((em_loc >> 16) * 4);
2205 hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2206 hpriv->em_msg_type = messages;
2207 pi->flags |= ATA_FLAG_EM;
2208 if (!(em_ctl & EM_CTL_ALHD))
2209 pi->flags |= ATA_FLAG_SW_ACTIVITY;
2210 }
2211}
2212EXPORT_SYMBOL_GPL(ahci_set_em_messages);
2213
2214MODULE_AUTHOR("Jeff Garzik");
2215MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2216MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a3..c47373f01f89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
65#include <linux/libata.h> 65#include <linux/libata.h>
66#include <asm/byteorder.h> 66#include <asm/byteorder.h>
67#include <linux/cdrom.h> 67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
68 69
69#include "libata.h" 70#include "libata.h"
70 71
@@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
96static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 97static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 98
98unsigned int ata_print_id = 1; 99unsigned int ata_print_id = 1;
99static struct workqueue_struct *ata_wq;
100 100
101struct workqueue_struct *ata_aux_wq; 101struct workqueue_struct *ata_aux_wq;
102 102
@@ -1685,52 +1685,6 @@ unsigned long ata_id_xfermask(const u16 *id)
1685 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1685 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1686} 1686}
1687 1687
1688/**
1689 * ata_pio_queue_task - Queue port_task
1690 * @ap: The ata_port to queue port_task for
1691 * @data: data for @fn to use
1692 * @delay: delay time in msecs for workqueue function
1693 *
1694 * Schedule @fn(@data) for execution after @delay jiffies using
1695 * port_task. There is one port_task per port and it's the
1696 * user(low level driver)'s responsibility to make sure that only
1697 * one task is active at any given time.
1698 *
1699 * libata core layer takes care of synchronization between
1700 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1701 * synchronization.
1702 *
1703 * LOCKING:
1704 * Inherited from caller.
1705 */
1706void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1707{
1708 ap->port_task_data = data;
1709
1710 /* may fail if ata_port_flush_task() in progress */
1711 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1712}
1713
1714/**
1715 * ata_port_flush_task - Flush port_task
1716 * @ap: The ata_port to flush port_task for
1717 *
1718 * After this function completes, port_task is guranteed not to
1719 * be running or scheduled.
1720 *
1721 * LOCKING:
1722 * Kernel thread context (may sleep)
1723 */
1724void ata_port_flush_task(struct ata_port *ap)
1725{
1726 DPRINTK("ENTER\n");
1727
1728 cancel_rearming_delayed_work(&ap->port_task);
1729
1730 if (ata_msg_ctl(ap))
1731 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1732}
1733
1734static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1688static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1735{ 1689{
1736 struct completion *waiting = qc->private_data; 1690 struct completion *waiting = qc->private_data;
@@ -1852,7 +1806,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1852 1806
1853 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1807 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1854 1808
1855 ata_port_flush_task(ap); 1809 ata_sff_flush_pio_task(ap);
1856 1810
1857 if (!rc) { 1811 if (!rc) {
1858 spin_lock_irqsave(ap->lock, flags); 1812 spin_lock_irqsave(ap->lock, flags);
@@ -1906,22 +1860,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1906 ap->qc_active = preempted_qc_active; 1860 ap->qc_active = preempted_qc_active;
1907 ap->nr_active_links = preempted_nr_active_links; 1861 ap->nr_active_links = preempted_nr_active_links;
1908 1862
1909 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1910 * Until those drivers are fixed, we detect the condition
1911 * here, fail the command with AC_ERR_SYSTEM and reenable the
1912 * port.
1913 *
1914 * Note that this doesn't change any behavior as internal
1915 * command failure results in disabling the device in the
1916 * higher layer for LLDDs without new reset/EH callbacks.
1917 *
1918 * Kill the following code as soon as those drivers are fixed.
1919 */
1920 if (ap->flags & ATA_FLAG_DISABLED) {
1921 err_mask |= AC_ERR_SYSTEM;
1922 ata_port_probe(ap);
1923 }
1924
1925 spin_unlock_irqrestore(ap->lock, flags); 1863 spin_unlock_irqrestore(ap->lock, flags);
1926 1864
1927 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1865 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
@@ -2767,8 +2705,6 @@ int ata_bus_probe(struct ata_port *ap)
2767 int rc; 2705 int rc;
2768 struct ata_device *dev; 2706 struct ata_device *dev;
2769 2707
2770 ata_port_probe(ap);
2771
2772 ata_for_each_dev(dev, &ap->link, ALL) 2708 ata_for_each_dev(dev, &ap->link, ALL)
2773 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2709 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2774 2710
@@ -2796,8 +2732,7 @@ int ata_bus_probe(struct ata_port *ap)
2796 ap->ops->phy_reset(ap); 2732 ap->ops->phy_reset(ap);
2797 2733
2798 ata_for_each_dev(dev, &ap->link, ALL) { 2734 ata_for_each_dev(dev, &ap->link, ALL) {
2799 if (!(ap->flags & ATA_FLAG_DISABLED) && 2735 if (dev->class != ATA_DEV_UNKNOWN)
2800 dev->class != ATA_DEV_UNKNOWN)
2801 classes[dev->devno] = dev->class; 2736 classes[dev->devno] = dev->class;
2802 else 2737 else
2803 classes[dev->devno] = ATA_DEV_NONE; 2738 classes[dev->devno] = ATA_DEV_NONE;
@@ -2805,8 +2740,6 @@ int ata_bus_probe(struct ata_port *ap)
2805 dev->class = ATA_DEV_UNKNOWN; 2740 dev->class = ATA_DEV_UNKNOWN;
2806 } 2741 }
2807 2742
2808 ata_port_probe(ap);
2809
2810 /* read IDENTIFY page and configure devices. We have to do the identify 2743 /* read IDENTIFY page and configure devices. We have to do the identify
2811 specific sequence bass-ackwards so that PDIAG- is released by 2744 specific sequence bass-ackwards so that PDIAG- is released by
2812 the slave device */ 2745 the slave device */
@@ -2856,8 +2789,6 @@ int ata_bus_probe(struct ata_port *ap)
2856 ata_for_each_dev(dev, &ap->link, ENABLED) 2789 ata_for_each_dev(dev, &ap->link, ENABLED)
2857 return 0; 2790 return 0;
2858 2791
2859 /* no device present, disable port */
2860 ata_port_disable(ap);
2861 return -ENODEV; 2792 return -ENODEV;
2862 2793
2863 fail: 2794 fail:
@@ -2889,22 +2820,6 @@ int ata_bus_probe(struct ata_port *ap)
2889} 2820}
2890 2821
2891/** 2822/**
2892 * ata_port_probe - Mark port as enabled
2893 * @ap: Port for which we indicate enablement
2894 *
2895 * Modify @ap data structure such that the system
2896 * thinks that the entire port is enabled.
2897 *
2898 * LOCKING: host lock, or some other form of
2899 * serialization.
2900 */
2901
2902void ata_port_probe(struct ata_port *ap)
2903{
2904 ap->flags &= ~ATA_FLAG_DISABLED;
2905}
2906
2907/**
2908 * sata_print_link_status - Print SATA link status 2823 * sata_print_link_status - Print SATA link status
2909 * @link: SATA link to printk link status about 2824 * @link: SATA link to printk link status about
2910 * 2825 *
@@ -2951,26 +2866,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
2951} 2866}
2952 2867
2953/** 2868/**
2954 * ata_port_disable - Disable port.
2955 * @ap: Port to be disabled.
2956 *
2957 * Modify @ap data structure such that the system
2958 * thinks that the entire port is disabled, and should
2959 * never attempt to probe or communicate with devices
2960 * on this port.
2961 *
2962 * LOCKING: host lock, or some other form of
2963 * serialization.
2964 */
2965
2966void ata_port_disable(struct ata_port *ap)
2967{
2968 ap->link.device[0].class = ATA_DEV_NONE;
2969 ap->link.device[1].class = ATA_DEV_NONE;
2970 ap->flags |= ATA_FLAG_DISABLED;
2971}
2972
2973/**
2974 * sata_down_spd_limit - adjust SATA spd limit downward 2869 * sata_down_spd_limit - adjust SATA spd limit downward
2975 * @link: Link to adjust SATA spd limit for 2870 * @link: Link to adjust SATA spd limit for
2976 * @spd_limit: Additional limit 2871 * @spd_limit: Additional limit
@@ -3631,9 +3526,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3631 int (*check_ready)(struct ata_link *link)) 3526 int (*check_ready)(struct ata_link *link))
3632{ 3527{
3633 unsigned long start = jiffies; 3528 unsigned long start = jiffies;
3634 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3529 unsigned long nodev_deadline;
3635 int warned = 0; 3530 int warned = 0;
3636 3531
3532 /* choose which 0xff timeout to use, read comment in libata.h */
3533 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3534 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3535 else
3536 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3537
3637 /* Slave readiness can't be tested separately from master. On 3538 /* Slave readiness can't be tested separately from master. On
3638 * M/S emulation configuration, this function should be called 3539 * M/S emulation configuration, this function should be called
3639 * only on the master and it will handle both master and slave. 3540 * only on the master and it will handle both master and slave.
@@ -3651,12 +3552,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3651 if (ready > 0) 3552 if (ready > 0)
3652 return 0; 3553 return 0;
3653 3554
3654 /* -ENODEV could be transient. Ignore -ENODEV if link 3555 /*
3556 * -ENODEV could be transient. Ignore -ENODEV if link
3655 * is online. Also, some SATA devices take a long 3557 * is online. Also, some SATA devices take a long
3656 * time to clear 0xff after reset. For example, 3558 * time to clear 0xff after reset. Wait for
3657 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum 3559 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3658 * GoVault needs even more than that. Wait for 3560 * offline.
3659 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3660 * 3561 *
3661 * Note that some PATA controllers (pata_ali) explode 3562 * Note that some PATA controllers (pata_ali) explode
3662 * if status register is read more than once when 3563 * if status register is read more than once when
@@ -5558,30 +5459,6 @@ void ata_host_resume(struct ata_host *host)
5558#endif 5459#endif
5559 5460
5560/** 5461/**
5561 * ata_port_start - Set port up for dma.
5562 * @ap: Port to initialize
5563 *
5564 * Called just after data structures for each port are
5565 * initialized. Allocates space for PRD table.
5566 *
5567 * May be used as the port_start() entry in ata_port_operations.
5568 *
5569 * LOCKING:
5570 * Inherited from caller.
5571 */
5572int ata_port_start(struct ata_port *ap)
5573{
5574 struct device *dev = ap->dev;
5575
5576 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5577 GFP_KERNEL);
5578 if (!ap->prd)
5579 return -ENOMEM;
5580
5581 return 0;
5582}
5583
5584/**
5585 * ata_dev_init - Initialize an ata_device structure 5462 * ata_dev_init - Initialize an ata_device structure
5586 * @dev: Device structure to initialize 5463 * @dev: Device structure to initialize
5587 * 5464 *
@@ -5709,12 +5586,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5709 5586
5710 ap->pflags |= ATA_PFLAG_INITIALIZING; 5587 ap->pflags |= ATA_PFLAG_INITIALIZING;
5711 ap->lock = &host->lock; 5588 ap->lock = &host->lock;
5712 ap->flags = ATA_FLAG_DISABLED;
5713 ap->print_id = -1; 5589 ap->print_id = -1;
5714 ap->ctl = ATA_DEVCTL_OBS;
5715 ap->host = host; 5590 ap->host = host;
5716 ap->dev = host->dev; 5591 ap->dev = host->dev;
5717 ap->last_ctl = 0xFF;
5718 5592
5719#if defined(ATA_VERBOSE_DEBUG) 5593#if defined(ATA_VERBOSE_DEBUG)
5720 /* turn on all debugging levels */ 5594 /* turn on all debugging levels */
@@ -5725,11 +5599,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5725 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5599 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5726#endif 5600#endif
5727 5601
5728#ifdef CONFIG_ATA_SFF
5729 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5730#else
5731 INIT_DELAYED_WORK(&ap->port_task, NULL);
5732#endif
5733 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5602 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5734 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5603 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5735 INIT_LIST_HEAD(&ap->eh_done_q); 5604 INIT_LIST_HEAD(&ap->eh_done_q);
@@ -5747,6 +5616,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5747 ap->stats.unhandled_irq = 1; 5616 ap->stats.unhandled_irq = 1;
5748 ap->stats.idle_irq = 1; 5617 ap->stats.idle_irq = 1;
5749#endif 5618#endif
5619 ata_sff_port_init(ap);
5620
5750 return ap; 5621 return ap;
5751} 5622}
5752 5623
@@ -6138,8 +6009,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
6138 struct ata_eh_info *ehi = &ap->link.eh_info; 6009 struct ata_eh_info *ehi = &ap->link.eh_info;
6139 unsigned long flags; 6010 unsigned long flags;
6140 6011
6141 ata_port_probe(ap);
6142
6143 /* kick EH for boot probing */ 6012 /* kick EH for boot probing */
6144 spin_lock_irqsave(ap->lock, flags); 6013 spin_lock_irqsave(ap->lock, flags);
6145 6014
@@ -6663,62 +6532,43 @@ static void __init ata_parse_force_param(void)
6663 6532
6664static int __init ata_init(void) 6533static int __init ata_init(void)
6665{ 6534{
6666 ata_parse_force_param(); 6535 int rc = -ENOMEM;
6667 6536
6668 /* 6537 ata_parse_force_param();
6669 * FIXME: In UP case, there is only one workqueue thread and if you
6670 * have more than one PIO device, latency is bloody awful, with
6671 * occasional multi-second "hiccups" as one PIO device waits for
6672 * another. It's an ugly wart that users DO occasionally complain
6673 * about; luckily most users have at most one PIO polled device.
6674 */
6675 ata_wq = create_workqueue("ata");
6676 if (!ata_wq)
6677 goto free_force_tbl;
6678 6538
6679 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6539 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6680 if (!ata_aux_wq) 6540 if (!ata_aux_wq)
6681 goto free_wq; 6541 goto fail;
6542
6543 rc = ata_sff_init();
6544 if (rc)
6545 goto fail;
6682 6546
6683 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6547 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6684 return 0; 6548 return 0;
6685 6549
6686free_wq: 6550fail:
6687 destroy_workqueue(ata_wq);
6688free_force_tbl:
6689 kfree(ata_force_tbl); 6551 kfree(ata_force_tbl);
6690 return -ENOMEM; 6552 if (ata_aux_wq)
6553 destroy_workqueue(ata_aux_wq);
6554 return rc;
6691} 6555}
6692 6556
6693static void __exit ata_exit(void) 6557static void __exit ata_exit(void)
6694{ 6558{
6559 ata_sff_exit();
6695 kfree(ata_force_tbl); 6560 kfree(ata_force_tbl);
6696 destroy_workqueue(ata_wq);
6697 destroy_workqueue(ata_aux_wq); 6561 destroy_workqueue(ata_aux_wq);
6698} 6562}
6699 6563
6700subsys_initcall(ata_init); 6564subsys_initcall(ata_init);
6701module_exit(ata_exit); 6565module_exit(ata_exit);
6702 6566
6703static unsigned long ratelimit_time; 6567static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6704static DEFINE_SPINLOCK(ata_ratelimit_lock);
6705 6568
6706int ata_ratelimit(void) 6569int ata_ratelimit(void)
6707{ 6570{
6708 int rc; 6571 return __ratelimit(&ratelimit);
6709 unsigned long flags;
6710
6711 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6712
6713 if (time_after(jiffies, ratelimit_time)) {
6714 rc = 1;
6715 ratelimit_time = jiffies + (HZ/5);
6716 } else
6717 rc = 0;
6718
6719 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6720
6721 return rc;
6722} 6572}
6723 6573
6724/** 6574/**
@@ -6826,11 +6676,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6826EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6676EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6827EXPORT_SYMBOL_GPL(ata_mode_string); 6677EXPORT_SYMBOL_GPL(ata_mode_string);
6828EXPORT_SYMBOL_GPL(ata_id_xfermask); 6678EXPORT_SYMBOL_GPL(ata_id_xfermask);
6829EXPORT_SYMBOL_GPL(ata_port_start);
6830EXPORT_SYMBOL_GPL(ata_do_set_mode); 6679EXPORT_SYMBOL_GPL(ata_do_set_mode);
6831EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6680EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6832EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6681EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6833EXPORT_SYMBOL_GPL(ata_port_probe);
6834EXPORT_SYMBOL_GPL(ata_dev_disable); 6682EXPORT_SYMBOL_GPL(ata_dev_disable);
6835EXPORT_SYMBOL_GPL(sata_set_spd); 6683EXPORT_SYMBOL_GPL(sata_set_spd);
6836EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6684EXPORT_SYMBOL_GPL(ata_wait_after_reset);
@@ -6842,7 +6690,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
6842EXPORT_SYMBOL_GPL(ata_std_postreset); 6690EXPORT_SYMBOL_GPL(ata_std_postreset);
6843EXPORT_SYMBOL_GPL(ata_dev_classify); 6691EXPORT_SYMBOL_GPL(ata_dev_classify);
6844EXPORT_SYMBOL_GPL(ata_dev_pair); 6692EXPORT_SYMBOL_GPL(ata_dev_pair);
6845EXPORT_SYMBOL_GPL(ata_port_disable);
6846EXPORT_SYMBOL_GPL(ata_ratelimit); 6693EXPORT_SYMBOL_GPL(ata_ratelimit);
6847EXPORT_SYMBOL_GPL(ata_wait_register); 6694EXPORT_SYMBOL_GPL(ata_wait_register);
6848EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6695EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6864,7 +6711,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
6864EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6711EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6865EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6712EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6866 6713
6867EXPORT_SYMBOL_GPL(ata_pio_queue_task);
6868EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6714EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6869EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6715EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6870EXPORT_SYMBOL_GPL(ata_timing_compute); 6716EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..f77a67303f8b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)
550 550
551 DPRINTK("ENTER\n"); 551 DPRINTK("ENTER\n");
552 552
553 /* synchronize with port task */ 553 /* make sure sff pio task is not running */
554 ata_port_flush_task(ap); 554 ata_sff_flush_pio_task(ap);
555 555
556 /* synchronize with host lock and sort out timeouts */ 556 /* synchronize with host lock and sort out timeouts */
557 557
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
879void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 879void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
880{ 880{
881 struct ata_port *ap = qc->ap; 881 struct ata_port *ap = qc->ap;
882 struct request_queue *q = qc->scsicmd->device->request_queue;
883 unsigned long flags;
882 884
883 WARN_ON(!ap->ops->error_handler); 885 WARN_ON(!ap->ops->error_handler);
884 886
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
890 * Note that ATA_QCFLAG_FAILED is unconditionally set after 892 * Note that ATA_QCFLAG_FAILED is unconditionally set after
891 * this function completes. 893 * this function completes.
892 */ 894 */
895 spin_lock_irqsave(q->queue_lock, flags);
893 blk_abort_request(qc->scsicmd->request); 896 blk_abort_request(qc->scsicmd->request);
897 spin_unlock_irqrestore(q->queue_lock, flags);
894} 898}
895 899
896/** 900/**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1624 } 1628 }
1625 1629
1626 /* okay, this error is ours */ 1630 /* okay, this error is ours */
1631 memset(&tf, 0, sizeof(tf));
1627 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1632 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1628 if (rc) { 1633 if (rc) {
1629 ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1634 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
@@ -3679,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap)
3679 ata_reset_fn_t hardreset = ops->hardreset; 3684 ata_reset_fn_t hardreset = ops->hardreset;
3680 3685
3681 /* ignore built-in hardreset if SCR access is not available */ 3686 /* ignore built-in hardreset if SCR access is not available */
3682 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 3687 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3683 hardreset = NULL; 3688 hardreset = NULL;
3684 3689
3685 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3690 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed86..224faabd7b7e 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
231 return "<unknown>"; 231 return "<unknown>";
232} 232}
233 233
234#define PMP_GSCR_SII_POL 129
235
234static int sata_pmp_configure(struct ata_device *dev, int print_info) 236static int sata_pmp_configure(struct ata_device *dev, int print_info)
235{ 237{
236 struct ata_port *ap = dev->link->ap; 238 struct ata_port *ap = dev->link->ap;
237 u32 *gscr = dev->gscr; 239 u32 *gscr = dev->gscr;
240 u16 vendor = sata_pmp_gscr_vendor(gscr);
241 u16 devid = sata_pmp_gscr_devid(gscr);
238 unsigned int err_mask = 0; 242 unsigned int err_mask = 0;
239 const char *reason; 243 const char *reason;
240 int nr_ports, rc; 244 int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
260 goto fail; 264 goto fail;
261 } 265 }
262 266
267 /* Disable sending Early R_OK.
268 * With "cached read" HDD testing and multiple ports busy on a SATA
269 * host controller, 3726 PMP will very rarely drop a deferred
270 * R_OK that was intended for the host. Symptom will be all
271 * 5 drives under test will timeout, get reset, and recover.
272 */
273 if (vendor == 0x1095 && devid == 0x3726) {
274 u32 reg;
275
276 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
277 if (err_mask) {
278 rc = -EIO;
279 reason = "failed to read Sil3726 Private Register";
280 goto fail;
281 }
282 reg &= ~0x1;
283 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
284 if (err_mask) {
285 rc = -EIO;
286 reason = "failed to write Sil3726 Private Register";
287 goto fail;
288 }
289 }
290
263 if (print_info) { 291 if (print_info) {
264 ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, " 292 ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
265 "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n", 293 "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
266 sata_pmp_spec_rev_str(gscr), 294 sata_pmp_spec_rev_str(gscr), vendor, devid,
267 sata_pmp_gscr_vendor(gscr),
268 sata_pmp_gscr_devid(gscr),
269 sata_pmp_gscr_rev(gscr), 295 sata_pmp_gscr_rev(gscr),
270 nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN], 296 nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
271 gscr[SATA_PMP_GSCR_FEAT]); 297 gscr[SATA_PMP_GSCR_FEAT]);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0088cdeb0b1e..cfa9dd3d7253 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3345,9 +3345,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3345 struct ata_link *link; 3345 struct ata_link *link;
3346 struct ata_device *dev; 3346 struct ata_device *dev;
3347 3347
3348 if (ap->flags & ATA_FLAG_DISABLED)
3349 return;
3350
3351 repeat: 3348 repeat:
3352 ata_for_each_link(link, ap, EDGE) { 3349 ata_for_each_link(link, ap, EDGE) {
3353 ata_for_each_dev(dev, link, ENABLED) { 3350 ata_for_each_dev(dev, link, ENABLED) {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..19ddf924944f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
40 40
41#include "libata.h" 41#include "libata.h"
42 42
43static struct workqueue_struct *ata_sff_wq;
44
43const struct ata_port_operations ata_sff_port_ops = { 45const struct ata_port_operations ata_sff_port_ops = {
44 .inherits = &ata_base_port_ops, 46 .inherits = &ata_base_port_ops,
45 47
46 .qc_prep = ata_sff_qc_prep, 48 .qc_prep = ata_noop_qc_prep,
47 .qc_issue = ata_sff_qc_issue, 49 .qc_issue = ata_sff_qc_issue,
48 .qc_fill_rtf = ata_sff_qc_fill_rtf, 50 .qc_fill_rtf = ata_sff_qc_fill_rtf,
49 51
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
53 .softreset = ata_sff_softreset, 55 .softreset = ata_sff_softreset,
54 .hardreset = sata_sff_hardreset, 56 .hardreset = sata_sff_hardreset,
55 .postreset = ata_sff_postreset, 57 .postreset = ata_sff_postreset,
56 .drain_fifo = ata_sff_drain_fifo,
57 .error_handler = ata_sff_error_handler, 58 .error_handler = ata_sff_error_handler,
58 .post_internal_cmd = ata_sff_post_internal_cmd,
59 59
60 .sff_dev_select = ata_sff_dev_select, 60 .sff_dev_select = ata_sff_dev_select,
61 .sff_check_status = ata_sff_check_status, 61 .sff_check_status = ata_sff_check_status,
@@ -63,178 +63,13 @@ const struct ata_port_operations ata_sff_port_ops = {
63 .sff_tf_read = ata_sff_tf_read, 63 .sff_tf_read = ata_sff_tf_read,
64 .sff_exec_command = ata_sff_exec_command, 64 .sff_exec_command = ata_sff_exec_command,
65 .sff_data_xfer = ata_sff_data_xfer, 65 .sff_data_xfer = ata_sff_data_xfer,
66 .sff_irq_on = ata_sff_irq_on,
67 .sff_irq_clear = ata_sff_irq_clear, 66 .sff_irq_clear = ata_sff_irq_clear,
67 .sff_drain_fifo = ata_sff_drain_fifo,
68 68
69 .lost_interrupt = ata_sff_lost_interrupt, 69 .lost_interrupt = ata_sff_lost_interrupt,
70
71 .port_start = ata_sff_port_start,
72}; 70};
73EXPORT_SYMBOL_GPL(ata_sff_port_ops); 71EXPORT_SYMBOL_GPL(ata_sff_port_ops);
74 72
75const struct ata_port_operations ata_bmdma_port_ops = {
76 .inherits = &ata_sff_port_ops,
77
78 .mode_filter = ata_bmdma_mode_filter,
79
80 .bmdma_setup = ata_bmdma_setup,
81 .bmdma_start = ata_bmdma_start,
82 .bmdma_stop = ata_bmdma_stop,
83 .bmdma_status = ata_bmdma_status,
84};
85EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
86
87const struct ata_port_operations ata_bmdma32_port_ops = {
88 .inherits = &ata_bmdma_port_ops,
89
90 .sff_data_xfer = ata_sff_data_xfer32,
91 .port_start = ata_sff_port_start32,
92};
93EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
94
95/**
96 * ata_fill_sg - Fill PCI IDE PRD table
97 * @qc: Metadata associated with taskfile to be transferred
98 *
99 * Fill PCI IDE PRD (scatter-gather) table with segments
100 * associated with the current disk command.
101 *
102 * LOCKING:
103 * spin_lock_irqsave(host lock)
104 *
105 */
106static void ata_fill_sg(struct ata_queued_cmd *qc)
107{
108 struct ata_port *ap = qc->ap;
109 struct scatterlist *sg;
110 unsigned int si, pi;
111
112 pi = 0;
113 for_each_sg(qc->sg, sg, qc->n_elem, si) {
114 u32 addr, offset;
115 u32 sg_len, len;
116
117 /* determine if physical DMA addr spans 64K boundary.
118 * Note h/w doesn't support 64-bit, so we unconditionally
119 * truncate dma_addr_t to u32.
120 */
121 addr = (u32) sg_dma_address(sg);
122 sg_len = sg_dma_len(sg);
123
124 while (sg_len) {
125 offset = addr & 0xffff;
126 len = sg_len;
127 if ((offset + sg_len) > 0x10000)
128 len = 0x10000 - offset;
129
130 ap->prd[pi].addr = cpu_to_le32(addr);
131 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
132 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
133
134 pi++;
135 sg_len -= len;
136 addr += len;
137 }
138 }
139
140 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
141}
142
143/**
144 * ata_fill_sg_dumb - Fill PCI IDE PRD table
145 * @qc: Metadata associated with taskfile to be transferred
146 *
147 * Fill PCI IDE PRD (scatter-gather) table with segments
148 * associated with the current disk command. Perform the fill
149 * so that we avoid writing any length 64K records for
150 * controllers that don't follow the spec.
151 *
152 * LOCKING:
153 * spin_lock_irqsave(host lock)
154 *
155 */
156static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
157{
158 struct ata_port *ap = qc->ap;
159 struct scatterlist *sg;
160 unsigned int si, pi;
161
162 pi = 0;
163 for_each_sg(qc->sg, sg, qc->n_elem, si) {
164 u32 addr, offset;
165 u32 sg_len, len, blen;
166
167 /* determine if physical DMA addr spans 64K boundary.
168 * Note h/w doesn't support 64-bit, so we unconditionally
169 * truncate dma_addr_t to u32.
170 */
171 addr = (u32) sg_dma_address(sg);
172 sg_len = sg_dma_len(sg);
173
174 while (sg_len) {
175 offset = addr & 0xffff;
176 len = sg_len;
177 if ((offset + sg_len) > 0x10000)
178 len = 0x10000 - offset;
179
180 blen = len & 0xffff;
181 ap->prd[pi].addr = cpu_to_le32(addr);
182 if (blen == 0) {
183 /* Some PATA chipsets like the CS5530 can't
184 cope with 0x0000 meaning 64K as the spec
185 says */
186 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
187 blen = 0x8000;
188 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
189 }
190 ap->prd[pi].flags_len = cpu_to_le32(blen);
191 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
192
193 pi++;
194 sg_len -= len;
195 addr += len;
196 }
197 }
198
199 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
200}
201
202/**
203 * ata_sff_qc_prep - Prepare taskfile for submission
204 * @qc: Metadata associated with taskfile to be prepared
205 *
206 * Prepare ATA taskfile for submission.
207 *
208 * LOCKING:
209 * spin_lock_irqsave(host lock)
210 */
211void ata_sff_qc_prep(struct ata_queued_cmd *qc)
212{
213 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
214 return;
215
216 ata_fill_sg(qc);
217}
218EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
219
220/**
221 * ata_sff_dumb_qc_prep - Prepare taskfile for submission
222 * @qc: Metadata associated with taskfile to be prepared
223 *
224 * Prepare ATA taskfile for submission.
225 *
226 * LOCKING:
227 * spin_lock_irqsave(host lock)
228 */
229void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
230{
231 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
232 return;
233
234 ata_fill_sg_dumb(qc);
235}
236EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
237
238/** 73/**
239 * ata_sff_check_status - Read device status reg & clear interrupt 74 * ata_sff_check_status - Read device status reg & clear interrupt
240 * @ap: port where the device is 75 * @ap: port where the device is
@@ -446,6 +281,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
446EXPORT_SYMBOL_GPL(ata_sff_wait_ready); 281EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
447 282
448/** 283/**
284 * ata_sff_set_devctl - Write device control reg
285 * @ap: port where the device is
286 * @ctl: value to write
287 *
288 * Writes ATA taskfile device control register.
289 *
290 * Note: may NOT be used as the sff_set_devctl() entry in
291 * ata_port_operations.
292 *
293 * LOCKING:
294 * Inherited from caller.
295 */
296static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
297{
298 if (ap->ops->sff_set_devctl)
299 ap->ops->sff_set_devctl(ap, ctl);
300 else
301 iowrite8(ctl, ap->ioaddr.ctl_addr);
302}
303
304/**
449 * ata_sff_dev_select - Select device 0/1 on ATA bus 305 * ata_sff_dev_select - Select device 0/1 on ATA bus
450 * @ap: ATA channel to manipulate 306 * @ap: ATA channel to manipulate
451 * @device: ATA device (numbered from zero) to select 307 * @device: ATA device (numbered from zero) to select
@@ -491,7 +347,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
491 * LOCKING: 347 * LOCKING:
492 * caller. 348 * caller.
493 */ 349 */
494void ata_dev_select(struct ata_port *ap, unsigned int device, 350static void ata_dev_select(struct ata_port *ap, unsigned int device,
495 unsigned int wait, unsigned int can_sleep) 351 unsigned int wait, unsigned int can_sleep)
496{ 352{
497 if (ata_msg_probe(ap)) 353 if (ata_msg_probe(ap))
@@ -517,24 +373,29 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
517 * Enable interrupts on a legacy IDE device using MMIO or PIO, 373 * Enable interrupts on a legacy IDE device using MMIO or PIO,
518 * wait for idle, clear any pending interrupts. 374 * wait for idle, clear any pending interrupts.
519 * 375 *
376 * Note: may NOT be used as the sff_irq_on() entry in
377 * ata_port_operations.
378 *
520 * LOCKING: 379 * LOCKING:
521 * Inherited from caller. 380 * Inherited from caller.
522 */ 381 */
523u8 ata_sff_irq_on(struct ata_port *ap) 382void ata_sff_irq_on(struct ata_port *ap)
524{ 383{
525 struct ata_ioports *ioaddr = &ap->ioaddr; 384 struct ata_ioports *ioaddr = &ap->ioaddr;
526 u8 tmp; 385
386 if (ap->ops->sff_irq_on) {
387 ap->ops->sff_irq_on(ap);
388 return;
389 }
527 390
528 ap->ctl &= ~ATA_NIEN; 391 ap->ctl &= ~ATA_NIEN;
529 ap->last_ctl = ap->ctl; 392 ap->last_ctl = ap->ctl;
530 393
531 if (ioaddr->ctl_addr) 394 if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
532 iowrite8(ap->ctl, ioaddr->ctl_addr); 395 ata_sff_set_devctl(ap, ap->ctl);
533 tmp = ata_wait_idle(ap); 396 ata_wait_idle(ap);
534 397
535 ap->ops->sff_irq_clear(ap); 398 ap->ops->sff_irq_clear(ap);
536
537 return tmp;
538} 399}
539EXPORT_SYMBOL_GPL(ata_sff_irq_on); 400EXPORT_SYMBOL_GPL(ata_sff_irq_on);
540 401
@@ -579,7 +440,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
579 if (ioaddr->ctl_addr) 440 if (ioaddr->ctl_addr)
580 iowrite8(tf->ctl, ioaddr->ctl_addr); 441 iowrite8(tf->ctl, ioaddr->ctl_addr);
581 ap->last_ctl = tf->ctl; 442 ap->last_ctl = tf->ctl;
582 ata_wait_idle(ap);
583 } 443 }
584 444
585 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 445 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -615,8 +475,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
615 iowrite8(tf->device, ioaddr->device_addr); 475 iowrite8(tf->device, ioaddr->device_addr);
616 VPRINTK("device 0x%X\n", tf->device); 476 VPRINTK("device 0x%X\n", tf->device);
617 } 477 }
618
619 ata_wait_idle(ap);
620} 478}
621EXPORT_SYMBOL_GPL(ata_sff_tf_load); 479EXPORT_SYMBOL_GPL(ata_sff_tf_load);
622 480
@@ -894,7 +752,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
894 do_write); 752 do_write);
895 } 753 }
896 754
897 if (!do_write) 755 if (!do_write && !PageSlab(page))
898 flush_dcache_page(page); 756 flush_dcache_page(page);
899 757
900 qc->curbytes += qc->sect_size; 758 qc->curbytes += qc->sect_size;
@@ -1165,7 +1023,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1165 qc = ata_qc_from_tag(ap, qc->tag); 1023 qc = ata_qc_from_tag(ap, qc->tag);
1166 if (qc) { 1024 if (qc) {
1167 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 1025 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1168 ap->ops->sff_irq_on(ap); 1026 ata_sff_irq_on(ap);
1169 ata_qc_complete(qc); 1027 ata_qc_complete(qc);
1170 } else 1028 } else
1171 ata_port_freeze(ap); 1029 ata_port_freeze(ap);
@@ -1181,7 +1039,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1181 } else { 1039 } else {
1182 if (in_wq) { 1040 if (in_wq) {
1183 spin_lock_irqsave(ap->lock, flags); 1041 spin_lock_irqsave(ap->lock, flags);
1184 ap->ops->sff_irq_on(ap); 1042 ata_sff_irq_on(ap);
1185 ata_qc_complete(qc); 1043 ata_qc_complete(qc);
1186 spin_unlock_irqrestore(ap->lock, flags); 1044 spin_unlock_irqrestore(ap->lock, flags);
1187 } else 1045 } else
@@ -1293,7 +1151,7 @@ fsm_start:
1293 if (in_wq) 1151 if (in_wq)
1294 spin_unlock_irqrestore(ap->lock, flags); 1152 spin_unlock_irqrestore(ap->lock, flags);
1295 1153
1296 /* if polling, ata_pio_task() handles the rest. 1154 /* if polling, ata_sff_pio_task() handles the rest.
1297 * otherwise, interrupt handler takes over from here. 1155 * otherwise, interrupt handler takes over from here.
1298 */ 1156 */
1299 break; 1157 break;
@@ -1458,14 +1316,38 @@ fsm_start:
1458} 1316}
1459EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1317EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1460 1318
1461void ata_pio_task(struct work_struct *work) 1319void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
1320{
1321 /* may fail if ata_sff_flush_pio_task() in progress */
1322 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1323 msecs_to_jiffies(delay));
1324}
1325EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1326
1327void ata_sff_flush_pio_task(struct ata_port *ap)
1328{
1329 DPRINTK("ENTER\n");
1330
1331 cancel_rearming_delayed_work(&ap->sff_pio_task);
1332 ap->hsm_task_state = HSM_ST_IDLE;
1333
1334 if (ata_msg_ctl(ap))
1335 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1336}
1337
1338static void ata_sff_pio_task(struct work_struct *work)
1462{ 1339{
1463 struct ata_port *ap = 1340 struct ata_port *ap =
1464 container_of(work, struct ata_port, port_task.work); 1341 container_of(work, struct ata_port, sff_pio_task.work);
1465 struct ata_queued_cmd *qc = ap->port_task_data; 1342 struct ata_queued_cmd *qc;
1466 u8 status; 1343 u8 status;
1467 int poll_next; 1344 int poll_next;
1468 1345
1346 /* qc can be NULL if timeout occurred */
1347 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1348 if (!qc)
1349 return;
1350
1469fsm_start: 1351fsm_start:
1470 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1352 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1471 1353
@@ -1481,7 +1363,7 @@ fsm_start:
1481 msleep(2); 1363 msleep(2);
1482 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1364 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1483 if (status & ATA_BUSY) { 1365 if (status & ATA_BUSY) {
1484 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); 1366 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
1485 return; 1367 return;
1486 } 1368 }
1487 } 1369 }
@@ -1497,15 +1379,11 @@ fsm_start:
1497} 1379}
1498 1380
1499/** 1381/**
1500 * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner 1382 * ata_sff_qc_issue - issue taskfile to a SFF controller
1501 * @qc: command to issue to device 1383 * @qc: command to issue to device
1502 * 1384 *
1503 * Using various libata functions and hooks, this function 1385 * This function issues a PIO or NODATA command to a SFF
1504 * starts an ATA command. ATA commands are grouped into 1386 * controller.
1505 * classes called "protocols", and issuing each type of protocol
1506 * is slightly different.
1507 *
1508 * May be used as the qc_issue() entry in ata_port_operations.
1509 * 1387 *
1510 * LOCKING: 1388 * LOCKING:
1511 * spin_lock_irqsave(host lock) 1389 * spin_lock_irqsave(host lock)
@@ -1520,23 +1398,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1520 /* Use polling pio if the LLD doesn't handle 1398 /* Use polling pio if the LLD doesn't handle
1521 * interrupt driven pio and atapi CDB interrupt. 1399 * interrupt driven pio and atapi CDB interrupt.
1522 */ 1400 */
1523 if (ap->flags & ATA_FLAG_PIO_POLLING) { 1401 if (ap->flags & ATA_FLAG_PIO_POLLING)
1524 switch (qc->tf.protocol) { 1402 qc->tf.flags |= ATA_TFLAG_POLLING;
1525 case ATA_PROT_PIO:
1526 case ATA_PROT_NODATA:
1527 case ATAPI_PROT_PIO:
1528 case ATAPI_PROT_NODATA:
1529 qc->tf.flags |= ATA_TFLAG_POLLING;
1530 break;
1531 case ATAPI_PROT_DMA:
1532 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
1533 /* see ata_dma_blacklisted() */
1534 BUG();
1535 break;
1536 default:
1537 break;
1538 }
1539 }
1540 1403
1541 /* select the device */ 1404 /* select the device */
1542 ata_dev_select(ap, qc->dev->devno, 1, 0); 1405 ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -1551,17 +1414,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1551 ap->hsm_task_state = HSM_ST_LAST; 1414 ap->hsm_task_state = HSM_ST_LAST;
1552 1415
1553 if (qc->tf.flags & ATA_TFLAG_POLLING) 1416 if (qc->tf.flags & ATA_TFLAG_POLLING)
1554 ata_pio_queue_task(ap, qc, 0); 1417 ata_sff_queue_pio_task(ap, 0);
1555
1556 break;
1557
1558 case ATA_PROT_DMA:
1559 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1560 1418
1561 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1562 ap->ops->bmdma_setup(qc); /* set up bmdma */
1563 ap->ops->bmdma_start(qc); /* initiate bmdma */
1564 ap->hsm_task_state = HSM_ST_LAST;
1565 break; 1419 break;
1566 1420
1567 case ATA_PROT_PIO: 1421 case ATA_PROT_PIO:
@@ -1573,20 +1427,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1573 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1427 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1574 /* PIO data out protocol */ 1428 /* PIO data out protocol */
1575 ap->hsm_task_state = HSM_ST_FIRST; 1429 ap->hsm_task_state = HSM_ST_FIRST;
1576 ata_pio_queue_task(ap, qc, 0); 1430 ata_sff_queue_pio_task(ap, 0);
1577 1431
1578 /* always send first data block using 1432 /* always send first data block using the
1579 * the ata_pio_task() codepath. 1433 * ata_sff_pio_task() codepath.
1580 */ 1434 */
1581 } else { 1435 } else {
1582 /* PIO data in protocol */ 1436 /* PIO data in protocol */
1583 ap->hsm_task_state = HSM_ST; 1437 ap->hsm_task_state = HSM_ST;
1584 1438
1585 if (qc->tf.flags & ATA_TFLAG_POLLING) 1439 if (qc->tf.flags & ATA_TFLAG_POLLING)
1586 ata_pio_queue_task(ap, qc, 0); 1440 ata_sff_queue_pio_task(ap, 0);
1587 1441
1588 /* if polling, ata_pio_task() handles the rest. 1442 /* if polling, ata_sff_pio_task() handles the
1589 * otherwise, interrupt handler takes over from here. 1443 * rest. otherwise, interrupt handler takes
1444 * over from here.
1590 */ 1445 */
1591 } 1446 }
1592 1447
@@ -1604,19 +1459,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1604 /* send cdb by polling if no cdb interrupt */ 1459 /* send cdb by polling if no cdb interrupt */
1605 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1460 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1606 (qc->tf.flags & ATA_TFLAG_POLLING)) 1461 (qc->tf.flags & ATA_TFLAG_POLLING))
1607 ata_pio_queue_task(ap, qc, 0); 1462 ata_sff_queue_pio_task(ap, 0);
1608 break;
1609
1610 case ATAPI_PROT_DMA:
1611 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1612
1613 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1614 ap->ops->bmdma_setup(qc); /* set up bmdma */
1615 ap->hsm_task_state = HSM_ST_FIRST;
1616
1617 /* send cdb by polling if no cdb interrupt */
1618 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1619 ata_pio_queue_task(ap, qc, 0);
1620 break; 1463 break;
1621 1464
1622 default: 1465 default:
@@ -1728,7 +1571,7 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
1728 goto idle_irq; 1571 goto idle_irq;
1729 } 1572 }
1730 1573
1731 /* ack bmdma irq events */ 1574 /* clear irq events */
1732 ap->ops->sff_irq_clear(ap); 1575 ap->ops->sff_irq_clear(ap);
1733 1576
1734 ata_sff_hsm_move(ap, qc, status, 0); 1577 ata_sff_hsm_move(ap, qc, status, 0);
@@ -1785,9 +1628,6 @@ retry:
1785 struct ata_port *ap = host->ports[i]; 1628 struct ata_port *ap = host->ports[i];
1786 struct ata_queued_cmd *qc; 1629 struct ata_queued_cmd *qc;
1787 1630
1788 if (unlikely(ap->flags & ATA_FLAG_DISABLED))
1789 continue;
1790
1791 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1631 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1792 if (qc) { 1632 if (qc) {
1793 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) 1633 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
@@ -1862,11 +1702,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
1862 1702
1863 /* Only one outstanding command per SFF channel */ 1703 /* Only one outstanding command per SFF channel */
1864 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1704 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1865 /* Check we have a live one.. */ 1705 /* We cannot lose an interrupt on a non-existent or polled command */
1866 if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE)) 1706 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1867 return;
1868 /* We cannot lose an interrupt on a polled command */
1869 if (qc->tf.flags & ATA_TFLAG_POLLING)
1870 return; 1707 return;
1871 /* See if the controller thinks it is still busy - if so the command 1708 /* See if the controller thinks it is still busy - if so the command
1872 isn't a lost IRQ but is still in progress */ 1709 isn't a lost IRQ but is still in progress */
@@ -1888,20 +1725,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1888 * ata_sff_freeze - Freeze SFF controller port 1725 * ata_sff_freeze - Freeze SFF controller port
1889 * @ap: port to freeze 1726 * @ap: port to freeze
1890 * 1727 *
1891 * Freeze BMDMA controller port. 1728 * Freeze SFF controller port.
1892 * 1729 *
1893 * LOCKING: 1730 * LOCKING:
1894 * Inherited from caller. 1731 * Inherited from caller.
1895 */ 1732 */
1896void ata_sff_freeze(struct ata_port *ap) 1733void ata_sff_freeze(struct ata_port *ap)
1897{ 1734{
1898 struct ata_ioports *ioaddr = &ap->ioaddr;
1899
1900 ap->ctl |= ATA_NIEN; 1735 ap->ctl |= ATA_NIEN;
1901 ap->last_ctl = ap->ctl; 1736 ap->last_ctl = ap->ctl;
1902 1737
1903 if (ioaddr->ctl_addr) 1738 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1904 iowrite8(ap->ctl, ioaddr->ctl_addr); 1739 ata_sff_set_devctl(ap, ap->ctl);
1905 1740
1906 /* Under certain circumstances, some controllers raise IRQ on 1741 /* Under certain circumstances, some controllers raise IRQ on
1907 * ATA_NIEN manipulation. Also, many controllers fail to mask 1742 * ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1927,7 +1762,7 @@ void ata_sff_thaw(struct ata_port *ap)
1927 /* clear & re-enable interrupts */ 1762 /* clear & re-enable interrupts */
1928 ap->ops->sff_check_status(ap); 1763 ap->ops->sff_check_status(ap);
1929 ap->ops->sff_irq_clear(ap); 1764 ap->ops->sff_irq_clear(ap);
1930 ap->ops->sff_irq_on(ap); 1765 ata_sff_irq_on(ap);
1931} 1766}
1932EXPORT_SYMBOL_GPL(ata_sff_thaw); 1767EXPORT_SYMBOL_GPL(ata_sff_thaw);
1933 1768
@@ -2301,8 +2136,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2301 } 2136 }
2302 2137
2303 /* set up device control */ 2138 /* set up device control */
2304 if (ap->ioaddr.ctl_addr) { 2139 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2305 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 2140 ata_sff_set_devctl(ap, ap->ctl);
2306 ap->last_ctl = ap->ctl; 2141 ap->last_ctl = ap->ctl;
2307 } 2142 }
2308} 2143}
@@ -2342,7 +2177,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2342EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); 2177EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2343 2178
2344/** 2179/**
2345 * ata_sff_error_handler - Stock error handler for BMDMA controller 2180 * ata_sff_error_handler - Stock error handler for SFF controller
2346 * @ap: port to handle error for 2181 * @ap: port to handle error for
2347 * 2182 *
2348 * Stock error handler for SFF controller. It can handle both 2183 * Stock error handler for SFF controller. It can handle both
@@ -2359,62 +2194,32 @@ void ata_sff_error_handler(struct ata_port *ap)
2359 ata_reset_fn_t hardreset = ap->ops->hardreset; 2194 ata_reset_fn_t hardreset = ap->ops->hardreset;
2360 struct ata_queued_cmd *qc; 2195 struct ata_queued_cmd *qc;
2361 unsigned long flags; 2196 unsigned long flags;
2362 int thaw = 0;
2363 2197
2364 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2198 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2365 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2199 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2366 qc = NULL; 2200 qc = NULL;
2367 2201
2368 /* reset PIO HSM and stop DMA engine */
2369 spin_lock_irqsave(ap->lock, flags); 2202 spin_lock_irqsave(ap->lock, flags);
2370 2203
2371 ap->hsm_task_state = HSM_ST_IDLE; 2204 /*
2372 2205 * We *MUST* do FIFO draining before we issue a reset as
2373 if (ap->ioaddr.bmdma_addr && 2206 * several devices helpfully clear their internal state and
2374 qc && (qc->tf.protocol == ATA_PROT_DMA || 2207 * will lock solid if we touch the data port post reset. Pass
2375 qc->tf.protocol == ATAPI_PROT_DMA)) { 2208 * qc in case anyone wants to do different PIO/DMA recovery or
2376 u8 host_stat; 2209 * has per command fixups
2377
2378 host_stat = ap->ops->bmdma_status(ap);
2379
2380 /* BMDMA controllers indicate host bus error by
2381 * setting DMA_ERR bit and timing out. As it wasn't
2382 * really a timeout event, adjust error mask and
2383 * cancel frozen state.
2384 */
2385 if (qc->err_mask == AC_ERR_TIMEOUT
2386 && (host_stat & ATA_DMA_ERR)) {
2387 qc->err_mask = AC_ERR_HOST_BUS;
2388 thaw = 1;
2389 }
2390
2391 ap->ops->bmdma_stop(qc);
2392 }
2393
2394 ata_sff_sync(ap); /* FIXME: We don't need this */
2395 ap->ops->sff_check_status(ap);
2396 ap->ops->sff_irq_clear(ap);
2397 /* We *MUST* do FIFO draining before we issue a reset as several
2398 * devices helpfully clear their internal state and will lock solid
2399 * if we touch the data port post reset. Pass qc in case anyone wants
2400 * to do different PIO/DMA recovery or has per command fixups
2401 */ 2210 */
2402 if (ap->ops->drain_fifo) 2211 if (ap->ops->sff_drain_fifo)
2403 ap->ops->drain_fifo(qc); 2212 ap->ops->sff_drain_fifo(qc);
2404 2213
2405 spin_unlock_irqrestore(ap->lock, flags); 2214 spin_unlock_irqrestore(ap->lock, flags);
2406 2215
2407 if (thaw) 2216 /* ignore ata_sff_softreset if ctl isn't accessible */
2408 ata_eh_thaw_port(ap);
2409
2410 /* PIO and DMA engines have been stopped, perform recovery */
2411
2412 /* Ignore ata_sff_softreset if ctl isn't accessible and
2413 * built-in hardresets if SCR access isn't available.
2414 */
2415 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) 2217 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2416 softreset = NULL; 2218 softreset = NULL;
2417 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 2219
2220 /* ignore built-in hardresets if SCR access is not available */
2221 if ((hardreset == sata_std_hardreset ||
2222 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2418 hardreset = NULL; 2223 hardreset = NULL;
2419 2224
2420 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2225 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2423,73 +2228,6 @@ void ata_sff_error_handler(struct ata_port *ap)
2423EXPORT_SYMBOL_GPL(ata_sff_error_handler); 2228EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2424 2229
2425/** 2230/**
2426 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
2427 * @qc: internal command to clean up
2428 *
2429 * LOCKING:
2430 * Kernel thread context (may sleep)
2431 */
2432void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2433{
2434 struct ata_port *ap = qc->ap;
2435 unsigned long flags;
2436
2437 spin_lock_irqsave(ap->lock, flags);
2438
2439 ap->hsm_task_state = HSM_ST_IDLE;
2440
2441 if (ap->ioaddr.bmdma_addr)
2442 ap->ops->bmdma_stop(qc);
2443
2444 spin_unlock_irqrestore(ap->lock, flags);
2445}
2446EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2447
2448/**
2449 * ata_sff_port_start - Set port up for dma.
2450 * @ap: Port to initialize
2451 *
2452 * Called just after data structures for each port are
2453 * initialized. Allocates space for PRD table if the device
2454 * is DMA capable SFF.
2455 *
2456 * May be used as the port_start() entry in ata_port_operations.
2457 *
2458 * LOCKING:
2459 * Inherited from caller.
2460 */
2461int ata_sff_port_start(struct ata_port *ap)
2462{
2463 if (ap->ioaddr.bmdma_addr)
2464 return ata_port_start(ap);
2465 return 0;
2466}
2467EXPORT_SYMBOL_GPL(ata_sff_port_start);
2468
2469/**
2470 * ata_sff_port_start32 - Set port up for dma.
2471 * @ap: Port to initialize
2472 *
2473 * Called just after data structures for each port are
2474 * initialized. Allocates space for PRD table if the device
2475 * is DMA capable SFF.
2476 *
2477 * May be used as the port_start() entry in ata_port_operations for
2478 * devices that are capable of 32bit PIO.
2479 *
2480 * LOCKING:
2481 * Inherited from caller.
2482 */
2483int ata_sff_port_start32(struct ata_port *ap)
2484{
2485 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
2486 if (ap->ioaddr.bmdma_addr)
2487 return ata_port_start(ap);
2488 return 0;
2489}
2490EXPORT_SYMBOL_GPL(ata_sff_port_start32);
2491
2492/**
2493 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2231 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2494 * @ioaddr: IO address structure to be initialized 2232 * @ioaddr: IO address structure to be initialized
2495 * 2233 *
@@ -2515,302 +2253,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
2515} 2253}
2516EXPORT_SYMBOL_GPL(ata_sff_std_ports); 2254EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2517 2255
2518unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2519 unsigned long xfer_mask)
2520{
2521 /* Filter out DMA modes if the device has been configured by
2522 the BIOS as PIO only */
2523
2524 if (adev->link->ap->ioaddr.bmdma_addr == NULL)
2525 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2526 return xfer_mask;
2527}
2528EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2529
2530/**
2531 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2532 * @qc: Info associated with this ATA transaction.
2533 *
2534 * LOCKING:
2535 * spin_lock_irqsave(host lock)
2536 */
2537void ata_bmdma_setup(struct ata_queued_cmd *qc)
2538{
2539 struct ata_port *ap = qc->ap;
2540 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2541 u8 dmactl;
2542
2543 /* load PRD table addr. */
2544 mb(); /* make sure PRD table writes are visible to controller */
2545 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2546
2547 /* specify data direction, triple-check start bit is clear */
2548 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2549 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2550 if (!rw)
2551 dmactl |= ATA_DMA_WR;
2552 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2553
2554 /* issue r/w command */
2555 ap->ops->sff_exec_command(ap, &qc->tf);
2556}
2557EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2558
2559/**
2560 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2561 * @qc: Info associated with this ATA transaction.
2562 *
2563 * LOCKING:
2564 * spin_lock_irqsave(host lock)
2565 */
2566void ata_bmdma_start(struct ata_queued_cmd *qc)
2567{
2568 struct ata_port *ap = qc->ap;
2569 u8 dmactl;
2570
2571 /* start host DMA transaction */
2572 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2573 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2574
2575 /* Strictly, one may wish to issue an ioread8() here, to
2576 * flush the mmio write. However, control also passes
2577 * to the hardware at this point, and it will interrupt
2578 * us when we are to resume control. So, in effect,
2579 * we don't care when the mmio write flushes.
2580 * Further, a read of the DMA status register _immediately_
2581 * following the write may not be what certain flaky hardware
2582 * is expected, so I think it is best to not add a readb()
2583 * without first all the MMIO ATA cards/mobos.
2584 * Or maybe I'm just being paranoid.
2585 *
2586 * FIXME: The posting of this write means I/O starts are
2587 * unneccessarily delayed for MMIO
2588 */
2589}
2590EXPORT_SYMBOL_GPL(ata_bmdma_start);
2591
2592/**
2593 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2594 * @qc: Command we are ending DMA for
2595 *
2596 * Clears the ATA_DMA_START flag in the dma control register
2597 *
2598 * May be used as the bmdma_stop() entry in ata_port_operations.
2599 *
2600 * LOCKING:
2601 * spin_lock_irqsave(host lock)
2602 */
2603void ata_bmdma_stop(struct ata_queued_cmd *qc)
2604{
2605 struct ata_port *ap = qc->ap;
2606 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2607
2608 /* clear start/stop bit */
2609 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2610 mmio + ATA_DMA_CMD);
2611
2612 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2613 ata_sff_dma_pause(ap);
2614}
2615EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2616
2617/**
2618 * ata_bmdma_status - Read PCI IDE BMDMA status
2619 * @ap: Port associated with this ATA transaction.
2620 *
2621 * Read and return BMDMA status register.
2622 *
2623 * May be used as the bmdma_status() entry in ata_port_operations.
2624 *
2625 * LOCKING:
2626 * spin_lock_irqsave(host lock)
2627 */
2628u8 ata_bmdma_status(struct ata_port *ap)
2629{
2630 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2631}
2632EXPORT_SYMBOL_GPL(ata_bmdma_status);
2633
2634/**
2635 * ata_bus_reset - reset host port and associated ATA channel
2636 * @ap: port to reset
2637 *
2638 * This is typically the first time we actually start issuing
2639 * commands to the ATA channel. We wait for BSY to clear, then
2640 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2641 * result. Determine what devices, if any, are on the channel
2642 * by looking at the device 0/1 error register. Look at the signature
2643 * stored in each device's taskfile registers, to determine if
2644 * the device is ATA or ATAPI.
2645 *
2646 * LOCKING:
2647 * PCI/etc. bus probe sem.
2648 * Obtains host lock.
2649 *
2650 * SIDE EFFECTS:
2651 * Sets ATA_FLAG_DISABLED if bus reset fails.
2652 *
2653 * DEPRECATED:
2654 * This function is only for drivers which still use old EH and
2655 * will be removed soon.
2656 */
2657void ata_bus_reset(struct ata_port *ap)
2658{
2659 struct ata_device *device = ap->link.device;
2660 struct ata_ioports *ioaddr = &ap->ioaddr;
2661 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2662 u8 err;
2663 unsigned int dev0, dev1 = 0, devmask = 0;
2664 int rc;
2665
2666 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2667
2668 /* determine if device 0/1 are present */
2669 if (ap->flags & ATA_FLAG_SATA_RESET)
2670 dev0 = 1;
2671 else {
2672 dev0 = ata_devchk(ap, 0);
2673 if (slave_possible)
2674 dev1 = ata_devchk(ap, 1);
2675 }
2676
2677 if (dev0)
2678 devmask |= (1 << 0);
2679 if (dev1)
2680 devmask |= (1 << 1);
2681
2682 /* select device 0 again */
2683 ap->ops->sff_dev_select(ap, 0);
2684
2685 /* issue bus reset */
2686 if (ap->flags & ATA_FLAG_SRST) {
2687 rc = ata_bus_softreset(ap, devmask,
2688 ata_deadline(jiffies, 40000));
2689 if (rc && rc != -ENODEV)
2690 goto err_out;
2691 }
2692
2693 /*
2694 * determine by signature whether we have ATA or ATAPI devices
2695 */
2696 device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
2697 if ((slave_possible) && (err != 0x81))
2698 device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
2699
2700 /* is double-select really necessary? */
2701 if (device[1].class != ATA_DEV_NONE)
2702 ap->ops->sff_dev_select(ap, 1);
2703 if (device[0].class != ATA_DEV_NONE)
2704 ap->ops->sff_dev_select(ap, 0);
2705
2706 /* if no devices were detected, disable this port */
2707 if ((device[0].class == ATA_DEV_NONE) &&
2708 (device[1].class == ATA_DEV_NONE))
2709 goto err_out;
2710
2711 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2712 /* set up device control for ATA_FLAG_SATA_RESET */
2713 iowrite8(ap->ctl, ioaddr->ctl_addr);
2714 ap->last_ctl = ap->ctl;
2715 }
2716
2717 DPRINTK("EXIT\n");
2718 return;
2719
2720err_out:
2721 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2722 ata_port_disable(ap);
2723
2724 DPRINTK("EXIT\n");
2725}
2726EXPORT_SYMBOL_GPL(ata_bus_reset);
2727
2728#ifdef CONFIG_PCI 2256#ifdef CONFIG_PCI
2729 2257
2730/**
2731 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
2732 * @pdev: PCI device
2733 *
2734 * Some PCI ATA devices report simplex mode but in fact can be told to
2735 * enter non simplex mode. This implements the necessary logic to
2736 * perform the task on such devices. Calling it on other devices will
2737 * have -undefined- behaviour.
2738 */
2739int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2740{
2741 unsigned long bmdma = pci_resource_start(pdev, 4);
2742 u8 simplex;
2743
2744 if (bmdma == 0)
2745 return -ENOENT;
2746
2747 simplex = inb(bmdma + 0x02);
2748 outb(simplex & 0x60, bmdma + 0x02);
2749 simplex = inb(bmdma + 0x02);
2750 if (simplex & 0x80)
2751 return -EOPNOTSUPP;
2752 return 0;
2753}
2754EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
2755
2756/**
2757 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
2758 * @host: target ATA host
2759 *
2760 * Acquire PCI BMDMA resources and initialize @host accordingly.
2761 *
2762 * LOCKING:
2763 * Inherited from calling layer (may sleep).
2764 *
2765 * RETURNS:
2766 * 0 on success, -errno otherwise.
2767 */
2768int ata_pci_bmdma_init(struct ata_host *host)
2769{
2770 struct device *gdev = host->dev;
2771 struct pci_dev *pdev = to_pci_dev(gdev);
2772 int i, rc;
2773
2774 /* No BAR4 allocation: No DMA */
2775 if (pci_resource_start(pdev, 4) == 0)
2776 return 0;
2777
2778 /* TODO: If we get no DMA mask we should fall back to PIO */
2779 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
2780 if (rc)
2781 return rc;
2782 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
2783 if (rc)
2784 return rc;
2785
2786 /* request and iomap DMA region */
2787 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
2788 if (rc) {
2789 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
2790 return -ENOMEM;
2791 }
2792 host->iomap = pcim_iomap_table(pdev);
2793
2794 for (i = 0; i < 2; i++) {
2795 struct ata_port *ap = host->ports[i];
2796 void __iomem *bmdma = host->iomap[4] + 8 * i;
2797
2798 if (ata_port_is_dummy(ap))
2799 continue;
2800
2801 ap->ioaddr.bmdma_addr = bmdma;
2802 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
2803 (ioread8(bmdma + 2) & 0x80))
2804 host->flags |= ATA_HOST_SIMPLEX;
2805
2806 ata_port_desc(ap, "bmdma 0x%llx",
2807 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2808 }
2809
2810 return 0;
2811}
2812EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2813
2814static int ata_resources_present(struct pci_dev *pdev, int port) 2258static int ata_resources_present(struct pci_dev *pdev, int port)
2815{ 2259{
2816 int i; 2260 int i;
@@ -2942,21 +2386,12 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2942 goto err_out; 2386 goto err_out;
2943 2387
2944 /* init DMA related stuff */ 2388 /* init DMA related stuff */
2945 rc = ata_pci_bmdma_init(host); 2389 ata_pci_bmdma_init(host);
2946 if (rc)
2947 goto err_bmdma;
2948 2390
2949 devres_remove_group(&pdev->dev, NULL); 2391 devres_remove_group(&pdev->dev, NULL);
2950 *r_host = host; 2392 *r_host = host;
2951 return 0; 2393 return 0;
2952 2394
2953err_bmdma:
2954 /* This is necessary because PCI and iomap resources are
2955 * merged and releasing the top group won't release the
2956 * acquired resources if some of those have been acquired
2957 * before entering this function.
2958 */
2959 pcim_iounmap_regions(pdev, 0xf);
2960err_out: 2395err_out:
2961 devres_release_group(&pdev->dev, NULL); 2396 devres_release_group(&pdev->dev, NULL);
2962 return rc; 2397 return rc;
@@ -3135,3 +2570,609 @@ out:
3135EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); 2570EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
3136 2571
3137#endif /* CONFIG_PCI */ 2572#endif /* CONFIG_PCI */
2573
2574const struct ata_port_operations ata_bmdma_port_ops = {
2575 .inherits = &ata_sff_port_ops,
2576
2577 .error_handler = ata_bmdma_error_handler,
2578 .post_internal_cmd = ata_bmdma_post_internal_cmd,
2579
2580 .qc_prep = ata_bmdma_qc_prep,
2581 .qc_issue = ata_bmdma_qc_issue,
2582
2583 .bmdma_setup = ata_bmdma_setup,
2584 .bmdma_start = ata_bmdma_start,
2585 .bmdma_stop = ata_bmdma_stop,
2586 .bmdma_status = ata_bmdma_status,
2587
2588 .port_start = ata_bmdma_port_start,
2589};
2590EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2591
2592const struct ata_port_operations ata_bmdma32_port_ops = {
2593 .inherits = &ata_bmdma_port_ops,
2594
2595 .sff_data_xfer = ata_sff_data_xfer32,
2596 .port_start = ata_bmdma_port_start32,
2597};
2598EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2599
2600/**
2601 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
2602 * @qc: Metadata associated with taskfile to be transferred
2603 *
2604 * Fill PCI IDE PRD (scatter-gather) table with segments
2605 * associated with the current disk command.
2606 *
2607 * LOCKING:
2608 * spin_lock_irqsave(host lock)
2609 *
2610 */
2611static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2612{
2613 struct ata_port *ap = qc->ap;
2614 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2615 struct scatterlist *sg;
2616 unsigned int si, pi;
2617
2618 pi = 0;
2619 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2620 u32 addr, offset;
2621 u32 sg_len, len;
2622
2623 /* determine if physical DMA addr spans 64K boundary.
2624 * Note h/w doesn't support 64-bit, so we unconditionally
2625 * truncate dma_addr_t to u32.
2626 */
2627 addr = (u32) sg_dma_address(sg);
2628 sg_len = sg_dma_len(sg);
2629
2630 while (sg_len) {
2631 offset = addr & 0xffff;
2632 len = sg_len;
2633 if ((offset + sg_len) > 0x10000)
2634 len = 0x10000 - offset;
2635
2636 prd[pi].addr = cpu_to_le32(addr);
2637 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2638 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2639
2640 pi++;
2641 sg_len -= len;
2642 addr += len;
2643 }
2644 }
2645
2646 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2647}
2648
2649/**
2650 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2651 * @qc: Metadata associated with taskfile to be transferred
2652 *
2653 * Fill PCI IDE PRD (scatter-gather) table with segments
2654 * associated with the current disk command. Perform the fill
2655 * so that we avoid writing any length 64K records for
2656 * controllers that don't follow the spec.
2657 *
2658 * LOCKING:
2659 * spin_lock_irqsave(host lock)
2660 *
2661 */
2662static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2663{
2664 struct ata_port *ap = qc->ap;
2665 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2666 struct scatterlist *sg;
2667 unsigned int si, pi;
2668
2669 pi = 0;
2670 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2671 u32 addr, offset;
2672 u32 sg_len, len, blen;
2673
2674 /* determine if physical DMA addr spans 64K boundary.
2675 * Note h/w doesn't support 64-bit, so we unconditionally
2676 * truncate dma_addr_t to u32.
2677 */
2678 addr = (u32) sg_dma_address(sg);
2679 sg_len = sg_dma_len(sg);
2680
2681 while (sg_len) {
2682 offset = addr & 0xffff;
2683 len = sg_len;
2684 if ((offset + sg_len) > 0x10000)
2685 len = 0x10000 - offset;
2686
2687 blen = len & 0xffff;
2688 prd[pi].addr = cpu_to_le32(addr);
2689 if (blen == 0) {
2690 /* Some PATA chipsets like the CS5530 can't
2691 cope with 0x0000 meaning 64K as the spec
2692 says */
2693 prd[pi].flags_len = cpu_to_le32(0x8000);
2694 blen = 0x8000;
2695 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2696 }
2697 prd[pi].flags_len = cpu_to_le32(blen);
2698 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2699
2700 pi++;
2701 sg_len -= len;
2702 addr += len;
2703 }
2704 }
2705
2706 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2707}
2708
2709/**
2710 * ata_bmdma_qc_prep - Prepare taskfile for submission
2711 * @qc: Metadata associated with taskfile to be prepared
2712 *
2713 * Prepare ATA taskfile for submission.
2714 *
2715 * LOCKING:
2716 * spin_lock_irqsave(host lock)
2717 */
2718void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2719{
2720 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2721 return;
2722
2723 ata_bmdma_fill_sg(qc);
2724}
2725EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2726
2727/**
2728 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2729 * @qc: Metadata associated with taskfile to be prepared
2730 *
2731 * Prepare ATA taskfile for submission.
2732 *
2733 * LOCKING:
2734 * spin_lock_irqsave(host lock)
2735 */
2736void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2737{
2738 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2739 return;
2740
2741 ata_bmdma_fill_sg_dumb(qc);
2742}
2743EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2744
2745/**
2746 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2747 * @qc: command to issue to device
2748 *
2749 * This function issues a PIO, NODATA or DMA command to a
2750 * SFF/BMDMA controller. PIO and NODATA are handled by
2751 * ata_sff_qc_issue().
2752 *
2753 * LOCKING:
2754 * spin_lock_irqsave(host lock)
2755 *
2756 * RETURNS:
2757 * Zero on success, AC_ERR_* mask on failure
2758 */
2759unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2760{
2761 struct ata_port *ap = qc->ap;
2762
2763 /* see ata_dma_blacklisted() */
2764 BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
2765 qc->tf.protocol == ATAPI_PROT_DMA);
2766
2767 /* defer PIO handling to sff_qc_issue */
2768 if (!ata_is_dma(qc->tf.protocol))
2769 return ata_sff_qc_issue(qc);
2770
2771 /* select the device */
2772 ata_dev_select(ap, qc->dev->devno, 1, 0);
2773
2774 /* start the command */
2775 switch (qc->tf.protocol) {
2776 case ATA_PROT_DMA:
2777 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2778
2779 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2780 ap->ops->bmdma_setup(qc); /* set up bmdma */
2781 ap->ops->bmdma_start(qc); /* initiate bmdma */
2782 ap->hsm_task_state = HSM_ST_LAST;
2783 break;
2784
2785 case ATAPI_PROT_DMA:
2786 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2787
2788 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2789 ap->ops->bmdma_setup(qc); /* set up bmdma */
2790 ap->hsm_task_state = HSM_ST_FIRST;
2791
2792 /* send cdb by polling if no cdb interrupt */
2793 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2794 ata_sff_queue_pio_task(ap, 0);
2795 break;
2796
2797 default:
2798 WARN_ON(1);
2799 return AC_ERR_SYSTEM;
2800 }
2801
2802 return 0;
2803}
2804EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2805
2806/**
2807 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2808 * @ap: port to handle error for
2809 *
2810 * Stock error handler for BMDMA controller. It can handle both
2811 * PATA and SATA controllers. Most BMDMA controllers should be
2812 * able to use this EH as-is or with some added handling before
2813 * and after.
2814 *
2815 * LOCKING:
2816 * Kernel thread context (may sleep)
2817 */
2818void ata_bmdma_error_handler(struct ata_port *ap)
2819{
2820 struct ata_queued_cmd *qc;
2821 unsigned long flags;
2822 bool thaw = false;
2823
2824 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2825 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2826 qc = NULL;
2827
2828 /* reset PIO HSM and stop DMA engine */
2829 spin_lock_irqsave(ap->lock, flags);
2830
2831 if (qc && ata_is_dma(qc->tf.protocol)) {
2832 u8 host_stat;
2833
2834 host_stat = ap->ops->bmdma_status(ap);
2835
2836 /* BMDMA controllers indicate host bus error by
2837 * setting DMA_ERR bit and timing out. As it wasn't
2838 * really a timeout event, adjust error mask and
2839 * cancel frozen state.
2840 */
2841 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2842 qc->err_mask = AC_ERR_HOST_BUS;
2843 thaw = true;
2844 }
2845
2846 ap->ops->bmdma_stop(qc);
2847
2848 /* if we're gonna thaw, make sure IRQ is clear */
2849 if (thaw) {
2850 ap->ops->sff_check_status(ap);
2851 ap->ops->sff_irq_clear(ap);
2852 }
2853 }
2854
2855 spin_unlock_irqrestore(ap->lock, flags);
2856
2857 if (thaw)
2858 ata_eh_thaw_port(ap);
2859
2860 ata_sff_error_handler(ap);
2861}
2862EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2863
2864/**
2865 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2866 * @qc: internal command to clean up
2867 *
2868 * LOCKING:
2869 * Kernel thread context (may sleep)
2870 */
2871void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2872{
2873 struct ata_port *ap = qc->ap;
2874 unsigned long flags;
2875
2876 if (ata_is_dma(qc->tf.protocol)) {
2877 spin_lock_irqsave(ap->lock, flags);
2878 ap->ops->bmdma_stop(qc);
2879 spin_unlock_irqrestore(ap->lock, flags);
2880 }
2881}
2882EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2883
2884/**
2885 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2886 * @qc: Info associated with this ATA transaction.
2887 *
2888 * LOCKING:
2889 * spin_lock_irqsave(host lock)
2890 */
2891void ata_bmdma_setup(struct ata_queued_cmd *qc)
2892{
2893 struct ata_port *ap = qc->ap;
2894 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2895 u8 dmactl;
2896
2897 /* load PRD table addr. */
2898 mb(); /* make sure PRD table writes are visible to controller */
2899 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2900
2901 /* specify data direction, triple-check start bit is clear */
2902 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2903 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2904 if (!rw)
2905 dmactl |= ATA_DMA_WR;
2906 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2907
2908 /* issue r/w command */
2909 ap->ops->sff_exec_command(ap, &qc->tf);
2910}
2911EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2912
2913/**
2914 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2915 * @qc: Info associated with this ATA transaction.
2916 *
2917 * LOCKING:
2918 * spin_lock_irqsave(host lock)
2919 */
2920void ata_bmdma_start(struct ata_queued_cmd *qc)
2921{
2922 struct ata_port *ap = qc->ap;
2923 u8 dmactl;
2924
2925 /* start host DMA transaction */
2926 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2927 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2928
2929 /* Strictly, one may wish to issue an ioread8() here, to
2930 * flush the mmio write. However, control also passes
2931 * to the hardware at this point, and it will interrupt
2932 * us when we are to resume control. So, in effect,
2933 * we don't care when the mmio write flushes.
2934 * Further, a read of the DMA status register _immediately_
2935 * following the write may not be what certain flaky hardware
2936 * is expected, so I think it is best to not add a readb()
2937 * without first all the MMIO ATA cards/mobos.
2938 * Or maybe I'm just being paranoid.
2939 *
2940 * FIXME: The posting of this write means I/O starts are
2941 * unneccessarily delayed for MMIO
2942 */
2943}
2944EXPORT_SYMBOL_GPL(ata_bmdma_start);
2945
2946/**
2947 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2948 * @qc: Command we are ending DMA for
2949 *
2950 * Clears the ATA_DMA_START flag in the dma control register
2951 *
2952 * May be used as the bmdma_stop() entry in ata_port_operations.
2953 *
2954 * LOCKING:
2955 * spin_lock_irqsave(host lock)
2956 */
2957void ata_bmdma_stop(struct ata_queued_cmd *qc)
2958{
2959 struct ata_port *ap = qc->ap;
2960 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2961
2962 /* clear start/stop bit */
2963 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2964 mmio + ATA_DMA_CMD);
2965
2966 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2967 ata_sff_dma_pause(ap);
2968}
2969EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2970
2971/**
2972 * ata_bmdma_status - Read PCI IDE BMDMA status
2973 * @ap: Port associated with this ATA transaction.
2974 *
2975 * Read and return BMDMA status register.
2976 *
2977 * May be used as the bmdma_status() entry in ata_port_operations.
2978 *
2979 * LOCKING:
2980 * spin_lock_irqsave(host lock)
2981 */
2982u8 ata_bmdma_status(struct ata_port *ap)
2983{
2984 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2985}
2986EXPORT_SYMBOL_GPL(ata_bmdma_status);
2987
2988
2989/**
2990 * ata_bmdma_port_start - Set port up for bmdma.
2991 * @ap: Port to initialize
2992 *
2993 * Called just after data structures for each port are
2994 * initialized. Allocates space for PRD table.
2995 *
2996 * May be used as the port_start() entry in ata_port_operations.
2997 *
2998 * LOCKING:
2999 * Inherited from caller.
3000 */
3001int ata_bmdma_port_start(struct ata_port *ap)
3002{
3003 if (ap->mwdma_mask || ap->udma_mask) {
3004 ap->bmdma_prd =
3005 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3006 &ap->bmdma_prd_dma, GFP_KERNEL);
3007 if (!ap->bmdma_prd)
3008 return -ENOMEM;
3009 }
3010
3011 return 0;
3012}
3013EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3014
3015/**
3016 * ata_bmdma_port_start32 - Set port up for dma.
3017 * @ap: Port to initialize
3018 *
3019 * Called just after data structures for each port are
3020 * initialized. Enables 32bit PIO and allocates space for PRD
3021 * table.
3022 *
3023 * May be used as the port_start() entry in ata_port_operations for
3024 * devices that are capable of 32bit PIO.
3025 *
3026 * LOCKING:
3027 * Inherited from caller.
3028 */
3029int ata_bmdma_port_start32(struct ata_port *ap)
3030{
3031 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3032 return ata_bmdma_port_start(ap);
3033}
3034EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3035
3036#ifdef CONFIG_PCI
3037
3038/**
3039 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3040 * @pdev: PCI device
3041 *
3042 * Some PCI ATA devices report simplex mode but in fact can be told to
3043 * enter non simplex mode. This implements the necessary logic to
3044 * perform the task on such devices. Calling it on other devices will
3045 * have -undefined- behaviour.
3046 */
3047int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3048{
3049 unsigned long bmdma = pci_resource_start(pdev, 4);
3050 u8 simplex;
3051
3052 if (bmdma == 0)
3053 return -ENOENT;
3054
3055 simplex = inb(bmdma + 0x02);
3056 outb(simplex & 0x60, bmdma + 0x02);
3057 simplex = inb(bmdma + 0x02);
3058 if (simplex & 0x80)
3059 return -EOPNOTSUPP;
3060 return 0;
3061}
3062EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3063
3064static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3065{
3066 int i;
3067
3068 dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
3069 reason);
3070
3071 for (i = 0; i < 2; i++) {
3072 host->ports[i]->mwdma_mask = 0;
3073 host->ports[i]->udma_mask = 0;
3074 }
3075}
3076
3077/**
3078 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3079 * @host: target ATA host
3080 *
3081 * Acquire PCI BMDMA resources and initialize @host accordingly.
3082 *
3083 * LOCKING:
3084 * Inherited from calling layer (may sleep).
3085 */
3086void ata_pci_bmdma_init(struct ata_host *host)
3087{
3088 struct device *gdev = host->dev;
3089 struct pci_dev *pdev = to_pci_dev(gdev);
3090 int i, rc;
3091
3092 /* No BAR4 allocation: No DMA */
3093 if (pci_resource_start(pdev, 4) == 0) {
3094 ata_bmdma_nodma(host, "BAR4 is zero");
3095 return;
3096 }
3097
3098 /*
3099 * Some controllers require BMDMA region to be initialized
3100 * even if DMA is not in use to clear IRQ status via
3101 * ->sff_irq_clear method. Try to initialize bmdma_addr
3102 * regardless of dma masks.
3103 */
3104 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3105 if (rc)
3106 ata_bmdma_nodma(host, "failed to set dma mask");
3107 if (!rc) {
3108 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3109 if (rc)
3110 ata_bmdma_nodma(host,
3111 "failed to set consistent dma mask");
3112 }
3113
3114 /* request and iomap DMA region */
3115 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3116 if (rc) {
3117 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3118 return;
3119 }
3120 host->iomap = pcim_iomap_table(pdev);
3121
3122 for (i = 0; i < 2; i++) {
3123 struct ata_port *ap = host->ports[i];
3124 void __iomem *bmdma = host->iomap[4] + 8 * i;
3125
3126 if (ata_port_is_dummy(ap))
3127 continue;
3128
3129 ap->ioaddr.bmdma_addr = bmdma;
3130 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3131 (ioread8(bmdma + 2) & 0x80))
3132 host->flags |= ATA_HOST_SIMPLEX;
3133
3134 ata_port_desc(ap, "bmdma 0x%llx",
3135 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3136 }
3137}
3138EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3139
3140#endif /* CONFIG_PCI */
3141
3142/**
3143 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
3144 * @ap: Port to initialize
3145 *
3146 * Called on port allocation to initialize SFF/BMDMA specific
3147 * fields.
3148 *
3149 * LOCKING:
3150 * None.
3151 */
3152void ata_sff_port_init(struct ata_port *ap)
3153{
3154 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3155 ap->ctl = ATA_DEVCTL_OBS;
3156 ap->last_ctl = 0xFF;
3157}
3158
3159int __init ata_sff_init(void)
3160{
3161 /*
3162 * FIXME: In UP case, there is only one workqueue thread and if you
3163 * have more than one PIO device, latency is bloody awful, with
3164 * occasional multi-second "hiccups" as one PIO device waits for
3165 * another. It's an ugly wart that users DO occasionally complain
3166 * about; luckily most users have at most one PIO polled device.
3167 */
3168 ata_sff_wq = create_workqueue("ata_sff");
3169 if (!ata_sff_wq)
3170 return -ENOMEM;
3171
3172 return 0;
3173}
3174
3175void __exit ata_sff_exit(void)
3176{
3177 destroy_workqueue(ata_sff_wq);
3178}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 823e63096362..4b84ed60324a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,17 +38,6 @@ struct ata_scsi_args {
38 void (*done)(struct scsi_cmnd *); 38 void (*done)(struct scsi_cmnd *);
39}; 39};
40 40
41static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
42{
43 if (reset == sata_std_hardreset)
44 return 1;
45#ifdef CONFIG_ATA_SFF
46 if (reset == sata_sff_hardreset)
47 return 1;
48#endif
49 return 0;
50}
51
52/* libata-core.c */ 41/* libata-core.c */
53enum { 42enum {
54 /* flags for ata_dev_read_id() */ 43 /* flags for ata_dev_read_id() */
@@ -79,7 +68,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
79 u64 block, u32 n_block, unsigned int tf_flags, 68 u64 block, u32 n_block, unsigned int tf_flags,
80 unsigned int tag); 69 unsigned int tag);
81extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); 70extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
82extern void ata_port_flush_task(struct ata_port *ap);
83extern unsigned ata_exec_internal(struct ata_device *dev, 71extern unsigned ata_exec_internal(struct ata_device *dev,
84 struct ata_taskfile *tf, const u8 *cdb, 72 struct ata_taskfile *tf, const u8 *cdb,
85 int dma_dir, void *buf, unsigned int buflen, 73 int dma_dir, void *buf, unsigned int buflen,
@@ -202,10 +190,19 @@ static inline int sata_pmp_attach(struct ata_device *dev)
202 190
203/* libata-sff.c */ 191/* libata-sff.c */
204#ifdef CONFIG_ATA_SFF 192#ifdef CONFIG_ATA_SFF
205extern void ata_dev_select(struct ata_port *ap, unsigned int device, 193extern void ata_sff_flush_pio_task(struct ata_port *ap);
206 unsigned int wait, unsigned int can_sleep); 194extern void ata_sff_port_init(struct ata_port *ap);
207extern u8 ata_irq_on(struct ata_port *ap); 195extern int ata_sff_init(void);
208extern void ata_pio_task(struct work_struct *work); 196extern void ata_sff_exit(void);
197#else /* CONFIG_ATA_SFF */
198static inline void ata_sff_flush_pio_task(struct ata_port *ap)
199{ }
200static inline void ata_sff_port_init(struct ata_port *ap)
201{ }
202static inline int ata_sff_init(void)
203{ return 0; }
204static inline void ata_sff_exit(void)
205{ }
209#endif /* CONFIG_ATA_SFF */ 206#endif /* CONFIG_ATA_SFF */
210 207
211#endif /* __LIBATA_H__ */ 208#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 1ea2be0f4b94..066b9f301ed5 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
101static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) 101static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
102{ 102{
103 struct pata_acpi *acpi = adev->link->ap->private_data; 103 struct pata_acpi *acpi = adev->link->ap->private_data;
104 return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]); 104 return mask & acpi->mask[adev->devno];
105} 105}
106 106
107/** 107/**
@@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
172 struct pata_acpi *acpi = ap->private_data; 172 struct pata_acpi *acpi = ap->private_data;
173 173
174 if (acpi->gtm.flags & 0x10) 174 if (acpi->gtm.flags & 0x10)
175 return ata_sff_qc_issue(qc); 175 return ata_bmdma_qc_issue(qc);
176 176
177 if (adev != acpi->last) { 177 if (adev != acpi->last) {
178 pacpi_set_piomode(ap, adev); 178 pacpi_set_piomode(ap, adev);
@@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
180 pacpi_set_dmamode(ap, adev); 180 pacpi_set_dmamode(ap, adev);
181 acpi->last = adev; 181 acpi->last = adev;
182 } 182 }
183 return ata_sff_qc_issue(qc); 183 return ata_bmdma_qc_issue(qc);
184} 184}
185 185
186/** 186/**
@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
205 return -ENOMEM; 205 return -ENOMEM;
206 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]); 206 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
207 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]); 207 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
208 ret = ata_sff_port_start(ap); 208 ret = ata_bmdma_port_start(ap);
209 if (ret < 0) 209 if (ret < 0)
210 return ret; 210 return ret;
211 211
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index dc61b72f751c..f306e10c748d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
124 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 124 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
125 if (strstr(model_num, "WDC")) 125 if (strstr(model_num, "WDC"))
126 return mask &= ~ATA_MASK_UDMA; 126 return mask &= ~ATA_MASK_UDMA;
127 return ata_bmdma_mode_filter(adev, mask); 127 return mask;
128} 128}
129 129
130/** 130/**
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index c6a946aa252c..0da0dcc7dd08 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
202 .sff_data_xfer = pata_at91_data_xfer_noirq, 202 .sff_data_xfer = pata_at91_data_xfer_noirq,
203 .set_piomode = pata_at91_set_piomode, 203 .set_piomode = pata_at91_set_piomode,
204 .cable_detect = ata_cable_40wire, 204 .cable_detect = ata_cable_40wire,
205 .port_start = ATA_OP_NULL,
206}; 205};
207 206
208static int __devinit pata_at91_probe(struct platform_device *pdev) 207static int __devinit pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index cbaf2eddac6b..44d88b380ddd 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
217static struct ata_port_operations atiixp_port_ops = { 217static struct ata_port_operations atiixp_port_ops = {
218 .inherits = &ata_bmdma_port_ops, 218 .inherits = &ata_bmdma_port_ops,
219 219
220 .qc_prep = ata_sff_dumb_qc_prep, 220 .qc_prep = ata_bmdma_dumb_qc_prep,
221 .bmdma_start = atiixp_bmdma_start, 221 .bmdma_start = atiixp_bmdma_start,
222 .bmdma_stop = atiixp_bmdma_stop, 222 .bmdma_stop = atiixp_bmdma_stop,
223 223
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 02c81f12c702..6422cfd13d0d 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -821,6 +821,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
821} 821}
822 822
823/** 823/**
824 * bfin_set_devctl - Write device control reg
825 * @ap: port where the device is
826 * @ctl: value to write
827 */
828
829static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl)
830{
831 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
832 write_atapi_register(base, ATA_REG_CTRL, ctl);
833}
834
835/**
824 * bfin_bmdma_setup - Set up IDE DMA transaction 836 * bfin_bmdma_setup - Set up IDE DMA transaction
825 * @qc: Info associated with this ATA transaction. 837 * @qc: Info associated with this ATA transaction.
826 * 838 *
@@ -1216,56 +1228,6 @@ static void bfin_irq_clear(struct ata_port *ap)
1216} 1228}
1217 1229
1218/** 1230/**
1219 * bfin_irq_on - Enable interrupts on a port.
1220 * @ap: Port on which interrupts are enabled.
1221 *
1222 * Note: Original code is ata_sff_irq_on().
1223 */
1224
1225static unsigned char bfin_irq_on(struct ata_port *ap)
1226{
1227 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1228 u8 tmp;
1229
1230 dev_dbg(ap->dev, "in atapi irq on\n");
1231 ap->ctl &= ~ATA_NIEN;
1232 ap->last_ctl = ap->ctl;
1233
1234 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1235 tmp = ata_wait_idle(ap);
1236
1237 bfin_irq_clear(ap);
1238
1239 return tmp;
1240}
1241
1242/**
1243 * bfin_freeze - Freeze DMA controller port
1244 * @ap: port to freeze
1245 *
1246 * Note: Original code is ata_sff_freeze().
1247 */
1248
1249static void bfin_freeze(struct ata_port *ap)
1250{
1251 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1252
1253 dev_dbg(ap->dev, "in atapi dma freeze\n");
1254 ap->ctl |= ATA_NIEN;
1255 ap->last_ctl = ap->ctl;
1256
1257 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1258
1259 /* Under certain circumstances, some controllers raise IRQ on
1260 * ATA_NIEN manipulation. Also, many controllers fail to mask
1261 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1262 */
1263 ap->ops->sff_check_status(ap);
1264
1265 bfin_irq_clear(ap);
1266}
1267
1268/**
1269 * bfin_thaw - Thaw DMA controller port 1231 * bfin_thaw - Thaw DMA controller port
1270 * @ap: port to thaw 1232 * @ap: port to thaw
1271 * 1233 *
@@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap)
1276{ 1238{
1277 dev_dbg(ap->dev, "in atapi dma thaw\n"); 1239 dev_dbg(ap->dev, "in atapi dma thaw\n");
1278 bfin_check_status(ap); 1240 bfin_check_status(ap);
1279 bfin_irq_on(ap); 1241 ata_sff_irq_on(ap);
1280} 1242}
1281 1243
1282/** 1244/**
@@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes)
1293 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1255 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1294 1256
1295 /* re-enable interrupts */ 1257 /* re-enable interrupts */
1296 bfin_irq_on(ap); 1258 ata_sff_irq_on(ap);
1297 1259
1298 /* is double-select really necessary? */ 1260 /* is double-select really necessary? */
1299 if (classes[0] != ATA_DEV_NONE) 1261 if (classes[0] != ATA_DEV_NONE)
@@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1438 spin_lock_irqsave(&host->lock, flags); 1400 spin_lock_irqsave(&host->lock, flags);
1439 1401
1440 for (i = 0; i < host->n_ports; i++) { 1402 for (i = 0; i < host->n_ports; i++) {
1441 struct ata_port *ap; 1403 struct ata_port *ap = host->ports[i];
1404 struct ata_queued_cmd *qc;
1442 1405
1443 ap = host->ports[i]; 1406 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1444 if (ap && 1407 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1445 !(ap->flags & ATA_FLAG_DISABLED)) { 1408 handled |= bfin_ata_host_intr(ap, qc);
1446 struct ata_queued_cmd *qc;
1447
1448 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1449 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1450 (qc->flags & ATA_QCFLAG_ACTIVE))
1451 handled |= bfin_ata_host_intr(ap, qc);
1452 }
1453 } 1409 }
1454 1410
1455 spin_unlock_irqrestore(&host->lock, flags); 1411 spin_unlock_irqrestore(&host->lock, flags);
@@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = {
1465}; 1421};
1466 1422
1467static struct ata_port_operations bfin_pata_ops = { 1423static struct ata_port_operations bfin_pata_ops = {
1468 .inherits = &ata_sff_port_ops, 1424 .inherits = &ata_bmdma_port_ops,
1469 1425
1470 .set_piomode = bfin_set_piomode, 1426 .set_piomode = bfin_set_piomode,
1471 .set_dmamode = bfin_set_dmamode, 1427 .set_dmamode = bfin_set_dmamode,
@@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = {
1476 .sff_check_status = bfin_check_status, 1432 .sff_check_status = bfin_check_status,
1477 .sff_check_altstatus = bfin_check_altstatus, 1433 .sff_check_altstatus = bfin_check_altstatus,
1478 .sff_dev_select = bfin_dev_select, 1434 .sff_dev_select = bfin_dev_select,
1435 .sff_set_devctl = bfin_set_devctl,
1479 1436
1480 .bmdma_setup = bfin_bmdma_setup, 1437 .bmdma_setup = bfin_bmdma_setup,
1481 .bmdma_start = bfin_bmdma_start, 1438 .bmdma_start = bfin_bmdma_start,
@@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = {
1485 1442
1486 .qc_prep = ata_noop_qc_prep, 1443 .qc_prep = ata_noop_qc_prep,
1487 1444
1488 .freeze = bfin_freeze,
1489 .thaw = bfin_thaw, 1445 .thaw = bfin_thaw,
1490 .softreset = bfin_softreset, 1446 .softreset = bfin_softreset,
1491 .postreset = bfin_postreset, 1447 .postreset = bfin_postreset,
1492 1448
1493 .sff_irq_clear = bfin_irq_clear, 1449 .sff_irq_clear = bfin_irq_clear,
1494 .sff_irq_on = bfin_irq_on,
1495 1450
1496 .port_start = bfin_port_start, 1451 .port_start = bfin_port_start,
1497 .port_stop = bfin_port_stop, 1452 .port_stop = bfin_port_stop,
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 45896b3c6538..e5f289f59ca3 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap)
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 153 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 struct cmd640_reg *timing; 154 struct cmd640_reg *timing;
155 155
156 int ret = ata_sff_port_start(ap);
157 if (ret < 0)
158 return ret;
159
160 timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL); 156 timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
161 if (timing == NULL) 157 if (timing == NULL)
162 return -ENOMEM; 158 return -ENOMEM;
163 timing->last = -1; /* Force a load */ 159 timing->last = -1; /* Force a load */
164 ap->private_data = timing; 160 ap->private_data = timing;
165 return ret; 161 return 0;
166} 162}
167 163
168static struct scsi_host_template cmd640_sht = { 164static struct scsi_host_template cmd640_sht = {
169 ATA_BMDMA_SHT(DRV_NAME), 165 ATA_PIO_SHT(DRV_NAME),
170}; 166};
171 167
172static struct ata_port_operations cmd640_port_ops = { 168static struct ata_port_operations cmd640_port_ops = {
173 .inherits = &ata_bmdma_port_ops, 169 .inherits = &ata_sff_port_ops,
174 /* In theory xfer_noirq is not needed once we kill the prefetcher */ 170 /* In theory xfer_noirq is not needed once we kill the prefetcher */
175 .sff_data_xfer = ata_sff_data_xfer_noirq, 171 .sff_data_xfer = ata_sff_data_xfer_noirq,
176 .qc_issue = cmd640_qc_issue, 172 .qc_issue = cmd640_qc_issue,
@@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = {
181 177
182static void cmd640_hardware_init(struct pci_dev *pdev) 178static void cmd640_hardware_init(struct pci_dev *pdev)
183{ 179{
184 u8 r;
185 u8 ctrl; 180 u8 ctrl;
186 181
187 /* CMD640 detected, commiserations */ 182 /* CMD640 detected, commiserations */
188 pci_write_config_byte(pdev, 0x5B, 0x00); 183 pci_write_config_byte(pdev, 0x5B, 0x00);
189 /* Get version info */
190 pci_read_config_byte(pdev, CFR, &r);
191 /* PIO0 command cycles */ 184 /* PIO0 command cycles */
192 pci_write_config_byte(pdev, CMDTIM, 0); 185 pci_write_config_byte(pdev, CMDTIM, 0);
193 /* 512 byte bursts (sector) */ 186 /* 512 byte bursts (sector) */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 95ebdac517f2..17c5f346ff01 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
110 110
111static struct ata_port_operations cs5520_port_ops = { 111static struct ata_port_operations cs5520_port_ops = {
112 .inherits = &ata_bmdma_port_ops, 112 .inherits = &ata_bmdma_port_ops,
113 .qc_prep = ata_sff_dumb_qc_prep, 113 .qc_prep = ata_bmdma_dumb_qc_prep,
114 .cable_detect = ata_cable_40wire, 114 .cable_detect = ata_cable_40wire,
115 .set_piomode = cs5520_set_piomode, 115 .set_piomode = cs5520_set_piomode,
116}; 116};
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 738ad2e14a97..e809a4233a81 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
156 cs5530_set_dmamode(ap, adev); 156 cs5530_set_dmamode(ap, adev);
157 } 157 }
158 158
159 return ata_sff_qc_issue(qc); 159 return ata_bmdma_qc_issue(qc);
160} 160}
161 161
162static struct scsi_host_template cs5530_sht = { 162static struct scsi_host_template cs5530_sht = {
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
167static struct ata_port_operations cs5530_port_ops = { 167static struct ata_port_operations cs5530_port_ops = {
168 .inherits = &ata_bmdma_port_ops, 168 .inherits = &ata_bmdma_port_ops,
169 169
170 .qc_prep = ata_sff_dumb_qc_prep, 170 .qc_prep = ata_bmdma_dumb_qc_prep,
171 .qc_issue = cs5530_qc_issue, 171 .qc_issue = cs5530_qc_issue,
172 172
173 .cable_detect = ata_cable_40wire, 173 .cable_detect = ata_cable_40wire,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index af49bfb57247..8580eb3cd54d 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
182 } else if (adev->class == ATA_DEV_ATAPI) 182 } else if (adev->class == ATA_DEV_ATAPI)
183 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 183 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
184 184
185 return ata_bmdma_mode_filter(adev, mask); 185 return mask;
186} 186}
187 187
188static int hpt36x_cable_detect(struct ata_port *ap) 188static int hpt36x_cable_detect(struct ata_port *ap)
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 8839307a64cf..98b498b6907c 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
282 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 282 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
283 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 283 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
284 } 284 }
285 return ata_bmdma_mode_filter(adev, mask); 285 return mask;
286} 286}
287 287
288/** 288/**
@@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
298 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 298 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
299 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 299 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
300 } 300 }
301 return ata_bmdma_mode_filter(adev, mask); 301 return mask;
302} 302}
303 303
304/** 304/**
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 01457b266f3d..8b95aeba0e74 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
320 320
321 hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); 321 hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
322 } 322 }
323 return ata_sff_qc_issue(qc); 323 return ata_bmdma_qc_issue(qc);
324} 324}
325 325
326static struct scsi_host_template hpt3x2n_sht = { 326static struct scsi_host_template hpt3x2n_sht = {
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index fa812e206eeb..b56e8f722d20 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
321} 321}
322 322
323static struct ata_port_operations pata_icside_port_ops = { 323static struct ata_port_operations pata_icside_port_ops = {
324 .inherits = &ata_sff_port_ops, 324 .inherits = &ata_bmdma_port_ops,
325 /* no need to build any PRD tables for DMA */ 325 /* no need to build any PRD tables for DMA */
326 .qc_prep = ata_noop_qc_prep, 326 .qc_prep = ata_noop_qc_prep,
327 .sff_data_xfer = ata_sff_data_xfer_noirq, 327 .sff_data_xfer = ata_sff_data_xfer_noirq,
@@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = {
333 .cable_detect = ata_cable_40wire, 333 .cable_detect = ata_cable_40wire,
334 .set_dmamode = pata_icside_set_dmamode, 334 .set_dmamode = pata_icside_set_dmamode,
335 .postreset = pata_icside_postreset, 335 .postreset = pata_icside_postreset,
336 .post_internal_cmd = pata_icside_bmdma_stop, 336
337 .port_start = ATA_OP_NULL, /* don't need PRD table */
337}; 338};
338 339
339static void __devinit 340static void __devinit
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 5cb286fd839e..2bd2b002d14a 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
430 case 0xFC: /* Internal 'report rebuild state' */ 430 case 0xFC: /* Internal 'report rebuild state' */
431 /* Arguably should just no-op this one */ 431 /* Arguably should just no-op this one */
432 case ATA_CMD_SET_FEATURES: 432 case ATA_CMD_SET_FEATURES:
433 return ata_sff_qc_issue(qc); 433 return ata_bmdma_qc_issue(qc);
434 } 434 }
435 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); 435 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
436 return AC_ERR_DEV; 436 return AC_ERR_DEV;
@@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
448static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) 448static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
449{ 449{
450 it821x_passthru_dev_select(qc->ap, qc->dev->devno); 450 it821x_passthru_dev_select(qc->ap, qc->dev->devno);
451 return ata_sff_qc_issue(qc); 451 return ata_bmdma_qc_issue(qc);
452} 452}
453 453
454/** 454/**
@@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
739 struct it821x_dev *itdev; 739 struct it821x_dev *itdev;
740 u8 conf; 740 u8 conf;
741 741
742 int ret = ata_sff_port_start(ap); 742 int ret = ata_bmdma_port_start(ap);
743 if (ret < 0) 743 if (ret < 0)
744 return ret; 744 return ret;
745 745
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index c74f13bc9876..b5b48e703cb7 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
720 if (priv->dma_table_cpu == NULL) { 720 if (priv->dma_table_cpu == NULL) {
721 dev_err(priv->dev, "Unable to allocate DMA command list\n"); 721 dev_err(priv->dev, "Unable to allocate DMA command list\n");
722 ap->ioaddr.bmdma_addr = NULL; 722 ap->ioaddr.bmdma_addr = NULL;
723 ap->mwdma_mask = 0;
724 ap->udma_mask = 0;
723 } 725 }
724 return 0; 726 return 0;
725} 727}
@@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = {
917}; 919};
918 920
919static struct ata_port_operations pata_macio_ops = { 921static struct ata_port_operations pata_macio_ops = {
920 .inherits = &ata_sff_port_ops, 922 .inherits = &ata_bmdma_port_ops,
921 923
922 .freeze = pata_macio_freeze, 924 .freeze = pata_macio_freeze,
923 .set_piomode = pata_macio_set_timings, 925 .set_piomode = pata_macio_set_timings,
@@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = {
925 .cable_detect = pata_macio_cable_detect, 927 .cable_detect = pata_macio_cable_detect,
926 .sff_dev_select = pata_macio_dev_select, 928 .sff_dev_select = pata_macio_dev_select,
927 .qc_prep = pata_macio_qc_prep, 929 .qc_prep = pata_macio_qc_prep,
928 .mode_filter = ata_bmdma_mode_filter,
929 .bmdma_setup = pata_macio_bmdma_setup, 930 .bmdma_setup = pata_macio_bmdma_setup,
930 .bmdma_start = pata_macio_bmdma_start, 931 .bmdma_start = pata_macio_bmdma_start,
931 .bmdma_stop = pata_macio_bmdma_stop, 932 .bmdma_stop = pata_macio_bmdma_stop,
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 88e6b6008bd6..36afe2c1c747 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {
64 64
65 65
66/* ATAPI-4 PIO specs (in ns) */ 66/* ATAPI-4 PIO specs (in ns) */
67static const int ataspec_t0[5] = {600, 383, 240, 180, 120}; 67static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
68static const int ataspec_t1[5] = { 70, 50, 30, 30, 25}; 68static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
69static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70}; 69static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
70static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70}; 70static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
71static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25}; 71static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
72static const int ataspec_t4[5] = { 30, 20, 15, 10, 10}; 72static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
73static const int ataspec_ta[5] = { 35, 35, 35, 35, 35}; 73static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
74 74
75#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) 75#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
76 76
@@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
78 78
79/* ATAPI-4 MDMA specs (in clocks) */ 79/* ATAPI-4 MDMA specs (in clocks) */
80struct mdmaspec { 80struct mdmaspec {
81 u32 t0M; 81 u8 t0M;
82 u32 td; 82 u8 td;
83 u32 th; 83 u8 th;
84 u32 tj; 84 u8 tj;
85 u32 tkw; 85 u8 tkw;
86 u32 tm; 86 u8 tm;
87 u32 tn; 87 u8 tn;
88}; 88};
89 89
90static const struct mdmaspec mdmaspec66[3] = { 90static const struct mdmaspec mdmaspec66[3] = {
@@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
101 101
102/* ATAPI-4 UDMA specs (in clocks) */ 102/* ATAPI-4 UDMA specs (in clocks) */
103struct udmaspec { 103struct udmaspec {
104 u32 tcyc; 104 u8 tcyc;
105 u32 t2cyc; 105 u8 t2cyc;
106 u32 tds; 106 u8 tds;
107 u32 tdh; 107 u8 tdh;
108 u32 tdvs; 108 u8 tdvs;
109 u32 tdvh; 109 u8 tdvh;
110 u32 tfs; 110 u8 tfs;
111 u32 tli; 111 u8 tli;
112 u32 tmli; 112 u8 tmli;
113 u32 taz; 113 u8 taz;
114 u32 tzah; 114 u8 tzah;
115 u32 tenv; 115 u8 tenv;
116 u32 tsr; 116 u8 tsr;
117 u32 trfs; 117 u8 trfs;
118 u32 trp; 118 u8 trp;
119 u32 tack; 119 u8 tack;
120 u32 tss; 120 u8 tss;
121}; 121};
122 122
123static const struct udmaspec udmaspec66[6] = { 123static const struct udmaspec udmaspec66[6] = {
@@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
270{ 270{
271 struct mpc52xx_ata_timings *timing = &priv->timings[dev]; 271 struct mpc52xx_ata_timings *timing = &priv->timings[dev];
272 unsigned int ipb_period = priv->ipb_period; 272 unsigned int ipb_period = priv->ipb_period;
273 unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta; 273 u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
274 274
275 if ((pio < 0) || (pio > 4)) 275 if ((pio < 0) || (pio > 4))
276 return -EINVAL; 276 return -EINVAL;
@@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
299 if (speed < 0 || speed > 2) 299 if (speed < 0 || speed > 2)
300 return -EINVAL; 300 return -EINVAL;
301 301
302 t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm); 302 t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
303 t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8); 303 t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
304 t->using_udma = 0; 304 t->using_udma = 0;
305 305
306 return 0; 306 return 0;
@@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
316 if (speed < 0 || speed > 2) 316 if (speed < 0 || speed > 2)
317 return -EINVAL; 317 return -EINVAL;
318 318
319 t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh; 319 t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
320 t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli; 320 t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
321 t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr; 321 t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
322 t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack; 322 t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
323 t->udma5 = (s->tzah << 24); 323 t->udma5 = (u32)s->tzah << 24;
324 t->using_udma = 1; 324 t->using_udma = 1;
325 325
326 return 0; 326 return 0;
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 830431f036a1..fdbba2d76d3e 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
126 126
127 /* load PRD table addr. */ 127 /* load PRD table addr. */
128 mb(); /* make sure PRD table writes are visible to controller */ 128 mb(); /* make sure PRD table writes are visible to controller */
129 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); 129 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
130 130
131 /* specify data direction, triple-check start bit is clear */ 131 /* specify data direction, triple-check start bit is clear */
132 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 132 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 005a44483a7b..3001109352ea 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
489 ata_wait_idle(ap); 489 ata_wait_idle(ap);
490} 490}
491 491
492static u8 octeon_cf_irq_on(struct ata_port *ap) 492static void octeon_cf_irq_on(struct ata_port *ap)
493{ 493{
494 return 0;
495} 494}
496 495
497static void octeon_cf_irq_clear(struct ata_port *ap) 496static void octeon_cf_irq_clear(struct ata_port *ap)
@@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
655 ap = host->ports[i]; 654 ap = host->ports[i];
656 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
657 656
658 if (ap->flags & ATA_FLAG_DISABLED)
659 continue;
660
661 ocd = ap->dev->platform_data; 657 ocd = ap->dev->platform_data;
662 cf_port = ap->private_data; 658 cf_port = ap->private_data;
663 dma_int.u64 = 659 dma_int.u64 =
@@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
667 663
668 qc = ata_qc_from_tag(ap, ap->link.active_tag); 664 qc = ata_qc_from_tag(ap, ap->link.active_tag);
669 665
670 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 666 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
671 (qc->flags & ATA_QCFLAG_ACTIVE)) {
672 if (dma_int.s.done && !dma_cfg.s.en) { 667 if (dma_int.s.done && !dma_cfg.s.en) {
673 if (!sg_is_last(qc->cursg)) { 668 if (!sg_is_last(qc->cursg)) {
674 qc->cursg = sg_next(qc->cursg); 669 qc->cursg = sg_next(qc->cursg);
@@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
738 goto out; 733 goto out;
739 } 734 }
740 qc = ata_qc_from_tag(ap, ap->link.active_tag); 735 qc = ata_qc_from_tag(ap, ap->link.active_tag);
741 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 736 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
742 (qc->flags & ATA_QCFLAG_ACTIVE))
743 octeon_cf_dma_finished(ap, qc); 737 octeon_cf_dma_finished(ap, qc);
744out: 738out:
745 spin_unlock_irqrestore(&host->lock, flags); 739 spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 5f6aba7eb0dd..988ef2627be3 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
200 if (ata_dma_enabled(adev)) 200 if (ata_dma_enabled(adev))
201 oldpiix_set_dmamode(ap, adev); 201 oldpiix_set_dmamode(ap, adev);
202 } 202 }
203 return ata_sff_qc_issue(qc); 203 return ata_bmdma_qc_issue(qc);
204} 204}
205 205
206 206
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d3c34e..118c28e8abaf 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -45,16 +45,6 @@
45#define DRV_NAME "pata_pcmcia" 45#define DRV_NAME "pata_pcmcia"
46#define DRV_VERSION "0.3.5" 46#define DRV_VERSION "0.3.5"
47 47
48/*
49 * Private data structure to glue stuff together
50 */
51
52struct ata_pcmcia_info {
53 struct pcmcia_device *pdev;
54 int ndev;
55 dev_node_t node;
56};
57
58/** 48/**
59 * pcmcia_set_mode - PCMCIA specific mode setup 49 * pcmcia_set_mode - PCMCIA specific mode setup
60 * @link: link 50 * @link: link
@@ -175,7 +165,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
175 .sff_data_xfer = ata_data_xfer_8bit, 165 .sff_data_xfer = ata_data_xfer_8bit,
176 .cable_detect = ata_cable_40wire, 166 .cable_detect = ata_cable_40wire,
177 .set_mode = pcmcia_set_mode_8bit, 167 .set_mode = pcmcia_set_mode_8bit,
178 .drain_fifo = pcmcia_8bit_drain_fifo, 168 .sff_drain_fifo = pcmcia_8bit_drain_fifo,
179}; 169};
180 170
181 171
@@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
248{ 238{
249 struct ata_host *host; 239 struct ata_host *host;
250 struct ata_port *ap; 240 struct ata_port *ap;
251 struct ata_pcmcia_info *info;
252 struct pcmcia_config_check *stk = NULL; 241 struct pcmcia_config_check *stk = NULL;
253 int is_kme = 0, ret = -ENOMEM, p; 242 int is_kme = 0, ret = -ENOMEM, p;
254 unsigned long io_base, ctl_base; 243 unsigned long io_base, ctl_base;
@@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
256 int n_ports = 1; 245 int n_ports = 1;
257 struct ata_port_operations *ops = &pcmcia_port_ops; 246 struct ata_port_operations *ops = &pcmcia_port_ops;
258 247
259 info = kzalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return -ENOMEM;
262
263 /* Glue stuff together. FIXME: We may be able to get rid of info with care */
264 info->pdev = pdev;
265 pdev->priv = info;
266
267 /* Set up attributes in order to probe card and get resources */ 248 /* Set up attributes in order to probe card and get resources */
268 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 249 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
269 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 250 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
270 pdev->io.IOAddrLines = 3; 251 pdev->io.IOAddrLines = 3;
271 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
272 pdev->conf.Attributes = CONF_ENABLE_IRQ; 252 pdev->conf.Attributes = CONF_ENABLE_IRQ;
273 pdev->conf.IntType = INT_MEMORY_AND_IO; 253 pdev->conf.IntType = INT_MEMORY_AND_IO;
274 254
@@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
293 } 273 }
294 io_base = pdev->io.BasePort1; 274 io_base = pdev->io.BasePort1;
295 ctl_base = stk->ctl_base; 275 ctl_base = stk->ctl_base;
296 ret = pcmcia_request_irq(pdev, &pdev->irq); 276 if (!pdev->irq)
297 if (ret)
298 goto failed; 277 goto failed;
299 278
300 ret = pcmcia_request_configuration(pdev, &pdev->conf); 279 ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
344 } 323 }
345 324
346 /* activate */ 325 /* activate */
347 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt, 326 ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
348 IRQF_SHARED, &pcmcia_sht); 327 IRQF_SHARED, &pcmcia_sht);
349 if (ret) 328 if (ret)
350 goto failed; 329 goto failed;
351 330
352 info->ndev = 1; 331 pdev->priv = host;
353 kfree(stk); 332 kfree(stk);
354 return 0; 333 return 0;
355 334
356failed: 335failed:
357 kfree(stk); 336 kfree(stk);
358 info->ndev = 0;
359 pcmcia_disable_device(pdev); 337 pcmcia_disable_device(pdev);
360out1: 338out1:
361 kfree(info);
362 return ret; 339 return ret;
363} 340}
364 341
@@ -372,20 +349,12 @@ out1:
372 349
373static void pcmcia_remove_one(struct pcmcia_device *pdev) 350static void pcmcia_remove_one(struct pcmcia_device *pdev)
374{ 351{
375 struct ata_pcmcia_info *info = pdev->priv; 352 struct ata_host *host = pdev->priv;
376 struct device *dev = &pdev->dev; 353
377 354 if (host)
378 if (info != NULL) { 355 ata_host_detach(host);
379 /* If we have attached the device to the ATA layer, detach it */ 356
380 if (info->ndev) {
381 struct ata_host *host = dev_get_drvdata(dev);
382 ata_host_detach(host);
383 }
384 info->ndev = 0;
385 pdev->priv = NULL;
386 }
387 pcmcia_disable_device(pdev); 357 pcmcia_disable_device(pdev);
388 kfree(info);
389} 358}
390 359
391static struct pcmcia_device_id pcmcia_devices[] = { 360static struct pcmcia_device_id pcmcia_devices[] = {
@@ -424,6 +393,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
424 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), 393 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
425 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 394 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
426 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 395 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
396 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
397 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
427 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), 398 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
428 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 399 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
429 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 400 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +415,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
444 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), 415 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
445 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), 416 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
446 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 417 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
418 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
419 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
447 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 420 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
448 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), 421 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
449 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 422 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ca5cad0fd80b..09f1f22c0307 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
265 struct ata_device *pair = ata_dev_pair(adev); 265 struct ata_device *pair = ata_dev_pair(adev);
266 266
267 if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL) 267 if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
268 return ata_bmdma_mode_filter(adev, mask); 268 return mask;
269 269
270 /* Check for slave of a Maxtor at UDMA6 */ 270 /* Check for slave of a Maxtor at UDMA6 */
271 ata_id_c_string(pair->id, model_num, ATA_ID_PROD, 271 ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
274 if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6) 274 if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
275 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); 275 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
276 276
277 return ata_bmdma_mode_filter(adev, mask); 277 return mask;
278} 278}
279 279
280/** 280/**
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 9ac0897cf8b0..fa1e2f3bc0fd 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
249 u8 burst = ioread8(bmdma + 0x1f); 249 u8 burst = ioread8(bmdma + 0x1f);
250 iowrite8(burst | 0x01, bmdma + 0x1f); 250 iowrite8(burst | 0x01, bmdma + 0x1f);
251 } 251 }
252 return ata_sff_port_start(ap); 252 return ata_bmdma_port_start(ap);
253} 253}
254 254
255/** 255/**
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 3f6ebc6c665a..50400fa120fe 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
53 .sff_data_xfer = ata_sff_data_xfer_noirq, 53 .sff_data_xfer = ata_sff_data_xfer_noirq,
54 .cable_detect = ata_cable_unknown, 54 .cable_detect = ata_cable_unknown,
55 .set_mode = pata_platform_set_mode, 55 .set_mode = pata_platform_set_mode,
56 .port_start = ATA_OP_NULL,
57}; 56};
58 57
59static void pata_platform_setup_port(struct ata_ioports *ioaddr, 58static void pata_platform_setup_port(struct ata_ioports *ioaddr,
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index fc9602229acb..a5fa388e5398 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
179 radisys_set_piomode(ap, adev); 179 radisys_set_piomode(ap, adev);
180 } 180 }
181 } 181 }
182 return ata_sff_qc_issue(qc); 182 return ata_bmdma_qc_issue(qc);
183} 183}
184 184
185 185
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index dfecc6f964b0..6b5b63a2fd8e 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
174 sc1200_set_dmamode(ap, adev); 174 sc1200_set_dmamode(ap, adev);
175 } 175 }
176 176
177 return ata_sff_qc_issue(qc); 177 return ata_bmdma_qc_issue(qc);
178} 178}
179 179
180/** 180/**
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
209 209
210static struct ata_port_operations sc1200_port_ops = { 210static struct ata_port_operations sc1200_port_ops = {
211 .inherits = &ata_bmdma_port_ops, 211 .inherits = &ata_bmdma_port_ops,
212 .qc_prep = ata_sff_dumb_qc_prep, 212 .qc_prep = ata_bmdma_dumb_qc_prep,
213 .qc_issue = sc1200_qc_issue, 213 .qc_issue = sc1200_qc_issue,
214 .qc_defer = sc1200_qc_defer, 214 .qc_defer = sc1200_qc_defer,
215 .cable_detect = ata_cable_40wire, 215 .cable_detect = ata_cable_40wire,
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 4257d6b40af4..6f6193b707cb 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
265 printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); 265 printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
266 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 266 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
267 } 267 }
268 return ata_bmdma_mode_filter(adev, mask); 268 return mask;
269} 269}
270 270
271/** 271/**
@@ -416,6 +416,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device)
416} 416}
417 417
418/** 418/**
419 * scc_set_devctl - Write device control reg
420 * @ap: port where the device is
421 * @ctl: value to write
422 */
423
424static void scc_set_devctl(struct ata_port *ap, u8 ctl)
425{
426 out_be32(ap->ioaddr.ctl_addr, ctl);
427}
428
429/**
419 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction 430 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
420 * @qc: Info associated with this ATA transaction. 431 * @qc: Info associated with this ATA transaction.
421 * 432 *
@@ -430,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc)
430 void __iomem *mmio = ap->ioaddr.bmdma_addr; 441 void __iomem *mmio = ap->ioaddr.bmdma_addr;
431 442
432 /* load PRD table addr */ 443 /* load PRD table addr */
433 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma); 444 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
434 445
435 /* specify data direction, triple-check start bit is clear */ 446 /* specify data direction, triple-check start bit is clear */
436 dmactl = in_be32(mmio + SCC_DMA_CMD); 447 dmactl = in_be32(mmio + SCC_DMA_CMD);
@@ -501,8 +512,8 @@ static unsigned int scc_devchk (struct ata_port *ap,
501 * Note: Original code is ata_sff_wait_after_reset 512 * Note: Original code is ata_sff_wait_after_reset
502 */ 513 */
503 514
504int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, 515static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
505 unsigned long deadline) 516 unsigned long deadline)
506{ 517{
507 struct ata_port *ap = link->ap; 518 struct ata_port *ap = link->ap;
508 struct ata_ioports *ioaddr = &ap->ioaddr; 519 struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -817,54 +828,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
817} 828}
818 829
819/** 830/**
820 * scc_irq_on - Enable interrupts on a port.
821 * @ap: Port on which interrupts are enabled.
822 *
823 * Note: Original code is ata_sff_irq_on().
824 */
825
826static u8 scc_irq_on (struct ata_port *ap)
827{
828 struct ata_ioports *ioaddr = &ap->ioaddr;
829 u8 tmp;
830
831 ap->ctl &= ~ATA_NIEN;
832 ap->last_ctl = ap->ctl;
833
834 out_be32(ioaddr->ctl_addr, ap->ctl);
835 tmp = ata_wait_idle(ap);
836
837 ap->ops->sff_irq_clear(ap);
838
839 return tmp;
840}
841
842/**
843 * scc_freeze - Freeze BMDMA controller port
844 * @ap: port to freeze
845 *
846 * Note: Original code is ata_sff_freeze().
847 */
848
849static void scc_freeze (struct ata_port *ap)
850{
851 struct ata_ioports *ioaddr = &ap->ioaddr;
852
853 ap->ctl |= ATA_NIEN;
854 ap->last_ctl = ap->ctl;
855
856 out_be32(ioaddr->ctl_addr, ap->ctl);
857
858 /* Under certain circumstances, some controllers raise IRQ on
859 * ATA_NIEN manipulation. Also, many controllers fail to mask
860 * previously pending IRQ on ATA_NIEN assertion. Clear it.
861 */
862 ap->ops->sff_check_status(ap);
863
864 ap->ops->sff_irq_clear(ap);
865}
866
867/**
868 * scc_pata_prereset - prepare for reset 831 * scc_pata_prereset - prepare for reset
869 * @ap: ATA port to be reset 832 * @ap: ATA port to be reset
870 * @deadline: deadline jiffies for the operation 833 * @deadline: deadline jiffies for the operation
@@ -903,8 +866,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
903 } 866 }
904 867
905 /* set up device control */ 868 /* set up device control */
906 if (ap->ioaddr.ctl_addr) 869 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
907 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
908 870
909 DPRINTK("EXIT\n"); 871 DPRINTK("EXIT\n");
910} 872}
@@ -930,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap)
930 * scc_port_start - Set port up for dma. 892 * scc_port_start - Set port up for dma.
931 * @ap: Port to initialize 893 * @ap: Port to initialize
932 * 894 *
933 * Allocate space for PRD table using ata_port_start(). 895 * Allocate space for PRD table using ata_bmdma_port_start().
934 * Set PRD table address for PTERADD. (PRD Transfer End Read) 896 * Set PRD table address for PTERADD. (PRD Transfer End Read)
935 */ 897 */
936 898
@@ -939,11 +901,11 @@ static int scc_port_start (struct ata_port *ap)
939 void __iomem *mmio = ap->ioaddr.bmdma_addr; 901 void __iomem *mmio = ap->ioaddr.bmdma_addr;
940 int rc; 902 int rc;
941 903
942 rc = ata_port_start(ap); 904 rc = ata_bmdma_port_start(ap);
943 if (rc) 905 if (rc)
944 return rc; 906 return rc;
945 907
946 out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma); 908 out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
947 return 0; 909 return 0;
948} 910}
949 911
@@ -978,6 +940,7 @@ static struct ata_port_operations scc_pata_ops = {
978 .sff_check_status = scc_check_status, 940 .sff_check_status = scc_check_status,
979 .sff_check_altstatus = scc_check_altstatus, 941 .sff_check_altstatus = scc_check_altstatus,
980 .sff_dev_select = scc_dev_select, 942 .sff_dev_select = scc_dev_select,
943 .sff_set_devctl = scc_set_devctl,
981 944
982 .bmdma_setup = scc_bmdma_setup, 945 .bmdma_setup = scc_bmdma_setup,
983 .bmdma_start = scc_bmdma_start, 946 .bmdma_start = scc_bmdma_start,
@@ -985,14 +948,11 @@ static struct ata_port_operations scc_pata_ops = {
985 .bmdma_status = scc_bmdma_status, 948 .bmdma_status = scc_bmdma_status,
986 .sff_data_xfer = scc_data_xfer, 949 .sff_data_xfer = scc_data_xfer,
987 950
988 .freeze = scc_freeze,
989 .prereset = scc_pata_prereset, 951 .prereset = scc_pata_prereset,
990 .softreset = scc_softreset, 952 .softreset = scc_softreset,
991 .postreset = scc_postreset, 953 .postreset = scc_postreset,
992 .post_internal_cmd = scc_bmdma_stop,
993 954
994 .sff_irq_clear = scc_irq_clear, 955 .sff_irq_clear = scc_irq_clear,
995 .sff_irq_on = scc_irq_on,
996 956
997 .port_start = scc_port_start, 957 .port_start = scc_port_start,
998 .port_stop = scc_port_stop, 958 .port_stop = scc_port_stop,
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 99cceb458e2a..86b3d0133c7c 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
174{ 174{
175 static int printed_version; 175 static int printed_version;
176 const struct ata_port_info *ppi[] = { &sch_port_info, NULL }; 176 const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
177 struct ata_host *host;
178 int rc;
179 177
180 if (!printed_version++) 178 if (!printed_version++)
181 dev_printk(KERN_DEBUG, &pdev->dev, 179 dev_printk(KERN_DEBUG, &pdev->dev,
182 "version " DRV_VERSION "\n"); 180 "version " DRV_VERSION "\n");
183 181
184 /* enable device and prepare host */ 182 return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0);
185 rc = pcim_enable_device(pdev);
186 if (rc)
187 return rc;
188 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
189 if (rc)
190 return rc;
191 pci_set_master(pdev);
192 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
193} 183}
194 184
195static int __init sch_init(void) 185static int __init sch_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9524d54035f7..43ea389df2b3 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
198{ 198{
199 if (adev->class == ATA_DEV_ATA) 199 if (adev->class == ATA_DEV_ATA)
200 mask &= ~ATA_MASK_UDMA; 200 mask &= ~ATA_MASK_UDMA;
201 return ata_bmdma_mode_filter(adev, mask); 201 return mask;
202} 202}
203 203
204 204
@@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
218 218
219 /* Disk, UDMA */ 219 /* Disk, UDMA */
220 if (adev->class != ATA_DEV_ATA) 220 if (adev->class != ATA_DEV_ATA)
221 return ata_bmdma_mode_filter(adev, mask); 221 return mask;
222 222
223 /* Actually do need to check */ 223 /* Actually do need to check */
224 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 224 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
227 if (!strcmp(p, model_num)) 227 if (!strcmp(p, model_num))
228 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 228 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
229 } 229 }
230 return ata_bmdma_mode_filter(adev, mask); 230 return mask;
231} 231}
232 232
233/** 233/**
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c6c589c23ffc..43faf106f647 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
190 pci_write_config_word(pdev, ua, ultra); 190 pci_write_config_word(pdev, ua, ultra);
191} 191}
192 192
193/**
194 * sil680_sff_exec_command - issue ATA command to host controller
195 * @ap: port to which command is being issued
196 * @tf: ATA taskfile register set
197 *
198 * Issues ATA command, with proper synchronization with interrupt
199 * handler / other threads. Use our MMIO space for PCI posting to avoid
200 * a hideously slow cycle all the way to the device.
201 *
202 * LOCKING:
203 * spin_lock_irqsave(host lock)
204 */
205void sil680_sff_exec_command(struct ata_port *ap,
206 const struct ata_taskfile *tf)
207{
208 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
209 iowrite8(tf->command, ap->ioaddr.command_addr);
210 ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
211}
212
193static struct scsi_host_template sil680_sht = { 213static struct scsi_host_template sil680_sht = {
194 ATA_BMDMA_SHT(DRV_NAME), 214 ATA_BMDMA_SHT(DRV_NAME),
195}; 215};
196 216
217
197static struct ata_port_operations sil680_port_ops = { 218static struct ata_port_operations sil680_port_ops = {
198 .inherits = &ata_bmdma32_port_ops, 219 .inherits = &ata_bmdma32_port_ops,
199 .cable_detect = sil680_cable_detect, 220 .sff_exec_command = sil680_sff_exec_command,
200 .set_piomode = sil680_set_piomode, 221 .cable_detect = sil680_cable_detect,
201 .set_dmamode = sil680_set_dmamode, 222 .set_piomode = sil680_set_piomode,
223 .set_dmamode = sil680_set_dmamode,
202}; 224};
203 225
204/** 226/**
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 741e7cb69d8c..7e3e0a5598b7 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
355 mask &= ~ ATA_MASK_UDMA; 355 mask &= ~ ATA_MASK_UDMA;
356 } 356 }
357 } 357 }
358 return ata_bmdma_mode_filter(dev, mask); 358 return mask;
359} 359}
360 360
361/** 361/**
@@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
422} 420}
423 421
424static int via_port_start(struct ata_port *ap) 422static int via_port_start(struct ata_port *ap)
@@ -426,7 +424,7 @@ static int via_port_start(struct ata_port *ap)
426 struct via_port *vp; 424 struct via_port *vp;
427 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 425 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
428 426
429 int ret = ata_sff_port_start(ap); 427 int ret = ata_bmdma_port_start(ap);
430 if (ret < 0) 428 if (ret < 0)
431 return ret; 429 return ret;
432 430
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5904cfdb8dbe..adbe0426c8f0 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
324 VPRINTK("ENTER\n"); 324 VPRINTK("ENTER\n");
325 325
326 adma_enter_reg_mode(qc->ap); 326 adma_enter_reg_mode(qc->ap);
327 if (qc->tf.protocol != ATA_PROT_DMA) { 327 if (qc->tf.protocol != ATA_PROT_DMA)
328 ata_sff_qc_prep(qc);
329 return; 328 return;
330 }
331 329
332 buf[i++] = 0; /* Response flags */ 330 buf[i++] = 0; /* Response flags */
333 buf[i++] = 0; /* reserved */ 331 buf[i++] = 0; /* reserved */
@@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
442 continue; 440 continue;
443 handled = 1; 441 handled = 1;
444 adma_enter_reg_mode(ap); 442 adma_enter_reg_mode(ap);
445 if (ap->flags & ATA_FLAG_DISABLED)
446 continue;
447 pp = ap->private_data; 443 pp = ap->private_data;
448 if (!pp || pp->state != adma_state_pkt) 444 if (!pp || pp->state != adma_state_pkt)
449 continue; 445 continue;
@@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
484 unsigned int handled = 0, port_no; 480 unsigned int handled = 0, port_no;
485 481
486 for (port_no = 0; port_no < host->n_ports; ++port_no) { 482 for (port_no = 0; port_no < host->n_ports; ++port_no) {
487 struct ata_port *ap; 483 struct ata_port *ap = host->ports[port_no];
488 ap = host->ports[port_no]; 484 struct adma_port_priv *pp = ap->private_data;
489 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { 485 struct ata_queued_cmd *qc;
490 struct ata_queued_cmd *qc; 486
491 struct adma_port_priv *pp = ap->private_data; 487 if (!pp || pp->state != adma_state_mmio)
492 if (!pp || pp->state != adma_state_mmio) 488 continue;
489 qc = ata_qc_from_tag(ap, ap->link.active_tag);
490 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
491
492 /* check main status, clearing INTRQ */
493 u8 status = ata_sff_check_status(ap);
494 if ((status & ATA_BUSY))
493 continue; 495 continue;
494 qc = ata_qc_from_tag(ap, ap->link.active_tag); 496 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
495 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 497 ap->print_id, qc->tf.protocol, status);
496 498
497 /* check main status, clearing INTRQ */ 499 /* complete taskfile transaction */
498 u8 status = ata_sff_check_status(ap); 500 pp->state = adma_state_idle;
499 if ((status & ATA_BUSY)) 501 qc->err_mask |= ac_err_mask(status);
500 continue; 502 if (!qc->err_mask)
501 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 503 ata_qc_complete(qc);
502 ap->print_id, qc->tf.protocol, status); 504 else {
503 505 struct ata_eh_info *ehi = &ap->link.eh_info;
504 /* complete taskfile transaction */ 506 ata_ehi_clear_desc(ehi);
505 pp->state = adma_state_idle; 507 ata_ehi_push_desc(ehi, "status 0x%02X", status);
506 qc->err_mask |= ac_err_mask(status); 508
507 if (!qc->err_mask) 509 if (qc->err_mask == AC_ERR_DEV)
508 ata_qc_complete(qc); 510 ata_port_abort(ap);
509 else { 511 else
510 struct ata_eh_info *ehi = 512 ata_port_freeze(ap);
511 &ap->link.eh_info;
512 ata_ehi_clear_desc(ehi);
513 ata_ehi_push_desc(ehi,
514 "status 0x%02X", status);
515
516 if (qc->err_mask == AC_ERR_DEV)
517 ata_port_abort(ap);
518 else
519 ata_port_freeze(ap);
520 }
521 handled = 1;
522 } 513 }
514 handled = 1;
523 } 515 }
524 } 516 }
525 return handled; 517 return handled;
@@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap)
562{ 554{
563 struct device *dev = ap->host->dev; 555 struct device *dev = ap->host->dev;
564 struct adma_port_priv *pp; 556 struct adma_port_priv *pp;
565 int rc;
566 557
567 rc = ata_port_start(ap);
568 if (rc)
569 return rc;
570 adma_enter_reg_mode(ap); 558 adma_enter_reg_mode(ap);
571 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 559 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
572 if (!pp) 560 if (!pp)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 27dc6c86a4cd..a36149ebf4a2 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
415 415
416 spin_lock(&host->lock); 416 spin_lock(&host->lock);
417 417
418 for (i = 0; i < NR_PORTS; i++) { 418 for (i = 0; i < NR_PORTS; i++)
419 struct ata_port *ap = host->ports[i]; 419 if (host_irq_stat & (HIRQ_PORT0 << i)) {
420 420 inic_host_intr(host->ports[i]);
421 if (!(host_irq_stat & (HIRQ_PORT0 << i)))
422 continue;
423
424 if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
425 inic_host_intr(ap);
426 handled++; 421 handled++;
427 } else {
428 if (ata_ratelimit())
429 dev_printk(KERN_ERR, host->dev, "interrupt "
430 "from disabled port %d (0x%x)\n",
431 i, host_irq_stat);
432 } 422 }
433 }
434 423
435 spin_unlock(&host->lock); 424 spin_unlock(&host->lock);
436 425
@@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap)
679 memset(pp->pkt, 0, sizeof(struct inic_pkt)); 668 memset(pp->pkt, 0, sizeof(struct inic_pkt));
680 memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); 669 memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
681 670
682 /* setup PRD and CPB lookup table addresses */ 671 /* setup CPB lookup table addresses */
683 writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
684 writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); 672 writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
685} 673}
686 674
@@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
694{ 682{
695 struct device *dev = ap->host->dev; 683 struct device *dev = ap->host->dev;
696 struct inic_port_priv *pp; 684 struct inic_port_priv *pp;
697 int rc;
698 685
699 /* alloc and initialize private data */ 686 /* alloc and initialize private data */
700 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 687 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
703 ap->private_data = pp; 690 ap->private_data = pp;
704 691
705 /* Alloc resources */ 692 /* Alloc resources */
706 rc = ata_port_start(ap);
707 if (rc)
708 return rc;
709
710 pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt), 693 pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
711 &pp->pkt_dma, GFP_KERNEL); 694 &pp->pkt_dma, GFP_KERNEL);
712 if (!pp->pkt) 695 if (!pp->pkt)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 71cc0d42f9e1..f3471bc949d3 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -686,16 +686,27 @@ static struct ata_port_operations mv5_ops = {
686}; 686};
687 687
688static struct ata_port_operations mv6_ops = { 688static struct ata_port_operations mv6_ops = {
689 .inherits = &mv5_ops, 689 .inherits = &ata_bmdma_port_ops,
690
691 .lost_interrupt = ATA_OP_NULL,
692
693 .qc_defer = mv_qc_defer,
694 .qc_prep = mv_qc_prep,
695 .qc_issue = mv_qc_issue,
696
690 .dev_config = mv6_dev_config, 697 .dev_config = mv6_dev_config,
691 .scr_read = mv_scr_read,
692 .scr_write = mv_scr_write,
693 698
699 .freeze = mv_eh_freeze,
700 .thaw = mv_eh_thaw,
701 .hardreset = mv_hardreset,
702 .softreset = mv_softreset,
694 .pmp_hardreset = mv_pmp_hardreset, 703 .pmp_hardreset = mv_pmp_hardreset,
695 .pmp_softreset = mv_softreset, 704 .pmp_softreset = mv_softreset,
696 .softreset = mv_softreset,
697 .error_handler = mv_pmp_error_handler, 705 .error_handler = mv_pmp_error_handler,
698 706
707 .scr_read = mv_scr_read,
708 .scr_write = mv_scr_write,
709
699 .sff_check_status = mv_sff_check_status, 710 .sff_check_status = mv_sff_check_status,
700 .sff_irq_clear = mv_sff_irq_clear, 711 .sff_irq_clear = mv_sff_irq_clear,
701 .check_atapi_dma = mv_check_atapi_dma, 712 .check_atapi_dma = mv_check_atapi_dma,
@@ -703,6 +714,9 @@ static struct ata_port_operations mv6_ops = {
703 .bmdma_start = mv_bmdma_start, 714 .bmdma_start = mv_bmdma_start,
704 .bmdma_stop = mv_bmdma_stop, 715 .bmdma_stop = mv_bmdma_stop,
705 .bmdma_status = mv_bmdma_status, 716 .bmdma_status = mv_bmdma_status,
717
718 .port_start = mv_port_start,
719 .port_stop = mv_port_stop,
706}; 720};
707 721
708static struct ata_port_operations mv_iie_ops = { 722static struct ata_port_operations mv_iie_ops = {
@@ -2248,7 +2262,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2248 } 2262 }
2249 2263
2250 if (qc->tf.flags & ATA_TFLAG_POLLING) 2264 if (qc->tf.flags & ATA_TFLAG_POLLING)
2251 ata_pio_queue_task(ap, qc, 0); 2265 ata_sff_queue_pio_task(ap, 0);
2252 return 0; 2266 return 0;
2253} 2267}
2254 2268
@@ -2344,7 +2358,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2344 if (IS_GEN_II(hpriv)) 2358 if (IS_GEN_II(hpriv))
2345 return mv_qc_issue_fis(qc); 2359 return mv_qc_issue_fis(qc);
2346 } 2360 }
2347 return ata_sff_qc_issue(qc); 2361 return ata_bmdma_qc_issue(qc);
2348} 2362}
2349 2363
2350static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 2364static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
@@ -2355,13 +2369,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2355 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 2369 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2356 return NULL; 2370 return NULL;
2357 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2371 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2358 if (qc) { 2372 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2359 if (qc->tf.flags & ATA_TFLAG_POLLING) 2373 return qc;
2360 qc = NULL; 2374 return NULL;
2361 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2362 qc = NULL;
2363 }
2364 return qc;
2365} 2375}
2366 2376
2367static void mv_pmp_error_handler(struct ata_port *ap) 2377static void mv_pmp_error_handler(struct ata_port *ap)
@@ -2546,9 +2556,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2546 char *when = "idle"; 2556 char *when = "idle";
2547 2557
2548 ata_ehi_clear_desc(ehi); 2558 ata_ehi_clear_desc(ehi);
2549 if (ap->flags & ATA_FLAG_DISABLED) { 2559 if (edma_was_enabled) {
2550 when = "disabled";
2551 } else if (edma_was_enabled) {
2552 when = "EDMA enabled"; 2560 when = "EDMA enabled";
2553 } else { 2561 } else {
2554 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 2562 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -2782,10 +2790,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2782 struct mv_port_priv *pp; 2790 struct mv_port_priv *pp;
2783 int edma_was_enabled; 2791 int edma_was_enabled;
2784 2792
2785 if (ap->flags & ATA_FLAG_DISABLED) {
2786 mv_unexpected_intr(ap, 0);
2787 return;
2788 }
2789 /* 2793 /*
2790 * Grab a snapshot of the EDMA_EN flag setting, 2794 * Grab a snapshot of the EDMA_EN flag setting,
2791 * so that we have a consistent view for this port, 2795 * so that we have a consistent view for this port,
@@ -3656,9 +3660,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3656 /* special case: control/altstatus doesn't have ATA_REG_ address */ 3660 /* special case: control/altstatus doesn't have ATA_REG_ address */
3657 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; 3661 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3658 3662
3659 /* unused: */
3660 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
3661
3662 /* Clear any currently outstanding port interrupt conditions */ 3663 /* Clear any currently outstanding port interrupt conditions */
3663 serr = port_mmio + mv_scr_offset(SCR_ERROR); 3664 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3664 writelfl(readl(serr), serr); 3665 writelfl(readl(serr), serr);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2a98b09ab735..baa8f0d2c86f 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -272,7 +272,7 @@ enum ncq_saw_flag_list {
272}; 272};
273 273
274struct nv_swncq_port_priv { 274struct nv_swncq_port_priv {
275 struct ata_prd *prd; /* our SG list */ 275 struct ata_bmdma_prd *prd; /* our SG list */
276 dma_addr_t prd_dma; /* and its DMA mapping */ 276 dma_addr_t prd_dma; /* and its DMA mapping */
277 void __iomem *sactive_block; 277 void __iomem *sactive_block;
278 void __iomem *irq_block; 278 void __iomem *irq_block;
@@ -933,107 +933,108 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
933 933
934 for (i = 0; i < host->n_ports; i++) { 934 for (i = 0; i < host->n_ports; i++) {
935 struct ata_port *ap = host->ports[i]; 935 struct ata_port *ap = host->ports[i];
936 struct nv_adma_port_priv *pp = ap->private_data;
937 void __iomem *mmio = pp->ctl_block;
938 u16 status;
939 u32 gen_ctl;
940 u32 notifier, notifier_error;
941
936 notifier_clears[i] = 0; 942 notifier_clears[i] = 0;
937 943
938 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 944 /* if ADMA is disabled, use standard ata interrupt handler */
939 struct nv_adma_port_priv *pp = ap->private_data; 945 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
940 void __iomem *mmio = pp->ctl_block; 946 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
941 u16 status; 947 >> (NV_INT_PORT_SHIFT * i);
942 u32 gen_ctl; 948 handled += nv_host_intr(ap, irq_stat);
943 u32 notifier, notifier_error; 949 continue;
944 950 }
945 /* if ADMA is disabled, use standard ata interrupt handler */
946 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
947 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
948 >> (NV_INT_PORT_SHIFT * i);
949 handled += nv_host_intr(ap, irq_stat);
950 continue;
951 }
952 951
953 /* if in ATA register mode, check for standard interrupts */ 952 /* if in ATA register mode, check for standard interrupts */
954 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 953 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
955 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 954 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
956 >> (NV_INT_PORT_SHIFT * i); 955 >> (NV_INT_PORT_SHIFT * i);
957 if (ata_tag_valid(ap->link.active_tag)) 956 if (ata_tag_valid(ap->link.active_tag))
958 /** NV_INT_DEV indication seems unreliable at times 957 /** NV_INT_DEV indication seems unreliable
959 at least in ADMA mode. Force it on always when a 958 at times at least in ADMA mode. Force it
960 command is active, to prevent losing interrupts. */ 959 on always when a command is active, to
961 irq_stat |= NV_INT_DEV; 960 prevent losing interrupts. */
962 handled += nv_host_intr(ap, irq_stat); 961 irq_stat |= NV_INT_DEV;
963 } 962 handled += nv_host_intr(ap, irq_stat);
963 }
964
965 notifier = readl(mmio + NV_ADMA_NOTIFIER);
966 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
967 notifier_clears[i] = notifier | notifier_error;
968
969 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
970
971 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
972 !notifier_error)
973 /* Nothing to do */
974 continue;
975
976 status = readw(mmio + NV_ADMA_STAT);
977
978 /*
979 * Clear status. Ensure the controller sees the
980 * clearing before we start looking at any of the CPB
981 * statuses, so that any CPB completions after this
982 * point in the handler will raise another interrupt.
983 */
984 writew(status, mmio + NV_ADMA_STAT);
985 readw(mmio + NV_ADMA_STAT); /* flush posted write */
986 rmb();
987
988 handled++; /* irq handled if we got here */
964 989
965 notifier = readl(mmio + NV_ADMA_NOTIFIER); 990 /* freeze if hotplugged or controller error */
966 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 991 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
967 notifier_clears[i] = notifier | notifier_error; 992 NV_ADMA_STAT_HOTUNPLUG |
968 993 NV_ADMA_STAT_TIMEOUT |
969 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 994 NV_ADMA_STAT_SERROR))) {
970 995 struct ata_eh_info *ehi = &ap->link.eh_info;
971 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 996
972 !notifier_error) 997 ata_ehi_clear_desc(ehi);
973 /* Nothing to do */ 998 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
974 continue; 999 if (status & NV_ADMA_STAT_TIMEOUT) {
975 1000 ehi->err_mask |= AC_ERR_SYSTEM;
976 status = readw(mmio + NV_ADMA_STAT); 1001 ata_ehi_push_desc(ehi, "timeout");
977 1002 } else if (status & NV_ADMA_STAT_HOTPLUG) {
978 /* Clear status. Ensure the controller sees the clearing before we start 1003 ata_ehi_hotplugged(ehi);
979 looking at any of the CPB statuses, so that any CPB completions after 1004 ata_ehi_push_desc(ehi, "hotplug");
980 this point in the handler will raise another interrupt. */ 1005 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
981 writew(status, mmio + NV_ADMA_STAT); 1006 ata_ehi_hotplugged(ehi);
982 readw(mmio + NV_ADMA_STAT); /* flush posted write */ 1007 ata_ehi_push_desc(ehi, "hot unplug");
983 rmb(); 1008 } else if (status & NV_ADMA_STAT_SERROR) {
984 1009 /* let EH analyze SError and figure out cause */
985 handled++; /* irq handled if we got here */ 1010 ata_ehi_push_desc(ehi, "SError");
986 1011 } else
987 /* freeze if hotplugged or controller error */ 1012 ata_ehi_push_desc(ehi, "unknown");
988 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | 1013 ata_port_freeze(ap);
989 NV_ADMA_STAT_HOTUNPLUG | 1014 continue;
990 NV_ADMA_STAT_TIMEOUT | 1015 }
991 NV_ADMA_STAT_SERROR))) { 1016
992 struct ata_eh_info *ehi = &ap->link.eh_info; 1017 if (status & (NV_ADMA_STAT_DONE |
993 1018 NV_ADMA_STAT_CPBERR |
994 ata_ehi_clear_desc(ehi); 1019 NV_ADMA_STAT_CMD_COMPLETE)) {
995 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); 1020 u32 check_commands = notifier_clears[i];
996 if (status & NV_ADMA_STAT_TIMEOUT) { 1021 int pos, error = 0;
997 ehi->err_mask |= AC_ERR_SYSTEM; 1022
998 ata_ehi_push_desc(ehi, "timeout"); 1023 if (status & NV_ADMA_STAT_CPBERR) {
999 } else if (status & NV_ADMA_STAT_HOTPLUG) { 1024 /* check all active commands */
1000 ata_ehi_hotplugged(ehi); 1025 if (ata_tag_valid(ap->link.active_tag))
1001 ata_ehi_push_desc(ehi, "hotplug"); 1026 check_commands = 1 <<
1002 } else if (status & NV_ADMA_STAT_HOTUNPLUG) { 1027 ap->link.active_tag;
1003 ata_ehi_hotplugged(ehi); 1028 else
1004 ata_ehi_push_desc(ehi, "hot unplug"); 1029 check_commands = ap->link.sactive;
1005 } else if (status & NV_ADMA_STAT_SERROR) {
1006 /* let libata analyze SError and figure out the cause */
1007 ata_ehi_push_desc(ehi, "SError");
1008 } else
1009 ata_ehi_push_desc(ehi, "unknown");
1010 ata_port_freeze(ap);
1011 continue;
1012 } 1030 }
1013 1031
1014 if (status & (NV_ADMA_STAT_DONE | 1032 /* check CPBs for completed commands */
1015 NV_ADMA_STAT_CPBERR | 1033 while ((pos = ffs(check_commands)) && !error) {
1016 NV_ADMA_STAT_CMD_COMPLETE)) { 1034 pos--;
1017 u32 check_commands = notifier_clears[i]; 1035 error = nv_adma_check_cpb(ap, pos,
1018 int pos, error = 0;
1019
1020 if (status & NV_ADMA_STAT_CPBERR) {
1021 /* Check all active commands */
1022 if (ata_tag_valid(ap->link.active_tag))
1023 check_commands = 1 <<
1024 ap->link.active_tag;
1025 else
1026 check_commands = ap->
1027 link.sactive;
1028 }
1029
1030 /** Check CPBs for completed commands */
1031 while ((pos = ffs(check_commands)) && !error) {
1032 pos--;
1033 error = nv_adma_check_cpb(ap, pos,
1034 notifier_error & (1 << pos)); 1036 notifier_error & (1 << pos));
1035 check_commands &= ~(1 << pos); 1037 check_commands &= ~(1 << pos);
1036 }
1037 } 1038 }
1038 } 1039 }
1039 } 1040 }
@@ -1130,7 +1131,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1130 struct nv_adma_port_priv *pp = qc->ap->private_data; 1131 struct nv_adma_port_priv *pp = qc->ap->private_data;
1131 1132
1132 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) 1133 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1133 ata_sff_post_internal_cmd(qc); 1134 ata_bmdma_post_internal_cmd(qc);
1134} 1135}
1135 1136
1136static int nv_adma_port_start(struct ata_port *ap) 1137static int nv_adma_port_start(struct ata_port *ap)
@@ -1155,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap)
1155 if (rc) 1156 if (rc)
1156 return rc; 1157 return rc;
1157 1158
1158 rc = ata_port_start(ap); 1159 /* we might fallback to bmdma, allocate bmdma resources */
1160 rc = ata_bmdma_port_start(ap);
1159 if (rc) 1161 if (rc)
1160 return rc; 1162 return rc;
1161 1163
@@ -1407,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1407 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && 1409 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1408 (qc->flags & ATA_QCFLAG_DMAMAP)); 1410 (qc->flags & ATA_QCFLAG_DMAMAP));
1409 nv_adma_register_mode(qc->ap); 1411 nv_adma_register_mode(qc->ap);
1410 ata_sff_qc_prep(qc); 1412 ata_bmdma_qc_prep(qc);
1411 return; 1413 return;
1412 } 1414 }
1413 1415
@@ -1466,7 +1468,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1466 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && 1468 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1467 (qc->flags & ATA_QCFLAG_DMAMAP)); 1469 (qc->flags & ATA_QCFLAG_DMAMAP));
1468 nv_adma_register_mode(qc->ap); 1470 nv_adma_register_mode(qc->ap);
1469 return ata_sff_qc_issue(qc); 1471 return ata_bmdma_qc_issue(qc);
1470 } else 1472 } else
1471 nv_adma_mode(qc->ap); 1473 nv_adma_mode(qc->ap);
1472 1474
@@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1498 spin_lock_irqsave(&host->lock, flags); 1500 spin_lock_irqsave(&host->lock, flags);
1499 1501
1500 for (i = 0; i < host->n_ports; i++) { 1502 for (i = 0; i < host->n_ports; i++) {
1501 struct ata_port *ap; 1503 struct ata_port *ap = host->ports[i];
1502 1504 struct ata_queued_cmd *qc;
1503 ap = host->ports[i];
1504 if (ap &&
1505 !(ap->flags & ATA_FLAG_DISABLED)) {
1506 struct ata_queued_cmd *qc;
1507 1505
1508 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1506 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1509 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 1507 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1510 handled += ata_sff_host_intr(ap, qc); 1508 handled += ata_sff_host_intr(ap, qc);
1511 else 1509 } else {
1512 // No request pending? Clear interrupt status 1510 /*
1513 // anyway, in case there's one pending. 1511 * No request pending? Clear interrupt status
1514 ap->ops->sff_check_status(ap); 1512 * anyway, in case there's one pending.
1513 */
1514 ap->ops->sff_check_status(ap);
1515 } 1515 }
1516
1517 } 1516 }
1518 1517
1519 spin_unlock_irqrestore(&host->lock, flags); 1518 spin_unlock_irqrestore(&host->lock, flags);
@@ -1526,11 +1525,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1526 int i, handled = 0; 1525 int i, handled = 0;
1527 1526
1528 for (i = 0; i < host->n_ports; i++) { 1527 for (i = 0; i < host->n_ports; i++) {
1529 struct ata_port *ap = host->ports[i]; 1528 handled += nv_host_intr(host->ports[i], irq_stat);
1530
1531 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1532 handled += nv_host_intr(ap, irq_stat);
1533
1534 irq_stat >>= NV_INT_PORT_SHIFT; 1529 irq_stat >>= NV_INT_PORT_SHIFT;
1535 } 1530 }
1536 1531
@@ -1744,7 +1739,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
1744 readw(mmio + NV_ADMA_CTL); /* flush posted write */ 1739 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1745 } 1740 }
1746 1741
1747 ata_sff_error_handler(ap); 1742 ata_bmdma_error_handler(ap);
1748} 1743}
1749 1744
1750static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) 1745static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1870,7 +1865,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
1870 ehc->i.action |= ATA_EH_RESET; 1865 ehc->i.action |= ATA_EH_RESET;
1871 } 1866 }
1872 1867
1873 ata_sff_error_handler(ap); 1868 ata_bmdma_error_handler(ap);
1874} 1869}
1875 1870
1876#ifdef CONFIG_PM 1871#ifdef CONFIG_PM
@@ -1991,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
1991 struct nv_swncq_port_priv *pp; 1986 struct nv_swncq_port_priv *pp;
1992 int rc; 1987 int rc;
1993 1988
1994 rc = ata_port_start(ap); 1989 /* we might fallback to bmdma, allocate bmdma resources */
1990 rc = ata_bmdma_port_start(ap);
1995 if (rc) 1991 if (rc)
1996 return rc; 1992 return rc;
1997 1993
@@ -2016,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
2016static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) 2012static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2017{ 2013{
2018 if (qc->tf.protocol != ATA_PROT_NCQ) { 2014 if (qc->tf.protocol != ATA_PROT_NCQ) {
2019 ata_sff_qc_prep(qc); 2015 ata_bmdma_qc_prep(qc);
2020 return; 2016 return;
2021 } 2017 }
2022 2018
@@ -2031,7 +2027,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2031 struct ata_port *ap = qc->ap; 2027 struct ata_port *ap = qc->ap;
2032 struct scatterlist *sg; 2028 struct scatterlist *sg;
2033 struct nv_swncq_port_priv *pp = ap->private_data; 2029 struct nv_swncq_port_priv *pp = ap->private_data;
2034 struct ata_prd *prd; 2030 struct ata_bmdma_prd *prd;
2035 unsigned int si, idx; 2031 unsigned int si, idx;
2036 2032
2037 prd = pp->prd + ATA_MAX_PRD * qc->tag; 2033 prd = pp->prd + ATA_MAX_PRD * qc->tag;
@@ -2092,7 +2088,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2092 struct nv_swncq_port_priv *pp = ap->private_data; 2088 struct nv_swncq_port_priv *pp = ap->private_data;
2093 2089
2094 if (qc->tf.protocol != ATA_PROT_NCQ) 2090 if (qc->tf.protocol != ATA_PROT_NCQ)
2095 return ata_sff_qc_issue(qc); 2091 return ata_bmdma_qc_issue(qc);
2096 2092
2097 DPRINTK("Enter\n"); 2093 DPRINTK("Enter\n");
2098 2094
@@ -2380,16 +2376,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2380 for (i = 0; i < host->n_ports; i++) { 2376 for (i = 0; i < host->n_ports; i++) {
2381 struct ata_port *ap = host->ports[i]; 2377 struct ata_port *ap = host->ports[i];
2382 2378
2383 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 2379 if (ap->link.sactive) {
2384 if (ap->link.sactive) { 2380 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2385 nv_swncq_host_interrupt(ap, (u16)irq_stat); 2381 handled = 1;
2386 handled = 1; 2382 } else {
2387 } else { 2383 if (irq_stat) /* reserve Hotplug */
2388 if (irq_stat) /* reserve Hotplug */ 2384 nv_swncq_irq_clear(ap, 0xfff0);
2389 nv_swncq_irq_clear(ap, 0xfff0);
2390 2385
2391 handled += nv_host_intr(ap, (u8)irq_stat); 2386 handled += nv_host_intr(ap, (u8)irq_stat);
2392 }
2393 } 2387 }
2394 irq_stat >>= NV_INT_PORT_SHIFT_MCP55; 2388 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2395 } 2389 }
@@ -2479,8 +2473,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2479 } 2473 }
2480 2474
2481 pci_set_master(pdev); 2475 pci_set_master(pdev);
2482 return ata_host_activate(host, pdev->irq, ipriv->irq_handler, 2476 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2483 IRQF_SHARED, ipriv->sht);
2484} 2477}
2485 2478
2486#ifdef CONFIG_PM 2479#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5356ec00d2b4..f03ad48273ff 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
333 struct pdc_port_priv *pp; 333 struct pdc_port_priv *pp;
334 int rc; 334 int rc;
335 335
336 rc = ata_port_start(ap); 336 /* we use the same prd table as bmdma, allocate it */
337 rc = ata_bmdma_port_start(ap);
337 if (rc) 338 if (rc)
338 return rc; 339 return rc;
339 340
@@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
499static void pdc_atapi_pkt(struct ata_queued_cmd *qc) 500static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
500{ 501{
501 struct ata_port *ap = qc->ap; 502 struct ata_port *ap = qc->ap;
502 dma_addr_t sg_table = ap->prd_dma; 503 dma_addr_t sg_table = ap->bmdma_prd_dma;
503 unsigned int cdb_len = qc->dev->cdb_len; 504 unsigned int cdb_len = qc->dev->cdb_len;
504 u8 *cdb = qc->cdb; 505 u8 *cdb = qc->cdb;
505 struct pdc_port_priv *pp = ap->private_data; 506 struct pdc_port_priv *pp = ap->private_data;
@@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
587static void pdc_fill_sg(struct ata_queued_cmd *qc) 588static void pdc_fill_sg(struct ata_queued_cmd *qc)
588{ 589{
589 struct ata_port *ap = qc->ap; 590 struct ata_port *ap = qc->ap;
591 struct ata_bmdma_prd *prd = ap->bmdma_prd;
590 struct scatterlist *sg; 592 struct scatterlist *sg;
591 const u32 SG_COUNT_ASIC_BUG = 41*4; 593 const u32 SG_COUNT_ASIC_BUG = 41*4;
592 unsigned int si, idx; 594 unsigned int si, idx;
@@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
613 if ((offset + sg_len) > 0x10000) 615 if ((offset + sg_len) > 0x10000)
614 len = 0x10000 - offset; 616 len = 0x10000 - offset;
615 617
616 ap->prd[idx].addr = cpu_to_le32(addr); 618 prd[idx].addr = cpu_to_le32(addr);
617 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 619 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
618 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 620 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
619 621
620 idx++; 622 idx++;
@@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
623 } 625 }
624 } 626 }
625 627
626 len = le32_to_cpu(ap->prd[idx - 1].flags_len); 628 len = le32_to_cpu(prd[idx - 1].flags_len);
627 629
628 if (len > SG_COUNT_ASIC_BUG) { 630 if (len > SG_COUNT_ASIC_BUG) {
629 u32 addr; 631 u32 addr;
630 632
631 VPRINTK("Splitting last PRD.\n"); 633 VPRINTK("Splitting last PRD.\n");
632 634
633 addr = le32_to_cpu(ap->prd[idx - 1].addr); 635 addr = le32_to_cpu(prd[idx - 1].addr);
634 ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); 636 prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
635 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); 637 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
636 638
637 addr = addr + len - SG_COUNT_ASIC_BUG; 639 addr = addr + len - SG_COUNT_ASIC_BUG;
638 len = SG_COUNT_ASIC_BUG; 640 len = SG_COUNT_ASIC_BUG;
639 ap->prd[idx].addr = cpu_to_le32(addr); 641 prd[idx].addr = cpu_to_le32(addr);
640 ap->prd[idx].flags_len = cpu_to_le32(len); 642 prd[idx].flags_len = cpu_to_le32(len);
641 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 643 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
642 644
643 idx++; 645 idx++;
644 } 646 }
645 647
646 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 648 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
647} 649}
648 650
649static void pdc_qc_prep(struct ata_queued_cmd *qc) 651static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
658 pdc_fill_sg(qc); 660 pdc_fill_sg(qc);
659 /*FALLTHROUGH*/ 661 /*FALLTHROUGH*/
660 case ATA_PROT_NODATA: 662 case ATA_PROT_NODATA:
661 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, 663 i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
662 qc->dev->devno, pp->pkt); 664 qc->dev->devno, pp->pkt);
663 if (qc->tf.flags & ATA_TFLAG_LBA48) 665 if (qc->tf.flags & ATA_TFLAG_LBA48)
664 i = pdc_prep_lba48(&qc->tf, pp->pkt, i); 666 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
@@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
838 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 840 if (!(ap->pflags & ATA_PFLAG_FROZEN))
839 pdc_reset_port(ap); 841 pdc_reset_port(ap);
840 842
841 ata_std_error_handler(ap); 843 ata_sff_error_handler(ap);
842} 844}
843 845
844static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) 846static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
984 /* check for a plug or unplug event */ 986 /* check for a plug or unplug event */
985 ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); 987 ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
986 tmp = hotplug_status & (0x11 << ata_no); 988 tmp = hotplug_status & (0x11 << ata_no);
987 if (tmp && ap && 989 if (tmp) {
988 !(ap->flags & ATA_FLAG_DISABLED)) {
989 struct ata_eh_info *ehi = &ap->link.eh_info; 990 struct ata_eh_info *ehi = &ap->link.eh_info;
990 ata_ehi_clear_desc(ehi); 991 ata_ehi_clear_desc(ehi);
991 ata_ehi_hotplugged(ehi); 992 ata_ehi_hotplugged(ehi);
@@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
997 998
998 /* check for a packet interrupt */ 999 /* check for a packet interrupt */
999 tmp = mask & (1 << (i + 1)); 1000 tmp = mask & (1 << (i + 1));
1000 if (tmp && ap && 1001 if (tmp) {
1001 !(ap->flags & ATA_FLAG_DISABLED)) {
1002 struct ata_queued_cmd *qc; 1002 struct ata_queued_cmd *qc;
1003 1003
1004 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1004 qc = ata_qc_from_tag(ap, ap->link.active_tag);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 92ba45e6689b..d533b3d20ca1 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -147,7 +147,6 @@ static struct ata_port_operations qs_ata_ops = {
147 .prereset = qs_prereset, 147 .prereset = qs_prereset,
148 .softreset = ATA_OP_NULL, 148 .softreset = ATA_OP_NULL,
149 .error_handler = qs_error_handler, 149 .error_handler = qs_error_handler,
150 .post_internal_cmd = ATA_OP_NULL,
151 .lost_interrupt = ATA_OP_NULL, 150 .lost_interrupt = ATA_OP_NULL,
152 151
153 .scr_read = qs_scr_read, 152 .scr_read = qs_scr_read,
@@ -255,7 +254,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
255static void qs_error_handler(struct ata_port *ap) 254static void qs_error_handler(struct ata_port *ap)
256{ 255{
257 qs_enter_reg_mode(ap); 256 qs_enter_reg_mode(ap);
258 ata_std_error_handler(ap); 257 ata_sff_error_handler(ap);
259} 258}
260 259
261static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) 260static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
@@ -304,10 +303,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
304 VPRINTK("ENTER\n"); 303 VPRINTK("ENTER\n");
305 304
306 qs_enter_reg_mode(qc->ap); 305 qs_enter_reg_mode(qc->ap);
307 if (qc->tf.protocol != ATA_PROT_DMA) { 306 if (qc->tf.protocol != ATA_PROT_DMA)
308 ata_sff_qc_prep(qc);
309 return; 307 return;
310 }
311 308
312 nelem = qs_fill_sg(qc); 309 nelem = qs_fill_sg(qc);
313 310
@@ -404,26 +401,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
404 u8 sHST = sff1 & 0x3f; /* host status */ 401 u8 sHST = sff1 & 0x3f; /* host status */
405 unsigned int port_no = (sff1 >> 8) & 0x03; 402 unsigned int port_no = (sff1 >> 8) & 0x03;
406 struct ata_port *ap = host->ports[port_no]; 403 struct ata_port *ap = host->ports[port_no];
404 struct qs_port_priv *pp = ap->private_data;
405 struct ata_queued_cmd *qc;
407 406
408 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 407 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
409 sff1, sff0, port_no, sHST, sDST); 408 sff1, sff0, port_no, sHST, sDST);
410 handled = 1; 409 handled = 1;
411 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 410 if (!pp || pp->state != qs_state_pkt)
412 struct ata_queued_cmd *qc; 411 continue;
413 struct qs_port_priv *pp = ap->private_data; 412 qc = ata_qc_from_tag(ap, ap->link.active_tag);
414 if (!pp || pp->state != qs_state_pkt) 413 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
415 continue; 414 switch (sHST) {
416 qc = ata_qc_from_tag(ap, ap->link.active_tag); 415 case 0: /* successful CPB */
417 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 416 case 3: /* device error */
418 switch (sHST) { 417 qs_enter_reg_mode(qc->ap);
419 case 0: /* successful CPB */ 418 qs_do_or_die(qc, sDST);
420 case 3: /* device error */ 419 break;
421 qs_enter_reg_mode(qc->ap); 420 default:
422 qs_do_or_die(qc, sDST); 421 break;
423 break;
424 default:
425 break;
426 }
427 } 422 }
428 } 423 }
429 } 424 }
@@ -436,33 +431,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
436 unsigned int handled = 0, port_no; 431 unsigned int handled = 0, port_no;
437 432
438 for (port_no = 0; port_no < host->n_ports; ++port_no) { 433 for (port_no = 0; port_no < host->n_ports; ++port_no) {
439 struct ata_port *ap; 434 struct ata_port *ap = host->ports[port_no];
440 ap = host->ports[port_no]; 435 struct qs_port_priv *pp = ap->private_data;
441 if (ap && 436 struct ata_queued_cmd *qc;
442 !(ap->flags & ATA_FLAG_DISABLED)) { 437
443 struct ata_queued_cmd *qc; 438 qc = ata_qc_from_tag(ap, ap->link.active_tag);
444 struct qs_port_priv *pp; 439 if (!qc) {
445 qc = ata_qc_from_tag(ap, ap->link.active_tag); 440 /*
446 if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) { 441 * The qstor hardware generates spurious
447 /* 442 * interrupts from time to time when switching
448 * The qstor hardware generates spurious 443 * in and out of packet mode. There's no
449 * interrupts from time to time when switching 444 * obvious way to know if we're here now due
450 * in and out of packet mode. 445 * to that, so just ack the irq and pretend we
451 * There's no obvious way to know if we're 446 * knew it was ours.. (ugh). This does not
452 * here now due to that, so just ack the irq 447 * affect packet mode.
453 * and pretend we knew it was ours.. (ugh). 448 */
454 * This does not affect packet mode. 449 ata_sff_check_status(ap);
455 */ 450 handled = 1;
456 ata_sff_check_status(ap); 451 continue;
457 handled = 1;
458 continue;
459 }
460 pp = ap->private_data;
461 if (!pp || pp->state != qs_state_mmio)
462 continue;
463 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
464 handled |= ata_sff_host_intr(ap, qc);
465 } 452 }
453
454 if (!pp || pp->state != qs_state_mmio)
455 continue;
456 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
457 handled |= ata_sff_host_intr(ap, qc);
466 } 458 }
467 return handled; 459 return handled;
468} 460}
@@ -509,11 +501,7 @@ static int qs_port_start(struct ata_port *ap)
509 void __iomem *mmio_base = qs_mmio_base(ap->host); 501 void __iomem *mmio_base = qs_mmio_base(ap->host);
510 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); 502 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
511 u64 addr; 503 u64 addr;
512 int rc;
513 504
514 rc = ata_port_start(ap);
515 if (rc)
516 return rc;
517 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 505 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
518 if (!pp) 506 if (!pp)
519 return -ENOMEM; 507 return -ENOMEM;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3cb69d5fb817..2dda312b6b9a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
284 void __iomem *bmdma = ap->ioaddr.bmdma_addr; 284 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
285 285
286 /* load PRD table addr. */ 286 /* load PRD table addr. */
287 iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS); 287 iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
288 288
289 /* issue r/w command */ 289 /* issue r/w command */
290 ap->ops->sff_exec_command(ap, &qc->tf); 290 ap->ops->sff_exec_command(ap, &qc->tf);
@@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
311{ 311{
312 struct scatterlist *sg; 312 struct scatterlist *sg;
313 struct ata_port *ap = qc->ap; 313 struct ata_port *ap = qc->ap;
314 struct ata_prd *prd, *last_prd = NULL; 314 struct ata_bmdma_prd *prd, *last_prd = NULL;
315 unsigned int si; 315 unsigned int si;
316 316
317 prd = &ap->prd[0]; 317 prd = &ap->bmdma_prd[0];
318 for_each_sg(qc->sg, sg, qc->n_elem, si) { 318 for_each_sg(qc->sg, sg, qc->n_elem, si) {
319 /* Note h/w doesn't support 64-bit, so we unconditionally 319 /* Note h/w doesn't support 64-bit, so we unconditionally
320 * truncate dma_addr_t to u32. 320 * truncate dma_addr_t to u32.
@@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
532 struct ata_port *ap = host->ports[i]; 532 struct ata_port *ap = host->ports[i];
533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
534 534
535 if (unlikely(ap->flags & ATA_FLAG_DISABLED))
536 continue;
537
538 /* turn off SATA_IRQ if not supported */ 535 /* turn off SATA_IRQ if not supported */
539 if (ap->flags & SIL_FLAG_NO_SATA_IRQ) 536 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
540 bmdma2 &= ~SIL_DMA_SATA_IRQ; 537 bmdma2 &= ~SIL_DMA_SATA_IRQ;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 433b6b89c795..e9250514734b 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1160,13 +1160,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
1160 1160
1161 for (i = 0; i < host->n_ports; i++) 1161 for (i = 0; i < host->n_ports; i++)
1162 if (status & (1 << i)) { 1162 if (status & (1 << i)) {
1163 struct ata_port *ap = host->ports[i]; 1163 sil24_host_intr(host->ports[i]);
1164 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 1164 handled++;
1165 sil24_host_intr(ap);
1166 handled++;
1167 } else
1168 printk(KERN_ERR DRV_NAME
1169 ": interrupt from disabled port %d\n", i);
1170 } 1165 }
1171 1166
1172 spin_unlock(&host->lock); 1167 spin_unlock(&host->lock);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7257f2d5c52c..101fd6a19829 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
224 224
225 /* load PRD table addr. */ 225 /* load PRD table addr. */
226 mb(); /* make sure PRD table writes are visible to controller */ 226 mb(); /* make sure PRD table writes are visible to controller */
227 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); 227 writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
228 228
229 /* specify data direction, triple-check start bit is clear */ 229 /* specify data direction, triple-check start bit is clear */
230 dmactl = readb(mmio + ATA_DMA_CMD); 230 dmactl = readb(mmio + ATA_DMA_CMD);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 232468f2ea90..bedd5188e5b0 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
302{ 302{
303 struct device *dev = ap->host->dev; 303 struct device *dev = ap->host->dev;
304 struct pdc_port_priv *pp; 304 struct pdc_port_priv *pp;
305 int rc;
306
307 rc = ata_port_start(ap);
308 if (rc)
309 return rc;
310 305
311 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 306 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
312 if (!pp) 307 if (!pp)
@@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
840 ap = host->ports[port_no]; 835 ap = host->ports[port_no];
841 tmp = mask & (1 << i); 836 tmp = mask & (1 << i);
842 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 837 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
843 if (tmp && ap && 838 if (tmp && ap) {
844 !(ap->flags & ATA_FLAG_DISABLED)) {
845 struct ata_queued_cmd *qc; 839 struct ata_queued_cmd *qc;
846 840
847 qc = ata_qc_from_tag(ap, ap->link.active_tag); 841 qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
927 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 921 if (!(ap->pflags & ATA_PFLAG_FROZEN))
928 pdc_reset_port(ap); 922 pdc_reset_port(ap);
929 923
930 ata_std_error_handler(ap); 924 ata_sff_error_handler(ap);
931} 925}
932 926
933static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) 927static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 011e098590d1..d8dac17dc2c8 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
181 if (rc) 181 if (rc)
182 return rc; 182 return rc;
183 183
184 rc = ata_pci_bmdma_init(host); 184 ata_pci_bmdma_init(host);
185 if (rc)
186 return rc;
187 185
188 iomap = host->iomap; 186 iomap = host->iomap;
189 187
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8b2a278b2547..2107952ebff1 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
284 for (i = 0; i < host->n_ports; i++) { 284 for (i = 0; i < host->n_ports; i++) {
285 u8 port_status = (status >> (8 * i)) & 0xff; 285 u8 port_status = (status >> (8 * i)) & 0xff;
286 if (port_status) { 286 if (port_status) {
287 struct ata_port *ap = host->ports[i]; 287 vsc_port_intr(port_status, host->ports[i]);
288 288 handled++;
289 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
290 vsc_port_intr(port_status, ap);
291 handled++;
292 } else
293 dev_printk(KERN_ERR, host->dev,
294 "interrupt from disabled port %d\n", i);
295 } 289 }
296 } 290 }
297 291