Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig | 484
-rw-r--r--  drivers/ata/Makefile | 62
-rw-r--r--  drivers/ata/ahci.c | 1684
-rw-r--r--  drivers/ata/ata_generic.c | 252
-rw-r--r--  drivers/ata/ata_piix.c | 1258
-rw-r--r--  drivers/ata/libata-core.c | 6171
-rw-r--r--  drivers/ata/libata-eh.c | 2245
-rw-r--r--  drivers/ata/libata-scsi.c | 3322
-rw-r--r--  drivers/ata/libata-sff.c | 1121
-rw-r--r--  drivers/ata/libata.h | 122
-rw-r--r--  drivers/ata/pata_ali.c | 679
-rw-r--r--  drivers/ata/pata_amd.c | 718
-rw-r--r--  drivers/ata/pata_artop.c | 518
-rw-r--r--  drivers/ata/pata_atiixp.c | 306
-rw-r--r--  drivers/ata/pata_cmd64x.c | 505
-rw-r--r--  drivers/ata/pata_cs5520.c | 336
-rw-r--r--  drivers/ata/pata_cs5530.c | 387
-rw-r--r--  drivers/ata/pata_cs5535.c | 291
-rw-r--r--  drivers/ata/pata_cypress.c | 227
-rw-r--r--  drivers/ata/pata_efar.c | 342
-rw-r--r--  drivers/ata/pata_hpt366.c | 478
-rw-r--r--  drivers/ata/pata_hpt37x.c | 1257
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 597
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 226
-rw-r--r--  drivers/ata/pata_isapnp.c | 156
-rw-r--r--  drivers/ata/pata_it821x.c | 847
-rw-r--r--  drivers/ata/pata_jmicron.c | 266
-rw-r--r--  drivers/ata/pata_legacy.c | 949
-rw-r--r--  drivers/ata/pata_mpiix.c | 313
-rw-r--r--  drivers/ata/pata_netcell.c | 175
-rw-r--r--  drivers/ata/pata_ns87410.c | 236
-rw-r--r--  drivers/ata/pata_oldpiix.c | 339
-rw-r--r--  drivers/ata/pata_opti.c | 292
-rw-r--r--  drivers/ata/pata_optidma.c | 547
-rw-r--r--  drivers/ata/pata_pcmcia.c | 393
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 869
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 423
-rw-r--r--  drivers/ata/pata_qdi.c | 403
-rw-r--r--  drivers/ata/pata_radisys.c | 335
-rw-r--r--  drivers/ata/pata_rz1000.c | 205
-rw-r--r--  drivers/ata/pata_sc1200.c | 287
-rw-r--r--  drivers/ata/pata_serverworks.c | 587
-rw-r--r--  drivers/ata/pata_sil680.c | 381
-rw-r--r--  drivers/ata/pata_sis.c | 1034
-rw-r--r--  drivers/ata/pata_sl82c105.c | 388
-rw-r--r--  drivers/ata/pata_triflex.c | 285
-rw-r--r--  drivers/ata/pata_via.c | 568
-rw-r--r--  drivers/ata/pdc_adma.c | 740
-rw-r--r--  drivers/ata/sata_mv.c | 2465
-rw-r--r--  drivers/ata/sata_nv.c | 595
-rw-r--r--  drivers/ata/sata_promise.c | 844
-rw-r--r--  drivers/ata/sata_promise.h | 157
-rw-r--r--  drivers/ata/sata_qstor.c | 730
-rw-r--r--  drivers/ata/sata_sil.c | 728
-rw-r--r--  drivers/ata/sata_sil24.c | 1227
-rw-r--r--  drivers/ata/sata_sis.c | 347
-rw-r--r--  drivers/ata/sata_svw.c | 508
-rw-r--r--  drivers/ata/sata_sx4.c | 1502
-rw-r--r--  drivers/ata/sata_uli.c | 300
-rw-r--r--  drivers/ata/sata_via.c | 502
-rw-r--r--  drivers/ata/sata_vsc.c | 482
61 files changed, 44993 insertions(+), 0 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 000000000000..5a8bdac5f5e8
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,484 @@
+#
+# SATA/PATA driver configuration
+#
+
+menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
+
+config ATA
+	tristate "ATA device support"
+	select SCSI
+	---help---
+	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
+	  any other ATA device under Linux, say Y and make sure that you know
+	  the name of your ATA host adapter (the card inside your computer
+	  that "speaks" the ATA protocol, also called ATA controller),
+	  because you will be asked for it.
+
+if ATA
+
+config SATA_AHCI
+	tristate "AHCI SATA support"
+	depends on PCI
+	help
+	  This option enables support for AHCI Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_SVW
+	tristate "ServerWorks Frodo / Apple K2 SATA support"
+	depends on PCI
+	help
+	  This option enables support for the Broadcom/ServerWorks/Apple K2
+	  SATA controllers.
+
+	  If unsure, say N.
+
+config ATA_PIIX
+	tristate "Intel PIIX/ICH SATA support"
+	depends on PCI
+	help
+	  This option enables support for ICH5/6/7/8 Serial ATA.
+	  If PATA support was enabled previously, this enables
+	  support for select Intel PIIX/ICH PATA host controllers.
+
+	  If unsure, say N.
+
+config SATA_MV
+	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Marvell Serial ATA family.
+	  Currently supports 88SX[56]0[48][01] chips.
+
+	  If unsure, say N.
+
+config SATA_NV
+	tristate "NVIDIA SATA support"
+	depends on PCI
+	help
+	  This option enables support for NVIDIA Serial ATA.
+
+	  If unsure, say N.
+
+config PDC_ADMA
+	tristate "Pacific Digital ADMA support"
+	depends on PCI
+	help
+	  This option enables support for Pacific Digital ADMA controllers.
+
+	  If unsure, say N.
+
+config SATA_QSTOR
+	tristate "Pacific Digital SATA QStor support"
+	depends on PCI
+	help
+	  This option enables support for Pacific Digital Serial ATA QStor.
+
+	  If unsure, say N.
+
+config SATA_PROMISE
+	tristate "Promise SATA TX2/TX4 support"
+	depends on PCI
+	help
+	  This option enables support for Promise Serial ATA TX2/TX4.
+
+	  If unsure, say N.
+
+config SATA_SX4
+	tristate "Promise SATA SX4 support"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for Promise Serial ATA SX4.
+
+	  If unsure, say N.
+
+config SATA_SIL
+	tristate "Silicon Image SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_SIS
+	tristate "SiS 964/180 SATA support"
+	depends on PCI
+	help
+	  This option enables support for SiS Serial ATA 964/180.
+
+	  If unsure, say N.
+
+config SATA_ULI
+	tristate "ULi Electronics SATA support"
+	depends on PCI
+	help
+	  This option enables support for ULi Electronics SATA.
+
+	  If unsure, say N.
+
+config SATA_VIA
+	tristate "VIA SATA support"
+	depends on PCI
+	help
+	  This option enables support for VIA Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_VITESSE
+	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_INTEL_COMBINED
+	bool
+	depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
+	default y
+
+config PATA_ALI
+	tristate "ALi PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ALi ATA interfaces
+	  found on the many ALi chipsets.
+
+	  If unsure, say N.
+
+config PATA_AMD
+	tristate "AMD/NVidia PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the AMD and NVidia PATA
+	  interfaces found on the chipsets for Athlon/Athlon64.
+
+	  If unsure, say N.
+
+config PATA_ARTOP
+	tristate "ARTOP 6210/6260 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for ARTOP PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_ATIIXP
+	tristate "ATI PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ATI ATA interfaces
+	  found on the many ATI chipsets.
+
+	  If unsure, say N.
+
+config PATA_CMD64X
+	tristate "CMD64x PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the CMD64x series chips
+	  except for the CMD640.
+
+	  If unsure, say N.
+
+config PATA_CS5520
+	tristate "CS5510/5520 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the Cyrix 5510/5520
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5530
+	tristate "CS5530 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Cyrix/NatSemi/AMD CS5530
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5535
+	tristate "CS5535 PATA support (Experimental)"
+	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	help
+	  This option enables support for the NatSemi/AMD CS5535
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CYPRESS
+	tristate "Cypress CY82C693 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Cypress/Contaq CY82C693
+	  chipset found in some Alpha systems.
+
+	  If unsure, say N.
+
+config PATA_EFAR
+	tristate "EFAR SLC90E66 support"
+	depends on PCI
+	help
+	  This option enables support for the EFAR SLC90E66
+	  IDE controller found on some older machines.
+
+	  If unsure, say N.
+
+config ATA_GENERIC
+	tristate "Generic ATA support"
+	depends on PCI
+	help
+	  This option enables support for generic BIOS-configured
+	  ATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT366
+	tristate "HPT 366/368 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the HPT 366 and 368
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT37X
+	tristate "HPT 370/370A/371/372/374/302 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the majority of the later HPT
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X2N
+	tristate "HPT 372N/302N PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the N variant HPT PATA
+	  controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X3
+	tristate "HPT 343/363 PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the HPT 343/363
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_ISAPNP
+	tristate "ISA Plug and Play PATA support (Very Experimental)"
+	depends on EXPERIMENTAL && ISAPNP
+	help
+	  This option enables support for ISA plug & play ATA
+	  controllers such as those found on old soundcards.
+
+	  If unsure, say N.
+
+config PATA_IT821X
+	tristate "IT821x PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ITE 8211 and 8212
+	  PATA controllers via the new ATA layer, including RAID
+	  mode.
+
+	  If unsure, say N.
+
+config PATA_JMICRON
+	tristate "JMicron PATA support"
+	depends on PCI
+	help
+	  Enable support for the JMicron IDE controller, via the new
+	  ATA layer.
+
+	  If unsure, say N.
+
+config PATA_LEGACY
+	tristate "Legacy ISA PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for ISA/VLB bus legacy PATA
+	  ports and allows them to be accessed via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_TRIFLEX
+	tristate "Compaq Triflex PATA support"
+	depends on PCI
+	help
+	  Enable support for the Compaq 'Triflex' IDE controller as found
+	  on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_MPIIX
+	tristate "Intel PATA MPIIX support"
+	depends on PCI
+	help
+	  This option enables support for the Intel MPIIX PATA interface.
+
+	  If unsure, say N.
+
+config PATA_OLDPIIX
+	tristate "Intel PATA old PIIX support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for older PIIX PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_NETCELL
+	tristate "NETCELL Revolution RAID support"
+	depends on PCI
+	help
+	  This option enables support for the Netcell Revolution RAID
+	  PATA controller.
+
+	  If unsure, say N.
+
+config PATA_NS87410
+	tristate "Nat Semi NS87410 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the National Semiconductor
+	  NS87410 PCI-IDE controller.
+
+	  If unsure, say N.
+
+config PATA_OPTI
+	tristate "OPTI621/6215 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables full PIO support for the early Opti ATA
+	  controllers found on some old motherboards.
+
+	  If unsure, say N.
+
+config PATA_OPTIDMA
+	tristate "OPTI FireStar PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables DMA/PIO support for the later OPTi
+	  controllers found on some old motherboards and in some
+	  laptops.
+
+	  If unsure, say N.
+
+config PATA_PCMCIA
+	tristate "PCMCIA PATA support"
+	depends on PCMCIA
+	help
+	  This option enables support for PCMCIA ATA interfaces, including
+	  compact flash card adapters via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PDC_OLD
+	tristate "Older Promise PATA controller support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Promise 20246, 20262, 20263,
+	  20265 and 20267 adapters.
+
+	  If unsure, say N.
+
+config PATA_QDI
+	tristate "QDI VLB PATA support"
+	help
+	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
+
+config PATA_RADISYS
+	tristate "RADISYS 82600 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the RADISYS 82600
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_RZ1000
+	tristate "PC Tech RZ1000 PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the PC Tech RZ1000/1
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_SC1200
+	tristate "SC1200 PATA support (Raving Lunatic)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the NatSemi/AMD SC1200 SoC
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_SERVERWORKS
+	tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Serverworks OSB4/CSB5/CSB6 and
+	  HT1000 PATA controllers, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PDC2027X
+	tristate "Promise PATA 2027x support"
+	depends on PCI
+	help
+	  This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
+
+	  If unsure, say N.
+
+config PATA_SIL680
+	tristate "CMD / Silicon Image 680 PATA support"
+	depends on PCI
+	help
+	  This option enables support for CMD / Silicon Image 680 PATA.
+
+	  If unsure, say N.
+
+config PATA_SIS
+	tristate "SiS PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for SiS PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_VIA
+	tristate "VIA PATA support"
+	depends on PCI
+	help
+	  This option enables support for the VIA PATA interfaces
+	  found on the many VIA chipsets.
+
+	  If unsure, say N.
+
+config PATA_WINBOND
+	tristate "Winbond SL82C105 PATA support"
+	depends on PCI
+	help
+	  This option enables support for SL82C105 PATA devices found in the
+	  Netwinder and some other systems.
+
+	  If unsure, say N.
+
+endif
+endmenu
+
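A note on how the options above are consumed (an illustrative sketch, not part of the commit): each tristate entry defines a CONFIG_<name> symbol that lands in the build's .config as y (built into the kernel), m (built as a module), or unset. The "select SCSI" line under config ATA means that enabling any of these drivers pulls in the SCSI midlayer automatically, because libata presents ATA devices to the rest of the system through SCSI. A hypothetical .config fragment for a machine with an AHCI controller might read:

    CONFIG_ATA=y
    CONFIG_SATA_AHCI=m
    # CONFIG_SATA_SVW is not set

The symbol names come from the file above; the y/m values are illustrative only. The Makefile below, also added by this commit, shows how these symbols select objects to build.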
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 000000000000..72243a677f9b
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,62 @@
+
+obj-$(CONFIG_ATA) += libata.o
+
+obj-$(CONFIG_SATA_AHCI) += ahci.o
+obj-$(CONFIG_SATA_SVW) += sata_svw.o
+obj-$(CONFIG_ATA_PIIX) += ata_piix.o
+obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
+obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
+obj-$(CONFIG_SATA_SIL) += sata_sil.o
+obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+obj-$(CONFIG_SATA_VIA) += sata_via.o
+obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
+obj-$(CONFIG_SATA_SIS) += sata_sis.o
+obj-$(CONFIG_SATA_SX4) += sata_sx4.o
+obj-$(CONFIG_SATA_NV) += sata_nv.o
+obj-$(CONFIG_SATA_ULI) += sata_uli.o
+obj-$(CONFIG_SATA_MV) += sata_mv.o
+obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+
+obj-$(CONFIG_PATA_ALI) += pata_ali.o
+obj-$(CONFIG_PATA_AMD) += pata_amd.o
+obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
+obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
+obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
+obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
+obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
+obj-$(CONFIG_PATA_CS5535) += pata_cs5535.o
+obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
+obj-$(CONFIG_PATA_EFAR) += pata_efar.o
+obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
+obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
+obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
+obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
+obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
+obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
+obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
+obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
+obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI) += pata_opti.o
+obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
+obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
+obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
+obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
+obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
+obj-$(CONFIG_PATA_QDI) += pata_qdi.o
+obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
+obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
+obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
+obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
+obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
+obj-$(CONFIG_PATA_VIA) += pata_via.o
+obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
+obj-$(CONFIG_PATA_SIS) += pata_sis.o
+obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+# Should be last but one libata driver
+obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
+# Should be last libata driver
+obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
+
+libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o
+
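A note on how kbuild consumes the Makefile above (again a sketch, not part of the commit): obj-$(CONFIG_FOO) expands to obj-y or obj-m depending on the symbol's value in .config, and the libata-objs line tells kbuild to link libata.o as a composite of the four libata-*.o files. Assuming the hypothetical fragment shown after the Kconfig (CONFIG_ATA=y, CONFIG_SATA_AHCI=m), the first lines expand to:

    obj-y += libata.o    # linked into vmlinux, assembled from libata-objs
    obj-m += ahci.o      # built as the ahci.ko module

The ordering comments are load-bearing: ata_generic and pata_legacy claim whatever hardware the more specific drivers did not, so they must register last, which for built-in drivers follows link order.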
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 000000000000..1aabc81d82f1
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,1684 @@
1/*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h>
48#include <asm/io.h>
49
50#define DRV_NAME "ahci"
51#define DRV_VERSION "2.0"
52
53
54enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
100
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
117
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
127
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
137
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
139 PORT_IRQ_IF_ERR |
140 PORT_IRQ_CONNECT |
141 PORT_IRQ_PHYRDY |
142 PORT_IRQ_UNK_FIS,
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
144 PORT_IRQ_TF_ERR |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
149
150 /* PORT_CMD bits */
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
159
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
164
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
167
168 /* ap->flags bits */
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
171};
172
173struct ahci_cmd_hdr {
174 u32 opts;
175 u32 status;
176 u32 tbl_addr;
177 u32 tbl_addr_hi;
178 u32 reserved[4];
179};
180
181struct ahci_sg {
182 u32 addr;
183 u32 addr_hi;
184 u32 reserved;
185 u32 flags_size;
186};
187
188struct ahci_host_priv {
189 unsigned long flags;
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192};
193
194struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201};
202
203static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
208static void ahci_irq_clear(struct ata_port *ap);
209static int ahci_port_start(struct ata_port *ap);
210static void ahci_port_stop(struct ata_port *ap);
211static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212static void ahci_qc_prep(struct ata_queued_cmd *qc);
213static u8 ahci_check_status(struct ata_port *ap);
214static void ahci_freeze(struct ata_port *ap);
215static void ahci_thaw(struct ata_port *ap);
216static void ahci_error_handler(struct ata_port *ap);
217static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
219static int ahci_port_resume(struct ata_port *ap);
220static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
221static int ahci_pci_device_resume(struct pci_dev *pdev);
222static void ahci_remove_one (struct pci_dev *pdev);
223
224static struct scsi_host_template ahci_sht = {
225 .module = THIS_MODULE,
226 .name = DRV_NAME,
227 .ioctl = ata_scsi_ioctl,
228 .queuecommand = ata_scsi_queuecmd,
229 .change_queue_depth = ata_scsi_change_queue_depth,
230 .can_queue = AHCI_MAX_CMDS - 1,
231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = AHCI_MAX_SG,
233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
234 .emulated = ATA_SHT_EMULATED,
235 .use_clustering = AHCI_USE_CLUSTERING,
236 .proc_name = DRV_NAME,
237 .dma_boundary = AHCI_DMA_BOUNDARY,
238 .slave_configure = ata_scsi_slave_config,
239 .slave_destroy = ata_scsi_slave_destroy,
240 .bios_param = ata_std_bios_param,
241 .suspend = ata_scsi_device_suspend,
242 .resume = ata_scsi_device_resume,
243};
244
245static const struct ata_port_operations ahci_ops = {
246 .port_disable = ata_port_disable,
247
248 .check_status = ahci_check_status,
249 .check_altstatus = ahci_check_status,
250 .dev_select = ata_noop_dev_select,
251
252 .tf_read = ahci_tf_read,
253
254 .qc_prep = ahci_qc_prep,
255 .qc_issue = ahci_qc_issue,
256
257 .irq_handler = ahci_interrupt,
258 .irq_clear = ahci_irq_clear,
259
260 .scr_read = ahci_scr_read,
261 .scr_write = ahci_scr_write,
262
263 .freeze = ahci_freeze,
264 .thaw = ahci_thaw,
265
266 .error_handler = ahci_error_handler,
267 .post_internal_cmd = ahci_post_internal_cmd,
268
269 .port_suspend = ahci_port_suspend,
270 .port_resume = ahci_port_resume,
271
272 .port_start = ahci_port_start,
273 .port_stop = ahci_port_stop,
274};
275
276static const struct ata_port_info ahci_port_info[] = {
277 /* board_ahci */
278 {
279 .sht = &ahci_sht,
280 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
282 ATA_FLAG_SKIP_D2H_BSY,
283 .pio_mask = 0x1f, /* pio0-4 */
284 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
285 .port_ops = &ahci_ops,
286 },
287 /* board_ahci_vt8251 */
288 {
289 .sht = &ahci_sht,
290 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
292 ATA_FLAG_SKIP_D2H_BSY |
293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
294 .pio_mask = 0x1f, /* pio0-4 */
295 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
296 .port_ops = &ahci_ops,
297 },
298};
299
300static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */
302 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ICH6 */
304 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ICH6M */
306 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ICH7 */
308 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7M */
310 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH7R */
312 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ULi M5288 */
314 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ESB2 */
316 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
321 board_ahci }, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* ICH8M */
332
333 /* JMicron */
334 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* JMicron JMB360 */
336 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* JMicron JMB361 */
338 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 board_ahci }, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci }, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
343 board_ahci }, /* JMicron JMB366 */
344
345 /* ATI */
346 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* ATI SB600 non-raid */
348 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* ATI SB600 raid */
350
351 /* VIA */
352 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
353 board_ahci_vt8251 }, /* VIA VT8251 */
354
355 /* NVIDIA */
356 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
357 board_ahci }, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
359 board_ahci }, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
361 board_ahci }, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
363 board_ahci }, /* MCP65 */
364
365 /* SiS */
366 { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
367 board_ahci }, /* SiS 966 */
368 { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
369 board_ahci }, /* SiS 966 */
370 { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
371 board_ahci }, /* SiS 968 */
372
373 { } /* terminate list */
374};
375
376
377static struct pci_driver ahci_pci_driver = {
378 .name = DRV_NAME,
379 .id_table = ahci_pci_tbl,
380 .probe = ahci_init_one,
381 .suspend = ahci_pci_device_suspend,
382 .resume = ahci_pci_device_resume,
383 .remove = ahci_remove_one,
384};
385
386
387static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
388{
389 return base + 0x100 + (port * 0x80);
390}
391
392static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
393{
394 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
395}
396
397static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
398{
399 unsigned int sc_reg;
400
401 switch (sc_reg_in) {
402 case SCR_STATUS: sc_reg = 0; break;
403 case SCR_CONTROL: sc_reg = 1; break;
404 case SCR_ERROR: sc_reg = 2; break;
405 case SCR_ACTIVE: sc_reg = 3; break;
406 default:
407 return 0xffffffffU;
408 }
409
410 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
411}
412
413
414static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
415 u32 val)
416{
417 unsigned int sc_reg;
418
419 switch (sc_reg_in) {
420 case SCR_STATUS: sc_reg = 0; break;
421 case SCR_CONTROL: sc_reg = 1; break;
422 case SCR_ERROR: sc_reg = 2; break;
423 case SCR_ACTIVE: sc_reg = 3; break;
424 default:
425 return;
426 }
427
428 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
429}
430
431static void ahci_start_engine(void __iomem *port_mmio)
432{
433 u32 tmp;
434
435 /* start DMA */
436 tmp = readl(port_mmio + PORT_CMD);
437 tmp |= PORT_CMD_START;
438 writel(tmp, port_mmio + PORT_CMD);
439 readl(port_mmio + PORT_CMD); /* flush */
440}
441
442static int ahci_stop_engine(void __iomem *port_mmio)
443{
444 u32 tmp;
445
446 tmp = readl(port_mmio + PORT_CMD);
447
448 /* check if the HBA is idle */
449 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
450 return 0;
451
452 /* setting HBA to idle */
453 tmp &= ~PORT_CMD_START;
454 writel(tmp, port_mmio + PORT_CMD);
455
456 /* wait for engine to stop. This could be as long as 500 msec */
457 tmp = ata_wait_register(port_mmio + PORT_CMD,
458 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
459 if (tmp & PORT_CMD_LIST_ON)
460 return -EIO;
461
462 return 0;
463}
464
465static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
466 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
467{
468 u32 tmp;
469
470 /* set FIS registers */
471 if (cap & HOST_CAP_64)
472 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
473 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
474
475 if (cap & HOST_CAP_64)
476 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
477 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
478
479 /* enable FIS reception */
480 tmp = readl(port_mmio + PORT_CMD);
481 tmp |= PORT_CMD_FIS_RX;
482 writel(tmp, port_mmio + PORT_CMD);
483
484 /* flush */
485 readl(port_mmio + PORT_CMD);
486}
487
488static int ahci_stop_fis_rx(void __iomem *port_mmio)
489{
490 u32 tmp;
491
492 /* disable FIS reception */
493 tmp = readl(port_mmio + PORT_CMD);
494 tmp &= ~PORT_CMD_FIS_RX;
495 writel(tmp, port_mmio + PORT_CMD);
496
497 /* wait for completion, spec says 500ms, give it 1000 */
498 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
499 PORT_CMD_FIS_ON, 10, 1000);
500 if (tmp & PORT_CMD_FIS_ON)
501 return -EBUSY;
502
503 return 0;
504}
505
506static void ahci_power_up(void __iomem *port_mmio, u32 cap)
507{
508 u32 cmd;
509
510 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
511
512 /* spin up device */
513 if (cap & HOST_CAP_SSS) {
514 cmd |= PORT_CMD_SPIN_UP;
515 writel(cmd, port_mmio + PORT_CMD);
516 }
517
518 /* wake up link */
519 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
520}
521
522static void ahci_power_down(void __iomem *port_mmio, u32 cap)
523{
524 u32 cmd, scontrol;
525
526 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
527
528 if (cap & HOST_CAP_SSC) {
529 /* enable transitions to slumber mode */
530 scontrol = readl(port_mmio + PORT_SCR_CTL);
531 if ((scontrol & 0x0f00) > 0x100) {
532 scontrol &= ~0xf00;
533 writel(scontrol, port_mmio + PORT_SCR_CTL);
534 }
535
536 /* put device into slumber mode */
537 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
538
539 /* wait for the transition to complete */
540 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
541 PORT_CMD_ICC_SLUMBER, 1, 50);
542 }
543
544 /* put device into listen mode */
545 if (cap & HOST_CAP_SSS) {
546 /* first set PxSCTL.DET to 0 */
547 scontrol = readl(port_mmio + PORT_SCR_CTL);
548 scontrol &= ~0xf;
549 writel(scontrol, port_mmio + PORT_SCR_CTL);
550
551 /* then set PxCMD.SUD to 0 */
552 cmd &= ~PORT_CMD_SPIN_UP;
553 writel(cmd, port_mmio + PORT_CMD);
554 }
555}
556
557static void ahci_init_port(void __iomem *port_mmio, u32 cap,
558 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
559{
560 /* power up */
561 ahci_power_up(port_mmio, cap);
562
563 /* enable FIS reception */
564 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
565
566 /* enable DMA */
567 ahci_start_engine(port_mmio);
568}
569
570static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
571{
572 int rc;
573
574 /* disable DMA */
575 rc = ahci_stop_engine(port_mmio);
576 if (rc) {
577 *emsg = "failed to stop engine";
578 return rc;
579 }
580
581 /* disable FIS reception */
582 rc = ahci_stop_fis_rx(port_mmio);
583 if (rc) {
584 *emsg = "failed stop FIS RX";
585 return rc;
586 }
587
588 /* put device into slumber mode */
589 ahci_power_down(port_mmio, cap);
590
591 return 0;
592}
593
594static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
595{
596 u32 cap_save, tmp;
597
598 cap_save = readl(mmio + HOST_CAP);
599 cap_save &= ( (1<<28) | (1<<17) );
600 cap_save |= (1 << 27);
601
602 /* global controller reset */
603 tmp = readl(mmio + HOST_CTL);
604 if ((tmp & HOST_RESET) == 0) {
605 writel(tmp | HOST_RESET, mmio + HOST_CTL);
606 readl(mmio + HOST_CTL); /* flush */
607 }
608
609 /* reset must complete within 1 second, or
610 * the hardware should be considered fried.
611 */
612 ssleep(1);
613
614 tmp = readl(mmio + HOST_CTL);
615 if (tmp & HOST_RESET) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "controller reset failed (0x%x)\n", tmp);
618 return -EIO;
619 }
620
621 writel(HOST_AHCI_EN, mmio + HOST_CTL);
622 (void) readl(mmio + HOST_CTL); /* flush */
623 writel(cap_save, mmio + HOST_CAP);
624 writel(0xf, mmio + HOST_PORTS_IMPL);
625 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
626
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 u16 tmp16;
629
630 /* configure PCS */
631 pci_read_config_word(pdev, 0x92, &tmp16);
632 tmp16 |= 0xf;
633 pci_write_config_word(pdev, 0x92, tmp16);
634 }
635
636 return 0;
637}
638
639static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
640 int n_ports, u32 cap)
641{
642 int i, rc;
643 u32 tmp;
644
645 for (i = 0; i < n_ports; i++) {
646 void __iomem *port_mmio = ahci_port_base(mmio, i);
647 const char *emsg = NULL;
648
649#if 0 /* BIOSen initialize this incorrectly */
650 if (!(hpriv->port_map & (1 << i)))
651 continue;
652#endif
653
654 /* make sure port is not active */
655 rc = ahci_deinit_port(port_mmio, cap, &emsg);
656 if (rc)
657 dev_printk(KERN_WARNING, &pdev->dev,
658 "%s (%d)\n", emsg, rc);
659
660 /* clear SError */
661 tmp = readl(port_mmio + PORT_SCR_ERR);
662 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
663 writel(tmp, port_mmio + PORT_SCR_ERR);
664
665 /* clear port IRQ */
666 tmp = readl(port_mmio + PORT_IRQ_STAT);
667 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
668 if (tmp)
669 writel(tmp, port_mmio + PORT_IRQ_STAT);
670
671 writel(1 << i, mmio + HOST_IRQ_STAT);
672 }
673
674 tmp = readl(mmio + HOST_CTL);
675 VPRINTK("HOST_CTL 0x%x\n", tmp);
676 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
677 tmp = readl(mmio + HOST_CTL);
678 VPRINTK("HOST_CTL 0x%x\n", tmp);
679}
680
681static unsigned int ahci_dev_classify(struct ata_port *ap)
682{
683 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
684 struct ata_taskfile tf;
685 u32 tmp;
686
687 tmp = readl(port_mmio + PORT_SIG);
688 tf.lbah = (tmp >> 24) & 0xff;
689 tf.lbam = (tmp >> 16) & 0xff;
690 tf.lbal = (tmp >> 8) & 0xff;
691 tf.nsect = (tmp) & 0xff;
692
693 return ata_dev_classify(&tf);
694}
695
696static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
697 u32 opts)
698{
699 dma_addr_t cmd_tbl_dma;
700
701 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
702
703 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
704 pp->cmd_slot[tag].status = 0;
705 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
706 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
707}
708
709static int ahci_clo(struct ata_port *ap)
710{
711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
712 struct ahci_host_priv *hpriv = ap->host->private_data;
713 u32 tmp;
714
715 if (!(hpriv->cap & HOST_CAP_CLO))
716 return -EOPNOTSUPP;
717
718 tmp = readl(port_mmio + PORT_CMD);
719 tmp |= PORT_CMD_CLO;
720 writel(tmp, port_mmio + PORT_CMD);
721
722 tmp = ata_wait_register(port_mmio + PORT_CMD,
723 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
724 if (tmp & PORT_CMD_CLO)
725 return -EIO;
726
727 return 0;
728}
729
730static int ahci_prereset(struct ata_port *ap)
731{
732 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
733 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
734 /* ATA_BUSY hasn't cleared, so send a CLO */
735 ahci_clo(ap);
736 }
737
738 return ata_std_prereset(ap);
739}
740
741static int ahci_softreset(struct ata_port *ap, unsigned int *class)
742{
743 struct ahci_port_priv *pp = ap->private_data;
744 void __iomem *mmio = ap->host->mmio_base;
745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
746 const u32 cmd_fis_len = 5; /* five dwords */
747 const char *reason = NULL;
748 struct ata_taskfile tf;
749 u32 tmp;
750 u8 *fis;
751 int rc;
752
753 DPRINTK("ENTER\n");
754
755 if (ata_port_offline(ap)) {
756 DPRINTK("PHY reports no device\n");
757 *class = ATA_DEV_NONE;
758 return 0;
759 }
760
761 /* prepare for SRST (AHCI-1.1 10.4.1) */
762 rc = ahci_stop_engine(port_mmio);
763 if (rc) {
764 reason = "failed to stop engine";
765 goto fail_restart;
766 }
767
768 /* check BUSY/DRQ, perform Command List Override if necessary */
769 ahci_tf_read(ap, &tf);
770 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
771 rc = ahci_clo(ap);
772
773 if (rc == -EOPNOTSUPP) {
774 reason = "port busy but CLO unavailable";
775 goto fail_restart;
776 } else if (rc) {
777 reason = "port busy but CLO failed";
778 goto fail_restart;
779 }
780 }
781
782 /* restart engine */
783 ahci_start_engine(port_mmio);
784
785 ata_tf_init(ap->device, &tf);
786 fis = pp->cmd_tbl;
787
788 /* issue the first D2H Register FIS */
789 ahci_fill_cmd_slot(pp, 0,
790 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
791
792 tf.ctl |= ATA_SRST;
793 ata_tf_to_fis(&tf, fis, 0);
794 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
795
796 writel(1, port_mmio + PORT_CMD_ISSUE);
797
798 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
799 if (tmp & 0x1) {
800 rc = -EIO;
801 reason = "1st FIS failed";
802 goto fail;
803 }
804
805 /* spec says at least 5us, but be generous and sleep for 1ms */
806 msleep(1);
807
808 /* issue the second D2H Register FIS */
809 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
810
811 tf.ctl &= ~ATA_SRST;
812 ata_tf_to_fis(&tf, fis, 0);
813 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
814
815 writel(1, port_mmio + PORT_CMD_ISSUE);
816 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
817
818 /* spec mandates ">= 2ms" before checking status.
819 * We wait 150ms, because that was the magic delay used for
820 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
821 * between when the ATA command register is written, and then
822 * status is checked. Because waiting for "a while" before
823 * checking status is fine, post SRST, we perform this magic
824 * delay here as well.
825 */
826 msleep(150);
827
828 *class = ATA_DEV_NONE;
829 if (ata_port_online(ap)) {
830 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
831 rc = -EIO;
832 reason = "device not ready";
833 goto fail;
834 }
835 *class = ahci_dev_classify(ap);
836 }
837
838 DPRINTK("EXIT, class=%u\n", *class);
839 return 0;
840
841 fail_restart:
842 ahci_start_engine(port_mmio);
843 fail:
844 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
845 return rc;
846}
847
848static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
849{
850 struct ahci_port_priv *pp = ap->private_data;
851 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
852 struct ata_taskfile tf;
853 void __iomem *mmio = ap->host->mmio_base;
854 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
855 int rc;
856
857 DPRINTK("ENTER\n");
858
859 ahci_stop_engine(port_mmio);
860
861 /* clear D2H reception area to properly wait for D2H FIS */
862 ata_tf_init(ap->device, &tf);
863 tf.command = 0xff;
864 ata_tf_to_fis(&tf, d2h_fis, 0);
865
866 rc = sata_std_hardreset(ap, class);
867
868 ahci_start_engine(port_mmio);
869
870 if (rc == 0 && ata_port_online(ap))
871 *class = ahci_dev_classify(ap);
872 if (*class == ATA_DEV_UNKNOWN)
873 *class = ATA_DEV_NONE;
874
875 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
876 return rc;
877}
878
879static void ahci_postreset(struct ata_port *ap, unsigned int *class)
880{
881 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
882 u32 new_tmp, tmp;
883
884 ata_std_postreset(ap, class);
885
886 /* Make sure port's ATAPI bit is set appropriately */
887 new_tmp = tmp = readl(port_mmio + PORT_CMD);
888 if (*class == ATA_DEV_ATAPI)
889 new_tmp |= PORT_CMD_ATAPI;
890 else
891 new_tmp &= ~PORT_CMD_ATAPI;
892 if (new_tmp != tmp) {
893 writel(new_tmp, port_mmio + PORT_CMD);
894 readl(port_mmio + PORT_CMD); /* flush */
895 }
896}
897
898static u8 ahci_check_status(struct ata_port *ap)
899{
900 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
901
902 return readl(mmio + PORT_TFDATA) & 0xFF;
903}
904
905static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
906{
907 struct ahci_port_priv *pp = ap->private_data;
908 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
909
910 ata_tf_from_fis(d2h_fis, tf);
911}
912
913static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
914{
915 struct scatterlist *sg;
916 struct ahci_sg *ahci_sg;
917 unsigned int n_sg = 0;
918
919 VPRINTK("ENTER\n");
920
921 /*
922 * Next, the S/G list.
923 */
924 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
925 ata_for_each_sg(sg, qc) {
926 dma_addr_t addr = sg_dma_address(sg);
927 u32 sg_len = sg_dma_len(sg);
928
929 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
930 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
931 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
932
933 ahci_sg++;
934 n_sg++;
935 }
936
937 return n_sg;
938}
939
940static void ahci_qc_prep(struct ata_queued_cmd *qc)
941{
942 struct ata_port *ap = qc->ap;
943 struct ahci_port_priv *pp = ap->private_data;
944 int is_atapi = is_atapi_taskfile(&qc->tf);
945 void *cmd_tbl;
946 u32 opts;
947 const u32 cmd_fis_len = 5; /* five dwords */
948 unsigned int n_elem;
949
950 /*
951 * Fill in command table information. First, the header,
952 * a SATA Register - Host to Device command FIS.
953 */
954 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
955
956 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
957 if (is_atapi) {
958 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
959 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
960 }
961
962 n_elem = 0;
963 if (qc->flags & ATA_QCFLAG_DMAMAP)
964 n_elem = ahci_fill_sg(qc, cmd_tbl);
965
966 /*
967 * Fill in command slot information.
968 */
969 opts = cmd_fis_len | n_elem << 16;
970 if (qc->tf.flags & ATA_TFLAG_WRITE)
971 opts |= AHCI_CMD_WRITE;
972 if (is_atapi)
973 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
974
975 ahci_fill_cmd_slot(pp, qc->tag, opts);
976}
977
978static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
979{
980 struct ahci_port_priv *pp = ap->private_data;
981 struct ata_eh_info *ehi = &ap->eh_info;
982 unsigned int err_mask = 0, action = 0;
983 struct ata_queued_cmd *qc;
984 u32 serror;
985
986 ata_ehi_clear_desc(ehi);
987
988 /* AHCI needs SError cleared; otherwise, it might lock up */
989 serror = ahci_scr_read(ap, SCR_ERROR);
990 ahci_scr_write(ap, SCR_ERROR, serror);
991
992 /* analyze @irq_stat */
993 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
994
995 if (irq_stat & PORT_IRQ_TF_ERR)
996 err_mask |= AC_ERR_DEV;
997
998 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
999 err_mask |= AC_ERR_HOST_BUS;
1000 action |= ATA_EH_SOFTRESET;
1001 }
1002
1003 if (irq_stat & PORT_IRQ_IF_ERR) {
1004 err_mask |= AC_ERR_ATA_BUS;
1005 action |= ATA_EH_SOFTRESET;
1006 ata_ehi_push_desc(ehi, ", interface fatal error");
1007 }
1008
1009 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1010 ata_ehi_hotplugged(ehi);
1011 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1012 "connection status changed" : "PHY RDY changed");
1013 }
1014
1015 if (irq_stat & PORT_IRQ_UNK_FIS) {
1016 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1017
1018 err_mask |= AC_ERR_HSM;
1019 action |= ATA_EH_SOFTRESET;
1020 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1021 unk[0], unk[1], unk[2], unk[3]);
1022 }
1023
1024 /* okay, let's hand over to EH */
1025 ehi->serror |= serror;
1026 ehi->action |= action;
1027
1028 qc = ata_qc_from_tag(ap, ap->active_tag);
1029 if (qc)
1030 qc->err_mask |= err_mask;
1031 else
1032 ehi->err_mask |= err_mask;
1033
1034 if (irq_stat & PORT_IRQ_FREEZE)
1035 ata_port_freeze(ap);
1036 else
1037 ata_port_abort(ap);
1038}
1039
1040static void ahci_host_intr(struct ata_port *ap)
1041{
1042 void __iomem *mmio = ap->host->mmio_base;
1043 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1044 struct ata_eh_info *ehi = &ap->eh_info;
1045 u32 status, qc_active;
1046 int rc;
1047
1048 status = readl(port_mmio + PORT_IRQ_STAT);
1049 writel(status, port_mmio + PORT_IRQ_STAT);
1050
1051 if (unlikely(status & PORT_IRQ_ERROR)) {
1052 ahci_error_intr(ap, status);
1053 return;
1054 }
1055
1056 if (ap->sactive)
1057 qc_active = readl(port_mmio + PORT_SCR_ACT);
1058 else
1059 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1060
1061 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1062 if (rc > 0)
1063 return;
1064 if (rc < 0) {
1065 ehi->err_mask |= AC_ERR_HSM;
1066 ehi->action |= ATA_EH_SOFTRESET;
1067 ata_port_freeze(ap);
1068 return;
1069 }
1070
1071 /* hmmm... a spurious interupt */
1072
1073 /* some devices send D2H reg with I bit set during NCQ command phase */
1074 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
1075 return;
1076
1077 /* ignore interim PIO setup fis interrupts */
1078 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
1079 return;
1080
1081 if (ata_ratelimit())
1082 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1083 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1084 status, ap->active_tag, ap->sactive);
1085}
1086
1087static void ahci_irq_clear(struct ata_port *ap)
1088{
1089 /* TODO */
1090}
1091
1092static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1093{
1094 struct ata_host *host = dev_instance;
1095 struct ahci_host_priv *hpriv;
1096 unsigned int i, handled = 0;
1097 void __iomem *mmio;
1098 u32 irq_stat, irq_ack = 0;
1099
1100 VPRINTK("ENTER\n");
1101
1102 hpriv = host->private_data;
1103 mmio = host->mmio_base;
1104
1105 /* sigh. 0xffffffff is a valid return from h/w */
1106 irq_stat = readl(mmio + HOST_IRQ_STAT);
1107 irq_stat &= hpriv->port_map;
1108 if (!irq_stat)
1109 return IRQ_NONE;
1110
1111 spin_lock(&host->lock);
1112
1113 for (i = 0; i < host->n_ports; i++) {
1114 struct ata_port *ap;
1115
1116 if (!(irq_stat & (1 << i)))
1117 continue;
1118
1119 ap = host->ports[i];
1120 if (ap) {
1121 ahci_host_intr(ap);
1122 VPRINTK("port %u\n", i);
1123 } else {
1124 VPRINTK("port %u (no irq)\n", i);
1125 if (ata_ratelimit())
1126 dev_printk(KERN_WARNING, host->dev,
1127 "interrupt on disabled port %u\n", i);
1128 }
1129
1130 irq_ack |= (1 << i);
1131 }
1132
1133 if (irq_ack) {
1134 writel(irq_ack, mmio + HOST_IRQ_STAT);
1135 handled = 1;
1136 }
1137
1138 spin_unlock(&host->lock);
1139
1140 VPRINTK("EXIT\n");
1141
1142 return IRQ_RETVAL(handled);
1143}
1144
1145static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1149
1150 if (qc->tf.protocol == ATA_PROT_NCQ)
1151 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1152 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1153 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1154
1155 return 0;
1156}
1157
1158static void ahci_freeze(struct ata_port *ap)
1159{
1160 void __iomem *mmio = ap->host->mmio_base;
1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1162
1163 /* turn IRQ off */
1164 writel(0, port_mmio + PORT_IRQ_MASK);
1165}
1166
1167static void ahci_thaw(struct ata_port *ap)
1168{
1169 void __iomem *mmio = ap->host->mmio_base;
1170 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1171 u32 tmp;
1172
1173 /* clear IRQ */
1174 tmp = readl(port_mmio + PORT_IRQ_STAT);
1175 writel(tmp, port_mmio + PORT_IRQ_STAT);
1176 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1177
1178 /* turn IRQ back on */
1179 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1180}
1181
1182static void ahci_error_handler(struct ata_port *ap)
1183{
1184 void __iomem *mmio = ap->host->mmio_base;
1185 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1186
1187 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1188 /* restart engine */
1189 ahci_stop_engine(port_mmio);
1190 ahci_start_engine(port_mmio);
1191 }
1192
1193 /* perform recovery */
1194 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1195 ahci_postreset);
1196}
1197
1198static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1199{
1200 struct ata_port *ap = qc->ap;
1201 void __iomem *mmio = ap->host->mmio_base;
1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1203
1204 if (qc->flags & ATA_QCFLAG_FAILED)
1205 qc->err_mask |= AC_ERR_OTHER;
1206
1207 if (qc->err_mask) {
1208 /* make DMA engine forget about the failed command */
1209 ahci_stop_engine(port_mmio);
1210 ahci_start_engine(port_mmio);
1211 }
1212}
1213
1214static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1215{
1216 struct ahci_host_priv *hpriv = ap->host->private_data;
1217 struct ahci_port_priv *pp = ap->private_data;
1218 void __iomem *mmio = ap->host->mmio_base;
1219 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1220 const char *emsg = NULL;
1221 int rc;
1222
1223 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1224 if (rc) {
1225 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1226 ahci_init_port(port_mmio, hpriv->cap,
1227 pp->cmd_slot_dma, pp->rx_fis_dma);
1228 }
1229
1230 return rc;
1231}
1232
1233static int ahci_port_resume(struct ata_port *ap)
1234{
1235 struct ahci_port_priv *pp = ap->private_data;
1236 struct ahci_host_priv *hpriv = ap->host->private_data;
1237 void __iomem *mmio = ap->host->mmio_base;
1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1239
1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1241
1242 return 0;
1243}
1244
1245static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1246{
1247 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1248 void __iomem *mmio = host->mmio_base;
1249 u32 ctl;
1250
1251 if (mesg.event == PM_EVENT_SUSPEND) {
1252 /* AHCI spec rev1.1 section 8.3.3:
1253 * Software must disable interrupts prior to requesting a
1254 * transition of the HBA to D3 state.
1255 */
1256 ctl = readl(mmio + HOST_CTL);
1257 ctl &= ~HOST_IRQ_EN;
1258 writel(ctl, mmio + HOST_CTL);
1259 readl(mmio + HOST_CTL); /* flush */
1260 }
1261
1262 return ata_pci_device_suspend(pdev, mesg);
1263}
1264
1265static int ahci_pci_device_resume(struct pci_dev *pdev)
1266{
1267 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1268 struct ahci_host_priv *hpriv = host->private_data;
1269 void __iomem *mmio = host->mmio_base;
1270 int rc;
1271
1272 ata_pci_device_do_resume(pdev);
1273
1274 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1275 rc = ahci_reset_controller(mmio, pdev);
1276 if (rc)
1277 return rc;
1278
1279 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
1280 }
1281
1282 ata_host_resume(host);
1283
1284 return 0;
1285}
1286
1287static int ahci_port_start(struct ata_port *ap)
1288{
1289 struct device *dev = ap->host->dev;
1290 struct ahci_host_priv *hpriv = ap->host->private_data;
1291 struct ahci_port_priv *pp;
1292 void __iomem *mmio = ap->host->mmio_base;
1293 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1294 void *mem;
1295 dma_addr_t mem_dma;
1296 int rc;
1297
1298 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1299 if (!pp)
1300 return -ENOMEM;
1301 memset(pp, 0, sizeof(*pp));
1302
1303 rc = ata_pad_alloc(ap, dev);
1304 if (rc) {
1305 kfree(pp);
1306 return rc;
1307 }
1308
1309 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1310 if (!mem) {
1311 ata_pad_free(ap, dev);
1312 kfree(pp);
1313 return -ENOMEM;
1314 }
1315 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1316
1317 /*
1318 * First item in chunk of DMA memory: the 32-slot command list;
1319 * each slot (command header) is 32 bytes
1320 */
1321 pp->cmd_slot = mem;
1322 pp->cmd_slot_dma = mem_dma;
1323
1324 mem += AHCI_CMD_SLOT_SZ;
1325 mem_dma += AHCI_CMD_SLOT_SZ;
1326
1327 /*
1328 * Second item: Received-FIS area
1329 */
1330 pp->rx_fis = mem;
1331 pp->rx_fis_dma = mem_dma;
1332
1333 mem += AHCI_RX_FIS_SZ;
1334 mem_dma += AHCI_RX_FIS_SZ;
1335
1336 /*
1337 * Third item: data area for storing a single command
1338 * and its scatter-gather table
1339 */
1340 pp->cmd_tbl = mem;
1341 pp->cmd_tbl_dma = mem_dma;
1342
1343 ap->private_data = pp;
1344
1345 /* initialize port */
1346 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1347
1348 return 0;
1349}
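
As a quick check of the carving logic in ahci_port_start(), here is a minimal standalone sketch (not driver code) of the same pointer arithmetic; the region sizes follow the AHCI layout used above (32 command headers of 32 bytes each, then a 256-byte received-FIS area), while the base address is a made-up value:

#include <stdio.h>
#include <stdint.h>

#define CMD_SLOT_SZ (32 * 32)	/* 32 command headers, 32 bytes each */
#define RX_FIS_SZ   256		/* received-FIS area */

int main(void)
{
	uintptr_t base = 0x100000;	/* made-up DMA base address */
	uintptr_t cmd_slot = base;			/* command list */
	uintptr_t rx_fis = cmd_slot + CMD_SLOT_SZ;	/* RX FIS area */
	uintptr_t cmd_tbl = rx_fis + RX_FIS_SZ;		/* command table */

	printf("cmd_slot %#lx  rx_fis %#lx  cmd_tbl %#lx\n",
	       (unsigned long)cmd_slot, (unsigned long)rx_fis,
	       (unsigned long)cmd_tbl);
	return 0;
}

The driver walks the CPU pointer and the DMA address in lockstep so the two views of each region always agree.
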
1350
1351static void ahci_port_stop(struct ata_port *ap)
1352{
1353 struct device *dev = ap->host->dev;
1354 struct ahci_host_priv *hpriv = ap->host->private_data;
1355 struct ahci_port_priv *pp = ap->private_data;
1356 void __iomem *mmio = ap->host->mmio_base;
1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1358 const char *emsg = NULL;
1359 int rc;
1360
1361 /* de-initialize port */
1362 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1363 if (rc)
1364 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1365
1366 ap->private_data = NULL;
1367 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1368 pp->cmd_slot, pp->cmd_slot_dma);
1369 ata_pad_free(ap, dev);
1370 kfree(pp);
1371}
1372
1373static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1374 unsigned int port_idx)
1375{
1376 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1377 base = ahci_port_base_ul(base, port_idx);
1378 VPRINTK("base now==0x%lx\n", base);
1379
1380 port->cmd_addr = base;
1381 port->scr_addr = base + PORT_SCR;
1382
1383 VPRINTK("EXIT\n");
1384}
1385
1386static int ahci_host_init(struct ata_probe_ent *probe_ent)
1387{
1388 struct ahci_host_priv *hpriv = probe_ent->private_data;
1389 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1390 void __iomem *mmio = probe_ent->mmio_base;
1391 unsigned int i, using_dac;
1392 int rc;
1393
1394 rc = ahci_reset_controller(mmio, pdev);
1395 if (rc)
1396 return rc;
1397
1398 hpriv->cap = readl(mmio + HOST_CAP);
1399 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1400 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1401
1402 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1403 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1404
1405 using_dac = hpriv->cap & HOST_CAP_64;
1406 if (using_dac &&
1407 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1408 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1409 if (rc) {
1410 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1411 if (rc) {
1412 dev_printk(KERN_ERR, &pdev->dev,
1413 "64-bit DMA enable failed\n");
1414 return rc;
1415 }
1416 }
1417 } else {
1418 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1419 if (rc) {
1420 dev_printk(KERN_ERR, &pdev->dev,
1421 "32-bit DMA enable failed\n");
1422 return rc;
1423 }
1424 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1425 if (rc) {
1426 dev_printk(KERN_ERR, &pdev->dev,
1427 "32-bit consistent DMA enable failed\n");
1428 return rc;
1429 }
1430 }
1431
1432 for (i = 0; i < probe_ent->n_ports; i++)
1433 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1434
1435 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1436
1437 pci_set_master(pdev);
1438
1439 return 0;
1440}
1441
1442static void ahci_print_info(struct ata_probe_ent *probe_ent)
1443{
1444 struct ahci_host_priv *hpriv = probe_ent->private_data;
1445 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1446 void __iomem *mmio = probe_ent->mmio_base;
1447 u32 vers, cap, impl, speed;
1448 const char *speed_s;
1449 u16 cc;
1450 const char *scc_s;
1451
1452 vers = readl(mmio + HOST_VERSION);
1453 cap = hpriv->cap;
1454 impl = hpriv->port_map;
1455
1456 speed = (cap >> 20) & 0xf;
1457 if (speed == 1)
1458 speed_s = "1.5";
1459 else if (speed == 2)
1460 speed_s = "3";
1461 else
1462 speed_s = "?";
1463
1464 pci_read_config_word(pdev, 0x0a, &cc);
1465 if (cc == 0x0101)
1466 scc_s = "IDE";
1467 else if (cc == 0x0106)
1468 scc_s = "SATA";
1469 else if (cc == 0x0104)
1470 scc_s = "RAID";
1471 else
1472 scc_s = "unknown";
1473
1474 dev_printk(KERN_INFO, &pdev->dev,
1475 "AHCI %02x%02x.%02x%02x "
1476 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1477 ,
1478
1479 (vers >> 24) & 0xff,
1480 (vers >> 16) & 0xff,
1481 (vers >> 8) & 0xff,
1482 vers & 0xff,
1483
1484 ((cap >> 8) & 0x1f) + 1,
1485 (cap & 0x1f) + 1,
1486 speed_s,
1487 impl,
1488 scc_s);
1489
1490 dev_printk(KERN_INFO, &pdev->dev,
1491 "flags: "
1492 "%s%s%s%s%s%s"
1493 "%s%s%s%s%s%s%s\n"
1494 ,
1495
1496 cap & (1 << 31) ? "64bit " : "",
1497 cap & (1 << 30) ? "ncq " : "",
1498 cap & (1 << 28) ? "ilck " : "",
1499 cap & (1 << 27) ? "stag " : "",
1500 cap & (1 << 26) ? "pm " : "",
1501 cap & (1 << 25) ? "led " : "",
1502
1503 cap & (1 << 24) ? "clo " : "",
1504 cap & (1 << 19) ? "nz " : "",
1505 cap & (1 << 18) ? "only " : "",
1506 cap & (1 << 17) ? "pmp " : "",
1507 cap & (1 << 15) ? "pio " : "",
1508 cap & (1 << 14) ? "slum " : "",
1509 cap & (1 << 13) ? "part " : ""
1510 );
1511}
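
The field decoding in ahci_print_info() is easy to verify in isolation. Below is a minimal sketch using the same shifts and masks; the HOST_CAP value passed in is hypothetical:

#include <stdio.h>
#include <stdint.h>

static void decode_host_cap(uint32_t cap)
{
	unsigned slots = ((cap >> 8) & 0x1f) + 1;	/* command slots */
	unsigned ports = (cap & 0x1f) + 1;		/* implemented ports */
	unsigned speed = (cap >> 20) & 0xf;		/* interface speed */

	printf("%u slots %u ports %s Gbps%s\n", slots, ports,
	       speed == 1 ? "1.5" : speed == 2 ? "3" : "?",
	       (cap & (1u << 30)) ? " ncq" : "");
}

int main(void)
{
	decode_host_cap(0x40241f05);	/* hypothetical HOST_CAP value */
	return 0;
}
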
1512
1513static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1514{
1515 static int printed_version;
1516 struct ata_probe_ent *probe_ent = NULL;
1517 struct ahci_host_priv *hpriv;
1518 unsigned long base;
1519 void __iomem *mmio_base;
1520 unsigned int board_idx = (unsigned int) ent->driver_data;
1521 int have_msi, pci_dev_busy = 0;
1522 int rc;
1523
1524 VPRINTK("ENTER\n");
1525
1526 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1527
1528 if (!printed_version++)
1529 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1530
1531 /* JMicron-specific fixup: make sure we're in AHCI mode */
1532 /* This is protected from races with ata_jmicron by the pci probe
1533 locking */
1534 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1535 /* AHCI enable, AHCI on function 0 */
1536 pci_write_config_byte(pdev, 0x41, 0xa1);
1537 /* Function 1 is the PATA controller */
1538 if (PCI_FUNC(pdev->devfn))
1539 return -ENODEV;
1540 }
1541
1542 rc = pci_enable_device(pdev);
1543 if (rc)
1544 return rc;
1545
1546 rc = pci_request_regions(pdev, DRV_NAME);
1547 if (rc) {
1548 pci_dev_busy = 1;
1549 goto err_out;
1550 }
1551
1552 if (pci_enable_msi(pdev) == 0)
1553 have_msi = 1;
1554 else {
1555 pci_intx(pdev, 1);
1556 have_msi = 0;
1557 }
1558
1559 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1560 if (probe_ent == NULL) {
1561 rc = -ENOMEM;
1562 goto err_out_msi;
1563 }
1564
1565 memset(probe_ent, 0, sizeof(*probe_ent));
1566 probe_ent->dev = pci_dev_to_dev(pdev);
1567 INIT_LIST_HEAD(&probe_ent->node);
1568
1569 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1570 if (mmio_base == NULL) {
1571 rc = -ENOMEM;
1572 goto err_out_free_ent;
1573 }
1574 base = (unsigned long) mmio_base;
1575
1576 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1577 if (!hpriv) {
1578 rc = -ENOMEM;
1579 goto err_out_iounmap;
1580 }
1581 memset(hpriv, 0, sizeof(*hpriv));
1582
1583 probe_ent->sht = ahci_port_info[board_idx].sht;
1584 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1585 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1586 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1587 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1588
1589 probe_ent->irq = pdev->irq;
1590 probe_ent->irq_flags = IRQF_SHARED;
1591 probe_ent->mmio_base = mmio_base;
1592 probe_ent->private_data = hpriv;
1593
1594 if (have_msi)
1595 hpriv->flags |= AHCI_FLAG_MSI;
1596
1597 /* initialize adapter */
1598 rc = ahci_host_init(probe_ent);
1599 if (rc)
1600 goto err_out_hpriv;
1601
1602 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1603 (hpriv->cap & HOST_CAP_NCQ))
1604 probe_ent->port_flags |= ATA_FLAG_NCQ;
1605
1606 ahci_print_info(probe_ent);
1607
1608 /* FIXME: check ata_device_add return value */
1609 ata_device_add(probe_ent);
1610 kfree(probe_ent);
1611
1612 return 0;
1613
1614err_out_hpriv:
1615 kfree(hpriv);
1616err_out_iounmap:
1617 pci_iounmap(pdev, mmio_base);
1618err_out_free_ent:
1619 kfree(probe_ent);
1620err_out_msi:
1621 if (have_msi)
1622 pci_disable_msi(pdev);
1623 else
1624 pci_intx(pdev, 0);
1625 pci_release_regions(pdev);
1626err_out:
1627 if (!pci_dev_busy)
1628 pci_disable_device(pdev);
1629 return rc;
1630}
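
ahci_init_one() is a textbook instance of the kernel's goto-unwind idiom: each failure path jumps to a label that releases only what was acquired before it, in reverse order. A minimal standalone sketch of the idiom (resource names are illustrative, not from the driver):

#include <stdio.h>
#include <stdlib.h>

static int probe(char **out_a, char **out_b)
{
	char *a, *b;

	a = malloc(16);			/* resource 1 */
	if (!a)
		goto err_out;
	b = malloc(16);			/* resource 2 */
	if (!b)
		goto err_free_a;

	*out_a = a;			/* success: hand both to the caller */
	*out_b = b;
	return 0;

err_free_a:				/* unwind in reverse order */
	free(a);
err_out:
	return -1;
}

int main(void)
{
	char *a, *b;

	if (probe(&a, &b) == 0) {
		printf("probe ok\n");
		free(b);
		free(a);
	}
	return 0;
}
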
1631
1632static void ahci_remove_one (struct pci_dev *pdev)
1633{
1634 struct device *dev = pci_dev_to_dev(pdev);
1635 struct ata_host *host = dev_get_drvdata(dev);
1636 struct ahci_host_priv *hpriv = host->private_data;
1637 unsigned int i;
1638 int have_msi;
1639
1640 for (i = 0; i < host->n_ports; i++)
1641 ata_port_detach(host->ports[i]);
1642
1643 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1644 free_irq(host->irq, host);
1645
1646 for (i = 0; i < host->n_ports; i++) {
1647 struct ata_port *ap = host->ports[i];
1648
1649 ata_scsi_release(ap->scsi_host);
1650 scsi_host_put(ap->scsi_host);
1651 }
1652
1653 kfree(hpriv);
1654 pci_iounmap(pdev, host->mmio_base);
1655 kfree(host);
1656
1657 if (have_msi)
1658 pci_disable_msi(pdev);
1659 else
1660 pci_intx(pdev, 0);
1661 pci_release_regions(pdev);
1662 pci_disable_device(pdev);
1663 dev_set_drvdata(dev, NULL);
1664}
1665
1666static int __init ahci_init(void)
1667{
1668 return pci_register_driver(&ahci_pci_driver);
1669}
1670
1671static void __exit ahci_exit(void)
1672{
1673 pci_unregister_driver(&ahci_pci_driver);
1674}
1675
1676
1677MODULE_AUTHOR("Jeff Garzik");
1678MODULE_DESCRIPTION("AHCI SATA low-level driver");
1679MODULE_LICENSE("GPL");
1680MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1681MODULE_VERSION(DRV_VERSION);
1682
1683module_init(ahci_init);
1684module_exit(ahci_exit);
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
new file mode 100644
index 000000000000..1d1c30a2fcd0
--- /dev/null
+++ b/drivers/ata/ata_generic.c
@@ -0,0 +1,252 @@
1/*
2 * ata_generic.c - Generic PATA/SATA controller driver.
3 * Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved.
4 *
5 * Elements from ide/pci/generic.c
6 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
7 * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
8 *
9 * May be copied or modified under the terms of the GNU General Public License
10 *
11 * Driver for PCI IDE interfaces implementing the standard bus mastering
12 * interface functionality. This assumes the BIOS did the drive set up and
13 * tuning for us. By default we do not grab all IDE class devices as they
14 * may have other drivers or need fixups to avoid problems. Instead we keep
15 * a default list of stuff without documentation/driver that appears to
16 * work.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/pci.h>
22#include <linux/init.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <scsi/scsi_host.h>
26#include <linux/libata.h>
27
28#define DRV_NAME "ata_generic"
29#define DRV_VERSION "0.2.6"
30
31/*
32 * A generic parallel ATA driver using libata
33 */
34
35/**
36 * generic_pre_reset - probe begin
37 * @ap: ATA port
38 *
39 * Set up cable type and use generic probe init
40 */
41
42static int generic_pre_reset(struct ata_port *ap)
43{
44 ap->cbl = ATA_CBL_PATA80;
45 return ata_std_prereset(ap);
46}
47
48
49/**
50 * generic_error_handler - generic ATA error handler
51 * @ap: Port to recover
52 *
53 * Runs the BMDMA error-handling path with our prereset hook.
54 * LOCKING:
55 * None (inherited from caller).
56 */
57
58
59static void generic_error_handler(struct ata_port *ap)
60{
61 ata_bmdma_drive_eh(ap, generic_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
62}
63
64/**
65 * generic_set_mode - mode setting
66 * @ap: interface to set up
67 *
68 * Use a non-standard set_mode function. We don't want to be tuned.
69 * The BIOS configured everything. Our job is not to fiddle. We
70 * read the DMA enable bits from the PCI configuration of the device
71 * and respect them.
72 */
73
74static void generic_set_mode(struct ata_port *ap)
75{
76 int dma_enabled = 0;
77 int i;
78
79 /* Status bits 5 and 6 indicate if DMA is enabled for master/slave */
80 if (ap->ioaddr.bmdma_addr)
81 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
82
83 for (i = 0; i < ATA_MAX_DEVICES; i++) {
84 struct ata_device *dev = &ap->device[i];
85 if (ata_dev_enabled(dev)) {
86 /* We don't really care */
87 dev->pio_mode = XFER_PIO_0;
88 dev->dma_mode = XFER_MW_DMA_0;
89 /* We do need the right mode information for DMA or PIO
90 and this comes from the current configuration flags */
91 if (dma_enabled & (1 << (5 + i))) {
92 dev->xfer_mode = XFER_MW_DMA_0;
93 dev->xfer_shift = ATA_SHIFT_MWDMA;
94 dev->flags &= ~ATA_DFLAG_PIO;
95 } else {
96 dev->xfer_mode = XFER_PIO_0;
97 dev->xfer_shift = ATA_SHIFT_PIO;
98 dev->flags |= ATA_DFLAG_PIO;
99 }
100 }
101 }
102}
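
To see the bit test from generic_set_mode() in isolation: bit 5 covers the master and bit 6 the slave. A standalone sketch with a hypothetical status byte:

#include <stdio.h>

int main(void)
{
	unsigned char dma_enabled = 0x20;	/* hypothetical: drive 0 only */
	int i;

	for (i = 0; i < 2; i++)
		printf("drive %d: %s\n", i,
		       dma_enabled & (1 << (5 + i)) ? "MWDMA" : "PIO");
	return 0;
}
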
103
104static struct scsi_host_template generic_sht = {
105 .module = THIS_MODULE,
106 .name = DRV_NAME,
107 .ioctl = ata_scsi_ioctl,
108 .queuecommand = ata_scsi_queuecmd,
109 .can_queue = ATA_DEF_QUEUE,
110 .this_id = ATA_SHT_THIS_ID,
111 .sg_tablesize = LIBATA_MAX_PRD,
112 .max_sectors = ATA_MAX_SECTORS,
113 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
114 .emulated = ATA_SHT_EMULATED,
115 .use_clustering = ATA_SHT_USE_CLUSTERING,
116 .proc_name = DRV_NAME,
117 .dma_boundary = ATA_DMA_BOUNDARY,
118 .slave_configure = ata_scsi_slave_config,
119 .bios_param = ata_std_bios_param,
120};
121
122static struct ata_port_operations generic_port_ops = {
123 .set_mode = generic_set_mode,
124
125 .port_disable = ata_port_disable,
126 .tf_load = ata_tf_load,
127 .tf_read = ata_tf_read,
128 .check_status = ata_check_status,
129 .exec_command = ata_exec_command,
130 .dev_select = ata_std_dev_select,
131
132 .bmdma_setup = ata_bmdma_setup,
133 .bmdma_start = ata_bmdma_start,
134 .bmdma_stop = ata_bmdma_stop,
135 .bmdma_status = ata_bmdma_status,
136
137 .data_xfer = ata_pio_data_xfer,
138
139 .freeze = ata_bmdma_freeze,
140 .thaw = ata_bmdma_thaw,
141 .error_handler = generic_error_handler,
142 .post_internal_cmd = ata_bmdma_post_internal_cmd,
143
144 .qc_prep = ata_qc_prep,
145 .qc_issue = ata_qc_issue_prot,
146 .eng_timeout = ata_eng_timeout,
147 .irq_handler = ata_interrupt,
148 .irq_clear = ata_bmdma_irq_clear,
149
150 .port_start = ata_port_start,
151 .port_stop = ata_port_stop,
152 .host_stop = ata_host_stop
153};
154
155static int all_generic_ide; /* Set to claim all devices */
156
157/**
158 * ata_generic_init_one - attach generic IDE
159 * @dev: PCI device found
160 * @id: match entry
161 *
162 * Called each time a matching IDE interface is found. We check if the
163 * interface is one we wish to claim and if so we perform any chip
164 * specific hacks then let the ATA layer do the heavy lifting.
165 */
166
167static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
168{
169 u16 command;
170 static struct ata_port_info info = {
171 .sht = &generic_sht,
172 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
173 .pio_mask = 0x1f,
174 .mwdma_mask = 0x07,
175 .udma_mask = 0x3f,
176 .port_ops = &generic_port_ops
177 };
178 static struct ata_port_info *port_info[2] = { &info, &info };
179
180 /* Don't use the generic entry unless instructed to do so */
181 if (id->driver_data == 1 && all_generic_ide == 0)
182 return -ENODEV;
183
184 /* Devices that need care */
185 if (dev->vendor == PCI_VENDOR_ID_UMC &&
186 dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
187 (!(PCI_FUNC(dev->devfn) & 1)))
188 return -ENODEV;
189
190 if (dev->vendor == PCI_VENDOR_ID_OPTI &&
191 dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
192 (!(PCI_FUNC(dev->devfn) & 1)))
193 return -ENODEV;
194
195 /* Don't re-enable devices in generic mode or we will break some
196 motherboards with disabled and unused IDE controllers */
197 pci_read_config_word(dev, PCI_COMMAND, &command);
198 if (!(command & PCI_COMMAND_IO))
199 return -ENODEV;
200
201 if (dev->vendor == PCI_VENDOR_ID_AL)
202 ata_pci_clear_simplex(dev);
203
204 return ata_pci_init_one(dev, port_info, 2);
205}
206
207static struct pci_device_id ata_generic[] = {
208 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
209 { PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
210 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
211 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), },
212 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), },
213 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
214 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
215 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
216 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
217 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
218 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
219 /* Must come last. If you add entries adjust this table appropriately */
220 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
221 { 0, },
222};
223
224static struct pci_driver ata_generic_pci_driver = {
225 .name = DRV_NAME,
226 .id_table = ata_generic,
227 .probe = ata_generic_init_one,
228 .remove = ata_pci_remove_one
229};
230
231static int __init ata_generic_init(void)
232{
233 return pci_register_driver(&ata_generic_pci_driver);
234}
235
236
237static void __exit ata_generic_exit(void)
238{
239 pci_unregister_driver(&ata_generic_pci_driver);
240}
241
242
243MODULE_AUTHOR("Alan Cox");
244MODULE_DESCRIPTION("low-level driver for generic ATA");
245MODULE_LICENSE("GPL");
246MODULE_DEVICE_TABLE(pci, ata_generic);
247MODULE_VERSION(DRV_VERSION);
248
249module_init(ata_generic_init);
250module_exit(ata_generic_exit);
251
252module_param(all_generic_ide, int, 0);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
new file mode 100644
index 000000000000..ab2ecccf7798
--- /dev/null
+++ b/drivers/ata/ata_piix.c
@@ -0,0 +1,1258 @@
1/*
2 * ata_piix.c - Intel PATA/SATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 *
9 * Copyright 2003-2005 Red Hat Inc
10 * Copyright 2003-2005 Jeff Garzik
11 *
12 *
13 * Copyright header from piix.c:
14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
18 *
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 *
34 *
35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.*
37 *
38 * Hardware documentation available at http://developer.intel.com/
39 *
40 * Documentation
41 * Publicly available from the Intel web site. Errata documentation
42 * is also publicly available. As an aid to anyone hacking on this
43 * driver, the list of relevant errata, going back to the PIIX4, is
44 * below. Older device documentation is now a bit tricky to find.
45 *
46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independent device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This
50 * driver supports only the chips with independent timing (that is those
51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
52 * for the early chip drivers.
53 *
54 * Errata of note:
55 *
56 * Unfixable
57 * PIIX4 errata #9 - Only on ultra obscure hw
58 * ICH3 errata #13 - Not observed to affect real hw
59 * by Intel
60 *
61 * Things we must deal with
62 * PIIX4 errata #10 - BM IDE hang with non UDMA
63 * (must stop/start dma to recover)
64 * 440MX errata #15 - As PIIX4 errata #10
65 * PIIX4 errata #15 - Must not read control registers
66 * during a PIO transfer
67 * 440MX errata #13 - As PIIX4 errata #15
68 * ICH2 errata #21 - DMA mode 0 doesn't work right
69 * ICH0/1 errata #55 - As ICH2 errata #21
70 * ICH2 spec c #9 - Extra operations needed to handle
71 * drive hotswap [NOT YET SUPPORTED]
72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
73 * and must be dword aligned
74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
75 *
76 * Should have been BIOS fixed:
77 * 450NX: errata #19 - DMA hangs on old 450NX
78 * 450NX: errata #20 - DMA hangs on old 450NX
79 * 450NX: errata #25 - Corruption with DMA on old 450NX
80 * ICH3 errata #15 - IDE deadlock under high load
81 * (BIOS must set dev 31 fn 0 bit 23)
82 * ICH3 errata #18 - Don't use native mode
83 */
84
85#include <linux/kernel.h>
86#include <linux/module.h>
87#include <linux/pci.h>
88#include <linux/init.h>
89#include <linux/blkdev.h>
90#include <linux/delay.h>
91#include <linux/device.h>
92#include <scsi/scsi_host.h>
93#include <linux/libata.h>
94
95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.00ac6"
97
98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
100 ICH5_PMR = 0x90, /* port mapping register */
101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */
103
104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108
109 /* combined mode. if set, PATA is channel 0.
110 * if clear, PATA is channel 1.
111 */
112 PIIX_PORT_ENABLED = (1 << 0),
113 PIIX_PORT_PRESENT = (1 << 4),
114
115 PIIX_80C_PRI = (1 << 5) | (1 << 4),
116 PIIX_80C_SEC = (1 << 7) | (1 << 6),
117
118 /* controller IDs */
119 piix_pata_33 = 0, /* PIIX3 or 4 at 33MHz */
120 ich_pata_33 = 1, /* ICH up to UDMA 33 only */
121 ich_pata_66 = 2, /* ICH up to 66 MHz */
122 ich_pata_100 = 3, /* ICH up to UDMA 100 */
123 ich_pata_133 = 4, /* ICH up to UDMA 133 */
124 ich5_sata = 5,
125 esb_sata = 6,
126 ich6_sata = 7,
127 ich6_sata_ahci = 8,
128 ich6m_sata_ahci = 9,
129 ich7m_sata_ahci = 10,
130 ich8_sata_ahci = 11,
131
132 /* constants for mapping table */
133 P0 = 0, /* port 0 */
134 P1 = 1, /* port 1 */
135 P2 = 2, /* port 2 */
136 P3 = 3, /* port 3 */
137 IDE = -1, /* IDE */
138 NA = -2, /* not available */
139 RV = -3, /* reserved */
140
141 PIIX_AHCI_DEVICE = 6,
142};
143
144struct piix_map_db {
145 const u32 mask;
146 const u16 port_enable;
147 const int present_shift;
148 const int map[][4];
149};
150
151struct piix_host_priv {
152 const int *map;
153 const struct piix_map_db *map_db;
154};
155
156static int piix_init_one (struct pci_dev *pdev,
157 const struct pci_device_id *ent);
158static void piix_host_stop(struct ata_host *host);
159static void piix_pata_error_handler(struct ata_port *ap);
160static void ich_pata_error_handler(struct ata_port *ap);
161static void piix_sata_error_handler(struct ata_port *ap);
162static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
163static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
164static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
165
166static unsigned int in_module_init = 1;
167
168static const struct pci_device_id piix_pci_tbl[] = {
169#ifdef ATA_ENABLE_PATA
170 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
171 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
172 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
173 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
174 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
175 /* Intel PIIX4 */
176 { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
177 /* Intel PIIX4 */
178 { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
179 /* Intel PIIX */
180 { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
181 /* Intel ICH (i810, i815, i840) UDMA 66*/
182 { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
183 /* Intel ICH0 : UDMA 33*/
184 { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
185 /* Intel ICH2M */
186 { 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
187 /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
188 { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
189 /* Intel ICH3M */
190 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
191 /* Intel ICH3 (E7500/1) UDMA 100 */
192 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
193 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
194 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
195 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
196 /* Intel ICH5 */
197 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
198 /* C-ICH (i810E2) */
199 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
200 /* ESB (855GME/875P + 6300ESB) UDMA 100 */
201 { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
202 /* ICH6 (and 6) (i915) UDMA 100 */
203 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
204 /* ICH7/7-R (i945, i975) UDMA 100*/
205 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
206 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
207#endif
208
209 /* NOTE: The following PCI ids must be kept in sync with the
210 * list in drivers/pci/quirks.c.
211 */
212
213 /* 82801EB (ICH5) */
214 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
215 /* 82801EB (ICH5) */
216 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
217 /* 6300ESB (ICH5 variant with broken PCS present bits) */
218 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
219 /* 6300ESB pretending RAID */
220 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
221 /* 82801FB/FW (ICH6/ICH6W) */
222 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
223 /* 82801FR/FRW (ICH6R/ICH6RW) */
224 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
225 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
226 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
227 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
228 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
229 /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
230 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
231 /* Enterprise Southbridge 2 (where's the datasheet?) */
232 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
233 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
234 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
235 /* SATA Controller 2 IDE (ICH8, ditto) */
236 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
237 /* Mobile SATA Controller IDE (ICH8M, ditto) */
238 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
239
240 { } /* terminate list */
241};
242
243static struct pci_driver piix_pci_driver = {
244 .name = DRV_NAME,
245 .id_table = piix_pci_tbl,
246 .probe = piix_init_one,
247 .remove = ata_pci_remove_one,
248 .suspend = ata_pci_device_suspend,
249 .resume = ata_pci_device_resume,
250};
251
252static struct scsi_host_template piix_sht = {
253 .module = THIS_MODULE,
254 .name = DRV_NAME,
255 .ioctl = ata_scsi_ioctl,
256 .queuecommand = ata_scsi_queuecmd,
257 .can_queue = ATA_DEF_QUEUE,
258 .this_id = ATA_SHT_THIS_ID,
259 .sg_tablesize = LIBATA_MAX_PRD,
260 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
261 .emulated = ATA_SHT_EMULATED,
262 .use_clustering = ATA_SHT_USE_CLUSTERING,
263 .proc_name = DRV_NAME,
264 .dma_boundary = ATA_DMA_BOUNDARY,
265 .slave_configure = ata_scsi_slave_config,
266 .slave_destroy = ata_scsi_slave_destroy,
267 .bios_param = ata_std_bios_param,
268 .resume = ata_scsi_device_resume,
269 .suspend = ata_scsi_device_suspend,
270};
271
272static const struct ata_port_operations piix_pata_ops = {
273 .port_disable = ata_port_disable,
274 .set_piomode = piix_set_piomode,
275 .set_dmamode = piix_set_dmamode,
276 .mode_filter = ata_pci_default_filter,
277
278 .tf_load = ata_tf_load,
279 .tf_read = ata_tf_read,
280 .check_status = ata_check_status,
281 .exec_command = ata_exec_command,
282 .dev_select = ata_std_dev_select,
283
284 .bmdma_setup = ata_bmdma_setup,
285 .bmdma_start = ata_bmdma_start,
286 .bmdma_stop = ata_bmdma_stop,
287 .bmdma_status = ata_bmdma_status,
288 .qc_prep = ata_qc_prep,
289 .qc_issue = ata_qc_issue_prot,
290 .data_xfer = ata_pio_data_xfer,
291
292 .freeze = ata_bmdma_freeze,
293 .thaw = ata_bmdma_thaw,
294 .error_handler = piix_pata_error_handler,
295 .post_internal_cmd = ata_bmdma_post_internal_cmd,
296
297 .irq_handler = ata_interrupt,
298 .irq_clear = ata_bmdma_irq_clear,
299
300 .port_start = ata_port_start,
301 .port_stop = ata_port_stop,
302 .host_stop = piix_host_stop,
303};
304
305static const struct ata_port_operations ich_pata_ops = {
306 .port_disable = ata_port_disable,
307 .set_piomode = piix_set_piomode,
308 .set_dmamode = ich_set_dmamode,
309 .mode_filter = ata_pci_default_filter,
310
311 .tf_load = ata_tf_load,
312 .tf_read = ata_tf_read,
313 .check_status = ata_check_status,
314 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select,
316
317 .bmdma_setup = ata_bmdma_setup,
318 .bmdma_start = ata_bmdma_start,
319 .bmdma_stop = ata_bmdma_stop,
320 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot,
323 .data_xfer = ata_pio_data_xfer,
324
325 .freeze = ata_bmdma_freeze,
326 .thaw = ata_bmdma_thaw,
327 .error_handler = ich_pata_error_handler,
328 .post_internal_cmd = ata_bmdma_post_internal_cmd,
329
330 .irq_handler = ata_interrupt,
331 .irq_clear = ata_bmdma_irq_clear,
332
333 .port_start = ata_port_start,
334 .port_stop = ata_port_stop,
335 .host_stop = ata_host_stop,
336};
337
338static const struct ata_port_operations piix_sata_ops = {
339 .port_disable = ata_port_disable,
340
341 .tf_load = ata_tf_load,
342 .tf_read = ata_tf_read,
343 .check_status = ata_check_status,
344 .exec_command = ata_exec_command,
345 .dev_select = ata_std_dev_select,
346
347 .bmdma_setup = ata_bmdma_setup,
348 .bmdma_start = ata_bmdma_start,
349 .bmdma_stop = ata_bmdma_stop,
350 .bmdma_status = ata_bmdma_status,
351 .qc_prep = ata_qc_prep,
352 .qc_issue = ata_qc_issue_prot,
353 .data_xfer = ata_pio_data_xfer,
354
355 .freeze = ata_bmdma_freeze,
356 .thaw = ata_bmdma_thaw,
357 .error_handler = piix_sata_error_handler,
358 .post_internal_cmd = ata_bmdma_post_internal_cmd,
359
360 .irq_handler = ata_interrupt,
361 .irq_clear = ata_bmdma_irq_clear,
362
363 .port_start = ata_port_start,
364 .port_stop = ata_port_stop,
365 .host_stop = piix_host_stop,
366};
367
368static const struct piix_map_db ich5_map_db = {
369 .mask = 0x7,
370 .port_enable = 0x3,
371 .present_shift = 4,
372 .map = {
373 /* PM PS SM SS MAP */
374 { P0, NA, P1, NA }, /* 000b */
375 { P1, NA, P0, NA }, /* 001b */
376 { RV, RV, RV, RV },
377 { RV, RV, RV, RV },
378 { P0, P1, IDE, IDE }, /* 100b */
379 { P1, P0, IDE, IDE }, /* 101b */
380 { IDE, IDE, P0, P1 }, /* 110b */
381 { IDE, IDE, P1, P0 }, /* 111b */
382 },
383};
384
385static const struct piix_map_db ich6_map_db = {
386 .mask = 0x3,
387 .port_enable = 0xf,
388 .present_shift = 4,
389 .map = {
390 /* PM PS SM SS MAP */
391 { P0, P2, P1, P3 }, /* 00b */
392 { IDE, IDE, P1, P3 }, /* 01b */
393 { P0, P2, IDE, IDE }, /* 10b */
394 { RV, RV, RV, RV },
395 },
396};
397
398static const struct piix_map_db ich6m_map_db = {
399 .mask = 0x3,
400 .port_enable = 0x5,
401 .present_shift = 4,
402 .map = {
403 /* PM PS SM SS MAP */
404 { P0, P2, RV, RV }, /* 00b */
405 { RV, RV, RV, RV },
406 { P0, P2, IDE, IDE }, /* 10b */
407 { RV, RV, RV, RV },
408 },
409};
410
411static const struct piix_map_db ich7m_map_db = {
412 .mask = 0x3,
413 .port_enable = 0x5,
414 .present_shift = 4,
415
416 /* Map 01b isn't specified in the doc but some notebooks use
417 * it anyway. ATM, the only case spotted carries subsystem ID
418 * 1025:0107. This is the only difference from ich6m.
419 */
420 .map = {
421 /* PM PS SM SS MAP */
422 { P0, P2, RV, RV }, /* 00b */
423 { IDE, IDE, P1, P3 }, /* 01b */
424 { P0, P2, IDE, IDE }, /* 10b */
425 { RV, RV, RV, RV },
426 },
427};
428
429static const struct piix_map_db ich8_map_db = {
430 .mask = 0x3,
431 .port_enable = 0x3,
432 .present_shift = 8,
433 .map = {
434 /* PM PS SM SS MAP */
435 { P0, NA, P1, NA }, /* 00b (hardwired) */
436 { RV, RV, RV, RV },
437 { RV, RV, RV, RV }, /* 10b (never) */
438 { RV, RV, RV, RV },
439 },
440};
441
442static const struct piix_map_db *piix_map_db_table[] = {
443 [ich5_sata] = &ich5_map_db,
444 [esb_sata] = &ich5_map_db,
445 [ich6_sata] = &ich6_map_db,
446 [ich6_sata_ahci] = &ich6_map_db,
447 [ich6m_sata_ahci] = &ich6m_map_db,
448 [ich7m_sata_ahci] = &ich7m_map_db,
449 [ich8_sata_ahci] = &ich8_map_db,
450};
451
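
Reading the map tables: the PMR value, masked with .mask, selects a row, and the row entries say what the PM/PS/SM/SS slots are wired to. A standalone sketch of that lookup against the ICH5 table, with a hypothetical PMR readout:

#include <stdio.h>

enum { P0 = 0, P1 = 1, P2 = 2, P3 = 3, IDE = -1, NA = -2, RV = -3 };

static const int ich5_map[8][4] = {
	{ P0, NA, P1, NA },	/* 000b */
	{ P1, NA, P0, NA },	/* 001b */
	{ RV, RV, RV, RV },
	{ RV, RV, RV, RV },
	{ P0, P1, IDE, IDE },	/* 100b */
	{ P1, P0, IDE, IDE },	/* 101b */
	{ IDE, IDE, P0, P1 },	/* 110b */
	{ IDE, IDE, P1, P0 },	/* 111b */
};

int main(void)
{
	unsigned char pmr = 0x04;	/* hypothetical PMR readout */
	const int *row = ich5_map[pmr & 0x7];
	int i;

	for (i = 0; i < 4; i++)
		printf("slot %d -> %d\n", i, row[i]);
	return 0;
}
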
452static struct ata_port_info piix_port_info[] = {
453 /* piix_pata_33: 0: PIIX3 or 4 at 33MHz */
454 {
455 .sht = &piix_sht,
456 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
457 .pio_mask = 0x1f, /* pio0-4 */
458 .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
459 .udma_mask = ATA_UDMA_MASK_40C,
460 .port_ops = &piix_pata_ops,
461 },
462
463 /* ich_pata_33: 1 ICH0 - ICH at 33MHz */
464 {
465 .sht = &piix_sht,
466 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
467 .pio_mask = 0x1f, /* pio 0-4 */
468 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
469 .udma_mask = ATA_UDMA2, /* UDMA33 */
470 .port_ops = &ich_pata_ops,
471 },
472 /* ich_pata_66: 2 ICH controllers up to 66MHz */
473 {
474 .sht = &piix_sht,
475 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
476 .pio_mask = 0x1f, /* pio 0-4 */
477 .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
478 .udma_mask = ATA_UDMA4,
479 .port_ops = &ich_pata_ops,
480 },
481
482 /* ich_pata_100: 3 */
483 {
484 .sht = &piix_sht,
485 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
486 .pio_mask = 0x1f, /* pio0-4 */
487 .mwdma_mask = 0x06, /* mwdma1-2 */
488 .udma_mask = ATA_UDMA5, /* udma0-5 */
489 .port_ops = &ich_pata_ops,
490 },
491
492 /* ich_pata_133: 4 ICH with full UDMA6 */
493 {
494 .sht = &piix_sht,
495 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
496 .pio_mask = 0x1f, /* pio 0-4 */
497 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
498 .udma_mask = ATA_UDMA6, /* UDMA133 */
499 .port_ops = &ich_pata_ops,
500 },
501
502 /* ich5_sata: 5 */
503 {
504 .sht = &piix_sht,
505 .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
506 PIIX_FLAG_IGNORE_PCS,
507 .pio_mask = 0x1f, /* pio0-4 */
508 .mwdma_mask = 0x07, /* mwdma0-2 */
509 .udma_mask = 0x7f, /* udma0-6 */
510 .port_ops = &piix_sata_ops,
511 },
512
513 /* i6300esb_sata: 6 */
514 {
515 .sht = &piix_sht,
516 .flags = ATA_FLAG_SATA |
517 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
518 .pio_mask = 0x1f, /* pio0-4 */
519 .mwdma_mask = 0x07, /* mwdma0-2 */
520 .udma_mask = 0x7f, /* udma0-6 */
521 .port_ops = &piix_sata_ops,
522 },
523
524 /* ich6_sata: 7 */
525 {
526 .sht = &piix_sht,
527 .flags = ATA_FLAG_SATA |
528 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
529 .pio_mask = 0x1f, /* pio0-4 */
530 .mwdma_mask = 0x07, /* mwdma0-2 */
531 .udma_mask = 0x7f, /* udma0-6 */
532 .port_ops = &piix_sata_ops,
533 },
534
535 /* ich6_sata_ahci: 8 */
536 {
537 .sht = &piix_sht,
538 .flags = ATA_FLAG_SATA |
539 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
540 PIIX_FLAG_AHCI,
541 .pio_mask = 0x1f, /* pio0-4 */
542 .mwdma_mask = 0x07, /* mwdma0-2 */
543 .udma_mask = 0x7f, /* udma0-6 */
544 .port_ops = &piix_sata_ops,
545 },
546
547 /* ich6m_sata_ahci: 9 */
548 {
549 .sht = &piix_sht,
550 .flags = ATA_FLAG_SATA |
551 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
552 PIIX_FLAG_AHCI,
553 .pio_mask = 0x1f, /* pio0-4 */
554 .mwdma_mask = 0x07, /* mwdma0-2 */
555 .udma_mask = 0x7f, /* udma0-6 */
556 .port_ops = &piix_sata_ops,
557 },
558
559 /* ich7m_sata_ahci: 10 */
560 {
561 .sht = &piix_sht,
562 .flags = ATA_FLAG_SATA |
563 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
564 PIIX_FLAG_AHCI,
565 .pio_mask = 0x1f, /* pio0-4 */
566 .mwdma_mask = 0x07, /* mwdma0-2 */
567 .udma_mask = 0x7f, /* udma0-6 */
568 .port_ops = &piix_sata_ops,
569 },
570
571 /* ich8_sata_ahci: 11 */
572 {
573 .sht = &piix_sht,
574 .flags = ATA_FLAG_SATA |
575 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
576 PIIX_FLAG_AHCI,
577 .pio_mask = 0x1f, /* pio0-4 */
578 .mwdma_mask = 0x07, /* mwdma0-2 */
579 .udma_mask = 0x7f, /* udma0-6 */
580 .port_ops = &piix_sata_ops,
581 },
582
583};
584
585static struct pci_bits piix_enable_bits[] = {
586 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
587 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
588};
589
590MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
591MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
592MODULE_LICENSE("GPL");
593MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
594MODULE_VERSION(DRV_VERSION);
595
596static int force_pcs = 0;
597module_param(force_pcs, int, 0444);
598MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
599 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
600
601/**
602 * ich_pata_cbl_detect - Probe host controller cable detect info
603 * @ap: Port for which cable detect info is desired
604 *
605 * Read 80c cable indicator from ATA PCI device's PCI config
606 * register. This register is normally set by firmware (BIOS).
607 *
608 * LOCKING:
609 * None (inherited from caller).
610 */
611
612static void ich_pata_cbl_detect(struct ata_port *ap)
613{
614 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
615 u8 tmp, mask;
616
617 /* no 80c support in host controller? */
618 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
619 goto cbl40;
620
621 /* check BIOS cable detect results */
622 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
623 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
624 if ((tmp & mask) == 0)
625 goto cbl40;
626
627 ap->cbl = ATA_CBL_PATA80;
628 return;
629
630cbl40:
631 ap->cbl = ATA_CBL_PATA40;
632}
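
The IOCFG test above checks bits 5:4 for the primary channel and bits 7:6 for the secondary (the PIIX_80C_PRI/PIIX_80C_SEC masks). A standalone sketch with a hypothetical register value:

#include <stdio.h>

int main(void)
{
	unsigned char iocfg = 0x30;	/* hypothetical IOCFG readout */
	unsigned mask_pri = (1 << 5) | (1 << 4);
	unsigned mask_sec = (1 << 7) | (1 << 6);

	printf("primary:   %s\n", (iocfg & mask_pri) ? "80c" : "40c");
	printf("secondary: %s\n", (iocfg & mask_sec) ? "80c" : "40c");
	return 0;
}
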
633
634/**
635 * piix_pata_prereset - prereset for PATA host controller
636 * @ap: Target port
637 *
638 * Skips reset on disabled ports; otherwise assumes a 40-wire cable.
639 * LOCKING:
640 * None (inherited from caller).
641 */
642static int piix_pata_prereset(struct ata_port *ap)
643{
644 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
645
646 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
647 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
648 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
649 return 0;
650 }
651 ap->cbl = ATA_CBL_PATA40;
652 return ata_std_prereset(ap);
653}
654
655static void piix_pata_error_handler(struct ata_port *ap)
656{
657 ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
658 ata_std_postreset);
659}
660
661
662/**
663 * ich_pata_prereset - prereset for PATA host controller
664 * @ap: Target port
665 *
666 * Skips reset on disabled ports; otherwise detects the cable type.
667 * LOCKING:
668 * None (inherited from caller).
669 */
670static int ich_pata_prereset(struct ata_port *ap)
671{
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
673
674 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
675 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
676 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
677 return 0;
678 }
679
680 ich_pata_cbl_detect(ap);
681
682 return ata_std_prereset(ap);
683}
684
685static void ich_pata_error_handler(struct ata_port *ap)
686{
687 ata_bmdma_drive_eh(ap, ich_pata_prereset, ata_std_softreset, NULL,
688 ata_std_postreset);
689}
690
691/**
692 * piix_sata_present_mask - determine present mask for SATA host controller
693 * @ap: Target port
694 *
695 * Reads SATA PCI device's PCI config register Port Configuration
696 * and Status (PCS) to determine port and device availability.
697 *
698 * LOCKING:
699 * None (inherited from caller).
700 *
701 * RETURNS:
702 * determined present_mask
703 */
704static unsigned int piix_sata_present_mask(struct ata_port *ap)
705{
706 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
707 struct piix_host_priv *hpriv = ap->host->private_data;
708 const unsigned int *map = hpriv->map;
709 int base = 2 * ap->port_no;
710 unsigned int present_mask = 0;
711 int port, i;
712 u16 pcs;
713
714 pci_read_config_word(pdev, ICH5_PCS, &pcs);
715 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
716
717 for (i = 0; i < 2; i++) {
718 port = map[base + i];
719 if (port < 0)
720 continue;
721 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
722 (pcs & 1 << (hpriv->map_db->present_shift + port)))
723 present_mask |= 1 << i;
724 }
725
726 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
727 ap->id, pcs, present_mask);
728
729 return present_mask;
730}
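
A standalone sketch of the PCS test in piix_sata_present_mask(): each device slot maps to a SATA port, whose present bit sits at present_shift + port in the PCS word. The map row and PCS value below are hypothetical:

#include <stdio.h>

int main(void)
{
	const int map[4] = { 0, -1, 1, -1 };	/* P0, NA, P1, NA */
	unsigned short pcs = 0x0130;		/* hypothetical PCS readout */
	int present_shift = 4, ap_port_no = 0;
	int base = 2 * ap_port_no, i;
	unsigned present_mask = 0;

	for (i = 0; i < 2; i++) {
		int port = map[base + i];
		if (port < 0)
			continue;
		if (pcs & (1 << (present_shift + port)))
			present_mask |= 1 << i;
	}
	printf("present_mask = 0x%x\n", present_mask);
	return 0;
}
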
731
732/**
733 * piix_sata_softreset - reset SATA host port via ATA SRST
734 * @ap: port to reset
735 * @classes: resulting classes of attached devices
736 *
737 * Reset SATA host port via ATA SRST. On controllers with
738 * reliable PCS present bits, the bits are used to determine
739 * device presence.
740 *
741 * LOCKING:
742 * Kernel thread context (may sleep)
743 *
744 * RETURNS:
745 * 0 on success, -errno otherwise.
746 */
747static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
748{
749 unsigned int present_mask;
750 int i, rc;
751
752 present_mask = piix_sata_present_mask(ap);
753
754 rc = ata_std_softreset(ap, classes);
755 if (rc)
756 return rc;
757
758 for (i = 0; i < ATA_MAX_DEVICES; i++) {
759 if (!(present_mask & (1 << i)))
760 classes[i] = ATA_DEV_NONE;
761 }
762
763 return 0;
764}
765
766static void piix_sata_error_handler(struct ata_port *ap)
767{
768 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
769 ata_std_postreset);
770}
771
772/**
773 * piix_set_piomode - Initialize host controller PATA PIO timings
774 * @ap: Port whose timings we are configuring
775 * @adev: Device whose timings we are configuring
776 *
777 * Set PIO mode for device, in host controller PCI config space.
778 *
779 * LOCKING:
780 * None (inherited from caller).
781 */
782
783static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
784{
785 unsigned int pio = adev->pio_mode - XFER_PIO_0;
786 struct pci_dev *dev = to_pci_dev(ap->host->dev);
787 unsigned int is_slave = (adev->devno != 0);
788 unsigned int master_port= ap->port_no ? 0x42 : 0x40;
789 unsigned int slave_port = 0x44;
790 u16 master_data;
791 u8 slave_data;
792 u8 udma_enable;
793 int control = 0;
794
795 /*
796 * See Intel Document 298600-004 for the timing programming rules
797 * for ICH controllers.
798 */
799
800 static const /* ISP RTC */
801 u8 timings[][2] = { { 0, 0 },
802 { 0, 0 },
803 { 1, 0 },
804 { 2, 1 },
805 { 2, 3 }, };
806
807 if (pio >= 2)
808 control |= 1; /* TIME1 enable */
809 if (ata_pio_need_iordy(adev))
810 control |= 2; /* IE enable */
811
812 /* Intel specifies that the PPE functionality is for disk only */
813 if (adev->class == ATA_DEV_ATA)
814 control |= 4; /* PPE enable */
815
816 pci_read_config_word(dev, master_port, &master_data);
817 if (is_slave) {
818 /* Enable SITRE (separate slave timing register) */
819 master_data |= 0x4000;
820 /* enable PPE1, IE1 and TIME1 as needed */
821 master_data |= (control << 4);
822 pci_read_config_byte(dev, slave_port, &slave_data);
823 slave_data &= (ap->port_no ? 0x0f : 0xf0);
824 /* Load the timing nibble for this slave */
825 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
826 } else {
827 /* Master keeps the bits in a different format */
828 master_data &= 0xccf8;
829 /* Enable PPE, IE and TIME as appropriate */
830 master_data |= control;
831 master_data |=
832 (timings[pio][0] << 12) |
833 (timings[pio][1] << 8);
834 }
835 pci_write_config_word(dev, master_port, master_data);
836 if (is_slave)
837 pci_write_config_byte(dev, slave_port, slave_data);
838
839 /* Ensure the UDMA bit is off - it will be turned back on if
840 UDMA is selected */
841
842 if (ap->udma_mask) {
843 pci_read_config_byte(dev, 0x48, &udma_enable);
844 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
845 pci_write_config_byte(dev, 0x48, udma_enable);
846 }
847}
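
To see the IDETIM packing for the master device in isolation: clear the control and timing bits, OR in the control nibble, then place ISP in bits 13:12 and RTC in bits 9:8. The starting register value below is hypothetical:

#include <stdio.h>

static const unsigned char timings[5][2] = {	/* ISP, RTC per PIO mode */
	{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }
};

int main(void)
{
	unsigned pio = 4;		/* program PIO4 */
	unsigned control = 1 | 2 | 4;	/* TIME1 | IE | PPE */
	unsigned master_data = 0x8000;	/* hypothetical IDETIM readout */

	master_data &= 0xccf8;		/* clear control + timing bits */
	master_data |= control;
	master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
	printf("IDETIM = 0x%04x\n", master_data);
	return 0;
}
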
848
849/**
850 * do_pata_set_dmamode - Initialize host controller PATA DMA timings
851 * @ap: Port whose timings we are configuring
852 * @adev: Drive in question
853 * @isich: set if the chip is an ICH device
854 * (the mode itself is taken from @adev->dma_mode)
855 *
856 * Set MW/UDMA mode for device, in host controller PCI config space.
857 *
858 * LOCKING:
859 * None (inherited from caller).
860 */
861
862static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich)
863{
864 struct pci_dev *dev = to_pci_dev(ap->host->dev);
865 u8 master_port = ap->port_no ? 0x42 : 0x40;
866 u16 master_data;
867 u8 speed = adev->dma_mode;
868 int devid = adev->devno + 2 * ap->port_no;
869 u8 udma_enable;
870
871 static const /* ISP RTC */
872 u8 timings[][2] = { { 0, 0 },
873 { 0, 0 },
874 { 1, 0 },
875 { 2, 1 },
876 { 2, 3 }, };
877
878 pci_read_config_word(dev, master_port, &master_data);
879 pci_read_config_byte(dev, 0x48, &udma_enable);
880
881 if (speed >= XFER_UDMA_0) {
882 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
883 u16 udma_timing;
884 u16 ideconf;
885 int u_clock, u_speed;
886
887 /*
888 * UDMA is handled by a combination of clock switching and
889 * selection of dividers
890 *
891 * Handy rule: Odd modes are UDMATIMx 01, even are 02
892 * except UDMA0 which is 00
893 */
894 u_speed = min(2 - (udma & 1), udma);
895 if (udma == 5)
896 u_clock = 0x1000; /* 100MHz */
897 else if (udma > 2)
898 u_clock = 1; /* 66MHz */
899 else
900 u_clock = 0; /* 33MHz */
901
902 udma_enable |= (1 << devid);
903
904 /* Load the CT/RP selection */
905 pci_read_config_word(dev, 0x4A, &udma_timing);
906 udma_timing &= ~(3 << (4 * devid));
907 udma_timing |= u_speed << (4 * devid);
908 pci_write_config_word(dev, 0x4A, udma_timing);
909
910 if (isich) {
911 /* Select a 33/66/100MHz clock */
912 pci_read_config_word(dev, 0x54, &ideconf);
913 ideconf &= ~(0x1001 << devid);
914 ideconf |= u_clock << devid;
915 /* For ICH or later we should set bit 10 for better
916 performance (WR_PingPong_En) */
917 pci_write_config_word(dev, 0x54, ideconf);
918 }
919 } else {
920 /*
921 * MWDMA is driven by the PIO timings. We must also enable
922 * IORDY unconditionally along with TIME1. PPE has already
923 * been set when the PIO timing was set.
924 */
925 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
926 unsigned int control;
927 u8 slave_data;
928 const unsigned int needed_pio[3] = {
929 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
930 };
931 int pio = needed_pio[mwdma] - XFER_PIO_0;
932
933 control = 3; /* IORDY|TIME1 */
934
935 /* If the drive MWDMA is faster than it can do PIO then
936 we must force PIO into PIO0 */
937
938 if (adev->pio_mode < needed_pio[mwdma])
939 /* Enable DMA timing only */
940 control |= 8; /* PIO cycles in PIO0 */
941
942 if (adev->devno) { /* Slave */
943 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
944 master_data |= control << 4;
945 pci_read_config_byte(dev, 0x44, &slave_data);
946 slave_data &= (0x0F + 0xE1 * ap->port_no);
947 /* Load the matching timing */
948 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
949 pci_write_config_byte(dev, 0x44, slave_data);
950 } else { /* Master */
951 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
952 and master timing bits */
953 master_data |= control;
954 master_data |=
955 (timings[pio][0] << 12) |
956 (timings[pio][1] << 8);
957 }
958 udma_enable &= ~(1 << devid);
959 pci_write_config_word(dev, master_port, master_data);
960 }
961 /* Don't scribble on 0x48 if the controller does not support UDMA */
962 if (ap->udma_mask)
963 pci_write_config_byte(dev, 0x48, udma_enable);
964}
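
The "handy rule" above can be tabulated directly. A standalone sketch computing the UDMATIMx field and clock select for UDMA modes 0-5 exactly as the code does:

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned udma;

	for (udma = 0; udma <= 5; udma++) {
		/* odd modes -> 01, even -> 02, except UDMA0 -> 00 */
		unsigned u_speed = min_u(2 - (udma & 1), udma);
		unsigned clock = udma == 5 ? 100 : (udma > 2 ? 66 : 33);

		printf("UDMA%u: UDMATIM=%u clock=%uMHz\n",
		       udma, u_speed, clock);
	}
	return 0;
}
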
965
966/**
967 * piix_set_dmamode - Initialize host controller PATA DMA timings
968 * @ap: Port whose timings we are configuring
969 * @adev: Device whose timings we are configuring
970 *
971 * Set MW/UDMA mode for device, in host controller PCI config space.
972 *
973 * LOCKING:
974 * None (inherited from caller).
975 */
976
977static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
978{
979 do_pata_set_dmamode(ap, adev, 0);
980}
981
982/**
983 * ich_set_dmamode - Initialize host controller PATA DMA timings
984 * @ap: Port whose timings we are configuring
985 * @adev: Device whose timings we are configuring
986 *
987 * Set MW/UDMA mode for device, in host controller PCI config space.
988 *
989 * LOCKING:
990 * None (inherited from caller).
991 */
992
993static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
994{
995 do_pata_set_dmamode(ap, adev, 1);
996}
997
998#define AHCI_PCI_BAR 5
999#define AHCI_GLOBAL_CTL 0x04
1000#define AHCI_ENABLE (1 << 31)
1001static int piix_disable_ahci(struct pci_dev *pdev)
1002{
1003 void __iomem *mmio;
1004 u32 tmp;
1005 int rc = 0;
1006
1007 /* BUG: pci_enable_device has not yet been called. This
1008 * works because this device is usually set up by BIOS.
1009 */
1010
1011 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
1012 !pci_resource_len(pdev, AHCI_PCI_BAR))
1013 return 0;
1014
1015 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
1016 if (!mmio)
1017 return -ENOMEM;
1018
1019 tmp = readl(mmio + AHCI_GLOBAL_CTL);
1020 if (tmp & AHCI_ENABLE) {
1021 tmp &= ~AHCI_ENABLE;
1022 writel(tmp, mmio + AHCI_GLOBAL_CTL);
1023
1024 tmp = readl(mmio + AHCI_GLOBAL_CTL);
1025 if (tmp & AHCI_ENABLE)
1026 rc = -EIO;
1027 }
1028
1029 pci_iounmap(pdev, mmio);
1030 return rc;
1031}
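
piix_disable_ahci() uses the usual read-modify-write-verify sequence on a posted MMIO register. A standalone sketch of the pattern, with the register modeled as a plain variable and a hypothetical starting value:

#include <stdio.h>
#include <stdint.h>

static uint32_t global_ctl = 0x80000001;	/* hypothetical: AHCI on */

int main(void)
{
	uint32_t tmp = global_ctl;

	if (tmp & (1u << 31)) {			/* AHCI_ENABLE set? */
		tmp &= ~(1u << 31);
		global_ctl = tmp;		/* writel */
		tmp = global_ctl;		/* readl to verify */
		if (tmp & (1u << 31))
			return 1;		/* -EIO in the driver */
	}
	printf("AHCI disabled, ctl=0x%08x\n", global_ctl);
	return 0;
}
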
1032
1033/**
1034 * piix_check_450nx_errata - Check for problem 450NX setup
1035 * @ata_dev: the PCI device to check
1036 *
1037 * Check for the presence of 450NX errata #19 and errata #25. If
1038 * they are found, return an error code so we can turn off DMA.
1039 */
1040
1041static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
1042{
1043 struct pci_dev *pdev = NULL;
1044 u16 cfg;
1045 u8 rev;
1046 int no_piix_dma = 0;
1047
1048 while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
1049 {
1050 /* Look for 450NX PXB. Check for problem configurations.
1051 A PCI quirk checks bit 6 already */
1052 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
1053 pci_read_config_word(pdev, 0x41, &cfg);
1054 /* Only on the original revision: IDE DMA can hang */
1055 if (rev == 0x00)
1056 no_piix_dma = 1;
1057 /* On all revisions below 5, the PXB bus lock must be disabled for IDE */
1058 else if (cfg & (1<<14) && rev < 5)
1059 no_piix_dma = 2;
1060 }
1061 if (no_piix_dma)
1062 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
1063 if (no_piix_dma == 2)
1064 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
1065 return no_piix_dma;
1066}
1067
1068static void __devinit piix_init_pcs(struct pci_dev *pdev,
1069 struct ata_port_info *pinfo,
1070 const struct piix_map_db *map_db)
1071{
1072 u16 pcs, new_pcs;
1073
1074 pci_read_config_word(pdev, ICH5_PCS, &pcs);
1075
1076 new_pcs = pcs | map_db->port_enable;
1077
1078 if (new_pcs != pcs) {
1079 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
1080 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
1081 msleep(150);
1082 }
1083
1084 if (force_pcs == 1) {
1085 dev_printk(KERN_INFO, &pdev->dev,
1086 "force ignoring PCS (0x%x)\n", new_pcs);
1087 pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
1088 pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
1089 } else if (force_pcs == 2) {
1090 dev_printk(KERN_INFO, &pdev->dev,
1091 "force honoring PCS (0x%x)\n", new_pcs);
1092 pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
1093 pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
1094 }
1095}
1096
1097static void __devinit piix_init_sata_map(struct pci_dev *pdev,
1098 struct ata_port_info *pinfo,
1099 const struct piix_map_db *map_db)
1100{
1101 struct piix_host_priv *hpriv = pinfo[0].private_data;
1102 const unsigned int *map;
1103 int i, invalid_map = 0;
1104 u8 map_value;
1105
1106 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
1107
1108 map = map_db->map[map_value & map_db->mask];
1109
1110 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
1111 for (i = 0; i < 4; i++) {
1112 switch (map[i]) {
1113 case RV:
1114 invalid_map = 1;
1115 printk(" XX");
1116 break;
1117
1118 case NA:
1119 printk(" --");
1120 break;
1121
1122 case IDE:
1123 WARN_ON((i & 1) || map[i + 1] != IDE);
1124 pinfo[i / 2] = piix_port_info[ich_pata_100];
1125 pinfo[i / 2].private_data = hpriv;
1126 i++;
1127 printk(" IDE IDE");
1128 break;
1129
1130 default:
1131 printk(" P%d", map[i]);
1132 if (i & 1)
1133 pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
1134 break;
1135 }
1136 }
1137 printk(" ]\n");
1138
1139 if (invalid_map)
1140 dev_printk(KERN_ERR, &pdev->dev,
1141 "invalid MAP value %u\n", map_value);
1142
1143 hpriv->map = map;
1144 hpriv->map_db = map_db;
1145}
1146
1147/**
1148 * piix_init_one - Register PIIX ATA PCI device with kernel services
1149 * @pdev: PCI device to register
1150 * @ent: Entry in piix_pci_tbl matching with @pdev
1151 *
1152 * Called from kernel PCI layer. We probe for combined mode (sigh),
1153 * and then hand over control to libata, for it to do the rest.
1154 *
1155 * LOCKING:
1156 * Inherited from PCI layer (may sleep).
1157 *
1158 * RETURNS:
1159 * Zero on success, or -ERRNO value.
1160 */
1161
1162static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1163{
1164 static int printed_version;
1165 struct ata_port_info port_info[2];
1166 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
1167 struct piix_host_priv *hpriv;
1168 unsigned long port_flags;
1169
1170 if (!printed_version++)
1171 dev_printk(KERN_DEBUG, &pdev->dev,
1172 "version " DRV_VERSION "\n");
1173
1174 /* no hotplugging support (FIXME) */
1175 if (!in_module_init)
1176 return -ENODEV;
1177
1178 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
1179 if (!hpriv)
1180 return -ENOMEM;
1181
1182 port_info[0] = piix_port_info[ent->driver_data];
1183 port_info[1] = piix_port_info[ent->driver_data];
1184 port_info[0].private_data = hpriv;
1185 port_info[1].private_data = hpriv;
1186
1187 port_flags = port_info[0].flags;
1188
1189 if (port_flags & PIIX_FLAG_AHCI) {
1190 u8 tmp;
1191 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
1192 if (tmp == PIIX_AHCI_DEVICE) {
1193 int rc = piix_disable_ahci(pdev);
1194 if (rc)
1195 return rc;
1196 }
1197 }
1198
1199 /* Initialize SATA map */
1200 if (port_flags & ATA_FLAG_SATA) {
1201 piix_init_sata_map(pdev, port_info,
1202 piix_map_db_table[ent->driver_data]);
1203 piix_init_pcs(pdev, port_info,
1204 piix_map_db_table[ent->driver_data]);
1205 }
1206
1207 /* On ICH5, some BIOSen disable the interrupt using the
1208 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
1209 * On ICH6, this bit has the same effect, but only when
1210 * MSI is disabled (and it is disabled, as we don't use
1211 * message-signalled interrupts currently).
1212 */
1213 if (port_flags & PIIX_FLAG_CHECKINTR)
1214 pci_intx(pdev, 1);
1215
1216 if (piix_check_450nx_errata(pdev)) {
1217 /* This writes into the master table but it does not
1218 really matter for this errata as we will apply it to
1219 all the PIIX devices on the board */
1220 port_info[0].mwdma_mask = 0;
1221 port_info[0].udma_mask = 0;
1222 port_info[1].mwdma_mask = 0;
1223 port_info[1].udma_mask = 0;
1224 }
1225 return ata_pci_init_one(pdev, ppinfo, 2);
1226}
1227
1228static void piix_host_stop(struct ata_host *host)
1229{
1230 struct piix_host_priv *hpriv = host->private_data;
1231
1232 ata_host_stop(host);
1233
1234 kfree(hpriv);
1235}
1236
1237static int __init piix_init(void)
1238{
1239 int rc;
1240
1241 DPRINTK("pci_register_driver\n");
1242 rc = pci_register_driver(&piix_pci_driver);
1243 if (rc)
1244 return rc;
1245
1246 in_module_init = 0;
1247
1248 DPRINTK("done\n");
1249 return 0;
1250}
1251
1252static void __exit piix_exit(void)
1253{
1254 pci_unregister_driver(&piix_pci_driver);
1255}
1256
1257module_init(piix_init);
1258module_exit(piix_exit);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
new file mode 100644
index 000000000000..753b0152afd1
--- /dev/null
+++ b/drivers/ata/libata-core.c
@@ -0,0 +1,6171 @@
1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/scatterlist.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
62/* debounce timing parameters in msecs { interval, duration, timeout } */
63const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
66
67static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70static void ata_dev_xfermask(struct ata_device *dev);
71
72static unsigned int ata_unique_id = 1;
73static struct workqueue_struct *ata_wq;
74
75struct workqueue_struct *ata_aux_wq;
76
77int atapi_enabled = 1;
78module_param(atapi_enabled, int, 0444);
79MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
80
81int atapi_dmadir = 0;
82module_param(atapi_dmadir, int, 0444);
83MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
84
85int libata_fua = 0;
86module_param_named(fua, libata_fua, int, 0444);
87MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
88
89static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90module_param(ata_probe_timeout, int, 0444);
91MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
92
93MODULE_AUTHOR("Jeff Garzik");
94MODULE_DESCRIPTION("Library module for ATA devices");
95MODULE_LICENSE("GPL");
96MODULE_VERSION(DRV_VERSION);
97
98
99/**
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 *	@fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
104 *
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
107 *
108 * LOCKING:
109 * Inherited from caller.
110 */
111
112void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
113{
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
119
120 fis[4] = tf->lbal;
121 fis[5] = tf->lbam;
122 fis[6] = tf->lbah;
123 fis[7] = tf->device;
124
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
129
130 fis[12] = tf->nsect;
131 fis[13] = tf->hob_nsect;
132 fis[14] = 0;
133 fis[15] = tf->ctl;
134
135 fis[16] = 0;
136 fis[17] = 0;
137 fis[18] = 0;
138 fis[19] = 0;
139}
140
141/**
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
145 *
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
147 *
148 * LOCKING:
149 * Inherited from caller.
150 */
151
152void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
153{
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
156
157 tf->lbal = fis[4];
158 tf->lbam = fis[5];
159 tf->lbah = fis[6];
160 tf->device = fis[7];
161
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
165
166 tf->nsect = fis[12];
167 tf->hob_nsect = fis[13];
168}
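/*
 * Illustrative sketch (editor's example, not part of the original
 * source): the two helpers above are inverses over the fields they
 * carry, so a taskfile survives a round trip through a FIS buffer:
 *
 *	struct ata_taskfile tf, out;
 *	u8 fis[20];
 *
 *	memset(&tf, 0, sizeof(tf));
 *	tf.command = ATA_CMD_READ_EXT;
 *	ata_tf_to_fis(&tf, fis, 0);	(fis[0] == 0x27, fis[2] == tf.command)
 *	ata_tf_from_fis(fis, &out);	(out.command == tf.command)
 *
 * Note that ata_tf_from_fis() reads fis[2]/fis[3] as status/error,
 * since it is normally applied to a Device-to-Host FIS rather than
 * the Host-to-Device FIS built above.
 */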
169
170static const u8 ata_rw_cmds[] = {
171 /* pio multi */
172 ATA_CMD_READ_MULTI,
173 ATA_CMD_WRITE_MULTI,
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
176 0,
177 0,
178 0,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
180 /* pio */
181 ATA_CMD_PIO_READ,
182 ATA_CMD_PIO_WRITE,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
185 0,
186 0,
187 0,
188 0,
189 /* dma */
190 ATA_CMD_READ,
191 ATA_CMD_WRITE,
192 ATA_CMD_READ_EXT,
193 ATA_CMD_WRITE_EXT,
194 0,
195 0,
196 0,
197 ATA_CMD_WRITE_FUA_EXT
198};
199
200/**
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
203 *
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
206 *
207 * LOCKING:
208 * caller.
209 */
210int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
211{
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
214 u8 cmd;
215
216 int index, fua, lba48, write;
217
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
221
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
229 } else {
230 tf->protocol = ATA_PROT_DMA;
231 index = 16;
232 }
233
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
235 if (cmd) {
236 tf->command = cmd;
237 return 0;
238 }
239 return -1;
240}
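/*
 * Worked example (editor's note): the index arithmetic above selects
 * one of three eight-entry blocks in ata_rw_cmds[] (0 = PIO multi,
 * 8 = PIO, 16 = DMA) and offsets into it with fua (4), lba48 (2) and
 * write (1). An LBA48 DMA FUA write thus picks entry
 * 16 + 4 + 2 + 1 == 23, ATA_CMD_WRITE_FUA_EXT. The zero entries are
 * invalid combinations (e.g. FUA reads, or FUA without LBA48), for
 * which the function returns -1.
 */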
241
242/**
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
247 *
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
250 *
251 * LOCKING:
252 * None.
253 *
254 * RETURNS:
255 * Packed xfer_mask.
256 */
257static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
260{
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
264}
265
266/**
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
272 *
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 *	Any NULL destination masks will be ignored.
275 */
276static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
280{
281 if (pio_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
283 if (mwdma_mask)
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
285 if (udma_mask)
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
287}
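/*
 * Usage sketch (editor's example): the two helpers are symmetric, so
 * masks survive a round trip as long as each value fits within its
 * ATA_BITS_* wide field:
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	(now pio == 0x1f, mwdma == 0x07 and udma == 0x3f again)
 */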
288
289static const struct ata_xfer_ent {
290 int shift, bits;
291 u8 base;
292} ata_xfer_tbl[] = {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
296 { -1, },
297};
298
299/**
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
302 *
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
305 *
306 * LOCKING:
307 * None.
308 *
309 * RETURNS:
310 * Matching XFER_* value, 0 if no match found.
311 */
312static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
313{
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
316
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
320 return 0;
321}
322
323/**
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
326 *
327 * Return matching xfer_mask for @xfer_mode.
328 *
329 * LOCKING:
330 * None.
331 *
332 * RETURNS:
333 * Matching xfer_mask, 0 if no match found.
334 */
335static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
336{
337 const struct ata_xfer_ent *ent;
338
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
342 return 0;
343}
344
345/**
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
348 *
349 * Return matching xfer_shift for @xfer_mode.
350 *
351 * LOCKING:
352 * None.
353 *
354 * RETURNS:
355 * Matching xfer_shift, -1 if no match found.
356 */
357static int ata_xfer_mode2shift(unsigned int xfer_mode)
358{
359 const struct ata_xfer_ent *ent;
360
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
363 return ent->shift;
364 return -1;
365}
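/*
 * Editor's sketch: the three helpers above translate through
 * ata_xfer_tbl between the mask, mode and shift representations of a
 * transfer mode. For example, assuming a UDMA-capable mask:
 *
 *	u8 mode = ata_xfer_mask2mode(ATA_MASK_UDMA);	(highest UDMA mode)
 *	unsigned int mask = ata_xfer_mode2mask(mode);	(its single bit)
 *	int shift = ata_xfer_mode2shift(mode);		(== ATA_SHIFT_UDMA)
 */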
366
367/**
368 * ata_mode_string - convert xfer_mask to string
369 * @xfer_mask: mask of bits supported; only highest bit counts.
370 *
371 * Determine string which represents the highest speed
372 *	(highest bit in @xfer_mask).
373 *
374 * LOCKING:
375 * None.
376 *
377 * RETURNS:
378 * Constant C string representing highest speed listed in
379 *	@xfer_mask, or the constant C string "<n/a>".
380 */
381static const char *ata_mode_string(unsigned int xfer_mask)
382{
383 static const char * const xfer_mode_str[] = {
384 "PIO0",
385 "PIO1",
386 "PIO2",
387 "PIO3",
388 "PIO4",
389 "PIO5",
390 "PIO6",
391 "MWDMA0",
392 "MWDMA1",
393 "MWDMA2",
394 "MWDMA3",
395 "MWDMA4",
396 "UDMA/16",
397 "UDMA/25",
398 "UDMA/33",
399 "UDMA/44",
400 "UDMA/66",
401 "UDMA/100",
402 "UDMA/133",
403 "UDMA7",
404 };
405 int highbit;
406
407 highbit = fls(xfer_mask) - 1;
408 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
409 return xfer_mode_str[highbit];
410 return "<n/a>";
411}
412
413static const char *sata_spd_string(unsigned int spd)
414{
415 static const char * const spd_str[] = {
416 "1.5 Gbps",
417 "3.0 Gbps",
418 };
419
420 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
421 return "<unknown>";
422 return spd_str[spd - 1];
423}
424
425void ata_dev_disable(struct ata_device *dev)
426{
427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
429 dev->class++;
430 }
431}
432
433/**
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
437 *
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
441 *
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
446 *
447 * LOCKING:
448 * caller.
449 */
450
451static unsigned int ata_pio_devchk(struct ata_port *ap,
452 unsigned int device)
453{
454 struct ata_ioports *ioaddr = &ap->ioaddr;
455 u8 nsect, lbal;
456
457 ap->ops->dev_select(ap, device);
458
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
461
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
464
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
467
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
470
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
473
474 return 0; /* nothing found */
475}
476
477/**
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
481 *
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
485 *
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
490 *
491 * LOCKING:
492 * caller.
493 */
494
495static unsigned int ata_mmio_devchk(struct ata_port *ap,
496 unsigned int device)
497{
498 struct ata_ioports *ioaddr = &ap->ioaddr;
499 u8 nsect, lbal;
500
501 ap->ops->dev_select(ap, device);
502
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
505
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
508
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
511
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
514
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
517
518 return 0; /* nothing found */
519}
520
521/**
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
525 *
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
529 *
530 * LOCKING:
531 * caller.
532 */
533
534static unsigned int ata_devchk(struct ata_port *ap,
535 unsigned int device)
536{
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
540}
541
542/**
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
545 *
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
549 *
550 * LOCKING:
551 * None.
552 *
553 * RETURNS:
554 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 *	in the event of failure.
556 */
557
558unsigned int ata_dev_classify(const struct ata_taskfile *tf)
559{
560 /* Apple's open source Darwin code hints that some devices only
561	 * put a proper signature into the LBA mid/high registers,
562	 * so we check only those; it's sufficient for uniqueness.
563 */
564
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
568 return ATA_DEV_ATA;
569 }
570
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
575 }
576
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
579}
580
581/**
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
585 * @r_err: Value of error register on completion
586 *
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
590 * and diagnostics.
591 *
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
595 *
596 * LOCKING:
597 * caller.
598 *
599 * RETURNS:
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
601 */
602
603static unsigned int
604ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
605{
606 struct ata_taskfile tf;
607 unsigned int class;
608 u8 err;
609
610 ap->ops->dev_select(ap, device);
611
612 memset(&tf, 0, sizeof(tf));
613
614 ap->ops->tf_read(ap, &tf);
615 err = tf.feature;
616 if (r_err)
617 *r_err = err;
618
619 /* see if device passed diags: if master then continue and warn later */
620 if (err == 0 && device == 0)
621		/* diagnostic failure: do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
623 else if (err == 1)
624 /* do nothing */ ;
625 else if ((device == 0) && (err == 0x81))
626 /* do nothing */ ;
627 else
628 return ATA_DEV_NONE;
629
630 /* determine if device is ATA or ATAPI */
631 class = ata_dev_classify(&tf);
632
633 if (class == ATA_DEV_UNKNOWN)
634 return ATA_DEV_NONE;
635 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
636 return ATA_DEV_NONE;
637 return class;
638}
639
640/**
641 * ata_id_string - Convert IDENTIFY DEVICE page into string
642 * @id: IDENTIFY DEVICE results we will examine
643 * @s: string into which data is output
644 * @ofs: offset into identify device page
645 *	@len: length of string to return. Must be an even number.
646 *
647 * The strings in the IDENTIFY DEVICE page are broken up into
648 * 16-bit chunks. Run through the string, and output each
649 * 8-bit chunk linearly, regardless of platform.
650 *
651 * LOCKING:
652 * caller.
653 */
654
655void ata_id_string(const u16 *id, unsigned char *s,
656 unsigned int ofs, unsigned int len)
657{
658 unsigned int c;
659
660 while (len > 0) {
661 c = id[ofs] >> 8;
662 *s = c;
663 s++;
664
665 c = id[ofs] & 0xff;
666 *s = c;
667 s++;
668
669 ofs++;
670 len -= 2;
671 }
672}
673
674/**
675 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
676 * @id: IDENTIFY DEVICE results we will examine
677 * @s: string into which data is output
678 * @ofs: offset into identify device page
679 *	@len: length of string to return. Must be an odd number.
680 *
681 * This function is identical to ata_id_string except that it
682 * trims trailing spaces and terminates the resulting string with
683 * null. @len must be actual maximum length (even number) + 1.
684 *
685 * LOCKING:
686 * caller.
687 */
688void ata_id_c_string(const u16 *id, unsigned char *s,
689 unsigned int ofs, unsigned int len)
690{
691 unsigned char *p;
692
693 WARN_ON(!(len & 1));
694
695 ata_id_string(id, s, ofs, len - 1);
696
697 p = s + strnlen(s, len - 1);
698 while (p > s && p[-1] == ' ')
699 p--;
700 *p = '\0';
701}
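/*
 * Usage sketch (editor's example): extracting the model string from
 * IDENTIFY data. Per the ATA spec the model string occupies words
 * 27-46 (40 bytes), so the buffer must be 41 bytes, the required odd
 * length:
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, 27, sizeof(model));
 *	(model is now space-trimmed and NUL-terminated)
 */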
702
703static u64 ata_id_n_sectors(const u16 *id)
704{
705 if (ata_id_has_lba(id)) {
706 if (ata_id_has_lba48(id))
707 return ata_id_u64(id, 100);
708 else
709 return ata_id_u32(id, 60);
710 } else {
711 if (ata_id_current_chs_valid(id))
712 return ata_id_u32(id, 57);
713 else
714 return id[1] * id[3] * id[6];
715 }
716}
717
718/**
719 * ata_noop_dev_select - Select device 0/1 on ATA bus
720 * @ap: ATA channel to manipulate
721 * @device: ATA device (numbered from zero) to select
722 *
723 *	This function intentionally performs no action.
724 *
725 * May be used as the dev_select() entry in ata_port_operations.
726 *
727 * LOCKING:
728 * caller.
729 */
730void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
731{
732}
733
734
735/**
736 * ata_std_dev_select - Select device 0/1 on ATA bus
737 * @ap: ATA channel to manipulate
738 * @device: ATA device (numbered from zero) to select
739 *
740 * Use the method defined in the ATA specification to
741 * make either device 0, or device 1, active on the
742 * ATA channel. Works with both PIO and MMIO.
743 *
744 * May be used as the dev_select() entry in ata_port_operations.
745 *
746 * LOCKING:
747 * caller.
748 */
749
750void ata_std_dev_select (struct ata_port *ap, unsigned int device)
751{
752 u8 tmp;
753
754 if (device == 0)
755 tmp = ATA_DEVICE_OBS;
756 else
757 tmp = ATA_DEVICE_OBS | ATA_DEV1;
758
759 if (ap->flags & ATA_FLAG_MMIO) {
760 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
761 } else {
762 outb(tmp, ap->ioaddr.device_addr);
763 }
764 ata_pause(ap); /* needed; also flushes, for mmio */
765}
766
767/**
768 * ata_dev_select - Select device 0/1 on ATA bus
769 * @ap: ATA channel to manipulate
770 * @device: ATA device (numbered from zero) to select
771 * @wait: non-zero to wait for Status register BSY bit to clear
772 * @can_sleep: non-zero if context allows sleeping
773 *
774 * Use the method defined in the ATA specification to
775 * make either device 0, or device 1, active on the
776 * ATA channel.
777 *
778 * This is a high-level version of ata_std_dev_select(),
779 * which additionally provides the services of inserting
780 * the proper pauses and status polling, where needed.
781 *
782 * LOCKING:
783 * caller.
784 */
785
786void ata_dev_select(struct ata_port *ap, unsigned int device,
787 unsigned int wait, unsigned int can_sleep)
788{
789 if (ata_msg_probe(ap))
790 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
791 "device %u, wait %u\n", ap->id, device, wait);
792
793 if (wait)
794 ata_wait_idle(ap);
795
796 ap->ops->dev_select(ap, device);
797
798 if (wait) {
799 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
800 msleep(150);
801 ata_wait_idle(ap);
802 }
803}
804
805/**
806 * ata_dump_id - IDENTIFY DEVICE info debugging output
807 * @id: IDENTIFY DEVICE page to dump
808 *
809 * Dump selected 16-bit words from the given IDENTIFY DEVICE
810 * page.
811 *
812 * LOCKING:
813 * caller.
814 */
815
816static inline void ata_dump_id(const u16 *id)
817{
818 DPRINTK("49==0x%04x "
819 "53==0x%04x "
820 "63==0x%04x "
821 "64==0x%04x "
822 "75==0x%04x \n",
823 id[49],
824 id[53],
825 id[63],
826 id[64],
827 id[75]);
828 DPRINTK("80==0x%04x "
829 "81==0x%04x "
830 "82==0x%04x "
831 "83==0x%04x "
832 "84==0x%04x \n",
833 id[80],
834 id[81],
835 id[82],
836 id[83],
837 id[84]);
838 DPRINTK("88==0x%04x "
839 "93==0x%04x\n",
840 id[88],
841 id[93]);
842}
843
844/**
845 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
846 * @id: IDENTIFY data to compute xfer mask from
847 *
848 * Compute the xfermask for this device. This is not as trivial
849 * as it seems if we must consider early devices correctly.
850 *
851 *	FIXME: pre-IDE drive timing (do we care?).
852 *
853 * LOCKING:
854 * None.
855 *
856 * RETURNS:
857 * Computed xfermask
858 */
859static unsigned int ata_id_xfermask(const u16 *id)
860{
861 unsigned int pio_mask, mwdma_mask, udma_mask;
862
863 /* Usual case. Word 53 indicates word 64 is valid */
864 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
865 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
866 pio_mask <<= 3;
867 pio_mask |= 0x7;
868 } else {
869 /* If word 64 isn't valid then Word 51 high byte holds
870 * the PIO timing number for the maximum. Turn it into
871 * a mask.
872 */
873 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
874
875		/* But wait, there's more. Design your standards by
876		 * committee and you too can get a free iordy field to
877		 * process. However, it's the speeds, not the modes, that
878		 * are supported... Note that drivers using the timing API
879		 * will get this right anyway.
880 */
881 }
882
883 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
884
885 if (ata_id_is_cfa(id)) {
886 /*
887 * Process compact flash extended modes
888 */
889 int pio = id[163] & 0x7;
890 int dma = (id[163] >> 3) & 7;
891
892 if (pio)
893 pio_mask |= (1 << 5);
894 if (pio > 1)
895 pio_mask |= (1 << 6);
896 if (dma)
897 mwdma_mask |= (1 << 3);
898 if (dma > 1)
899 mwdma_mask |= (1 << 4);
900 }
901
902 udma_mask = 0;
903 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
904 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
905
906 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
907}
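/*
 * Worked example (editor's note): on the pre-EIDE fallback path the
 * highest old-style PIO mode is turned into a mask by (2 << mode) - 1,
 * so a drive reporting mode 2 yields 0x07, i.e. PIO 0-2. The CFA
 * block likewise maps the extended modes onto the PIO5/PIO6 and
 * MWDMA3/MWDMA4 mask bits.
 */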
908
909/**
910 * ata_port_queue_task - Queue port_task
911 * @ap: The ata_port to queue port_task for
912 * @fn: workqueue function to be scheduled
913 * @data: data value to pass to workqueue function
914 * @delay: delay time for workqueue function
915 *
916 * Schedule @fn(@data) for execution after @delay jiffies using
917 * port_task. There is one port_task per port and it's the
919 *	user's (i.e. the low-level driver's) responsibility to make sure that only
919 * one task is active at any given time.
920 *
921 * libata core layer takes care of synchronization between
922 * port_task and EH. ata_port_queue_task() may be ignored for EH
923 * synchronization.
924 *
925 * LOCKING:
926 * Inherited from caller.
927 */
928void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
929 unsigned long delay)
930{
931 int rc;
932
933 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
934 return;
935
936 PREPARE_WORK(&ap->port_task, fn, data);
937
938 if (!delay)
939 rc = queue_work(ata_wq, &ap->port_task);
940 else
941 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
942
943 /* rc == 0 means that another user is using port task */
944 WARN_ON(rc == 0);
945}
946
947/**
948 * ata_port_flush_task - Flush port_task
949 * @ap: The ata_port to flush port_task for
950 *
951 *	After this function completes, port_task is guaranteed not to
952 * be running or scheduled.
953 *
954 * LOCKING:
955 * Kernel thread context (may sleep)
956 */
957void ata_port_flush_task(struct ata_port *ap)
958{
959 unsigned long flags;
960
961 DPRINTK("ENTER\n");
962
963 spin_lock_irqsave(ap->lock, flags);
964 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
965 spin_unlock_irqrestore(ap->lock, flags);
966
967 DPRINTK("flush #1\n");
968 flush_workqueue(ata_wq);
969
970 /*
971 * At this point, if a task is running, it's guaranteed to see
972 * the FLUSH flag; thus, it will never queue pio tasks again.
973 * Cancel and flush.
974 */
975 if (!cancel_delayed_work(&ap->port_task)) {
976 if (ata_msg_ctl(ap))
977 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
978 __FUNCTION__);
979 flush_workqueue(ata_wq);
980 }
981
982 spin_lock_irqsave(ap->lock, flags);
983 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
984 spin_unlock_irqrestore(ap->lock, flags);
985
986 if (ata_msg_ctl(ap))
987 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
988}
989
990void ata_qc_complete_internal(struct ata_queued_cmd *qc)
991{
992 struct completion *waiting = qc->private_data;
993
994 complete(waiting);
995}
996
997/**
998 * ata_exec_internal - execute libata internal command
999 * @dev: Device to which the command is sent
1000 * @tf: Taskfile registers for the command and the result
1001 * @cdb: CDB for packet command
1002 *	@dma_dir: Data transfer direction of the command
1003 * @buf: Data buffer of the command
1004 * @buflen: Length of data buffer
1005 *
1006 * Executes libata internal command with timeout. @tf contains
1007 * command on entry and result on return. Timeout and error
1008 * conditions are reported via return value. No recovery action
1009 *	is taken after a command times out. It is the caller's duty to
1010 * clean up after timeout.
1011 *
1012 * LOCKING:
1013 *	None. Should be called from kernel thread context; might sleep.
1014 *
1015 * RETURNS:
1016 * Zero on success, AC_ERR_* mask on failure
1017 */
1018unsigned ata_exec_internal(struct ata_device *dev,
1019 struct ata_taskfile *tf, const u8 *cdb,
1020 int dma_dir, void *buf, unsigned int buflen)
1021{
1022 struct ata_port *ap = dev->ap;
1023 u8 command = tf->command;
1024 struct ata_queued_cmd *qc;
1025 unsigned int tag, preempted_tag;
1026 u32 preempted_sactive, preempted_qc_active;
1027 DECLARE_COMPLETION_ONSTACK(wait);
1028 unsigned long flags;
1029 unsigned int err_mask;
1030 int rc;
1031
1032 spin_lock_irqsave(ap->lock, flags);
1033
1034 /* no internal command while frozen */
1035 if (ap->pflags & ATA_PFLAG_FROZEN) {
1036 spin_unlock_irqrestore(ap->lock, flags);
1037 return AC_ERR_SYSTEM;
1038 }
1039
1040 /* initialize internal qc */
1041
1042 /* XXX: Tag 0 is used for drivers with legacy EH as some
1043 * drivers choke if any other tag is given. This breaks
1044 * ata_tag_internal() test for those drivers. Don't use new
1045 * EH stuff without converting to it.
1046 */
1047 if (ap->ops->error_handler)
1048 tag = ATA_TAG_INTERNAL;
1049 else
1050 tag = 0;
1051
1052 if (test_and_set_bit(tag, &ap->qc_allocated))
1053 BUG();
1054 qc = __ata_qc_from_tag(ap, tag);
1055
1056 qc->tag = tag;
1057 qc->scsicmd = NULL;
1058 qc->ap = ap;
1059 qc->dev = dev;
1060 ata_qc_reinit(qc);
1061
1062 preempted_tag = ap->active_tag;
1063 preempted_sactive = ap->sactive;
1064 preempted_qc_active = ap->qc_active;
1065 ap->active_tag = ATA_TAG_POISON;
1066 ap->sactive = 0;
1067 ap->qc_active = 0;
1068
1069 /* prepare & issue qc */
1070 qc->tf = *tf;
1071 if (cdb)
1072 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1073 qc->flags |= ATA_QCFLAG_RESULT_TF;
1074 qc->dma_dir = dma_dir;
1075 if (dma_dir != DMA_NONE) {
1076 ata_sg_init_one(qc, buf, buflen);
1077 qc->nsect = buflen / ATA_SECT_SIZE;
1078 }
1079
1080 qc->private_data = &wait;
1081 qc->complete_fn = ata_qc_complete_internal;
1082
1083 ata_qc_issue(qc);
1084
1085 spin_unlock_irqrestore(ap->lock, flags);
1086
1087 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1088
1089 ata_port_flush_task(ap);
1090
1091 if (!rc) {
1092 spin_lock_irqsave(ap->lock, flags);
1093
1094 /* We're racing with irq here. If we lose, the
1095 * following test prevents us from completing the qc
1096 * twice. If we win, the port is frozen and will be
1097 * cleaned up by ->post_internal_cmd().
1098 */
1099 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1100 qc->err_mask |= AC_ERR_TIMEOUT;
1101
1102 if (ap->ops->error_handler)
1103 ata_port_freeze(ap);
1104 else
1105 ata_qc_complete(qc);
1106
1107 if (ata_msg_warn(ap))
1108 ata_dev_printk(dev, KERN_WARNING,
1109 "qc timeout (cmd 0x%x)\n", command);
1110 }
1111
1112 spin_unlock_irqrestore(ap->lock, flags);
1113 }
1114
1115 /* do post_internal_cmd */
1116 if (ap->ops->post_internal_cmd)
1117 ap->ops->post_internal_cmd(qc);
1118
1119 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1120 if (ata_msg_warn(ap))
1121 ata_dev_printk(dev, KERN_WARNING,
1122 "zero err_mask for failed "
1123 "internal command, assuming AC_ERR_OTHER\n");
1124 qc->err_mask |= AC_ERR_OTHER;
1125 }
1126
1127 /* finish up */
1128 spin_lock_irqsave(ap->lock, flags);
1129
1130 *tf = qc->result_tf;
1131 err_mask = qc->err_mask;
1132
1133 ata_qc_free(qc);
1134 ap->active_tag = preempted_tag;
1135 ap->sactive = preempted_sactive;
1136 ap->qc_active = preempted_qc_active;
1137
1138 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1139 * Until those drivers are fixed, we detect the condition
1140 * here, fail the command with AC_ERR_SYSTEM and reenable the
1141 * port.
1142 *
1143 * Note that this doesn't change any behavior as internal
1144 * command failure results in disabling the device in the
1145 * higher layer for LLDDs without new reset/EH callbacks.
1146 *
1147 * Kill the following code as soon as those drivers are fixed.
1148 */
1149 if (ap->flags & ATA_FLAG_DISABLED) {
1150 err_mask |= AC_ERR_SYSTEM;
1151 ata_port_probe(ap);
1152 }
1153
1154 spin_unlock_irqrestore(ap->lock, flags);
1155
1156 return err_mask;
1157}
1158
1159/**
1160 * ata_do_simple_cmd - execute simple internal command
1161 * @dev: Device to which the command is sent
1162 * @cmd: Opcode to execute
1163 *
1164 * Execute a 'simple' command, that only consists of the opcode
1165 * 'cmd' itself, without filling any other registers
1166 *
1167 * LOCKING:
1168 * Kernel thread context (may sleep).
1169 *
1170 * RETURNS:
1171 * Zero on success, AC_ERR_* mask on failure
1172 */
1173unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1174{
1175 struct ata_taskfile tf;
1176
1177 ata_tf_init(dev, &tf);
1178
1179 tf.command = cmd;
1180 tf.flags |= ATA_TFLAG_DEVICE;
1181 tf.protocol = ATA_PROT_NODATA;
1182
1183 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1184}
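/*
 * Usage sketch (editor's example): flushing the write cache is a
 * typical "simple" command, a bare opcode with no other registers:
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		(handle failure; err_mask is an AC_ERR_* mask)
 */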
1185
1186/**
1187 * ata_pio_need_iordy - check if iordy needed
1188 * @adev: ATA device
1189 *
1190 * Check if the current speed of the device requires IORDY. Used
1191 * by various controllers for chip configuration.
1192 */
1193
1194unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1195{
1196 int pio;
1197 int speed = adev->pio_mode - XFER_PIO_0;
1198
1199 if (speed < 2)
1200 return 0;
1201 if (speed > 2)
1202 return 1;
1203
1204	/* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1205
1206 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1207 pio = adev->id[ATA_ID_EIDE_PIO];
1208		/* Is the speed faster than the drive allows without IORDY? */
1209		if (pio) {
1210			/* These are cycle times, not frequencies; watch the logic! */
1211			if (pio > 240) /* PIO2 is 240 ns per cycle */
1212 return 1;
1213 return 0;
1214 }
1215 }
1216 return 0;
1217}
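/*
 * Worked example (editor's note): PIO modes above 2 always need IORDY
 * and modes below 2 never do. At exactly PIO2 the drive's reported
 * minimum cycle time without flow control (IDENTIFY word
 * ATA_ID_EIDE_PIO) decides: a drive reporting 383 ns cannot meet the
 * 240 ns PIO2 cycle without IORDY, so the function returns 1; one
 * reporting 240 ns or less gets 0.
 */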
1218
1219/**
1220 * ata_dev_read_id - Read ID data from the specified device
1221 * @dev: target device
1222 * @p_class: pointer to class of the target device (may be changed)
1223 * @post_reset: is this read ID post-reset?
1224 * @id: buffer to read IDENTIFY data into
1225 *
1226 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1227 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1228 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1229 * for pre-ATA4 drives.
1230 *
1231 * LOCKING:
1232 * Kernel thread context (may sleep)
1233 *
1234 * RETURNS:
1235 * 0 on success, -errno otherwise.
1236 */
1237int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1238 int post_reset, u16 *id)
1239{
1240 struct ata_port *ap = dev->ap;
1241 unsigned int class = *p_class;
1242 struct ata_taskfile tf;
1243 unsigned int err_mask = 0;
1244 const char *reason;
1245 int rc;
1246
1247 if (ata_msg_ctl(ap))
1248 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1249 __FUNCTION__, ap->id, dev->devno);
1250
1251 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1252
1253 retry:
1254 ata_tf_init(dev, &tf);
1255
1256 switch (class) {
1257 case ATA_DEV_ATA:
1258 tf.command = ATA_CMD_ID_ATA;
1259 break;
1260 case ATA_DEV_ATAPI:
1261 tf.command = ATA_CMD_ID_ATAPI;
1262 break;
1263 default:
1264 rc = -ENODEV;
1265 reason = "unsupported class";
1266 goto err_out;
1267 }
1268
1269 tf.protocol = ATA_PROT_PIO;
1270
1271 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1272 id, sizeof(id[0]) * ATA_ID_WORDS);
1273 if (err_mask) {
1274 rc = -EIO;
1275 reason = "I/O error";
1276 goto err_out;
1277 }
1278
1279 swap_buf_le16(id, ATA_ID_WORDS);
1280
1281 /* sanity check */
1282 rc = -EINVAL;
1283 reason = "device reports illegal type";
1284
1285 if (class == ATA_DEV_ATA) {
1286 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1287 goto err_out;
1288 } else {
1289 if (ata_id_is_ata(id))
1290 goto err_out;
1291 }
1292
1293 if (post_reset && class == ATA_DEV_ATA) {
1294 /*
1295 * The exact sequence expected by certain pre-ATA4 drives is:
1296 * SRST RESET
1297 * IDENTIFY
1298 * INITIALIZE DEVICE PARAMETERS
1299 * anything else..
1300 * Some drives were very specific about that exact sequence.
1301 */
1302 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1303 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1304 if (err_mask) {
1305 rc = -EIO;
1306 reason = "INIT_DEV_PARAMS failed";
1307 goto err_out;
1308 }
1309
1310 /* current CHS translation info (id[53-58]) might be
1311 * changed. reread the identify device info.
1312 */
1313 post_reset = 0;
1314 goto retry;
1315 }
1316 }
1317
1318 *p_class = class;
1319
1320 return 0;
1321
1322 err_out:
1323 if (ata_msg_warn(ap))
1324 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1325 "(%s, err_mask=0x%x)\n", reason, err_mask);
1326 return rc;
1327}
1328
1329static inline u8 ata_dev_knobble(struct ata_device *dev)
1330{
1331 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1332}
1333
1334static void ata_dev_config_ncq(struct ata_device *dev,
1335 char *desc, size_t desc_sz)
1336{
1337 struct ata_port *ap = dev->ap;
1338 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1339
1340 if (!ata_id_has_ncq(dev->id)) {
1341 desc[0] = '\0';
1342 return;
1343 }
1344
1345 if (ap->flags & ATA_FLAG_NCQ) {
1346 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1347 dev->flags |= ATA_DFLAG_NCQ;
1348 }
1349
1350 if (hdepth >= ddepth)
1351 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1352 else
1353 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1354}
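/*
 * Worked example (editor's note): the printed depth pair is the
 * host-imposed limit vs. the drive-reported depth. A drive
 * advertising a queue depth of 32 behind a host with a larger
 * can_queue still gets hdepth == ATA_MAX_QUEUE - 1 == 31 (with
 * ATA_MAX_QUEUE == 32), so the message reads "NCQ (depth 31/32)";
 * when hdepth >= ddepth only the drive's depth is shown.
 */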
1355
1356static void ata_set_port_max_cmd_len(struct ata_port *ap)
1357{
1358 int i;
1359
1360 if (ap->scsi_host) {
1361 unsigned int len = 0;
1362
1363 for (i = 0; i < ATA_MAX_DEVICES; i++)
1364 len = max(len, ap->device[i].cdb_len);
1365
1366 ap->scsi_host->max_cmd_len = len;
1367 }
1368}
1369
1370/**
1371 * ata_dev_configure - Configure the specified ATA/ATAPI device
1372 * @dev: Target device to configure
1373 * @print_info: Enable device info printout
1374 *
1375 * Configure @dev according to @dev->id. Generic and low-level
1376 * driver specific fixups are also applied.
1377 *
1378 * LOCKING:
1379 * Kernel thread context (may sleep)
1380 *
1381 * RETURNS:
1382 * 0 on success, -errno otherwise
1383 */
1384int ata_dev_configure(struct ata_device *dev, int print_info)
1385{
1386 struct ata_port *ap = dev->ap;
1387 const u16 *id = dev->id;
1388 unsigned int xfer_mask;
1389 char revbuf[7]; /* XYZ-99\0 */
1390 int rc;
1391
1392 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1393 ata_dev_printk(dev, KERN_INFO,
1394 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1395 __FUNCTION__, ap->id, dev->devno);
1396 return 0;
1397 }
1398
1399 if (ata_msg_probe(ap))
1400 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1401 __FUNCTION__, ap->id, dev->devno);
1402
1403 /* print device capabilities */
1404 if (ata_msg_probe(ap))
1405 ata_dev_printk(dev, KERN_DEBUG,
1406 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1407 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1408 __FUNCTION__,
1409 id[49], id[82], id[83], id[84],
1410 id[85], id[86], id[87], id[88]);
1411
1412 /* initialize to-be-configured parameters */
1413 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1414 dev->max_sectors = 0;
1415 dev->cdb_len = 0;
1416 dev->n_sectors = 0;
1417 dev->cylinders = 0;
1418 dev->heads = 0;
1419 dev->sectors = 0;
1420
1421 /*
1422 * common ATA, ATAPI feature tests
1423 */
1424
1425 /* find max transfer mode; for printk only */
1426 xfer_mask = ata_id_xfermask(id);
1427
1428 if (ata_msg_probe(ap))
1429 ata_dump_id(id);
1430
1431 /* ATA-specific feature tests */
1432 if (dev->class == ATA_DEV_ATA) {
1433 if (ata_id_is_cfa(id)) {
1434 if (id[162] & 1) /* CPRM may make this media unusable */
1435				ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1436 ap->id, dev->devno);
1437 snprintf(revbuf, 7, "CFA");
1438 }
1439 else
1440 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1441
1442 dev->n_sectors = ata_id_n_sectors(id);
1443
1444 if (ata_id_has_lba(id)) {
1445 const char *lba_desc;
1446 char ncq_desc[20];
1447
1448 lba_desc = "LBA";
1449 dev->flags |= ATA_DFLAG_LBA;
1450 if (ata_id_has_lba48(id)) {
1451 dev->flags |= ATA_DFLAG_LBA48;
1452 lba_desc = "LBA48";
1453 }
1454
1455 /* config NCQ */
1456 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1457
1458 /* print device info to dmesg */
1459 if (ata_msg_drv(ap) && print_info)
1460 ata_dev_printk(dev, KERN_INFO, "%s, "
1461 "max %s, %Lu sectors: %s %s\n",
1462 revbuf,
1463 ata_mode_string(xfer_mask),
1464 (unsigned long long)dev->n_sectors,
1465 lba_desc, ncq_desc);
1466 } else {
1467 /* CHS */
1468
1469 /* Default translation */
1470 dev->cylinders = id[1];
1471 dev->heads = id[3];
1472 dev->sectors = id[6];
1473
1474 if (ata_id_current_chs_valid(id)) {
1475 /* Current CHS translation is valid. */
1476 dev->cylinders = id[54];
1477 dev->heads = id[55];
1478 dev->sectors = id[56];
1479 }
1480
1481 /* print device info to dmesg */
1482 if (ata_msg_drv(ap) && print_info)
1483 ata_dev_printk(dev, KERN_INFO, "%s, "
1484 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1485 revbuf,
1486 ata_mode_string(xfer_mask),
1487 (unsigned long long)dev->n_sectors,
1488 dev->cylinders, dev->heads,
1489 dev->sectors);
1490 }
1491
1492 if (dev->id[59] & 0x100) {
1493 dev->multi_count = dev->id[59] & 0xff;
1494 if (ata_msg_drv(ap) && print_info)
1495 ata_dev_printk(dev, KERN_INFO,
1496 "ata%u: dev %u multi count %u\n",
1497 ap->id, dev->devno, dev->multi_count);
1498 }
1499
1500 dev->cdb_len = 16;
1501 }
1502
1503 /* ATAPI-specific feature tests */
1504 else if (dev->class == ATA_DEV_ATAPI) {
1505 char *cdb_intr_string = "";
1506
1507 rc = atapi_cdb_len(id);
1508 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1509 if (ata_msg_warn(ap))
1510 ata_dev_printk(dev, KERN_WARNING,
1511 "unsupported CDB len\n");
1512 rc = -EINVAL;
1513 goto err_out_nosup;
1514 }
1515 dev->cdb_len = (unsigned int) rc;
1516
1517 if (ata_id_cdb_intr(dev->id)) {
1518 dev->flags |= ATA_DFLAG_CDB_INTR;
1519 cdb_intr_string = ", CDB intr";
1520 }
1521
1522 /* print device info to dmesg */
1523 if (ata_msg_drv(ap) && print_info)
1524 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1525 ata_mode_string(xfer_mask),
1526 cdb_intr_string);
1527 }
1528
1529 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1530 /* Let the user know. We don't want to disallow opens for
1531 rescue purposes, or in case the vendor is just a blithering
1532 idiot */
1533 if (print_info) {
1534 ata_dev_printk(dev, KERN_WARNING,
1535"Drive reports diagnostics failure. This may indicate a drive\n");
1536 ata_dev_printk(dev, KERN_WARNING,
1537"fault or invalid emulation. Contact drive vendor for information.\n");
1538 }
1539 }
1540
1541 ata_set_port_max_cmd_len(ap);
1542
1543 /* limit bridge transfers to udma5, 200 sectors */
1544 if (ata_dev_knobble(dev)) {
1545 if (ata_msg_drv(ap) && print_info)
1546 ata_dev_printk(dev, KERN_INFO,
1547 "applying bridge limits\n");
1548 dev->udma_mask &= ATA_UDMA5;
1549 dev->max_sectors = ATA_MAX_SECTORS;
1550 }
1551
1552 if (ap->ops->dev_config)
1553 ap->ops->dev_config(ap, dev);
1554
1555 if (ata_msg_probe(ap))
1556 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1557 __FUNCTION__, ata_chk_status(ap));
1558 return 0;
1559
1560err_out_nosup:
1561 if (ata_msg_probe(ap))
1562 ata_dev_printk(dev, KERN_DEBUG,
1563 "%s: EXIT, err\n", __FUNCTION__);
1564 return rc;
1565}
1566
1567/**
1568 * ata_bus_probe - Reset and probe ATA bus
1569 * @ap: Bus to probe
1570 *
1571 * Master ATA bus probing function. Initiates a hardware-dependent
1572 * bus reset, then attempts to identify any devices found on
1573 * the bus.
1574 *
1575 * LOCKING:
1576 * PCI/etc. bus probe sem.
1577 *
1578 * RETURNS:
1579 * Zero on success, negative errno otherwise.
1580 */
1581
1582int ata_bus_probe(struct ata_port *ap)
1583{
1584 unsigned int classes[ATA_MAX_DEVICES];
1585 int tries[ATA_MAX_DEVICES];
1586 int i, rc, down_xfermask;
1587 struct ata_device *dev;
1588
1589 ata_port_probe(ap);
1590
1591 for (i = 0; i < ATA_MAX_DEVICES; i++)
1592 tries[i] = ATA_PROBE_MAX_TRIES;
1593
1594 retry:
1595 down_xfermask = 0;
1596
1597 /* reset and determine device classes */
1598 ap->ops->phy_reset(ap);
1599
1600 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1601 dev = &ap->device[i];
1602
1603 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1604 dev->class != ATA_DEV_UNKNOWN)
1605 classes[dev->devno] = dev->class;
1606 else
1607 classes[dev->devno] = ATA_DEV_NONE;
1608
1609 dev->class = ATA_DEV_UNKNOWN;
1610 }
1611
1612 ata_port_probe(ap);
1613
1614	/* After the reset, the device state is PIO 0 and the controller
1615	   state is undefined. Record the mode. */
1616
1617 for (i = 0; i < ATA_MAX_DEVICES; i++)
1618 ap->device[i].pio_mode = XFER_PIO_0;
1619
1620 /* read IDENTIFY page and configure devices */
1621 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1622 dev = &ap->device[i];
1623
1624 if (tries[i])
1625 dev->class = classes[i];
1626
1627 if (!ata_dev_enabled(dev))
1628 continue;
1629
1630 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1631 if (rc)
1632 goto fail;
1633
1634 rc = ata_dev_configure(dev, 1);
1635 if (rc)
1636 goto fail;
1637 }
1638
1639 /* configure transfer mode */
1640 rc = ata_set_mode(ap, &dev);
1641 if (rc) {
1642 down_xfermask = 1;
1643 goto fail;
1644 }
1645
1646 for (i = 0; i < ATA_MAX_DEVICES; i++)
1647 if (ata_dev_enabled(&ap->device[i]))
1648 return 0;
1649
1650 /* no device present, disable port */
1651 ata_port_disable(ap);
1652 ap->ops->port_disable(ap);
1653 return -ENODEV;
1654
1655 fail:
1656 switch (rc) {
1657 case -EINVAL:
1658 case -ENODEV:
1659 tries[dev->devno] = 0;
1660 break;
1661 case -EIO:
1662 sata_down_spd_limit(ap);
1663 /* fall through */
1664 default:
1665 tries[dev->devno]--;
1666 if (down_xfermask &&
1667 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1668 tries[dev->devno] = 0;
1669 }
1670
1671 if (!tries[dev->devno]) {
1672 ata_down_xfermask_limit(dev, 1);
1673 ata_dev_disable(dev);
1674 }
1675
1676 goto retry;
1677}
1678
1679/**
1680 * ata_port_probe - Mark port as enabled
1681 * @ap: Port for which we indicate enablement
1682 *
1683 * Modify @ap data structure such that the system
1684 * thinks that the entire port is enabled.
1685 *
1686 * LOCKING: host lock, or some other form of
1687 * serialization.
1688 */
1689
1690void ata_port_probe(struct ata_port *ap)
1691{
1692 ap->flags &= ~ATA_FLAG_DISABLED;
1693}
1694
1695/**
1696 * sata_print_link_status - Print SATA link status
1697 * @ap: SATA port to printk link status about
1698 *
1699 * This function prints link speed and status of a SATA link.
1700 *
1701 * LOCKING:
1702 * None.
1703 */
1704static void sata_print_link_status(struct ata_port *ap)
1705{
1706 u32 sstatus, scontrol, tmp;
1707
1708 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1709 return;
1710 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1711
1712 if (ata_port_online(ap)) {
1713 tmp = (sstatus >> 4) & 0xf;
1714 ata_port_printk(ap, KERN_INFO,
1715 "SATA link up %s (SStatus %X SControl %X)\n",
1716 sata_spd_string(tmp), sstatus, scontrol);
1717 } else {
1718 ata_port_printk(ap, KERN_INFO,
1719 "SATA link down (SStatus %X SControl %X)\n",
1720 sstatus, scontrol);
1721 }
1722}
1723
1724/**
1725 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1726 * @ap: SATA port associated with target SATA PHY.
1727 *
1728 * This function issues commands to standard SATA Sxxx
1729 * PHY registers, to wake up the phy (and device), and
1730 * clear any reset condition.
1731 *
1732 * LOCKING:
1733 * PCI/etc. bus probe sem.
1734 *
1735 */
1736void __sata_phy_reset(struct ata_port *ap)
1737{
1738 u32 sstatus;
1739 unsigned long timeout = jiffies + (HZ * 5);
1740
1741 if (ap->flags & ATA_FLAG_SATA_RESET) {
1742 /* issue phy wake/reset */
1743 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1744 /* Couldn't find anything in SATA I/II specs, but
1745 * AHCI-1.1 10.4.2 says at least 1 ms. */
1746 mdelay(1);
1747 }
1748 /* phy wake/clear reset */
1749 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1750
1751 /* wait for phy to become ready, if necessary */
1752 do {
1753 msleep(200);
1754 sata_scr_read(ap, SCR_STATUS, &sstatus);
1755 if ((sstatus & 0xf) != 1)
1756 break;
1757 } while (time_before(jiffies, timeout));
1758
1759 /* print link status */
1760 sata_print_link_status(ap);
1761
1762 /* TODO: phy layer with polling, timeouts, etc. */
1763 if (!ata_port_offline(ap))
1764 ata_port_probe(ap);
1765 else
1766 ata_port_disable(ap);
1767
1768 if (ap->flags & ATA_FLAG_DISABLED)
1769 return;
1770
1771 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1772 ata_port_disable(ap);
1773 return;
1774 }
1775
1776 ap->cbl = ATA_CBL_SATA;
1777}
1778
1779/**
1780 * sata_phy_reset - Reset SATA bus.
1781 * @ap: SATA port associated with target SATA PHY.
1782 *
1783 * This function resets the SATA bus, and then probes
1784 * the bus for devices.
1785 *
1786 * LOCKING:
1787 * PCI/etc. bus probe sem.
1788 *
1789 */
1790void sata_phy_reset(struct ata_port *ap)
1791{
1792 __sata_phy_reset(ap);
1793 if (ap->flags & ATA_FLAG_DISABLED)
1794 return;
1795 ata_bus_reset(ap);
1796}
1797
1798/**
1799 * ata_dev_pair - return other device on cable
1800 * @adev: device
1801 *
1802 * Obtain the other device on the same cable, or if none is
1803 * present NULL is returned
1804 */
1805
1806struct ata_device *ata_dev_pair(struct ata_device *adev)
1807{
1808 struct ata_port *ap = adev->ap;
1809 struct ata_device *pair = &ap->device[1 - adev->devno];
1810 if (!ata_dev_enabled(pair))
1811 return NULL;
1812 return pair;
1813}
1814
1815/**
1816 * ata_port_disable - Disable port.
1817 * @ap: Port to be disabled.
1818 *
1819 * Modify @ap data structure such that the system
1820 * thinks that the entire port is disabled, and should
1821 * never attempt to probe or communicate with devices
1822 * on this port.
1823 *
1824 * LOCKING: host lock, or some other form of
1825 * serialization.
1826 */
1827
1828void ata_port_disable(struct ata_port *ap)
1829{
1830 ap->device[0].class = ATA_DEV_NONE;
1831 ap->device[1].class = ATA_DEV_NONE;
1832 ap->flags |= ATA_FLAG_DISABLED;
1833}
1834
1835/**
1836 * sata_down_spd_limit - adjust SATA spd limit downward
1837 * @ap: Port to adjust SATA spd limit for
1838 *
1839 * Adjust SATA spd limit of @ap downward. Note that this
1840 * function only adjusts the limit. The change must be applied
1841 * using sata_set_spd().
1842 *
1843 * LOCKING:
1844 * Inherited from caller.
1845 *
1846 * RETURNS:
1847 * 0 on success, negative errno on failure
1848 */
1849int sata_down_spd_limit(struct ata_port *ap)
1850{
1851 u32 sstatus, spd, mask;
1852 int rc, highbit;
1853
1854 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1855 if (rc)
1856 return rc;
1857
1858 mask = ap->sata_spd_limit;
1859 if (mask <= 1)
1860 return -EINVAL;
1861 highbit = fls(mask) - 1;
1862 mask &= ~(1 << highbit);
1863
1864 spd = (sstatus >> 4) & 0xf;
1865 if (spd <= 1)
1866 return -EINVAL;
1867 spd--;
1868 mask &= (1 << spd) - 1;
1869 if (!mask)
1870 return -EINVAL;
1871
1872 ap->sata_spd_limit = mask;
1873
1874 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1875 sata_spd_string(fls(mask)));
1876
1877 return 0;
1878}
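/*
 * Worked example (editor's note): sata_spd_limit is a bitmask of
 * allowed speeds (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps). With a limit
 * of 0x3 and a link currently at spd 2 (3.0 Gbps), the highest
 * allowed bit is dropped first (mask becomes 0x1), then the mask is
 * clamped below the current speed: mask &= (1 << 1) - 1 == 0x1. The
 * new limit is therefore 1.5 Gbps only.
 */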
1879
1880static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1881{
1882 u32 spd, limit;
1883
1884 if (ap->sata_spd_limit == UINT_MAX)
1885 limit = 0;
1886 else
1887 limit = fls(ap->sata_spd_limit);
1888
1889 spd = (*scontrol >> 4) & 0xf;
1890 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1891
1892 return spd != limit;
1893}
1894
1895/**
1896 * sata_set_spd_needed - is SATA spd configuration needed
1897 * @ap: Port in question
1898 *
1899 * Test whether the spd limit in SControl matches
1900 * @ap->sata_spd_limit. This function is used to determine
1901 * whether hardreset is necessary to apply SATA spd
1902 * configuration.
1903 *
1904 * LOCKING:
1905 * Inherited from caller.
1906 *
1907 * RETURNS:
1908 * 1 if SATA spd configuration is needed, 0 otherwise.
1909 */
1910int sata_set_spd_needed(struct ata_port *ap)
1911{
1912 u32 scontrol;
1913
1914 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1915 return 0;
1916
1917 return __sata_set_spd_needed(ap, &scontrol);
1918}
1919
1920/**
1921 * sata_set_spd - set SATA spd according to spd limit
1922 * @ap: Port to set SATA spd for
1923 *
1924 * Set SATA spd of @ap according to sata_spd_limit.
1925 *
1926 * LOCKING:
1927 * Inherited from caller.
1928 *
1929 * RETURNS:
1930 * 0 if spd doesn't need to be changed, 1 if spd has been
1931 * changed. Negative errno if SCR registers are inaccessible.
1932 */
1933int sata_set_spd(struct ata_port *ap)
1934{
1935 u32 scontrol;
1936 int rc;
1937
1938 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1939 return rc;
1940
1941 if (!__sata_set_spd_needed(ap, &scontrol))
1942 return 0;
1943
1944 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1945 return rc;
1946
1947 return 1;
1948}
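/*
 * Usage sketch (editor's example): a positive return tells the caller
 * the limit was written but not yet applied:
 *
 *	if (sata_set_spd(ap) == 1)
 *		(SControl changed; hardreset the link so the PHY
 *		 renegotiates at the new limit)
 */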
1949
1950/*
1951 * This mode timing computation functionality is ported over from
1952 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1953 */
1954/*
1955 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1956 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1957 * for UDMA6, which is currently supported only by Maxtor drives.
1958 *
1959 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
1960 */
1961
1962static const struct ata_timing ata_timing[] = {
1963
1964 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1965 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1966 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1967 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1968
1969 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1970 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1971 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1972 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1973 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1974
1975/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1976
1977 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1978 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1979 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1980
1981 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1982 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1983 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1984
1985 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1986 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1987 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1988 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1989
1990 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1991 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1992 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1993
1994/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1995
1996 { 0xFF }
1997};
1998
1999#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2000#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2001
2002static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2003{
2004 q->setup = EZ(t->setup * 1000, T);
2005 q->act8b = EZ(t->act8b * 1000, T);
2006 q->rec8b = EZ(t->rec8b * 1000, T);
2007 q->cyc8b = EZ(t->cyc8b * 1000, T);
2008 q->active = EZ(t->active * 1000, T);
2009 q->recover = EZ(t->recover * 1000, T);
2010 q->cycle = EZ(t->cycle * 1000, T);
2011 q->udma = EZ(t->udma * 1000, UT);
2012}
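/*
 * Worked example (editor's note): ENOUGH() is a ceiling division and
 * EZ() additionally keeps 0 ("not specified") as 0. The table holds
 * nanoseconds while T and UT are clock periods, presumably in
 * picoseconds as passed by the PATA host drivers, hence the "* 1000":
 * on a 33 MHz bus (T == 30000) an active time of 70 ns quantizes to
 * ENOUGH(70000, 30000) == 3 clocks.
 */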
2013
2014void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2015 struct ata_timing *m, unsigned int what)
2016{
2017 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2018 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2019 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2020 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2021 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2022 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2023 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2024 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2025}
2026
2027static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2028{
2029 const struct ata_timing *t;
2030
2031 for (t = ata_timing; t->mode != speed; t++)
2032 if (t->mode == 0xFF)
2033 return NULL;
2034 return t;
2035}
2036
2037int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2038 struct ata_timing *t, int T, int UT)
2039{
2040 const struct ata_timing *s;
2041 struct ata_timing p;
2042
2043 /*
2044 * Find the mode.
2045 */
2046
2047 if (!(s = ata_timing_find_mode(speed)))
2048 return -EINVAL;
2049
2050 memcpy(t, s, sizeof(*s));
2051
2052 /*
2053 * If the drive is an EIDE drive, it can tell us it needs extended
2054 * PIO/MW_DMA cycle timing.
2055 */
2056
2057 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2058 memset(&p, 0, sizeof(p));
2059 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2060 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2061 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2062 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2063 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2064 }
2065 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2066 }
2067
2068 /*
2069 * Convert the timing to bus clock counts.
2070 */
2071
2072 ata_timing_quantize(t, t, T, UT);
2073
2074 /*
2075 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2076 * S.M.A.R.T. and some other commands. We have to ensure that the
2077 * DMA cycle timing is no faster than the fastest PIO timing.
2078 */
2079
2080 if (speed > XFER_PIO_4) {
2081 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2082 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2083 }
2084
2085 /*
2086 * Lengthen active & recovery time so that cycle time is correct.
2087 */
2088
2089 if (t->act8b + t->rec8b < t->cyc8b) {
2090 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2091 t->rec8b = t->cyc8b - t->act8b;
2092 }
2093
2094 if (t->active + t->recover < t->cycle) {
2095 t->active += (t->cycle - (t->active + t->recover)) / 2;
2096 t->recover = t->cycle - t->active;
2097 }
2098
2099 return 0;
2100}
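
/* Hypothetical usage sketch for a host driver's ->set_piomode or
 * ->set_dmamode hook (the 30000 ps / 1000 ps clock figures are
 * assumed for illustration only):
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, 30000, 1000))
 *		return;
 *
 * after which t.active, t.recover and t.cycle hold clock counts
 * ready to be programmed into controller registers.
 */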
2101
2102/**
2103 * ata_down_xfermask_limit - adjust dev xfer masks downward
2104 * @dev: Device to adjust xfer masks
2105 * @force_pio0: Force PIO0
2106 *
2107 * Adjust xfer masks of @dev downward. Note that this function
2108 * does not apply the change. Invoking ata_set_mode() afterwards
2109 * will apply the limit.
2110 *
2111 * LOCKING:
2112 * Inherited from caller.
2113 *
2114 * RETURNS:
2115 * 0 on success, negative errno on failure
2116 */
2117int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2118{
2119 unsigned long xfer_mask;
2120 int highbit;
2121
2122 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2123 dev->udma_mask);
2124
2125 if (!xfer_mask)
2126 goto fail;
2127 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2128 if (xfer_mask & ATA_MASK_UDMA)
2129 xfer_mask &= ~ATA_MASK_MWDMA;
2130
2131 highbit = fls(xfer_mask) - 1;
2132 xfer_mask &= ~(1 << highbit);
2133 if (force_pio0)
2134 xfer_mask &= 1 << ATA_SHIFT_PIO;
2135 if (!xfer_mask)
2136 goto fail;
2137
2138 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2139 &dev->udma_mask);
2140
2141 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2142 ata_mode_string(xfer_mask));
2143
2144 return 0;
2145
2146 fail:
2147 return -EINVAL;
2148}
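
/* Illustrative gear-down sequence (modes assumed): a device at UDMA5
 * drops to UDMA4, UDMA3, ... down to UDMA0 on successive calls;
 * because the MWDMA bits are cleared above whenever UDMA bits exist,
 * the step after UDMA0 is the fastest remaining PIO mode, never
 * MWDMA. With @force_pio0 everything but the PIO0 bit is masked in a
 * single step.
 */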
2149
2150static int ata_dev_set_mode(struct ata_device *dev)
2151{
2152 unsigned int err_mask;
2153 int rc;
2154
2155 dev->flags &= ~ATA_DFLAG_PIO;
2156 if (dev->xfer_shift == ATA_SHIFT_PIO)
2157 dev->flags |= ATA_DFLAG_PIO;
2158
2159 err_mask = ata_dev_set_xfermode(dev);
2160 if (err_mask) {
2161 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2162 "(err_mask=0x%x)\n", err_mask);
2163 return -EIO;
2164 }
2165
2166 rc = ata_dev_revalidate(dev, 0);
2167 if (rc)
2168 return rc;
2169
2170 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2171 dev->xfer_shift, (int)dev->xfer_mode);
2172
2173 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2174 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2175 return 0;
2176}
2177
2178/**
2179 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2180 * @ap: port on which timings will be programmed
2181 * @r_failed_dev: out parameter for the failing device
2182 *
2183 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2184 * ata_set_mode() fails, pointer to the failing device is
2185 * returned in @r_failed_dev.
2186 *
2187 * LOCKING:
2188 * PCI/etc. bus probe sem.
2189 *
2190 * RETURNS:
2191 * 0 on success, negative errno otherwise
2192 */
2193int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2194{
2195 struct ata_device *dev;
2196 int i, rc = 0, used_dma = 0, found = 0;
2197
2198 /* has private set_mode? */
2199 if (ap->ops->set_mode) {
2200 /* FIXME: make ->set_mode handle no device case and
2201 * return error code and failing device on failure.
2202 */
2203 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2204 if (ata_dev_ready(&ap->device[i])) {
2205 ap->ops->set_mode(ap);
2206 break;
2207 }
2208 }
2209 return 0;
2210 }
2211
2212 /* step 1: calculate xfer_mask */
2213 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2214 unsigned int pio_mask, dma_mask;
2215
2216 dev = &ap->device[i];
2217
2218 if (!ata_dev_enabled(dev))
2219 continue;
2220
2221 ata_dev_xfermask(dev);
2222
2223 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2224 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2225 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2226 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2227
2228 found = 1;
2229 if (dev->dma_mode)
2230 used_dma = 1;
2231 }
2232 if (!found)
2233 goto out;
2234
2235 /* step 2: always set host PIO timings */
2236 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2237 dev = &ap->device[i];
2238 if (!ata_dev_enabled(dev))
2239 continue;
2240
2241 if (!dev->pio_mode) {
2242 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2243 rc = -EINVAL;
2244 goto out;
2245 }
2246
2247 dev->xfer_mode = dev->pio_mode;
2248 dev->xfer_shift = ATA_SHIFT_PIO;
2249 if (ap->ops->set_piomode)
2250 ap->ops->set_piomode(ap, dev);
2251 }
2252
2253 /* step 3: set host DMA timings */
2254 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2255 dev = &ap->device[i];
2256
2257 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2258 continue;
2259
2260 dev->xfer_mode = dev->dma_mode;
2261 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2262 if (ap->ops->set_dmamode)
2263 ap->ops->set_dmamode(ap, dev);
2264 }
2265
2266 /* step 4: update devices' xfer mode */
2267 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2268 dev = &ap->device[i];
2269
2270 /* don't update suspended devices' xfer mode */
2271 if (!ata_dev_ready(dev))
2272 continue;
2273
2274 rc = ata_dev_set_mode(dev);
2275 if (rc)
2276 goto out;
2277 }
2278
2279 /* Record simplex status. If we selected DMA then the other
2280 * host channels are not permitted to do so.
2281 */
2282 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2283 ap->host->simplex_claimed = 1;
2284
2285 /* step 5: chip-specific finalisation */
2286 if (ap->ops->post_set_mode)
2287 ap->ops->post_set_mode(ap);
2288
2289 out:
2290 if (rc)
2291 *r_failed_dev = dev;
2292 return rc;
2293}
2294
2295/**
2296 * ata_tf_to_host - issue ATA taskfile to host controller
2297 * @ap: port to which command is being issued
2298 * @tf: ATA taskfile register set
2299 *
2300 * Issues ATA taskfile register set to ATA host controller,
2301 * with proper synchronization with interrupt handler and
2302 * other threads.
2303 *
2304 * LOCKING:
2305 * spin_lock_irqsave(host lock)
2306 */
2307
2308static inline void ata_tf_to_host(struct ata_port *ap,
2309 const struct ata_taskfile *tf)
2310{
2311 ap->ops->tf_load(ap, tf);
2312 ap->ops->exec_command(ap, tf);
2313}
2314
2315/**
2316 * ata_busy_sleep - sleep until BSY clears, or timeout
2317 * @ap: port containing status register to be polled
2318 * @tmout_pat: impatience timeout
2319 * @tmout: overall timeout
2320 *
2321 * Sleep until ATA Status register bit BSY clears,
2322 * or a timeout occurs.
2323 *
2324 * LOCKING: None.
2325 */
2326
2327unsigned int ata_busy_sleep(struct ata_port *ap,
2328 unsigned long tmout_pat, unsigned long tmout)
2329{
2330 unsigned long timer_start, timeout;
2331 u8 status;
2332
2333 status = ata_busy_wait(ap, ATA_BUSY, 300);
2334 timer_start = jiffies;
2335 timeout = timer_start + tmout_pat;
2336 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2337 msleep(50);
2338 status = ata_busy_wait(ap, ATA_BUSY, 3);
2339 }
2340
2341 if (status & ATA_BUSY)
2342 ata_port_printk(ap, KERN_WARNING,
2343 "port is slow to respond, please be patient\n");
2344
2345 timeout = timer_start + tmout;
2346 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2347 msleep(50);
2348 status = ata_chk_status(ap);
2349 }
2350
2351 if (status & ATA_BUSY) {
2352 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2353 "(%lu secs)\n", tmout / HZ);
2354 return 1;
2355 }
2356
2357 return 0;
2358}
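
/* Usage sketch (mirrors the reset paths below, nothing new assumed):
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		return -EIO;
 *
 * i.e. a nonzero return means the port never left BSY within @tmout.
 */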
2359
2360static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2361{
2362 struct ata_ioports *ioaddr = &ap->ioaddr;
2363 unsigned int dev0 = devmask & (1 << 0);
2364 unsigned int dev1 = devmask & (1 << 1);
2365 unsigned long timeout;
2366
2367 /* if device 0 was found in ata_devchk, wait for its
2368 * BSY bit to clear
2369 */
2370 if (dev0)
2371 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2372
2373 /* if device 1 was found in ata_devchk, wait for
2374 * register access, then wait for BSY to clear
2375 */
2376 timeout = jiffies + ATA_TMOUT_BOOT;
2377 while (dev1) {
2378 u8 nsect, lbal;
2379
2380 ap->ops->dev_select(ap, 1);
2381 if (ap->flags & ATA_FLAG_MMIO) {
2382 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2383 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2384 } else {
2385 nsect = inb(ioaddr->nsect_addr);
2386 lbal = inb(ioaddr->lbal_addr);
2387 }
2388 if ((nsect == 1) && (lbal == 1))
2389 break;
2390 if (time_after(jiffies, timeout)) {
2391 dev1 = 0;
2392 break;
2393 }
2394 msleep(50); /* give drive a breather */
2395 }
2396 if (dev1)
2397 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2398
2399 /* is all this really necessary? */
2400 ap->ops->dev_select(ap, 0);
2401 if (dev1)
2402 ap->ops->dev_select(ap, 1);
2403 if (dev0)
2404 ap->ops->dev_select(ap, 0);
2405}
2406
2407static unsigned int ata_bus_softreset(struct ata_port *ap,
2408 unsigned int devmask)
2409{
2410 struct ata_ioports *ioaddr = &ap->ioaddr;
2411
2412 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2413
2414 /* software reset. causes dev0 to be selected */
2415 if (ap->flags & ATA_FLAG_MMIO) {
2416 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2417 udelay(20); /* FIXME: flush */
2418 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2419 udelay(20); /* FIXME: flush */
2420 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2421 } else {
2422 outb(ap->ctl, ioaddr->ctl_addr);
2423 udelay(10);
2424 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2425 udelay(10);
2426 outb(ap->ctl, ioaddr->ctl_addr);
2427 }
2428
2429 /* spec mandates ">= 2ms" before checking status.
2430 * We wait 150ms, because that was the magic delay used for
2431 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2432 * between when the ATA command register is written, and then
2433 * status is checked. Because waiting for "a while" before
2434 * checking status is fine, post SRST, we perform this magic
2435 * delay here as well.
2436 *
2437 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2438 */
2439 msleep(150);
2440
2441 /* Before we perform post-reset processing we want to see if
2442 * the bus shows 0xFF, because the odd clown forgets the D7
2443 * pulldown resistor.
2444 */
2445 if (ata_check_status(ap) == 0xFF) {
2446 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2447 return AC_ERR_OTHER;
2448 }
2449
2450 ata_bus_post_reset(ap, devmask);
2451
2452 return 0;
2453}
2454
2455/**
2456 * ata_bus_reset - reset host port and associated ATA channel
2457 * @ap: port to reset
2458 *
2459 * This is typically the first time we actually start issuing
2460 * commands to the ATA channel. We wait for BSY to clear, then
2461 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2462 * result. Determine what devices, if any, are on the channel
2463 * by looking at the device 0/1 error register. Look at the signature
2464 * stored in each device's taskfile registers, to determine if
2465 * the device is ATA or ATAPI.
2466 *
2467 * LOCKING:
2468 * PCI/etc. bus probe sem.
2469 * Obtains host lock.
2470 *
2471 * SIDE EFFECTS:
2472 * Sets ATA_FLAG_DISABLED if bus reset fails.
2473 */
2474
2475void ata_bus_reset(struct ata_port *ap)
2476{
2477 struct ata_ioports *ioaddr = &ap->ioaddr;
2478 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2479 u8 err;
2480 unsigned int dev0, dev1 = 0, devmask = 0;
2481
2482 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2483
2484 /* determine if device 0/1 are present */
2485 if (ap->flags & ATA_FLAG_SATA_RESET)
2486 dev0 = 1;
2487 else {
2488 dev0 = ata_devchk(ap, 0);
2489 if (slave_possible)
2490 dev1 = ata_devchk(ap, 1);
2491 }
2492
2493 if (dev0)
2494 devmask |= (1 << 0);
2495 if (dev1)
2496 devmask |= (1 << 1);
2497
2498 /* select device 0 again */
2499 ap->ops->dev_select(ap, 0);
2500
2501 /* issue bus reset */
2502 if (ap->flags & ATA_FLAG_SRST)
2503 if (ata_bus_softreset(ap, devmask))
2504 goto err_out;
2505
2506 /*
2507 * determine by signature whether we have ATA or ATAPI devices
2508 */
2509 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2510 if ((slave_possible) && (err != 0x81))
2511 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2512
2513 /* re-enable interrupts */
2514 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2515 ata_irq_on(ap);
2516
2517 /* is double-select really necessary? */
2518 if (ap->device[1].class != ATA_DEV_NONE)
2519 ap->ops->dev_select(ap, 1);
2520 if (ap->device[0].class != ATA_DEV_NONE)
2521 ap->ops->dev_select(ap, 0);
2522
2523 /* if no devices were detected, disable this port */
2524 if ((ap->device[0].class == ATA_DEV_NONE) &&
2525 (ap->device[1].class == ATA_DEV_NONE))
2526 goto err_out;
2527
2528 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2529 /* set up device control for ATA_FLAG_SATA_RESET */
2530 if (ap->flags & ATA_FLAG_MMIO)
2531 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2532 else
2533 outb(ap->ctl, ioaddr->ctl_addr);
2534 }
2535
2536 DPRINTK("EXIT\n");
2537 return;
2538
2539err_out:
2540 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2541 ap->ops->port_disable(ap);
2542
2543 DPRINTK("EXIT\n");
2544}
2545
2546/**
2547 * sata_phy_debounce - debounce SATA phy status
2548 * @ap: ATA port to debounce SATA phy status for
2549 * @params: timing parameters { interval, duration, timeout } in msec
2550 *
2551 * Make sure SStatus of @ap reaches stable state, determined by
2552 * holding the same value where DET is not 1 for @duration polled
2553 * every @interval, before @timeout. The timeout constrains the
2554 * beginning of the stable state. Because DET gets stuck at 1 on
2555 * some controllers after hot unplugging, this function waits
2556 * until the timeout and then returns 0 if DET is stable at 1.
2557 *
2558 * LOCKING:
2559 * Kernel thread context (may sleep)
2560 *
2561 * RETURNS:
2562 * 0 on success, -errno on failure.
2563 */
2564int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2565{
2566 unsigned long interval_msec = params[0];
2567 unsigned long duration = params[1] * HZ / 1000;
2568 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2569 unsigned long last_jiffies;
2570 u32 last, cur;
2571 int rc;
2572
2573 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2574 return rc;
2575 cur &= 0xf;
2576
2577 last = cur;
2578 last_jiffies = jiffies;
2579
2580 while (1) {
2581 msleep(interval_msec);
2582 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2583 return rc;
2584 cur &= 0xf;
2585
2586 /* DET stable? */
2587 if (cur == last) {
2588 if (cur == 1 && time_before(jiffies, timeout))
2589 continue;
2590 if (time_after(jiffies, last_jiffies + duration))
2591 return 0;
2592 continue;
2593 }
2594
2595 /* unstable, start over */
2596 last = cur;
2597 last_jiffies = jiffies;
2598
2599 /* check timeout */
2600 if (time_after(jiffies, timeout))
2601 return -EBUSY;
2602 }
2603}
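
/* The @params triplet is { interval, duration, timeout } in msec; the
 * debounce timing tables defined earlier in this file (e.g. the
 * hotplug timing used by ata_wait_spinup() below) follow that layout.
 * A sketch with assumed numbers: { 25, 500, 2000 } polls SStatus
 * every 25 ms, succeeds once DET holds the same non-1 value for
 * 500 ms, and gives up 2000 ms after entry.
 */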
2604
2605/**
2606 * sata_phy_resume - resume SATA phy
2607 * @ap: ATA port to resume SATA phy for
2608 * @params: timing parameters { interval, duration, timeout } in msec
2609 *
2610 * Resume SATA phy of @ap and debounce it.
2611 *
2612 * LOCKING:
2613 * Kernel thread context (may sleep)
2614 *
2615 * RETURNS:
2616 * 0 on success, -errno on failure.
2617 */
2618int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2619{
2620 u32 scontrol;
2621 int rc;
2622
2623 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2624 return rc;
2625
2626 scontrol = (scontrol & 0x0f0) | 0x300;
2627
2628 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2629 return rc;
2630
2631 /* Some PHYs react badly if SStatus is pounded immediately
2632 * after resuming. Delay 200ms before debouncing.
2633 */
2634 msleep(200);
2635
2636 return sata_phy_debounce(ap, params);
2637}
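
/* SControl bit layout used above (per the SATA spec): DET is bits
 * 3:0, SPD bits 7:4, IPM bits 11:8. "(scontrol & 0x0f0) | 0x300"
 * therefore preserves the speed limit, sets IPM to 3 (transitions to
 * partial/slumber power states disabled) and clears DET to 0,
 * releasing any reset condition so the phy may renegotiate.
 */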
2638
2639static void ata_wait_spinup(struct ata_port *ap)
2640{
2641 struct ata_eh_context *ehc = &ap->eh_context;
2642 unsigned long end, secs;
2643 int rc;
2644
2645 /* first, debounce phy if SATA */
2646 if (ap->cbl == ATA_CBL_SATA) {
2647 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2648
2649 /* if debounced successfully and offline, no need to wait */
2650 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2651 return;
2652 }
2653
2654 /* okay, let's give the drive time to spin up */
2655 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2656 secs = ((end - jiffies) + HZ - 1) / HZ;
2657
2658 if (time_after(jiffies, end))
2659 return;
2660
2661 if (secs > 5)
2662 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2663 "(%lu secs)\n", secs);
2664
2665 schedule_timeout_uninterruptible(end - jiffies);
2666}
2667
2668/**
2669 * ata_std_prereset - prepare for reset
2670 * @ap: ATA port to be reset
2671 *
2672 * @ap is about to be reset. Initialize it.
2673 *
2674 * LOCKING:
2675 * Kernel thread context (may sleep)
2676 *
2677 * RETURNS:
2678 * 0 on success, -errno otherwise.
2679 */
2680int ata_std_prereset(struct ata_port *ap)
2681{
2682 struct ata_eh_context *ehc = &ap->eh_context;
2683 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2684 int rc;
2685
2686 /* handle link resume & hotplug spinup */
2687 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2688 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2689 ehc->i.action |= ATA_EH_HARDRESET;
2690
2691 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2692 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2693 ata_wait_spinup(ap);
2694
2695 /* if we're about to do hardreset, nothing more to do */
2696 if (ehc->i.action & ATA_EH_HARDRESET)
2697 return 0;
2698
2699 /* if SATA, resume phy */
2700 if (ap->cbl == ATA_CBL_SATA) {
2701 rc = sata_phy_resume(ap, timing);
2702 if (rc && rc != -EOPNOTSUPP) {
2703 /* phy resume failed */
2704 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2705 "link for reset (errno=%d)\n", rc);
2706 return rc;
2707 }
2708 }
2709
2710 /* Wait for !BSY if the controller can wait for the first D2H
2711 * Reg FIS and we don't know that no device is attached.
2712 */
2713 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2714 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2715
2716 return 0;
2717}
2718
2719/**
2720 * ata_std_softreset - reset host port via ATA SRST
2721 * @ap: port to reset
2722 * @classes: resulting classes of attached devices
2723 *
2724 * Reset host port using ATA SRST.
2725 *
2726 * LOCKING:
2727 * Kernel thread context (may sleep)
2728 *
2729 * RETURNS:
2730 * 0 on success, -errno otherwise.
2731 */
2732int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2733{
2734 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2735 unsigned int devmask = 0, err_mask;
2736 u8 err;
2737
2738 DPRINTK("ENTER\n");
2739
2740 if (ata_port_offline(ap)) {
2741 classes[0] = ATA_DEV_NONE;
2742 goto out;
2743 }
2744
2745 /* determine if device 0/1 are present */
2746 if (ata_devchk(ap, 0))
2747 devmask |= (1 << 0);
2748 if (slave_possible && ata_devchk(ap, 1))
2749 devmask |= (1 << 1);
2750
2751 /* select device 0 again */
2752 ap->ops->dev_select(ap, 0);
2753
2754 /* issue bus reset */
2755 DPRINTK("about to softreset, devmask=%x\n", devmask);
2756 err_mask = ata_bus_softreset(ap, devmask);
2757 if (err_mask) {
2758 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2759 err_mask);
2760 return -EIO;
2761 }
2762
2763 /* determine by signature whether we have ATA or ATAPI devices */
2764 classes[0] = ata_dev_try_classify(ap, 0, &err);
2765 if (slave_possible && err != 0x81)
2766 classes[1] = ata_dev_try_classify(ap, 1, &err);
2767
2768 out:
2769 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2770 return 0;
2771}
2772
2773/**
2774 * sata_std_hardreset - reset host port via SATA phy reset
2775 * @ap: port to reset
2776 * @class: resulting class of attached device
2777 *
2778 * SATA phy-reset host port using DET bits of SControl register.
2779 *
2780 * LOCKING:
2781 * Kernel thread context (may sleep)
2782 *
2783 * RETURNS:
2784 * 0 on success, -errno otherwise.
2785 */
2786int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2787{
2788 struct ata_eh_context *ehc = &ap->eh_context;
2789 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2790 u32 scontrol;
2791 int rc;
2792
2793 DPRINTK("ENTER\n");
2794
2795 if (sata_set_spd_needed(ap)) {
2796 /* SATA spec says nothing about how to reconfigure
2797 * spd. To be on the safe side, turn off phy during
2798 * reconfiguration. This works for at least ICH7 AHCI
2799 * and Sil3124.
2800 */
2801 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2802 return rc;
2803
2804 scontrol = (scontrol & 0x0f0) | 0x304;
2805
2806 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2807 return rc;
2808
2809 sata_set_spd(ap);
2810 }
2811
2812 /* issue phy wake/reset */
2813 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2814 return rc;
2815
2816 scontrol = (scontrol & 0x0f0) | 0x301;
2817
2818 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2819 return rc;
2820
2821 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2822 * 10.4.2 says at least 1 ms.
2823 */
2824 msleep(1);
2825
2826 /* bring phy back */
2827 sata_phy_resume(ap, timing);
2828
2829 /* TODO: phy layer with polling, timeouts, etc. */
2830 if (ata_port_offline(ap)) {
2831 *class = ATA_DEV_NONE;
2832 DPRINTK("EXIT, link offline\n");
2833 return 0;
2834 }
2835
2836 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2837 ata_port_printk(ap, KERN_ERR,
2838 "COMRESET failed (device not ready)\n");
2839 return -EIO;
2840 }
2841
2842 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2843
2844 *class = ata_dev_try_classify(ap, 0, NULL);
2845
2846 DPRINTK("EXIT, class=%u\n", *class);
2847 return 0;
2848}
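
/* DET encodings written above, for reference (SATA spec values):
 * 0x0 releases the reset, 0x1 asserts COMRESET ("0x301"), and 0x4
 * disables the interface and takes the phy offline ("0x304", used
 * here while reconfiguring the speed limit).
 */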
2849
2850/**
2851 * ata_std_postreset - standard postreset callback
2852 * @ap: the target ata_port
2853 * @classes: classes of attached devices
2854 *
2855 * This function is invoked after a successful reset. Note that
2856 * the device might have been reset more than once using
2857 * different reset methods before postreset is invoked.
2858 *
2859 * LOCKING:
2860 * Kernel thread context (may sleep)
2861 */
2862void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2863{
2864 u32 serror;
2865
2866 DPRINTK("ENTER\n");
2867
2868 /* print link status */
2869 sata_print_link_status(ap);
2870
2871 /* clear SError */
2872 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2873 sata_scr_write(ap, SCR_ERROR, serror);
2874
2875 /* re-enable interrupts */
2876 if (!ap->ops->error_handler) {
2877 /* FIXME: hack. create a hook instead */
2878 if (ap->ioaddr.ctl_addr)
2879 ata_irq_on(ap);
2880 }
2881
2882 /* is double-select really necessary? */
2883 if (classes[0] != ATA_DEV_NONE)
2884 ap->ops->dev_select(ap, 1);
2885 if (classes[1] != ATA_DEV_NONE)
2886 ap->ops->dev_select(ap, 0);
2887
2888 /* bail out if no device is present */
2889 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2890 DPRINTK("EXIT, no device\n");
2891 return;
2892 }
2893
2894 /* set up device control */
2895 if (ap->ioaddr.ctl_addr) {
2896 if (ap->flags & ATA_FLAG_MMIO)
2897 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2898 else
2899 outb(ap->ctl, ap->ioaddr.ctl_addr);
2900 }
2901
2902 DPRINTK("EXIT\n");
2903}
2904
2905/**
2906 * ata_dev_same_device - Determine whether new ID matches configured device
2907 * @dev: device to compare against
2908 * @new_class: class of the new device
2909 * @new_id: IDENTIFY page of the new device
2910 *
2911 * Compare @new_class and @new_id against @dev and determine
2912 * whether @dev is the device indicated by @new_class and
2913 * @new_id.
2914 *
2915 * LOCKING:
2916 * None.
2917 *
2918 * RETURNS:
2919 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2920 */
2921static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2922 const u16 *new_id)
2923{
2924 const u16 *old_id = dev->id;
2925 unsigned char model[2][41], serial[2][21];
2926 u64 new_n_sectors;
2927
2928 if (dev->class != new_class) {
2929 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2930 dev->class, new_class);
2931 return 0;
2932 }
2933
2934 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2935 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2936 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2937 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2938 new_n_sectors = ata_id_n_sectors(new_id);
2939
2940 if (strcmp(model[0], model[1])) {
2941 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2942 "'%s' != '%s'\n", model[0], model[1]);
2943 return 0;
2944 }
2945
2946 if (strcmp(serial[0], serial[1])) {
2947 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2948 "'%s' != '%s'\n", serial[0], serial[1]);
2949 return 0;
2950 }
2951
2952 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2953 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2954 "%llu != %llu\n",
2955 (unsigned long long)dev->n_sectors,
2956 (unsigned long long)new_n_sectors);
2957 return 0;
2958 }
2959
2960 return 1;
2961}
2962
2963/**
2964 * ata_dev_revalidate - Revalidate ATA device
2965 * @dev: device to revalidate
2966 * @post_reset: is this revalidation after reset?
2967 *
2968 * Re-read IDENTIFY page and make sure @dev is still attached to
2969 * the port.
2970 *
2971 * LOCKING:
2972 * Kernel thread context (may sleep)
2973 *
2974 * RETURNS:
2975 * 0 on success, negative errno otherwise
2976 */
2977int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2978{
2979 unsigned int class = dev->class;
2980 u16 *id = (void *)dev->ap->sector_buf;
2981 int rc;
2982
2983 if (!ata_dev_enabled(dev)) {
2984 rc = -ENODEV;
2985 goto fail;
2986 }
2987
2988 /* read ID data */
2989 rc = ata_dev_read_id(dev, &class, post_reset, id);
2990 if (rc)
2991 goto fail;
2992
2993 /* is the device still there? */
2994 if (!ata_dev_same_device(dev, class, id)) {
2995 rc = -ENODEV;
2996 goto fail;
2997 }
2998
2999 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3000
3001 /* configure device according to the new ID */
3002 rc = ata_dev_configure(dev, 0);
3003 if (rc == 0)
3004 return 0;
3005
3006 fail:
3007 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3008 return rc;
3009}
3010
3011static const char * const ata_dma_blacklist [] = {
3012 "WDC AC11000H", NULL,
3013 "WDC AC22100H", NULL,
3014 "WDC AC32500H", NULL,
3015 "WDC AC33100H", NULL,
3016 "WDC AC31600H", NULL,
3017 "WDC AC32100H", "24.09P07",
3018 "WDC AC23200L", "21.10N21",
3019 "Compaq CRD-8241B", NULL,
3020 "CRD-8400B", NULL,
3021 "CRD-8480B", NULL,
3022 "CRD-8482B", NULL,
3023 "CRD-84", NULL,
3024 "SanDisk SDP3B", NULL,
3025 "SanDisk SDP3B-64", NULL,
3026 "SANYO CD-ROM CRD", NULL,
3027 "HITACHI CDR-8", NULL,
3028 "HITACHI CDR-8335", NULL,
3029 "HITACHI CDR-8435", NULL,
3030 "Toshiba CD-ROM XM-6202B", NULL,
3031 "TOSHIBA CD-ROM XM-1702BC", NULL,
3032 "CD-532E-A", NULL,
3033 "E-IDE CD-ROM CR-840", NULL,
3034 "CD-ROM Drive/F5A", NULL,
3035 "WPI CDD-820", NULL,
3036 "SAMSUNG CD-ROM SC-148C", NULL,
3037 "SAMSUNG CD-ROM SC", NULL,
3038 "SanDisk SDP3B-64", NULL,
3039 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
3040 "_NEC DV5800A", NULL,
3041 "SAMSUNG CD-ROM SN-124", "N001"
3042};
3043
3044static int ata_strim(char *s, size_t len)
3045{
3046 len = strnlen(s, len);
3047
3048 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3049 while ((len > 0) && (s[len - 1] == ' ')) {
3050 len--;
3051 s[len] = 0;
3052 }
3053 return len;
3054}
3055
3056static int ata_dma_blacklisted(const struct ata_device *dev)
3057{
3058 unsigned char model_num[40];
3059 unsigned char model_rev[16];
3060 unsigned int nlen, rlen;
3061 int i;
3062
3063 /* We don't support polling DMA.
3064 * Blacklist DMA for ATAPI devices with CDB-intr (they fall back
3065 * to PIO) if the LLDD handles interrupts only in the HSM_ST_LAST state.
3066 */
3067 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3068 (dev->flags & ATA_DFLAG_CDB_INTR))
3069 return 1;
3070
3071 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3072 sizeof(model_num));
3073 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3074 sizeof(model_rev));
3075 nlen = ata_strim(model_num, sizeof(model_num));
3076 rlen = ata_strim(model_rev, sizeof(model_rev));
3077
3078 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3079 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3080 if (ata_dma_blacklist[i+1] == NULL)
3081 return 1;
3082 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
3083 return 1;
3084 }
3085 }
3086 return 0;
3087}
3088
3089/**
3090 * ata_dev_xfermask - Compute supported xfermask of the given device
3091 * @dev: Device to compute xfermask for
3092 *
3093 * Compute supported xfermask of @dev and store it in
3094 * dev->*_mask. This function is responsible for applying all
3095 * known limits including host controller limits, device
3096 * blacklist, etc...
3097 *
3098 * LOCKING:
3099 * None.
3100 */
3101static void ata_dev_xfermask(struct ata_device *dev)
3102{
3103 struct ata_port *ap = dev->ap;
3104 struct ata_host *host = ap->host;
3105 unsigned long xfer_mask;
3106
3107 /* controller modes available */
3108 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3109 ap->mwdma_mask, ap->udma_mask);
3110
3111 /* Apply cable rule here. Don't apply it early because when
3112 * we handle hot plug the cable type can itself change.
3113 */
3114 if (ap->cbl == ATA_CBL_PATA40)
3115 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3116
3117 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3118 dev->mwdma_mask, dev->udma_mask);
3119 xfer_mask &= ata_id_xfermask(dev->id);
3120
3121 /*
3122 * CFA Advanced TrueIDE timings are not allowed on a shared
3123 * cable
3124 */
3125 if (ata_dev_pair(dev)) {
3126 /* No PIO5 or PIO6 */
3127 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3128 /* No MWDMA3 or MWDMA 4 */
3129 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3130 }
3131
3132 if (ata_dma_blacklisted(dev)) {
3133 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3134 ata_dev_printk(dev, KERN_WARNING,
3135 "device is on DMA blacklist, disabling DMA\n");
3136 }
3137
3138 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3139 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3140 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3141 "other device, disabling DMA\n");
3142 }
3143
3144 if (ap->ops->mode_filter)
3145 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3146
3147 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3148 &dev->mwdma_mask, &dev->udma_mask);
3149}
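
/* Example of the cable rule above (mask arithmetic only): 0xF8 is
 * bits 3-7, so "~(0xF8 << ATA_SHIFT_UDMA)" clears UDMA3..UDMA7 from
 * the packed mask and a 40-wire cable tops out at UDMA2 (ATA/33),
 * whatever the drive and controller otherwise advertise.
 */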
3150
3151/**
3152 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3153 * @dev: Device to which command will be sent
3154 *
3155 * Issue SET FEATURES - XFER MODE command to device @dev
3156 * on port @ap.
3157 *
3158 * LOCKING:
3159 * PCI/etc. bus probe sem.
3160 *
3161 * RETURNS:
3162 * 0 on success, AC_ERR_* mask otherwise.
3163 */
3164
3165static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3166{
3167 struct ata_taskfile tf;
3168 unsigned int err_mask;
3169
3170 /* set up set-features taskfile */
3171 DPRINTK("set features - xfer mode\n");
3172
3173 ata_tf_init(dev, &tf);
3174 tf.command = ATA_CMD_SET_FEATURES;
3175 tf.feature = SETFEATURES_XFER;
3176 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3177 tf.protocol = ATA_PROT_NODATA;
3178 tf.nsect = dev->xfer_mode;
3179
3180 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3181
3182 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3183 return err_mask;
3184}
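
/* The mode value travels in the sector count register; XFER_* values
 * encode class + level, e.g. XFER_PIO_4 = 0x08 + 4 = 0x0C and
 * XFER_UDMA_5 = 0x40 + 5 = 0x45, so "tf.nsect = dev->xfer_mode"
 * above directly yields the SET FEATURES - XFER MODE argument.
 */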
3185
3186/**
3187 * ata_dev_init_params - Issue INIT DEV PARAMS command
3188 * @dev: Device to which command will be sent
3189 * @heads: Number of heads (taskfile parameter)
3190 * @sectors: Number of sectors (taskfile parameter)
3191 *
3192 * LOCKING:
3193 * Kernel thread context (may sleep)
3194 *
3195 * RETURNS:
3196 * 0 on success, AC_ERR_* mask otherwise.
3197 */
3198static unsigned int ata_dev_init_params(struct ata_device *dev,
3199 u16 heads, u16 sectors)
3200{
3201 struct ata_taskfile tf;
3202 unsigned int err_mask;
3203
3204 /* Number of sectors per track 1-255. Number of heads 1-16 */
3205 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3206 return AC_ERR_INVALID;
3207
3208 /* set up init dev params taskfile */
3209 DPRINTK("init dev params \n");
3210
3211 ata_tf_init(dev, &tf);
3212 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3213 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3214 tf.protocol = ATA_PROT_NODATA;
3215 tf.nsect = sectors;
3216 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3217
3218 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3219
3220 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3221 return err_mask;
3222}
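
/* Sketch of a classic CHS setup (geometry assumed for illustration):
 * for 16 heads and 63 sectors per track the taskfile carries
 * nsect = 63 and the low nibble of the device register = 15
 * (heads - 1), matching the range checks above.
 */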
3223
3224/**
3225 * ata_sg_clean - Unmap DMA memory associated with command
3226 * @qc: Command containing DMA memory to be released
3227 *
3228 * Unmap all mapped DMA memory associated with this command.
3229 *
3230 * LOCKING:
3231 * spin_lock_irqsave(host lock)
3232 */
3233
3234static void ata_sg_clean(struct ata_queued_cmd *qc)
3235{
3236 struct ata_port *ap = qc->ap;
3237 struct scatterlist *sg = qc->__sg;
3238 int dir = qc->dma_dir;
3239 void *pad_buf = NULL;
3240
3241 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3242 WARN_ON(sg == NULL);
3243
3244 if (qc->flags & ATA_QCFLAG_SINGLE)
3245 WARN_ON(qc->n_elem > 1);
3246
3247 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3248
3249 /* if we padded the buffer out to 32-bit bound, and data
3250 * xfer direction is from-device, we must copy from the
3251 * pad buffer back into the supplied buffer
3252 */
3253 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3254 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3255
3256 if (qc->flags & ATA_QCFLAG_SG) {
3257 if (qc->n_elem)
3258 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3259 /* restore last sg */
3260 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3261 if (pad_buf) {
3262 struct scatterlist *psg = &qc->pad_sgent;
3263 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3264 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3265 kunmap_atomic(addr, KM_IRQ0);
3266 }
3267 } else {
3268 if (qc->n_elem)
3269 dma_unmap_single(ap->dev,
3270 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3271 dir);
3272 /* restore sg */
3273 sg->length += qc->pad_len;
3274 if (pad_buf)
3275 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3276 pad_buf, qc->pad_len);
3277 }
3278
3279 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3280 qc->__sg = NULL;
3281}
3282
3283/**
3284 * ata_fill_sg - Fill PCI IDE PRD table
3285 * @qc: Metadata associated with taskfile to be transferred
3286 *
3287 * Fill PCI IDE PRD (scatter-gather) table with segments
3288 * associated with the current disk command.
3289 *
3290 * LOCKING:
3291 * spin_lock_irqsave(host lock)
3292 *
3293 */
3294static void ata_fill_sg(struct ata_queued_cmd *qc)
3295{
3296 struct ata_port *ap = qc->ap;
3297 struct scatterlist *sg;
3298 unsigned int idx;
3299
3300 WARN_ON(qc->__sg == NULL);
3301 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3302
3303 idx = 0;
3304 ata_for_each_sg(sg, qc) {
3305 u32 addr, offset;
3306 u32 sg_len, len;
3307
3308 /* determine if physical DMA addr spans 64K boundary.
3309 * Note h/w doesn't support 64-bit, so we unconditionally
3310 * truncate dma_addr_t to u32.
3311 */
3312 addr = (u32) sg_dma_address(sg);
3313 sg_len = sg_dma_len(sg);
3314
3315 while (sg_len) {
3316 offset = addr & 0xffff;
3317 len = sg_len;
3318 if ((offset + sg_len) > 0x10000)
3319 len = 0x10000 - offset;
3320
3321 ap->prd[idx].addr = cpu_to_le32(addr);
3322 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3323 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3324
3325 idx++;
3326 sg_len -= len;
3327 addr += len;
3328 }
3329 }
3330
3331 if (idx)
3332 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3333}
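
/* Boundary-split example (addresses hypothetical): a 0x200-byte
 * segment at bus address 0x1ff00 crosses a 64K boundary, so the loop
 * above emits two PRDs, (0x1ff00, 0x100) and (0x20000, 0x100); the
 * second would carry ATA_PRD_EOT if it is the command's last entry.
 */
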
3334/**
3335 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3336 * @qc: Metadata associated with taskfile to check
3337 *
3338 * Allow low-level driver to filter ATA PACKET commands, returning
3339 * a status indicating whether or not it is OK to use DMA for the
3340 * supplied PACKET command.
3341 *
3342 * LOCKING:
3343 * spin_lock_irqsave(host lock)
3344 *
3345 * RETURNS: 0 when ATAPI DMA can be used
3346 * nonzero otherwise
3347 */
3348int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3349{
3350 struct ata_port *ap = qc->ap;
3351 int rc = 0; /* Assume ATAPI DMA is OK by default */
3352
3353 if (ap->ops->check_atapi_dma)
3354 rc = ap->ops->check_atapi_dma(qc);
3355
3356 return rc;
3357}
3358/**
3359 * ata_qc_prep - Prepare taskfile for submission
3360 * @qc: Metadata associated with taskfile to be prepared
3361 *
3362 * Prepare ATA taskfile for submission.
3363 *
3364 * LOCKING:
3365 * spin_lock_irqsave(host lock)
3366 */
3367void ata_qc_prep(struct ata_queued_cmd *qc)
3368{
3369 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3370 return;
3371
3372 ata_fill_sg(qc);
3373}
3374
3375void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3376
3377/**
3378 * ata_sg_init_one - Associate command with memory buffer
3379 * @qc: Command to be associated
3380 * @buf: Memory buffer
3381 * @buflen: Length of memory buffer, in bytes.
3382 *
3383 * Initialize the data-related elements of queued_cmd @qc
3384 * to point to a single memory buffer, @buf of byte length @buflen.
3385 *
3386 * LOCKING:
3387 * spin_lock_irqsave(host lock)
3388 */
3389
3390void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3391{
3392 struct scatterlist *sg;
3393
3394 qc->flags |= ATA_QCFLAG_SINGLE;
3395
3396 memset(&qc->sgent, 0, sizeof(qc->sgent));
3397 qc->__sg = &qc->sgent;
3398 qc->n_elem = 1;
3399 qc->orig_n_elem = 1;
3400 qc->buf_virt = buf;
3401 qc->nbytes = buflen;
3402
3403 sg = qc->__sg;
3404 sg_init_one(sg, buf, buflen);
3405}
3406
3407/**
3408 * ata_sg_init - Associate command with scatter-gather table.
3409 * @qc: Command to be associated
3410 * @sg: Scatter-gather table.
3411 * @n_elem: Number of elements in s/g table.
3412 *
3413 * Initialize the data-related elements of queued_cmd @qc
3414 * to point to a scatter-gather table @sg, containing @n_elem
3415 * elements.
3416 *
3417 * LOCKING:
3418 * spin_lock_irqsave(host lock)
3419 */
3420
3421void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3422 unsigned int n_elem)
3423{
3424 qc->flags |= ATA_QCFLAG_SG;
3425 qc->__sg = sg;
3426 qc->n_elem = n_elem;
3427 qc->orig_n_elem = n_elem;
3428}
3429
3430/**
3431 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3432 * @qc: Command with memory buffer to be mapped.
3433 *
3434 * DMA-map the memory buffer associated with queued_cmd @qc.
3435 *
3436 * LOCKING:
3437 * spin_lock_irqsave(host lock)
3438 *
3439 * RETURNS:
3440 * Zero on success, negative on error.
3441 */
3442
3443static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3444{
3445 struct ata_port *ap = qc->ap;
3446 int dir = qc->dma_dir;
3447 struct scatterlist *sg = qc->__sg;
3448 dma_addr_t dma_address;
3449 int trim_sg = 0;
3450
3451 /* we must lengthen transfers to end on a 32-bit boundary */
3452 qc->pad_len = sg->length & 3;
3453 if (qc->pad_len) {
3454 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3455 struct scatterlist *psg = &qc->pad_sgent;
3456
3457 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3458
3459 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3460
3461 if (qc->tf.flags & ATA_TFLAG_WRITE)
3462 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3463 qc->pad_len);
3464
3465 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3466 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3467 /* trim sg */
3468 sg->length -= qc->pad_len;
3469 if (sg->length == 0)
3470 trim_sg = 1;
3471
3472 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3473 sg->length, qc->pad_len);
3474 }
3475
3476 if (trim_sg) {
3477 qc->n_elem--;
3478 goto skip_map;
3479 }
3480
3481 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3482 sg->length, dir);
3483 if (dma_mapping_error(dma_address)) {
3484 /* restore sg */
3485 sg->length += qc->pad_len;
3486 return -1;
3487 }
3488
3489 sg_dma_address(sg) = dma_address;
3490 sg_dma_len(sg) = sg->length;
3491
3492skip_map:
3493 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3494 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3495
3496 return 0;
3497}
3498
3499/**
3500 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3501 * @qc: Command with scatter-gather table to be mapped.
3502 *
3503 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3504 *
3505 * LOCKING:
3506 * spin_lock_irqsave(host lock)
3507 *
3508 * RETURNS:
3509 * Zero on success, negative on error.
3510 *
3511 */
3512
3513static int ata_sg_setup(struct ata_queued_cmd *qc)
3514{
3515 struct ata_port *ap = qc->ap;
3516 struct scatterlist *sg = qc->__sg;
3517 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3518 int n_elem, pre_n_elem, dir, trim_sg = 0;
3519
3520 VPRINTK("ENTER, ata%u\n", ap->id);
3521 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3522
3523 /* we must lengthen transfers to end on a 32-bit boundary */
3524 qc->pad_len = lsg->length & 3;
3525 if (qc->pad_len) {
3526 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3527 struct scatterlist *psg = &qc->pad_sgent;
3528 unsigned int offset;
3529
3530 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3531
3532 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3533
3534 /*
3535 * psg->page/offset are used to copy to-be-written
3536 * data in this function or read data in ata_sg_clean.
3537 */
3538 offset = lsg->offset + lsg->length - qc->pad_len;
3539 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3540 psg->offset = offset_in_page(offset);
3541
3542 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3543 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3544 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3545 kunmap_atomic(addr, KM_IRQ0);
3546 }
3547
3548 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3549 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3550 /* trim last sg */
3551 lsg->length -= qc->pad_len;
3552 if (lsg->length == 0)
3553 trim_sg = 1;
3554
3555 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3556 qc->n_elem - 1, lsg->length, qc->pad_len);
3557 }
3558
3559 pre_n_elem = qc->n_elem;
3560 if (trim_sg && pre_n_elem)
3561 pre_n_elem--;
3562
3563 if (!pre_n_elem) {
3564 n_elem = 0;
3565 goto skip_map;
3566 }
3567
3568 dir = qc->dma_dir;
3569 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3570 if (n_elem < 1) {
3571 /* restore last sg */
3572 lsg->length += qc->pad_len;
3573 return -1;
3574 }
3575
3576 DPRINTK("%d sg elements mapped\n", n_elem);
3577
3578skip_map:
3579 qc->n_elem = n_elem;
3580
3581 return 0;
3582}
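
/* Padding example (sizes assumed): an ATAPI read whose last sg
 * element is 510 bytes has pad_len = 510 & 3 = 2; the element is
 * trimmed to 508 bytes and the 4-byte pad entry appended, so the
 * mapped transfer ends on a 32-bit boundary and ata_sg_clean()
 * later copies the 2 real bytes back out of the pad buffer.
 */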
3583
3584/**
3585 * swap_buf_le16 - swap halves of 16-bit words in place
3586 * @buf: Buffer to swap
3587 * @buf_words: Number of 16-bit words in buffer.
3588 *
3589 * Swap halves of 16-bit words if needed to convert from
3590 * little-endian byte order to native cpu byte order, or
3591 * vice-versa.
3592 *
3593 * LOCKING:
3594 * Inherited from caller.
3595 */
3596void swap_buf_le16(u16 *buf, unsigned int buf_words)
3597{
3598#ifdef __BIG_ENDIAN
3599 unsigned int i;
3600
3601 for (i = 0; i < buf_words; i++)
3602 buf[i] = le16_to_cpu(buf[i]);
3603#endif /* __BIG_ENDIAN */
3604}
3605
3606/**
3607 * ata_mmio_data_xfer - Transfer data by MMIO
3608 * @adev: device for this I/O
3609 * @buf: data buffer
3610 * @buflen: buffer length
3611 * @write_data: read/write
3612 *
3613 * Transfer data from/to the device data register by MMIO.
3614 *
3615 * LOCKING:
3616 * Inherited from caller.
3617 */
3618
3619void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3620 unsigned int buflen, int write_data)
3621{
3622 struct ata_port *ap = adev->ap;
3623 unsigned int i;
3624 unsigned int words = buflen >> 1;
3625 u16 *buf16 = (u16 *) buf;
3626 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3627
3628 /* Transfer multiple of 2 bytes */
3629 if (write_data) {
3630 for (i = 0; i < words; i++)
3631 writew(le16_to_cpu(buf16[i]), mmio);
3632 } else {
3633 for (i = 0; i < words; i++)
3634 buf16[i] = cpu_to_le16(readw(mmio));
3635 }
3636
3637 /* Transfer trailing 1 byte, if any. */
3638 if (unlikely(buflen & 0x01)) {
3639 u16 align_buf[1] = { 0 };
3640 unsigned char *trailing_buf = buf + buflen - 1;
3641
3642 if (write_data) {
3643 memcpy(align_buf, trailing_buf, 1);
3644 writew(le16_to_cpu(align_buf[0]), mmio);
3645 } else {
3646 align_buf[0] = cpu_to_le16(readw(mmio));
3647 memcpy(trailing_buf, align_buf, 1);
3648 }
3649 }
3650}
3651
3652/**
3653 * ata_pio_data_xfer - Transfer data by PIO
3654 * @adev: device to target
3655 * @buf: data buffer
3656 * @buflen: buffer length
3657 * @write_data: read/write
3658 *
3659 * Transfer data from/to the device data register by PIO.
3660 *
3661 * LOCKING:
3662 * Inherited from caller.
3663 */
3664
3665void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3666 unsigned int buflen, int write_data)
3667{
3668 struct ata_port *ap = adev->ap;
3669 unsigned int words = buflen >> 1;
3670
3671 /* Transfer multiple of 2 bytes */
3672 if (write_data)
3673 outsw(ap->ioaddr.data_addr, buf, words);
3674 else
3675 insw(ap->ioaddr.data_addr, buf, words);
3676
3677 /* Transfer trailing 1 byte, if any. */
3678 if (unlikely(buflen & 0x01)) {
3679 u16 align_buf[1] = { 0 };
3680 unsigned char *trailing_buf = buf + buflen - 1;
3681
3682 if (write_data) {
3683 memcpy(align_buf, trailing_buf, 1);
3684 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3685 } else {
3686 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3687 memcpy(trailing_buf, align_buf, 1);
3688 }
3689 }
3690}
3691
3692/**
3693 * ata_pio_data_xfer_noirq - Transfer data by PIO
3694 * @adev: device to target
3695 * @buf: data buffer
3696 * @buflen: buffer length
3697 * @write_data: read/write
3698 *
3699 * Transfer data from/to the device data register by PIO. Do the
3700 * transfer with interrupts disabled.
3701 *
3702 * LOCKING:
3703 * Inherited from caller.
3704 */
3705
3706void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3707 unsigned int buflen, int write_data)
3708{
3709 unsigned long flags;
3710 local_irq_save(flags);
3711 ata_pio_data_xfer(adev, buf, buflen, write_data);
3712 local_irq_restore(flags);
3713}
3714
3715
3716/**
3717 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3718 * @qc: Command in progress
3719 *
3720 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3721 *
3722 * LOCKING:
3723 * Inherited from caller.
3724 */
3725
3726static void ata_pio_sector(struct ata_queued_cmd *qc)
3727{
3728 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3729 struct scatterlist *sg = qc->__sg;
3730 struct ata_port *ap = qc->ap;
3731 struct page *page;
3732 unsigned int offset;
3733 unsigned char *buf;
3734
3735 if (qc->cursect == (qc->nsect - 1))
3736 ap->hsm_task_state = HSM_ST_LAST;
3737
3738 page = sg[qc->cursg].page;
3739 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3740
3741 /* get the current page and offset */
3742 page = nth_page(page, (offset >> PAGE_SHIFT));
3743 offset %= PAGE_SIZE;
3744
3745 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3746
3747 if (PageHighMem(page)) {
3748 unsigned long flags;
3749
3750 /* FIXME: use a bounce buffer */
3751 local_irq_save(flags);
3752 buf = kmap_atomic(page, KM_IRQ0);
3753
3754 /* do the actual data transfer */
3755 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3756
3757 kunmap_atomic(buf, KM_IRQ0);
3758 local_irq_restore(flags);
3759 } else {
3760 buf = page_address(page);
3761 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3762 }
3763
3764 qc->cursect++;
3765 qc->cursg_ofs++;
3766
3767 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3768 qc->cursg++;
3769 qc->cursg_ofs = 0;
3770 }
3771}
3772
3773/**
3774 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3775 * @qc: Command in progress
3776 *
3777 * Transfer one or many ATA_SECT_SIZE of data from/to the
3778 * ATA device for the DRQ request.
3779 *
3780 * LOCKING:
3781 * Inherited from caller.
3782 */
3783
3784static void ata_pio_sectors(struct ata_queued_cmd *qc)
3785{
3786 if (is_multi_taskfile(&qc->tf)) {
3787 /* READ/WRITE MULTIPLE */
3788 unsigned int nsect;
3789
3790 WARN_ON(qc->dev->multi_count == 0);
3791
3792 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3793 while (nsect--)
3794 ata_pio_sector(qc);
3795 } else
3796 ata_pio_sector(qc);
3797}
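
/* Example (counts assumed): for a READ MULTIPLE command with
 * dev->multi_count = 16 and 100 sectors still to go, one DRQ event
 * moves min(100, 16) = 16 sectors through ata_pio_sector(); a plain
 * READ SECTOR(S) moves exactly one sector per DRQ.
 */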
3798
3799/**
3800 * atapi_send_cdb - Write CDB bytes to hardware
3801 * @ap: Port to which ATAPI device is attached.
3802 * @qc: Taskfile currently active
3803 *
3804 * When device has indicated its readiness to accept
3805 * a CDB, this function is called. Send the CDB.
3806 *
3807 * LOCKING:
3808 * caller.
3809 */
3810
3811static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3812{
3813 /* send SCSI cdb */
3814 DPRINTK("send cdb\n");
3815 WARN_ON(qc->dev->cdb_len < 12);
3816
3817 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3818 ata_altstatus(ap); /* flush */
3819
3820 switch (qc->tf.protocol) {
3821 case ATA_PROT_ATAPI:
3822 ap->hsm_task_state = HSM_ST;
3823 break;
3824 case ATA_PROT_ATAPI_NODATA:
3825 ap->hsm_task_state = HSM_ST_LAST;
3826 break;
3827 case ATA_PROT_ATAPI_DMA:
3828 ap->hsm_task_state = HSM_ST_LAST;
3829 /* initiate bmdma */
3830 ap->ops->bmdma_start(qc);
3831 break;
3832 }
3833}
3834
3835/**
3836 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3837 * @qc: Command in progress
3838 * @bytes: number of bytes
3839 *
3840 * Transfer data from/to the ATAPI device.
3841 *
3842 * LOCKING:
3843 * Inherited from caller.
3844 *
3845 */
3846
3847static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3848{
3849 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3850 struct scatterlist *sg = qc->__sg;
3851 struct ata_port *ap = qc->ap;
3852 struct page *page;
3853 unsigned char *buf;
3854 unsigned int offset, count;
3855
3856 if (qc->curbytes + bytes >= qc->nbytes)
3857 ap->hsm_task_state = HSM_ST_LAST;
3858
3859next_sg:
3860 if (unlikely(qc->cursg >= qc->n_elem)) {
3861 /*
3862 * The end of qc->sg is reached and the device expects
3863 * more data to transfer. To avoid overrunning qc->sg while still
3864 * fulfilling the length specified in the byte count register:
3865 * - for the read case, discard trailing data from the device
3866 * - for the write case, pad zero data out to the device
3867 */
3868 u16 pad_buf[1] = { 0 };
3869 unsigned int words = bytes >> 1;
3870 unsigned int i;
3871
3872 if (words) /* warning if bytes > 1 */
3873 ata_dev_printk(qc->dev, KERN_WARNING,
3874 "%u bytes trailing data\n", bytes);
3875
3876 for (i = 0; i < words; i++)
3877 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3878
3879 ap->hsm_task_state = HSM_ST_LAST;
3880 return;
3881 }
3882
3883 sg = &qc->__sg[qc->cursg];
3884
3885 page = sg->page;
3886 offset = sg->offset + qc->cursg_ofs;
3887
3888 /* get the current page and offset */
3889 page = nth_page(page, (offset >> PAGE_SHIFT));
3890 offset %= PAGE_SIZE;
3891
3892 /* don't overrun current sg */
3893 count = min(sg->length - qc->cursg_ofs, bytes);
3894
3895 /* don't cross page boundaries */
3896 count = min(count, (unsigned int)PAGE_SIZE - offset);
3897
3898 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3899
3900 if (PageHighMem(page)) {
3901 unsigned long flags;
3902
3903 /* FIXME: use bounce buffer */
3904 local_irq_save(flags);
3905 buf = kmap_atomic(page, KM_IRQ0);
3906
3907 /* do the actual data transfer */
3908 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3909
3910 kunmap_atomic(buf, KM_IRQ0);
3911 local_irq_restore(flags);
3912 } else {
3913 buf = page_address(page);
3914 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3915 }
3916
3917 bytes -= count;
3918 qc->curbytes += count;
3919 qc->cursg_ofs += count;
3920
3921 if (qc->cursg_ofs == sg->length) {
3922 qc->cursg++;
3923 qc->cursg_ofs = 0;
3924 }
3925
3926 if (bytes)
3927 goto next_sg;
3928}
3929
3930/**
3931 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3932 * @qc: Command in progress
3933 *
3934 * Transfer data from/to the ATAPI device.
3935 *
3936 * LOCKING:
3937 * Inherited from caller.
3938 */
3939
3940static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3941{
3942 struct ata_port *ap = qc->ap;
3943 struct ata_device *dev = qc->dev;
3944 unsigned int ireason, bc_lo, bc_hi, bytes;
3945 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3946
3947 /* Abuse qc->result_tf for temp storage of intermediate TF
3948 * here to save some kernel stack usage.
3949 * For normal completion, qc->result_tf is not relevant. For
3950 * error, qc->result_tf is later overwritten by ata_qc_complete().
3951 * So, the correctness of qc->result_tf is not affected.
3952 */
3953 ap->ops->tf_read(ap, &qc->result_tf);
3954 ireason = qc->result_tf.nsect;
3955 bc_lo = qc->result_tf.lbam;
3956 bc_hi = qc->result_tf.lbah;
3957 bytes = (bc_hi << 8) | bc_lo;
3958
3959 /* shall be cleared to zero, indicating xfer of data */
3960 if (ireason & (1 << 0))
3961 goto err_out;
3962
3963 /* make sure transfer direction matches expected */
3964 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3965 if (do_write != i_write)
3966 goto err_out;
3967
3968 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3969
3970 __atapi_pio_bytes(qc, bytes);
3971
3972 return;
3973
3974err_out:
3975 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3976 qc->err_mask |= AC_ERR_HSM;
3977 ap->hsm_task_state = HSM_ST_ERR;
3978}
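
/* ireason decoding above, per the ATAPI spec: bit 0 is CoD (1 =
 * command packet, 0 = data; data is required here) and bit 1 is IO
 * (1 = transfer to the host), hence i_write = (IO == 0), which must
 * agree with the direction the taskfile asked for or the HSM errors
 * out.
 */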
3979
3980/**
3981 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3982 * @ap: the target ata_port
3983 * @qc: qc in progress
3984 *
3985 * RETURNS:
3986 * 1 if ok in workqueue, 0 otherwise.
3987 */
3988
3989static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3990{
3991 if (qc->tf.flags & ATA_TFLAG_POLLING)
3992 return 1;
3993
3994 if (ap->hsm_task_state == HSM_ST_FIRST) {
3995 if (qc->tf.protocol == ATA_PROT_PIO &&
3996 (qc->tf.flags & ATA_TFLAG_WRITE))
3997 return 1;
3998
3999 if (is_atapi_taskfile(&qc->tf) &&
4000 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4001 return 1;
4002 }
4003
4004 return 0;
4005}
4006
4007/**
4008 * ata_hsm_qc_complete - finish a qc running on standard HSM
4009 * @qc: Command to complete
4010 * @in_wq: 1 if called from workqueue, 0 otherwise
4011 *
4012 * Finish @qc which is running on standard HSM.
4013 *
4014 * LOCKING:
4015 * If @in_wq is zero, spin_lock_irqsave(host lock).
4016 * Otherwise, none on entry and grabs host lock.
4017 */
4018static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4019{
4020 struct ata_port *ap = qc->ap;
4021 unsigned long flags;
4022
4023 if (ap->ops->error_handler) {
4024 if (in_wq) {
4025 spin_lock_irqsave(ap->lock, flags);
4026
4027 /* EH might have kicked in while host lock is
4028 * released.
4029 */
4030 qc = ata_qc_from_tag(ap, qc->tag);
4031 if (qc) {
4032 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4033 ata_irq_on(ap);
4034 ata_qc_complete(qc);
4035 } else
4036 ata_port_freeze(ap);
4037 }
4038
4039 spin_unlock_irqrestore(ap->lock, flags);
4040 } else {
4041 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4042 ata_qc_complete(qc);
4043 else
4044 ata_port_freeze(ap);
4045 }
4046 } else {
4047 if (in_wq) {
4048 spin_lock_irqsave(ap->lock, flags);
4049 ata_irq_on(ap);
4050 ata_qc_complete(qc);
4051 spin_unlock_irqrestore(ap->lock, flags);
4052 } else
4053 ata_qc_complete(qc);
4054 }
4055
4056 ata_altstatus(ap); /* flush */
4057}
4058
4059/**
4060 * ata_hsm_move - move the HSM to the next state.
4061 * @ap: the target ata_port
4062 * @qc: qc in progress
4063 * @status: current device status
4064 * @in_wq: 1 if called from workqueue, 0 otherwise
4065 *
4066 * RETURNS:
4067 * 1 when poll next status needed, 0 otherwise.
4068 */
4069int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4070 u8 status, int in_wq)
4071{
4072 unsigned long flags = 0;
4073 int poll_next;
4074
4075 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4076
4077 /* Make sure ata_qc_issue_prot() does not throw things
4078 * like DMA polling into the workqueue. Notice that
4079 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4080 */
4081 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4082
4083fsm_start:
4084 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4085 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4086
4087 switch (ap->hsm_task_state) {
4088 case HSM_ST_FIRST:
4089 /* Send first data block or PACKET CDB */
4090
4091 /* If polling, we will stay in the work queue after
4092 * sending the data. Otherwise, interrupt handler
4093 * takes over after sending the data.
4094 */
4095 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4096
4097 /* check device status */
4098 if (unlikely((status & ATA_DRQ) == 0)) {
4099 /* handle BSY=0, DRQ=0 as error */
4100 if (likely(status & (ATA_ERR | ATA_DF)))
4101 /* device stops HSM for abort/error */
4102 qc->err_mask |= AC_ERR_DEV;
4103 else
4104 /* HSM violation. Let EH handle this */
4105 qc->err_mask |= AC_ERR_HSM;
4106
4107 ap->hsm_task_state = HSM_ST_ERR;
4108 goto fsm_start;
4109 }
4110
4111 /* Device should not ask for data transfer (DRQ=1)
4112 * when it finds something wrong.
4113 * We ignore DRQ here and stop the HSM by
4114 * changing hsm_task_state to HSM_ST_ERR and
4115 * let the EH abort the command or reset the device.
4116 */
4117 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4118 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4119 ap->id, status);
4120 qc->err_mask |= AC_ERR_HSM;
4121 ap->hsm_task_state = HSM_ST_ERR;
4122 goto fsm_start;
4123 }
4124
4125 /* Send the CDB (atapi) or the first data block (ata pio out).
4126 * During the state transition, interrupt handler shouldn't
4127 * be invoked before the data transfer is complete and
4128 * hsm_task_state is changed. Hence, the following locking.
4129 */
4130 if (in_wq)
4131 spin_lock_irqsave(ap->lock, flags);
4132
4133 if (qc->tf.protocol == ATA_PROT_PIO) {
4134 /* PIO data out protocol.
4135 * send first data block.
4136 */
4137
4138 /* ata_pio_sectors() might change the state
4139 * to HSM_ST_LAST. so, the state is changed here
4140 * before ata_pio_sectors().
4141 */
4142 ap->hsm_task_state = HSM_ST;
4143 ata_pio_sectors(qc);
4144 ata_altstatus(ap); /* flush */
4145 } else
4146 /* send CDB */
4147 atapi_send_cdb(ap, qc);
4148
4149 if (in_wq)
4150 spin_unlock_irqrestore(ap->lock, flags);
4151
4152 /* if polling, ata_pio_task() handles the rest.
4153 * otherwise, interrupt handler takes over from here.
4154 */
4155 break;
4156
4157 case HSM_ST:
4158 /* complete command or read/write the data register */
4159 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4160 /* ATAPI PIO protocol */
4161 if ((status & ATA_DRQ) == 0) {
4162 /* No more data to transfer or device error.
4163 * Device error will be tagged in HSM_ST_LAST.
4164 */
4165 ap->hsm_task_state = HSM_ST_LAST;
4166 goto fsm_start;
4167 }
4168
4169 /* Device should not ask for data transfer (DRQ=1)
4170 * when it finds something wrong.
4171 * We ignore DRQ here and stop the HSM by
4172 * changing hsm_task_state to HSM_ST_ERR and
4173 * let the EH abort the command or reset the device.
4174 */
4175 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4176 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4177 ap->id, status);
4178 qc->err_mask |= AC_ERR_HSM;
4179 ap->hsm_task_state = HSM_ST_ERR;
4180 goto fsm_start;
4181 }
4182
4183 atapi_pio_bytes(qc);
4184
4185 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4186 /* bad ireason reported by device */
4187 goto fsm_start;
4188
4189 } else {
4190 /* ATA PIO protocol */
4191 if (unlikely((status & ATA_DRQ) == 0)) {
4192 /* handle BSY=0, DRQ=0 as error */
4193 if (likely(status & (ATA_ERR | ATA_DF)))
4194 /* device stops HSM for abort/error */
4195 qc->err_mask |= AC_ERR_DEV;
4196 else
4197 /* HSM violation. Let EH handle this */
4198 qc->err_mask |= AC_ERR_HSM;
4199
4200 ap->hsm_task_state = HSM_ST_ERR;
4201 goto fsm_start;
4202 }
4203
4204 /* For PIO reads, some devices may ask for
4205 * data transfer (DRQ=1) along with ERR=1.
4206 * We respect DRQ here and transfer one
4207 * block of junk data before changing the
4208 * hsm_task_state to HSM_ST_ERR.
4209 *
4210 * For PIO writes, ERR=1 DRQ=1 doesn't make
4211 * sense since the data block has been
4212 * transferred to the device.
4213 */
4214 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4215 /* data might be corrupted */
4216 qc->err_mask |= AC_ERR_DEV;
4217
4218 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4219 ata_pio_sectors(qc);
4220 ata_altstatus(ap);
4221 status = ata_wait_idle(ap);
4222 }
4223
4224 if (status & (ATA_BUSY | ATA_DRQ))
4225 qc->err_mask |= AC_ERR_HSM;
4226
4227 /* ata_pio_sectors() might change the
4228 * state to HSM_ST_LAST. so, the state
4229 * is changed after ata_pio_sectors().
4230 */
4231 ap->hsm_task_state = HSM_ST_ERR;
4232 goto fsm_start;
4233 }
4234
4235 ata_pio_sectors(qc);
4236
4237 if (ap->hsm_task_state == HSM_ST_LAST &&
4238 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4239 /* all data read */
4240 ata_altstatus(ap);
4241 status = ata_wait_idle(ap);
4242 goto fsm_start;
4243 }
4244 }
4245
4246 ata_altstatus(ap); /* flush */
4247 poll_next = 1;
4248 break;
4249
4250 case HSM_ST_LAST:
4251 if (unlikely(!ata_ok(status))) {
4252 qc->err_mask |= __ac_err_mask(status);
4253 ap->hsm_task_state = HSM_ST_ERR;
4254 goto fsm_start;
4255 }
4256
4257 /* no more data to transfer */
4258 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4259 ap->id, qc->dev->devno, status);
4260
4261 WARN_ON(qc->err_mask);
4262
4263 ap->hsm_task_state = HSM_ST_IDLE;
4264
4265 /* complete taskfile transaction */
4266 ata_hsm_qc_complete(qc, in_wq);
4267
4268 poll_next = 0;
4269 break;
4270
4271 case HSM_ST_ERR:
4272 /* make sure qc->err_mask is available to
4273 * know what's wrong and recover
4274 */
4275 WARN_ON(qc->err_mask == 0);
4276
4277 ap->hsm_task_state = HSM_ST_IDLE;
4278
4279 /* complete taskfile transaction */
4280 ata_hsm_qc_complete(qc, in_wq);
4281
4282 poll_next = 0;
4283 break;
4284 default:
4285 poll_next = 0;
4286 BUG();
4287 }
4288
4289 return poll_next;
4290}
4291
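/*
 * Editor's sketch (not part of the original source): ata_hsm_move()
 * is driven from two contexts.  Polled commands loop in
 * ata_pio_task() below until poll_next is 0, while interrupt-driven
 * commands are advanced one step per IRQ, roughly as
 * ata_host_intr() does further down:
 *
 *	status = ata_chk_status(ap);	// reading status clears INTRQ
 *	ata_hsm_move(ap, qc, status, 0);	// in_wq == 0
 */
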
4292static void ata_pio_task(void *_data)
4293{
4294 struct ata_queued_cmd *qc = _data;
4295 struct ata_port *ap = qc->ap;
4296 u8 status;
4297 int poll_next;
4298
4299fsm_start:
4300 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4301
4302 /*
4303 * This is purely heuristic. This is a fast path.
4304 * Sometimes when we enter, BSY will be cleared in
4305 * a chk-status or two. If not, the drive is probably seeking
4306 * or something. Snooze for a couple msecs, then
4307 * chk-status again. If still busy, queue delayed work.
4308 */
4309 status = ata_busy_wait(ap, ATA_BUSY, 5);
4310 if (status & ATA_BUSY) {
4311 msleep(2);
4312 status = ata_busy_wait(ap, ATA_BUSY, 10);
4313 if (status & ATA_BUSY) {
4314 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4315 return;
4316 }
4317 }
4318
4319 /* move the HSM */
4320 poll_next = ata_hsm_move(ap, qc, status, 1);
4321
4322 /* another command or interrupt handler
4323 * may be running at this point.
4324 */
4325 if (poll_next)
4326 goto fsm_start;
4327}
4328
4329/**
4330 * ata_qc_new - Request an available ATA command, for queueing
4331 * @ap: Port to allocate the new command for
4333 *
4334 * LOCKING:
4335 * None.
4336 */
4337
4338static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4339{
4340 struct ata_queued_cmd *qc = NULL;
4341 unsigned int i;
4342
4343 /* no command while frozen */
4344 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4345 return NULL;
4346
4347 /* the last tag is reserved for internal command. */
4348 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4349 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4350 qc = __ata_qc_from_tag(ap, i);
4351 break;
4352 }
4353
4354 if (qc)
4355 qc->tag = i;
4356
4357 return qc;
4358}
4359
4360/**
4361 * ata_qc_new_init - Request an available ATA command, and initialize it
4362 * @dev: Device from whom we request an available command structure
4363 *
4364 * LOCKING:
4365 * None.
4366 */
4367
4368struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4369{
4370 struct ata_port *ap = dev->ap;
4371 struct ata_queued_cmd *qc;
4372
4373 qc = ata_qc_new(ap);
4374 if (qc) {
4375 qc->scsicmd = NULL;
4376 qc->ap = ap;
4377 qc->dev = dev;
4378
4379 ata_qc_reinit(qc);
4380 }
4381
4382 return qc;
4383}
4384
4385/**
4386 * ata_qc_free - free unused ata_queued_cmd
4387 * @qc: Command to complete
4388 *
4389 * Designed to free unused ata_queued_cmd object
4390 * in case something prevents using it.
4391 *
4392 * LOCKING:
4393 * spin_lock_irqsave(host lock)
4394 */
4395void ata_qc_free(struct ata_queued_cmd *qc)
4396{
4397 struct ata_port *ap = qc->ap;
4398 unsigned int tag;
4399
4400 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4401
4402 qc->flags = 0;
4403 tag = qc->tag;
4404 if (likely(ata_tag_valid(tag))) {
4405 qc->tag = ATA_TAG_POISON;
4406 clear_bit(tag, &ap->qc_allocated);
4407 }
4408}
4409
4410void __ata_qc_complete(struct ata_queued_cmd *qc)
4411{
4412 struct ata_port *ap = qc->ap;
4413
4414 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4415 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4416
4417 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4418 ata_sg_clean(qc);
4419
4420 /* command should be marked inactive atomically with qc completion */
4421 if (qc->tf.protocol == ATA_PROT_NCQ)
4422 ap->sactive &= ~(1 << qc->tag);
4423 else
4424 ap->active_tag = ATA_TAG_POISON;
4425
4426 /* atapi: mark qc as inactive to prevent the interrupt handler
4427 * from completing the command twice later, before the error handler
4428 * is called. (when rc != 0 and atapi request sense is needed)
4429 */
4430 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4431 ap->qc_active &= ~(1 << qc->tag);
4432
4433 /* call completion callback */
4434 qc->complete_fn(qc);
4435}
4436
4437/**
4438 * ata_qc_complete - Complete an active ATA command
4439 * @qc: Command to complete
4441 *
4442 * Indicate to the mid and upper layers that an ATA
4443 * command has completed, with either an ok or not-ok status.
4444 *
4445 * LOCKING:
4446 * spin_lock_irqsave(host lock)
4447 */
4448void ata_qc_complete(struct ata_queued_cmd *qc)
4449{
4450 struct ata_port *ap = qc->ap;
4451
4452 /* XXX: New EH and old EH use different mechanisms to
4453 * synchronize EH with regular execution path.
4454 *
4455 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4456 * Normal execution path is responsible for not accessing a
4457 * failed qc. libata core enforces the rule by returning NULL
4458 * from ata_qc_from_tag() for failed qcs.
4459 *
4460 * Old EH depends on ata_qc_complete() nullifying completion
4461 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4462 * not synchronize with interrupt handler. Only PIO task is
4463 * taken care of.
4464 */
4465 if (ap->ops->error_handler) {
4466 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4467
4468 if (unlikely(qc->err_mask))
4469 qc->flags |= ATA_QCFLAG_FAILED;
4470
4471 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4472 if (!ata_tag_internal(qc->tag)) {
4473 /* always fill result TF for failed qc */
4474 ap->ops->tf_read(ap, &qc->result_tf);
4475 ata_qc_schedule_eh(qc);
4476 return;
4477 }
4478 }
4479
4480 /* read result TF if requested */
4481 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4482 ap->ops->tf_read(ap, &qc->result_tf);
4483
4484 __ata_qc_complete(qc);
4485 } else {
4486 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4487 return;
4488
4489 /* read result TF if failed or requested */
4490 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4491 ap->ops->tf_read(ap, &qc->result_tf);
4492
4493 __ata_qc_complete(qc);
4494 }
4495}
4496
4497/**
4498 * ata_qc_complete_multiple - Complete multiple qcs successfully
4499 * @ap: port in question
4500 * @qc_active: new qc_active mask
4501 * @finish_qc: LLDD callback invoked before completing a qc
4502 *
4503 * Complete in-flight commands. This function is meant to be
4504 * called from the low-level driver's interrupt routine to complete
4505 * requests normally. ap->qc_active and @qc_active are compared
4506 * and commands are completed accordingly.
4507 *
4508 * LOCKING:
4509 * spin_lock_irqsave(host lock)
4510 *
4511 * RETURNS:
4512 * Number of completed commands on success, -errno otherwise.
4513 */
4514int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4515 void (*finish_qc)(struct ata_queued_cmd *))
4516{
4517 int nr_done = 0;
4518 u32 done_mask;
4519 int i;
4520
4521 done_mask = ap->qc_active ^ qc_active;
4522
4523 if (unlikely(done_mask & qc_active)) {
4524 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4525 "(%08x->%08x)\n", ap->qc_active, qc_active);
4526 return -EINVAL;
4527 }
4528
4529 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4530 struct ata_queued_cmd *qc;
4531
4532 if (!(done_mask & (1 << i)))
4533 continue;
4534
4535 if ((qc = ata_qc_from_tag(ap, i))) {
4536 if (finish_qc)
4537 finish_qc(qc);
4538 ata_qc_complete(qc);
4539 nr_done++;
4540 }
4541 }
4542
4543 return nr_done;
4544}
4545
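/*
 * Editor's note (worked example, not part of the original source):
 * with ap->qc_active == 0x0b (tags 0, 1 and 3 in flight) and a
 * controller-reported @qc_active of 0x08 (only tag 3 still active),
 * done_mask = 0x0b ^ 0x08 = 0x03, so tags 0 and 1 get completed.
 * A bit set in both done_mask and @qc_active would mean a tag went
 * from inactive to active, which is the illegal transition rejected
 * above.
 */
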
4546static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4547{
4548 struct ata_port *ap = qc->ap;
4549
4550 switch (qc->tf.protocol) {
4551 case ATA_PROT_NCQ:
4552 case ATA_PROT_DMA:
4553 case ATA_PROT_ATAPI_DMA:
4554 return 1;
4555
4556 case ATA_PROT_ATAPI:
4557 case ATA_PROT_PIO:
4558 if (ap->flags & ATA_FLAG_PIO_DMA)
4559 return 1;
4560
4561 /* fall through */
4562
4563 default:
4564 return 0;
4565 }
4566
4567 /* never reached */
4568}
4569
4570/**
4571 * ata_qc_issue - issue taskfile to device
4572 * @qc: command to issue to device
4573 *
4574 * Prepare an ATA command for submission to a device.
4575 * This includes mapping the data into a DMA-able
4576 * area, filling in the S/G table, and finally
4577 * writing the taskfile to hardware, starting the command.
4578 *
4579 * LOCKING:
4580 * spin_lock_irqsave(host lock)
4581 */
4582void ata_qc_issue(struct ata_queued_cmd *qc)
4583{
4584 struct ata_port *ap = qc->ap;
4585
4586 /* Make sure only one non-NCQ command is outstanding. The
4587 * check is skipped for old EH because it reuses active qc to
4588 * request ATAPI sense.
4589 */
4590 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4591
4592 if (qc->tf.protocol == ATA_PROT_NCQ) {
4593 WARN_ON(ap->sactive & (1 << qc->tag));
4594 ap->sactive |= 1 << qc->tag;
4595 } else {
4596 WARN_ON(ap->sactive);
4597 ap->active_tag = qc->tag;
4598 }
4599
4600 qc->flags |= ATA_QCFLAG_ACTIVE;
4601 ap->qc_active |= 1 << qc->tag;
4602
4603 if (ata_should_dma_map(qc)) {
4604 if (qc->flags & ATA_QCFLAG_SG) {
4605 if (ata_sg_setup(qc))
4606 goto sg_err;
4607 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4608 if (ata_sg_setup_one(qc))
4609 goto sg_err;
4610 }
4611 } else {
4612 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4613 }
4614
4615 ap->ops->qc_prep(qc);
4616
4617 qc->err_mask |= ap->ops->qc_issue(qc);
4618 if (unlikely(qc->err_mask))
4619 goto err;
4620 return;
4621
4622sg_err:
4623 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4624 qc->err_mask |= AC_ERR_SYSTEM;
4625err:
4626 ata_qc_complete(qc);
4627}
4628
4629/**
4630 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4631 * @qc: command to issue to device
4632 *
4633 * Using various libata functions and hooks, this function
4634 * starts an ATA command. ATA commands are grouped into
4635 * classes called "protocols", and issuing each type of protocol
4636 * is slightly different.
4637 *
4638 * May be used as the qc_issue() entry in ata_port_operations.
4639 *
4640 * LOCKING:
4641 * spin_lock_irqsave(host lock)
4642 *
4643 * RETURNS:
4644 * Zero on success, AC_ERR_* mask on failure
4645 */
4646
4647unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4648{
4649 struct ata_port *ap = qc->ap;
4650
4651 /* Use polling PIO if the LLD doesn't handle
4652 * interrupt-driven PIO and ATAPI CDB interrupts.
4653 */
4654 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4655 switch (qc->tf.protocol) {
4656 case ATA_PROT_PIO:
4657 case ATA_PROT_ATAPI:
4658 case ATA_PROT_ATAPI_NODATA:
4659 qc->tf.flags |= ATA_TFLAG_POLLING;
4660 break;
4661 case ATA_PROT_ATAPI_DMA:
4662 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4663 /* see ata_dma_blacklisted() */
4664 BUG();
4665 break;
4666 default:
4667 break;
4668 }
4669 }
4670
4671 /* select the device */
4672 ata_dev_select(ap, qc->dev->devno, 1, 0);
4673
4674 /* start the command */
4675 switch (qc->tf.protocol) {
4676 case ATA_PROT_NODATA:
4677 if (qc->tf.flags & ATA_TFLAG_POLLING)
4678 ata_qc_set_polling(qc);
4679
4680 ata_tf_to_host(ap, &qc->tf);
4681 ap->hsm_task_state = HSM_ST_LAST;
4682
4683 if (qc->tf.flags & ATA_TFLAG_POLLING)
4684 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4685
4686 break;
4687
4688 case ATA_PROT_DMA:
4689 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4690
4691 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4692 ap->ops->bmdma_setup(qc); /* set up bmdma */
4693 ap->ops->bmdma_start(qc); /* initiate bmdma */
4694 ap->hsm_task_state = HSM_ST_LAST;
4695 break;
4696
4697 case ATA_PROT_PIO:
4698 if (qc->tf.flags & ATA_TFLAG_POLLING)
4699 ata_qc_set_polling(qc);
4700
4701 ata_tf_to_host(ap, &qc->tf);
4702
4703 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4704 /* PIO data out protocol */
4705 ap->hsm_task_state = HSM_ST_FIRST;
4706 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4707
4708 /* always send first data block using
4709 * the ata_pio_task() codepath.
4710 */
4711 } else {
4712 /* PIO data in protocol */
4713 ap->hsm_task_state = HSM_ST;
4714
4715 if (qc->tf.flags & ATA_TFLAG_POLLING)
4716 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4717
4718 /* if polling, ata_pio_task() handles the rest.
4719 * otherwise, interrupt handler takes over from here.
4720 */
4721 }
4722
4723 break;
4724
4725 case ATA_PROT_ATAPI:
4726 case ATA_PROT_ATAPI_NODATA:
4727 if (qc->tf.flags & ATA_TFLAG_POLLING)
4728 ata_qc_set_polling(qc);
4729
4730 ata_tf_to_host(ap, &qc->tf);
4731
4732 ap->hsm_task_state = HSM_ST_FIRST;
4733
4734 /* send cdb by polling if no cdb interrupt */
4735 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4736 (qc->tf.flags & ATA_TFLAG_POLLING))
4737 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4738 break;
4739
4740 case ATA_PROT_ATAPI_DMA:
4741 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4742
4743 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4744 ap->ops->bmdma_setup(qc); /* set up bmdma */
4745 ap->hsm_task_state = HSM_ST_FIRST;
4746
4747 /* send cdb by polling if no cdb interrupt */
4748 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4749 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4750 break;
4751
4752 default:
4753 WARN_ON(1);
4754 return AC_ERR_SYSTEM;
4755 }
4756
4757 return 0;
4758}
4759
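/*
 * Editor's summary (not part of the original source): the initial
 * HSM state chosen by ata_qc_issue_prot() above, per protocol:
 *
 *	NODATA, DMA			-> HSM_ST_LAST
 *	PIO write, ATAPI, ATAPI DMA	-> HSM_ST_FIRST
 *	PIO read			-> HSM_ST
 *
 * Commands queued to ata_pio_task() advance by polling; all others
 * advance from the interrupt handler via ata_host_intr() below.
 */
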
4760/**
4761 * ata_host_intr - Handle host interrupt for given (port, task)
4762 * @ap: Port on which interrupt arrived (possibly...)
4763 * @qc: Taskfile currently active in engine
4764 *
4765 * Handle host interrupt for given queued command. DMA completion
4766 * and interrupt-driven PIO/ATAPI are handled here; commands issued
4767 * with polling (nIEN set) never reach this handler.
4768 *
4769 * LOCKING:
4770 * spin_lock_irqsave(host lock)
4771 *
4772 * RETURNS:
4773 * One if interrupt was handled, zero if not (shared irq).
4774 */
4775
4776inline unsigned int ata_host_intr (struct ata_port *ap,
4777 struct ata_queued_cmd *qc)
4778{
4779 u8 status, host_stat = 0;
4780
4781 VPRINTK("ata%u: protocol %d task_state %d\n",
4782 ap->id, qc->tf.protocol, ap->hsm_task_state);
4783
4784 /* Check whether we are expecting interrupt in this state */
4785 switch (ap->hsm_task_state) {
4786 case HSM_ST_FIRST:
4787 /* Some pre-ATAPI-4 devices assert INTRQ
4788 * in this state when ready to receive the CDB.
4789 */
4790
4791 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4792 * The flag is set only for ATAPI devices, so there is
4793 * no need to check is_atapi_taskfile(&qc->tf) again.
4794 */
4795 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4796 goto idle_irq;
4797 break;
4798 case HSM_ST_LAST:
4799 if (qc->tf.protocol == ATA_PROT_DMA ||
4800 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4801 /* check status of DMA engine */
4802 host_stat = ap->ops->bmdma_status(ap);
4803 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4804
4805 /* if it's not our irq... */
4806 if (!(host_stat & ATA_DMA_INTR))
4807 goto idle_irq;
4808
4809 /* before we do anything else, clear DMA-Start bit */
4810 ap->ops->bmdma_stop(qc);
4811
4812 if (unlikely(host_stat & ATA_DMA_ERR)) {
4813 /* error when transferring data to/from memory */
4814 qc->err_mask |= AC_ERR_HOST_BUS;
4815 ap->hsm_task_state = HSM_ST_ERR;
4816 }
4817 }
4818 break;
4819 case HSM_ST:
4820 break;
4821 default:
4822 goto idle_irq;
4823 }
4824
4825 /* check altstatus */
4826 status = ata_altstatus(ap);
4827 if (status & ATA_BUSY)
4828 goto idle_irq;
4829
4830 /* check main status, clearing INTRQ */
4831 status = ata_chk_status(ap);
4832 if (unlikely(status & ATA_BUSY))
4833 goto idle_irq;
4834
4835 /* ack bmdma irq events */
4836 ap->ops->irq_clear(ap);
4837
4838 ata_hsm_move(ap, qc, status, 0);
4839 return 1; /* irq handled */
4840
4841idle_irq:
4842 ap->stats.idle_irq++;
4843
4844#ifdef ATA_IRQ_TRAP
4845 if ((ap->stats.idle_irq % 1000) == 0) {
4846 ata_irq_ack(ap, 0); /* debug trap */
4847 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4848 return 1;
4849 }
4850#endif
4851 return 0; /* irq not handled */
4852}
4853
4854/**
4855 * ata_interrupt - Default ATA host interrupt handler
4856 * @irq: irq line (unused)
4857 * @dev_instance: pointer to our ata_host information structure
4858 * @regs: unused
4859 *
4860 * Default interrupt handler for PCI IDE devices. Calls
4861 * ata_host_intr() for each port that is not disabled.
4862 *
4863 * LOCKING:
4864 * Obtains host lock during operation.
4865 *
4866 * RETURNS:
4867 * IRQ_NONE or IRQ_HANDLED.
4868 */
4869
4870irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4871{
4872 struct ata_host *host = dev_instance;
4873 unsigned int i;
4874 unsigned int handled = 0;
4875 unsigned long flags;
4876
4877 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4878 spin_lock_irqsave(&host->lock, flags);
4879
4880 for (i = 0; i < host->n_ports; i++) {
4881 struct ata_port *ap;
4882
4883 ap = host->ports[i];
4884 if (ap &&
4885 !(ap->flags & ATA_FLAG_DISABLED)) {
4886 struct ata_queued_cmd *qc;
4887
4888 qc = ata_qc_from_tag(ap, ap->active_tag);
4889 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4890 (qc->flags & ATA_QCFLAG_ACTIVE))
4891 handled |= ata_host_intr(ap, qc);
4892 }
4893 }
4894
4895 spin_unlock_irqrestore(&host->lock, flags);
4896
4897 return IRQ_RETVAL(handled);
4898}
4899
4900/**
4901 * sata_scr_valid - test whether SCRs are accessible
4902 * @ap: ATA port to test SCR accessibility for
4903 *
4904 * Test whether SCRs are accessible for @ap.
4905 *
4906 * LOCKING:
4907 * None.
4908 *
4909 * RETURNS:
4910 * 1 if SCRs are accessible, 0 otherwise.
4911 */
4912int sata_scr_valid(struct ata_port *ap)
4913{
4914 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4915}
4916
4917/**
4918 * sata_scr_read - read SCR register of the specified port
4919 * @ap: ATA port to read SCR for
4920 * @reg: SCR to read
4921 * @val: Place to store read value
4922 *
4923 * Read SCR register @reg of @ap into *@val. This function is
4924 * guaranteed to succeed if the cable type of the port is SATA
4925 * and the port implements ->scr_read.
4926 *
4927 * LOCKING:
4928 * None.
4929 *
4930 * RETURNS:
4931 * 0 on success, negative errno on failure.
4932 */
4933int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4934{
4935 if (sata_scr_valid(ap)) {
4936 *val = ap->ops->scr_read(ap, reg);
4937 return 0;
4938 }
4939 return -EOPNOTSUPP;
4940}
4941
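/*
 * Editor's sketch (not part of the original source): the typical
 * use is reading SStatus and decoding its DET field (bits 3:0),
 * exactly as ata_port_online() does below:
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		;	// PHY ready and device present
 */
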
4942/**
4943 * sata_scr_write - write SCR register of the specified port
4944 * @ap: ATA port to write SCR for
4945 * @reg: SCR to write
4946 * @val: value to write
4947 *
4948 * Write @val to SCR register @reg of @ap. This function is
4949 * guaranteed to succeed if the cable type of the port is SATA
4950 * and the port implements ->scr_read.
4951 *
4952 * LOCKING:
4953 * None.
4954 *
4955 * RETURNS:
4956 * 0 on success, negative errno on failure.
4957 */
4958int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4959{
4960 if (sata_scr_valid(ap)) {
4961 ap->ops->scr_write(ap, reg, val);
4962 return 0;
4963 }
4964 return -EOPNOTSUPP;
4965}
4966
4967/**
4968 * sata_scr_write_flush - write SCR register of the specified port and flush
4969 * @ap: ATA port to write SCR for
4970 * @reg: SCR to write
4971 * @val: value to write
4972 *
4973 * This function is identical to sata_scr_write() except that this
4974 * function performs a flush (read-back) after writing to the register.
4975 *
4976 * LOCKING:
4977 * None.
4978 *
4979 * RETURNS:
4980 * 0 on success, negative errno on failure.
4981 */
4982int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4983{
4984 if (sata_scr_valid(ap)) {
4985 ap->ops->scr_write(ap, reg, val);
4986 ap->ops->scr_read(ap, reg);
4987 return 0;
4988 }
4989 return -EOPNOTSUPP;
4990}
4991
4992/**
4993 * ata_port_online - test whether the given port is online
4994 * @ap: ATA port to test
4995 *
4996 * Test whether @ap is online. Note that this function returns 0
4997 * if online status of @ap cannot be obtained, so
4998 * ata_port_online(ap) != !ata_port_offline(ap).
4999 *
5000 * LOCKING:
5001 * None.
5002 *
5003 * RETURNS:
5004 * 1 if the port online status is available and online.
5005 */
5006int ata_port_online(struct ata_port *ap)
5007{
5008 u32 sstatus;
5009
5010 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5011 return 1;
5012 return 0;
5013}
5014
5015/**
5016 * ata_port_offline - test whether the given port is offline
5017 * @ap: ATA port to test
5018 *
5019 * Test whether @ap is offline. Note that this function returns
5020 * 0 if offline status of @ap cannot be obtained, so
5021 * ata_port_online(ap) != !ata_port_offline(ap).
5022 *
5023 * LOCKING:
5024 * None.
5025 *
5026 * RETURNS:
5027 * 1 if the offline status is available and the port is offline.
5028 */
5029int ata_port_offline(struct ata_port *ap)
5030{
5031 u32 sstatus;
5032
5033 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5034 return 1;
5035 return 0;
5036}
5037
5038int ata_flush_cache(struct ata_device *dev)
5039{
5040 unsigned int err_mask;
5041 u8 cmd;
5042
5043 if (!ata_try_flush_cache(dev))
5044 return 0;
5045
5046 if (ata_id_has_flush_ext(dev->id))
5047 cmd = ATA_CMD_FLUSH_EXT;
5048 else
5049 cmd = ATA_CMD_FLUSH;
5050
5051 err_mask = ata_do_simple_cmd(dev, cmd);
5052 if (err_mask) {
5053 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5054 return -EIO;
5055 }
5056
5057 return 0;
5058}
5059
5060static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5061 unsigned int action, unsigned int ehi_flags,
5062 int wait)
5063{
5064 unsigned long flags;
5065 int i, rc;
5066
5067 for (i = 0; i < host->n_ports; i++) {
5068 struct ata_port *ap = host->ports[i];
5069
5070 /* Previous resume operation might still be in
5071 * progress. Wait for PM_PENDING to clear.
5072 */
5073 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5074 ata_port_wait_eh(ap);
5075 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5076 }
5077
5078 /* request PM ops to EH */
5079 spin_lock_irqsave(ap->lock, flags);
5080
5081 ap->pm_mesg = mesg;
5082 if (wait) {
5083 rc = 0;
5084 ap->pm_result = &rc;
5085 }
5086
5087 ap->pflags |= ATA_PFLAG_PM_PENDING;
5088 ap->eh_info.action |= action;
5089 ap->eh_info.flags |= ehi_flags;
5090
5091 ata_port_schedule_eh(ap);
5092
5093 spin_unlock_irqrestore(ap->lock, flags);
5094
5095 /* wait and check result */
5096 if (wait) {
5097 ata_port_wait_eh(ap);
5098 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5099 if (rc)
5100 return rc;
5101 }
5102 }
5103
5104 return 0;
5105}
5106
5107/**
5108 * ata_host_suspend - suspend host
5109 * @host: host to suspend
5110 * @mesg: PM message
5111 *
5112 * Suspend @host. Actual operation is performed by EH. This
5113 * function requests EH to perform PM operations and waits for EH
5114 * to finish.
5115 *
5116 * LOCKING:
5117 * Kernel thread context (may sleep).
5118 *
5119 * RETURNS:
5120 * 0 on success, -errno on failure.
5121 */
5122int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5123{
5124 int i, j, rc;
5125
5126 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5127 if (rc)
5128 goto fail;
5129
5130 /* EH is quiescent now. Fail if we have any ready device.
5131 * This happens if hotplug occurs between completion of device
5132 * suspension and here.
5133 */
5134 for (i = 0; i < host->n_ports; i++) {
5135 struct ata_port *ap = host->ports[i];
5136
5137 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5138 struct ata_device *dev = &ap->device[j];
5139
5140 if (ata_dev_ready(dev)) {
5141 ata_port_printk(ap, KERN_WARNING,
5142 "suspend failed, device %d "
5143 "still active\n", dev->devno);
5144 rc = -EBUSY;
5145 goto fail;
5146 }
5147 }
5148 }
5149
5150 host->dev->power.power_state = mesg;
5151 return 0;
5152
5153 fail:
5154 ata_host_resume(host);
5155 return rc;
5156}
5157
5158/**
5159 * ata_host_resume - resume host
5160 * @host: host to resume
5161 *
5162 * Resume @host. Actual operation is performed by EH. This
5163 * function requests EH to perform PM operations and returns.
5164 * Note that all resume operations are performed in parallel.
5165 *
5166 * LOCKING:
5167 * Kernel thread context (may sleep).
5168 */
5169void ata_host_resume(struct ata_host *host)
5170{
5171 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5172 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5173 host->dev->power.power_state = PMSG_ON;
5174}
5175
5176/**
5177 * ata_port_start - Set port up for dma.
5178 * @ap: Port to initialize
5179 *
5180 * Called just after data structures for each port are
5181 * initialized. Allocates space for PRD table.
5182 *
5183 * May be used as the port_start() entry in ata_port_operations.
5184 *
5185 * LOCKING:
5186 * Inherited from caller.
5187 */
5188
5189int ata_port_start (struct ata_port *ap)
5190{
5191 struct device *dev = ap->dev;
5192 int rc;
5193
5194 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5195 if (!ap->prd)
5196 return -ENOMEM;
5197
5198 rc = ata_pad_alloc(ap, dev);
5199 if (rc) {
5200 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5201 return rc;
5202 }
5203
5204 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5205
5206 return 0;
5207}
5208
5209
5210/**
5211 * ata_port_stop - Undo ata_port_start()
5212 * @ap: Port to shut down
5213 *
5214 * Frees the PRD table.
5215 *
5216 * May be used as the port_stop() entry in ata_port_operations.
5217 *
5218 * LOCKING:
5219 * Inherited from caller.
5220 */
5221
5222void ata_port_stop (struct ata_port *ap)
5223{
5224 struct device *dev = ap->dev;
5225
5226 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5227 ata_pad_free(ap, dev);
5228}
5229
5230void ata_host_stop (struct ata_host *host)
5231{
5232 if (host->mmio_base)
5233 iounmap(host->mmio_base);
5234}
5235
5236/**
5237 * ata_dev_init - Initialize an ata_device structure
5238 * @dev: Device structure to initialize
5239 *
5240 * Initialize @dev in preparation for probing.
5241 *
5242 * LOCKING:
5243 * Inherited from caller.
5244 */
5245void ata_dev_init(struct ata_device *dev)
5246{
5247 struct ata_port *ap = dev->ap;
5248 unsigned long flags;
5249
5250 /* SATA spd limit is bound to the first device */
5251 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5252
5253 /* High bits of dev->flags are used to record warm plug
5254 * requests which occur asynchronously. Synchronize using
5255 * host lock.
5256 */
5257 spin_lock_irqsave(ap->lock, flags);
5258 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5259 spin_unlock_irqrestore(ap->lock, flags);
5260
5261 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5262 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5263 dev->pio_mask = UINT_MAX;
5264 dev->mwdma_mask = UINT_MAX;
5265 dev->udma_mask = UINT_MAX;
5266}
5267
5268/**
5269 * ata_port_init - Initialize an ata_port structure
5270 * @ap: Structure to initialize
5271 * @host: ata_host to which @ap belongs
5272 * @ent: Probe information provided by low-level driver
5273 * @port_no: Port number associated with this ata_port
5274 *
5275 * Initialize a new ata_port structure.
5276 *
5277 * LOCKING:
5278 * Inherited from caller.
5279 */
5280void ata_port_init(struct ata_port *ap, struct ata_host *host,
5281 const struct ata_probe_ent *ent, unsigned int port_no)
5282{
5283 unsigned int i;
5284
5285 ap->lock = &host->lock;
5286 ap->flags = ATA_FLAG_DISABLED;
5287 ap->id = ata_unique_id++;
5288 ap->ctl = ATA_DEVCTL_OBS;
5289 ap->host = host;
5290 ap->dev = ent->dev;
5291 ap->port_no = port_no;
5292 if (port_no == 1 && ent->pinfo2) {
5293 ap->pio_mask = ent->pinfo2->pio_mask;
5294 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5295 ap->udma_mask = ent->pinfo2->udma_mask;
5296 ap->flags |= ent->pinfo2->flags;
5297 ap->ops = ent->pinfo2->port_ops;
5298 } else {
5299 ap->pio_mask = ent->pio_mask;
5300 ap->mwdma_mask = ent->mwdma_mask;
5301 ap->udma_mask = ent->udma_mask;
5302 ap->flags |= ent->port_flags;
5303 ap->ops = ent->port_ops;
5304 }
5305 ap->hw_sata_spd_limit = UINT_MAX;
5306 ap->active_tag = ATA_TAG_POISON;
5307 ap->last_ctl = 0xFF;
5308
5309#if defined(ATA_VERBOSE_DEBUG)
5310 /* turn on all debugging levels */
5311 ap->msg_enable = 0x00FF;
5312#elif defined(ATA_DEBUG)
5313 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5314#else
5315 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5316#endif
5317
5318 INIT_WORK(&ap->port_task, NULL, NULL);
5319 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5320 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5321 INIT_LIST_HEAD(&ap->eh_done_q);
5322 init_waitqueue_head(&ap->eh_wait_q);
5323
5324 /* set cable type */
5325 ap->cbl = ATA_CBL_NONE;
5326 if (ap->flags & ATA_FLAG_SATA)
5327 ap->cbl = ATA_CBL_SATA;
5328
5329 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5330 struct ata_device *dev = &ap->device[i];
5331 dev->ap = ap;
5332 dev->devno = i;
5333 ata_dev_init(dev);
5334 }
5335
5336#ifdef ATA_IRQ_TRAP
5337 ap->stats.unhandled_irq = 1;
5338 ap->stats.idle_irq = 1;
5339#endif
5340
5341 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5342}
5343
5344/**
5345 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5346 * @ap: ATA port to initialize SCSI host for
5347 * @shost: SCSI host associated with @ap
5348 *
5349 * Initialize SCSI host @shost associated with ATA port @ap.
5350 *
5351 * LOCKING:
5352 * Inherited from caller.
5353 */
5354static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5355{
5356 ap->scsi_host = shost;
5357
5358 shost->unique_id = ap->id;
5359 shost->max_id = 16;
5360 shost->max_lun = 1;
5361 shost->max_channel = 1;
5362 shost->max_cmd_len = 12;
5363}
5364
5365/**
5366 * ata_port_add - Attach low-level ATA driver to system
5367 * @ent: Information provided by low-level driver
5368 * @host: Collection of ports to which we add
5369 * @port_no: Port number associated with this host
5370 *
5371 * Attach low-level ATA driver to system.
5372 *
5373 * LOCKING:
5374 * PCI/etc. bus probe sem.
5375 *
5376 * RETURNS:
5377 * New ata_port on success, NULL on error.
5378 */
5379static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5380 struct ata_host *host,
5381 unsigned int port_no)
5382{
5383 struct Scsi_Host *shost;
5384 struct ata_port *ap;
5385
5386 DPRINTK("ENTER\n");
5387
5388 if (!ent->port_ops->error_handler &&
5389 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5390 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5391 port_no);
5392 return NULL;
5393 }
5394
5395 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5396 if (!shost)
5397 return NULL;
5398
5399 shost->transportt = &ata_scsi_transport_template;
5400
5401 ap = ata_shost_to_port(shost);
5402
5403 ata_port_init(ap, host, ent, port_no);
5404 ata_port_init_shost(ap, shost);
5405
5406 return ap;
5407}
5408
5409/**
5410 * ata_host_init - Initialize a host struct
5411 * @host: host to initialize
5412 * @dev: device host is attached to
5413 * @flags: host flags
5414 * @ops: port_ops
5415 *
5416 * LOCKING:
5417 * PCI/etc. bus probe sem.
5418 *
5419 */
5420
5421void ata_host_init(struct ata_host *host, struct device *dev,
5422 unsigned long flags, const struct ata_port_operations *ops)
5423{
5424 spin_lock_init(&host->lock);
5425 host->dev = dev;
5426 host->flags = flags;
5427 host->ops = ops;
5428}
5429
5430/**
5431 * ata_device_add - Register hardware device with ATA and SCSI layers
5432 * @ent: Probe information describing hardware device to be registered
5433 *
5434 * This function processes the information provided in the probe
5435 * information struct @ent, allocates the necessary ATA and SCSI
5436 * host information structures, initializes them, and registers
5437 * everything with requisite kernel subsystems.
5438 *
5439 * This function requests irqs, probes the ATA bus, and probes
5440 * the SCSI bus.
5441 *
5442 * LOCKING:
5443 * PCI/etc. bus probe sem.
5444 *
5445 * RETURNS:
5446 * Number of ports registered. Zero on error (no ports registered).
5447 */
5448int ata_device_add(const struct ata_probe_ent *ent)
5449{
5450 unsigned int i;
5451 struct device *dev = ent->dev;
5452 struct ata_host *host;
5453 int rc;
5454
5455 DPRINTK("ENTER\n");
5456 /* alloc a container for our list of ATA ports (buses) */
5457 host = kzalloc(sizeof(struct ata_host) +
5458 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5459 if (!host)
5460 return 0;
5461
5462 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5463 host->n_ports = ent->n_ports;
5464 host->irq = ent->irq;
5465 host->irq2 = ent->irq2;
5466 host->mmio_base = ent->mmio_base;
5467 host->private_data = ent->private_data;
5468
5469 /* register each port bound to this device */
5470 for (i = 0; i < host->n_ports; i++) {
5471 struct ata_port *ap;
5472 unsigned long xfer_mode_mask;
5473 int irq_line = ent->irq;
5474
5475 ap = ata_port_add(ent, host, i);
5476 if (!ap)
5477 goto err_out;
5478
5479 host->ports[i] = ap;
5480
5481 /* dummy? */
5482 if (ent->dummy_port_mask & (1 << i)) {
5483 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5484 ap->ops = &ata_dummy_port_ops;
5485 continue;
5486 }
5487
5488 /* start port */
5489 rc = ap->ops->port_start(ap);
5490 if (rc) {
5491 host->ports[i] = NULL;
5492 scsi_host_put(ap->scsi_host);
5493 goto err_out;
5494 }
5495
5496 /* Report the secondary IRQ for second channel legacy */
5497 if (i == 1 && ent->irq2)
5498 irq_line = ent->irq2;
5499
5500 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5501 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5502 (ap->pio_mask << ATA_SHIFT_PIO);
5503
5504 /* print per-port info to dmesg */
5505 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5506 "ctl 0x%lX bmdma 0x%lX irq %d\n",
5507 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5508 ata_mode_string(xfer_mode_mask),
5509 ap->ioaddr.cmd_addr,
5510 ap->ioaddr.ctl_addr,
5511 ap->ioaddr.bmdma_addr,
5512 irq_line);
5513
5514 ata_chk_status(ap);
5515 host->ops->irq_clear(ap);
5516 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5517 }
5518
5519 /* obtain IRQ, which may be shared between channels */
5520 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5521 DRV_NAME, host);
5522 if (rc) {
5523 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5524 ent->irq, rc);
5525 goto err_out;
5526 }
5527
5528 /* do we have a second IRQ for the other channel, e.g. legacy mode */
5529 if (ent->irq2) {
5530 /* We will get weird core code crashes later if this is true,
5531 so trap it now */
5532 BUG_ON(ent->irq == ent->irq2);
5533
5534 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
5535 DRV_NAME, host);
5536 if (rc) {
5537 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5538 ent->irq2, rc);
5539 goto err_out_free_irq;
5540 }
5541 }
5542
5543 /* perform each probe synchronously */
5544 DPRINTK("probe begin\n");
5545 for (i = 0; i < host->n_ports; i++) {
5546 struct ata_port *ap = host->ports[i];
5547 u32 scontrol;
5548 int rc;
5549
5550 /* init sata_spd_limit to the current value */
5551 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5552 int spd = (scontrol >> 4) & 0xf;
5553 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5554 }
5555 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5556
5557 rc = scsi_add_host(ap->scsi_host, dev);
5558 if (rc) {
5559 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5560 /* FIXME: do something useful here */
5561 /* FIXME: handle unconditional calls to
5562 * scsi_scan_host and ata_host_remove, below,
5563 * at the very least
5564 */
5565 }
5566
5567 if (ap->ops->error_handler) {
5568 struct ata_eh_info *ehi = &ap->eh_info;
5569 unsigned long flags;
5570
5571 ata_port_probe(ap);
5572
5573 /* kick EH for boot probing */
5574 spin_lock_irqsave(ap->lock, flags);
5575
5576 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5577 ehi->action |= ATA_EH_SOFTRESET;
5578 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5579
5580 ap->pflags |= ATA_PFLAG_LOADING;
5581 ata_port_schedule_eh(ap);
5582
5583 spin_unlock_irqrestore(ap->lock, flags);
5584
5585 /* wait for EH to finish */
5586 ata_port_wait_eh(ap);
5587 } else {
5588 DPRINTK("ata%u: bus probe begin\n", ap->id);
5589 rc = ata_bus_probe(ap);
5590 DPRINTK("ata%u: bus probe end\n", ap->id);
5591
5592 if (rc) {
5593 /* FIXME: do something useful here?
5594 * Current libata behavior will
5595 * tear down everything when
5596 * the module is removed
5597 * or the h/w is unplugged.
5598 */
5599 }
5600 }
5601 }
5602
5603 /* probes are done, now scan each port's disk(s) */
5604 DPRINTK("host probe begin\n");
5605 for (i = 0; i < host->n_ports; i++) {
5606 struct ata_port *ap = host->ports[i];
5607
5608 ata_scsi_scan_host(ap);
5609 }
5610
5611 dev_set_drvdata(dev, host);
5612
5613 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5614 return ent->n_ports; /* success */
5615
5616err_out_free_irq:
5617 free_irq(ent->irq, host);
5618err_out:
5619 for (i = 0; i < host->n_ports; i++) {
5620 struct ata_port *ap = host->ports[i];
5621 if (ap) {
5622 ap->ops->port_stop(ap);
5623 scsi_host_put(ap->scsi_host);
5624 }
5625 }
5626
5627 kfree(host);
5628 VPRINTK("EXIT, returning 0\n");
5629 return 0;
5630}
5631
5632/**
5633 * ata_port_detach - Detach ATA port in preparation for device removal
5634 * @ap: ATA port to be detached
5635 *
5636 * Detach all ATA devices and the associated SCSI devices of @ap;
5637 * then, remove the associated SCSI host. @ap is guaranteed to
5638 * be quiescent on return from this function.
5639 *
5640 * LOCKING:
5641 * Kernel thread context (may sleep).
5642 */
5643void ata_port_detach(struct ata_port *ap)
5644{
5645 unsigned long flags;
5646 int i;
5647
5648 if (!ap->ops->error_handler)
5649 goto skip_eh;
5650
5651 /* tell EH we're leaving & flush EH */
5652 spin_lock_irqsave(ap->lock, flags);
5653 ap->pflags |= ATA_PFLAG_UNLOADING;
5654 spin_unlock_irqrestore(ap->lock, flags);
5655
5656 ata_port_wait_eh(ap);
5657
5658 /* EH is now guaranteed to see UNLOADING, so no new device
5659 * will be attached. Disable all existing devices.
5660 */
5661 spin_lock_irqsave(ap->lock, flags);
5662
5663 for (i = 0; i < ATA_MAX_DEVICES; i++)
5664 ata_dev_disable(&ap->device[i]);
5665
5666 spin_unlock_irqrestore(ap->lock, flags);
5667
5668 /* Final freeze & EH. All in-flight commands are aborted. EH
5669 * will be skipped and retries will be terminated with bad
5670 * target.
5671 */
5672 spin_lock_irqsave(ap->lock, flags);
5673 ata_port_freeze(ap); /* won't be thawed */
5674 spin_unlock_irqrestore(ap->lock, flags);
5675
5676 ata_port_wait_eh(ap);
5677
5678 /* Flush hotplug task. The sequence is similar to
5679 * ata_port_flush_task().
5680 */
5681 flush_workqueue(ata_aux_wq);
5682 cancel_delayed_work(&ap->hotplug_task);
5683 flush_workqueue(ata_aux_wq);
5684
5685 skip_eh:
5686 /* remove the associated SCSI host */
5687 scsi_remove_host(ap->scsi_host);
5688}
5689
5690/**
5691 * ata_host_remove - PCI layer callback for device removal
5692 * @host: ATA host set that was removed
5693 *
5694 * Unregister all objects associated with this host set. Free those
5695 * objects.
5696 *
5697 * LOCKING:
5698 * Inherited from calling layer (may sleep).
5699 */
5700
5701void ata_host_remove(struct ata_host *host)
5702{
5703 unsigned int i;
5704
5705 for (i = 0; i < host->n_ports; i++)
5706 ata_port_detach(host->ports[i]);
5707
5708 free_irq(host->irq, host);
5709 if (host->irq2)
5710 free_irq(host->irq2, host);
5711
5712 for (i = 0; i < host->n_ports; i++) {
5713 struct ata_port *ap = host->ports[i];
5714
5715 ata_scsi_release(ap->scsi_host);
5716
5717 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5718 struct ata_ioports *ioaddr = &ap->ioaddr;
5719
5720 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5721 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5722 release_region(ATA_PRIMARY_CMD, 8);
5723 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5724 release_region(ATA_SECONDARY_CMD, 8);
5725 }
5726
5727 scsi_host_put(ap->scsi_host);
5728 }
5729
5730 if (host->ops->host_stop)
5731 host->ops->host_stop(host);
5732
5733 kfree(host);
5734}
5735
5736/**
5737 * ata_scsi_release - SCSI layer callback hook for host unload
5738 * @host: libata host to be unloaded
5739 *
5740 * Performs all duties necessary to shut down a libata port...
5741 * Kill port kthread, disable port, and release resources.
5742 *
5743 * LOCKING:
5744 * Inherited from SCSI layer.
5745 *
5746 * RETURNS:
5747 * One.
5748 */
5749
5750int ata_scsi_release(struct Scsi_Host *shost)
5751{
5752 struct ata_port *ap = ata_shost_to_port(shost);
5753
5754 DPRINTK("ENTER\n");
5755
5756 ap->ops->port_disable(ap);
5757 ap->ops->port_stop(ap);
5758
5759 DPRINTK("EXIT\n");
5760 return 1;
5761}
5762
5763struct ata_probe_ent *
5764ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5765{
5766 struct ata_probe_ent *probe_ent;
5767
5768 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5769 if (!probe_ent) {
5770 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5771 kobject_name(&(dev->kobj)));
5772 return NULL;
5773 }
5774
5775 INIT_LIST_HEAD(&probe_ent->node);
5776 probe_ent->dev = dev;
5777
5778 probe_ent->sht = port->sht;
5779 probe_ent->port_flags = port->flags;
5780 probe_ent->pio_mask = port->pio_mask;
5781 probe_ent->mwdma_mask = port->mwdma_mask;
5782 probe_ent->udma_mask = port->udma_mask;
5783 probe_ent->port_ops = port->port_ops;
5784
5785 return probe_ent;
5786}
5787
5788/**
5789 * ata_std_ports - initialize ioaddr with standard port offsets.
5790 * @ioaddr: IO address structure to be initialized
5791 *
5792 * Utility function which initializes data_addr, error_addr,
5793 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5794 * device_addr, status_addr, and command_addr to standard offsets
5795 * relative to cmd_addr.
5796 *
5797 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5798 */
5799
5800void ata_std_ports(struct ata_ioports *ioaddr)
5801{
5802 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5803 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5804 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5805 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5806 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5807 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5808 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5809 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5810 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5811 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5812}
5813
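/*
 * Editor's sketch (not part of the original source): a legacy-style
 * LLD usually fills in just cmd_addr and ctl_addr and lets
 * ata_std_ports() derive the rest.  The addresses below are the
 * classic primary-channel ports:
 *
 *	probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;	// 0x1f0
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */
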
5814
5815#ifdef CONFIG_PCI
5816
5817void ata_pci_host_stop (struct ata_host *host)
5818{
5819 struct pci_dev *pdev = to_pci_dev(host->dev);
5820
5821 pci_iounmap(pdev, host->mmio_base);
5822}
5823
5824/**
5825 * ata_pci_remove_one - PCI layer callback for device removal
5826 * @pdev: PCI device that was removed
5827 *
5828 * PCI layer indicates to libata via this hook that
5829 * hot-unplug or module unload event has occurred.
5830 * Handle this by unregistering all objects associated
5831 * with this PCI device. Free those objects. Then finally
5832 * release PCI resources and disable device.
5833 *
5834 * LOCKING:
5835 * Inherited from PCI layer (may sleep).
5836 */
5837
5838void ata_pci_remove_one (struct pci_dev *pdev)
5839{
5840 struct device *dev = pci_dev_to_dev(pdev);
5841 struct ata_host *host = dev_get_drvdata(dev);
5842
5843 ata_host_remove(host);
5844
5845 pci_release_regions(pdev);
5846 pci_disable_device(pdev);
5847 dev_set_drvdata(dev, NULL);
5848}
5849
5850/* move to PCI subsystem */
5851int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5852{
5853 unsigned long tmp = 0;
5854
5855 switch (bits->width) {
5856 case 1: {
5857 u8 tmp8 = 0;
5858 pci_read_config_byte(pdev, bits->reg, &tmp8);
5859 tmp = tmp8;
5860 break;
5861 }
5862 case 2: {
5863 u16 tmp16 = 0;
5864 pci_read_config_word(pdev, bits->reg, &tmp16);
5865 tmp = tmp16;
5866 break;
5867 }
5868 case 4: {
5869 u32 tmp32 = 0;
5870 pci_read_config_dword(pdev, bits->reg, &tmp32);
5871 tmp = tmp32;
5872 break;
5873 }
5874
5875 default:
5876 return -EINVAL;
5877 }
5878
5879 tmp &= bits->mask;
5880
5881 return (tmp == bits->val) ? 1 : 0;
5882}
5883
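/*
 * Editor's sketch (not part of the original source, patterned after
 * ata_piix): testing whether a channel's enable bit is set in PCI
 * config space.  The reg/width/mask/val values are examples only.
 *
 *	static const struct pci_bits piix_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	// reg, width, mask, val
 *
 *	if (!pci_test_config_bits(pdev, &piix_enable_bits))
 *		return -ENODEV;			// channel disabled
 */
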
5884void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5885{
5886 pci_save_state(pdev);
5887
5888 if (mesg.event == PM_EVENT_SUSPEND) {
5889 pci_disable_device(pdev);
5890 pci_set_power_state(pdev, PCI_D3hot);
5891 }
5892}
5893
5894void ata_pci_device_do_resume(struct pci_dev *pdev)
5895{
5896 pci_set_power_state(pdev, PCI_D0);
5897 pci_restore_state(pdev);
5898 pci_enable_device(pdev);
5899 pci_set_master(pdev);
5900}
5901
5902int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5903{
5904 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5905 int rc = 0;
5906
5907 rc = ata_host_suspend(host, mesg);
5908 if (rc)
5909 return rc;
5910
5911 ata_pci_device_do_suspend(pdev, mesg);
5912
5913 return 0;
5914}
5915
5916int ata_pci_device_resume(struct pci_dev *pdev)
5917{
5918 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5919
5920 ata_pci_device_do_resume(pdev);
5921 ata_host_resume(host);
5922 return 0;
5923}
5924#endif /* CONFIG_PCI */
5925
5926
5927static int __init ata_init(void)
5928{
5929 ata_probe_timeout *= HZ;
5930 ata_wq = create_workqueue("ata");
5931 if (!ata_wq)
5932 return -ENOMEM;
5933
5934 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5935 if (!ata_aux_wq) {
5936 destroy_workqueue(ata_wq);
5937 return -ENOMEM;
5938 }
5939
5940 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5941 return 0;
5942}
5943
5944static void __exit ata_exit(void)
5945{
5946 destroy_workqueue(ata_wq);
5947 destroy_workqueue(ata_aux_wq);
5948}
5949
5950module_init(ata_init);
5951module_exit(ata_exit);
5952
5953static unsigned long ratelimit_time;
5954static DEFINE_SPINLOCK(ata_ratelimit_lock);
5955
5956int ata_ratelimit(void)
5957{
5958 int rc;
5959 unsigned long flags;
5960
5961 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5962
5963 if (time_after(jiffies, ratelimit_time)) {
5964 rc = 1;
5965 ratelimit_time = jiffies + (HZ/5);
5966 } else
5967 rc = 0;
5968
5969 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5970
5971 return rc;
5972}
5973
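/*
 * Editor's sketch (not part of the original source): ata_ratelimit()
 * is typically used to throttle messages from hot paths such as
 * interrupt handlers:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */
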
5974/**
5975 * ata_wait_register - wait until register value changes
5976 * @reg: IO-mapped register
5977 * @mask: Mask to apply to read register value
5978 * @val: Wait condition
5979 * @interval_msec: polling interval in milliseconds
5980 * @timeout_msec: timeout in milliseconds
5981 *
5982 * Waiting for some bits of register to change is a common
5983 * operation for ATA controllers. This function reads 32bit LE
5984 * IO-mapped register @reg and tests for the following condition.
5985 *
5986 * (*@reg & mask) != val
5987 *
5988 * If the condition is met, it returns; otherwise, the process is
5989 * repeated after @interval_msec until timeout.
5990 *
5991 * LOCKING:
5992 * Kernel thread context (may sleep)
5993 *
5994 * RETURNS:
5995 * The final register value.
5996 */
5997u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5998 unsigned long interval_msec,
5999 unsigned long timeout_msec)
6000{
6001 unsigned long timeout;
6002 u32 tmp;
6003
6004 tmp = ioread32(reg);
6005
6006 /* Calculate timeout _after_ the first read to make sure
6007 * preceding writes reach the controller before starting to
6008 * eat away the timeout.
6009 */
6010 timeout = jiffies + (timeout_msec * HZ) / 1000;
6011
6012 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6013 msleep(interval_msec);
6014 tmp = ioread32(reg);
6015 }
6016
6017 return tmp;
6018}
6019
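/*
 * Editor's sketch (not part of the original source, patterned after
 * the AHCI driver): waiting up to a second, polling every 10ms, for
 * a controller reset bit to self-clear:
 *
 *	tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
 *				HOST_RESET, 10, 1000);
 *	if (tmp & HOST_RESET)
 *		return -EIO;	// reset did not complete
 */
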
6020/*
6021 * Dummy port_ops
6022 */
6023static void ata_dummy_noret(struct ata_port *ap) { }
6024static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6025static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6026
6027static u8 ata_dummy_check_status(struct ata_port *ap)
6028{
6029 return ATA_DRDY;
6030}
6031
6032static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6033{
6034 return AC_ERR_SYSTEM;
6035}
6036
6037const struct ata_port_operations ata_dummy_port_ops = {
6038 .port_disable = ata_port_disable,
6039 .check_status = ata_dummy_check_status,
6040 .check_altstatus = ata_dummy_check_status,
6041 .dev_select = ata_noop_dev_select,
6042 .qc_prep = ata_noop_qc_prep,
6043 .qc_issue = ata_dummy_qc_issue,
6044 .freeze = ata_dummy_noret,
6045 .thaw = ata_dummy_noret,
6046 .error_handler = ata_dummy_noret,
6047 .post_internal_cmd = ata_dummy_qc_noret,
6048 .irq_clear = ata_dummy_noret,
6049 .port_start = ata_dummy_ret0,
6050 .port_stop = ata_dummy_noret,
6051};
6052
6053/*
6054 * libata is essentially a library of internal helper functions for
6055 * low-level ATA host controller drivers. As such, the API/ABI is
6056 * likely to change as new drivers are added and updated.
6057 * Do not depend on ABI/API stability.
6058 */
6059
6060EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6061EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6062EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6063EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6064EXPORT_SYMBOL_GPL(ata_std_bios_param);
6065EXPORT_SYMBOL_GPL(ata_std_ports);
6066EXPORT_SYMBOL_GPL(ata_host_init);
6067EXPORT_SYMBOL_GPL(ata_device_add);
6068EXPORT_SYMBOL_GPL(ata_port_detach);
6069EXPORT_SYMBOL_GPL(ata_host_remove);
6070EXPORT_SYMBOL_GPL(ata_sg_init);
6071EXPORT_SYMBOL_GPL(ata_sg_init_one);
6072EXPORT_SYMBOL_GPL(ata_hsm_move);
6073EXPORT_SYMBOL_GPL(ata_qc_complete);
6074EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6075EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6076EXPORT_SYMBOL_GPL(ata_tf_load);
6077EXPORT_SYMBOL_GPL(ata_tf_read);
6078EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6079EXPORT_SYMBOL_GPL(ata_std_dev_select);
6080EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6081EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6082EXPORT_SYMBOL_GPL(ata_check_status);
6083EXPORT_SYMBOL_GPL(ata_altstatus);
6084EXPORT_SYMBOL_GPL(ata_exec_command);
6085EXPORT_SYMBOL_GPL(ata_port_start);
6086EXPORT_SYMBOL_GPL(ata_port_stop);
6087EXPORT_SYMBOL_GPL(ata_host_stop);
6088EXPORT_SYMBOL_GPL(ata_interrupt);
6089EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
6090EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
6091EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
6092EXPORT_SYMBOL_GPL(ata_qc_prep);
6093EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6094EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6095EXPORT_SYMBOL_GPL(ata_bmdma_start);
6096EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6097EXPORT_SYMBOL_GPL(ata_bmdma_status);
6098EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6099EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6100EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6101EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6102EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6103EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6104EXPORT_SYMBOL_GPL(ata_port_probe);
6105EXPORT_SYMBOL_GPL(sata_set_spd);
6106EXPORT_SYMBOL_GPL(sata_phy_debounce);
6107EXPORT_SYMBOL_GPL(sata_phy_resume);
6108EXPORT_SYMBOL_GPL(sata_phy_reset);
6109EXPORT_SYMBOL_GPL(__sata_phy_reset);
6110EXPORT_SYMBOL_GPL(ata_bus_reset);
6111EXPORT_SYMBOL_GPL(ata_std_prereset);
6112EXPORT_SYMBOL_GPL(ata_std_softreset);
6113EXPORT_SYMBOL_GPL(sata_std_hardreset);
6114EXPORT_SYMBOL_GPL(ata_std_postreset);
6115EXPORT_SYMBOL_GPL(ata_dev_revalidate);
6116EXPORT_SYMBOL_GPL(ata_dev_classify);
6117EXPORT_SYMBOL_GPL(ata_dev_pair);
6118EXPORT_SYMBOL_GPL(ata_port_disable);
6119EXPORT_SYMBOL_GPL(ata_ratelimit);
6120EXPORT_SYMBOL_GPL(ata_wait_register);
6121EXPORT_SYMBOL_GPL(ata_busy_sleep);
6122EXPORT_SYMBOL_GPL(ata_port_queue_task);
6123EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6124EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6125EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6126EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6127EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6128EXPORT_SYMBOL_GPL(ata_scsi_release);
6129EXPORT_SYMBOL_GPL(ata_host_intr);
6130EXPORT_SYMBOL_GPL(sata_scr_valid);
6131EXPORT_SYMBOL_GPL(sata_scr_read);
6132EXPORT_SYMBOL_GPL(sata_scr_write);
6133EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6134EXPORT_SYMBOL_GPL(ata_port_online);
6135EXPORT_SYMBOL_GPL(ata_port_offline);
6136EXPORT_SYMBOL_GPL(ata_host_suspend);
6137EXPORT_SYMBOL_GPL(ata_host_resume);
6138EXPORT_SYMBOL_GPL(ata_id_string);
6139EXPORT_SYMBOL_GPL(ata_id_c_string);
6140EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6141
6142EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6143EXPORT_SYMBOL_GPL(ata_timing_compute);
6144EXPORT_SYMBOL_GPL(ata_timing_merge);
6145
6146#ifdef CONFIG_PCI
6147EXPORT_SYMBOL_GPL(pci_test_config_bits);
6148EXPORT_SYMBOL_GPL(ata_pci_host_stop);
6149EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6150EXPORT_SYMBOL_GPL(ata_pci_init_one);
6151EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6152EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6153EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6154EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6155EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6156EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6157EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6158#endif /* CONFIG_PCI */
6159
6160EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6161EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6162
6163EXPORT_SYMBOL_GPL(ata_eng_timeout);
6164EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6165EXPORT_SYMBOL_GPL(ata_port_abort);
6166EXPORT_SYMBOL_GPL(ata_port_freeze);
6167EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6168EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6169EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6170EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6171EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
new file mode 100644
index 000000000000..3fa80f09f2ae
--- /dev/null
+++ b/drivers/ata/libata-eh.c
@@ -0,0 +1,2245 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_eh.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include "../scsi/scsi_transport_api.h"
42
43#include <linux/libata.h>
44
45#include "libata.h"
46
47static void __ata_port_freeze(struct ata_port *ap);
48static void ata_eh_finish(struct ata_port *ap);
49static void ata_eh_handle_port_suspend(struct ata_port *ap);
50static void ata_eh_handle_port_resume(struct ata_port *ap);
51
52static void ata_ering_record(struct ata_ering *ering, int is_io,
53 unsigned int err_mask)
54{
55 struct ata_ering_entry *ent;
56
57 WARN_ON(!err_mask);
58
59 ering->cursor++;
60 ering->cursor %= ATA_ERING_SIZE;
61
62 ent = &ering->ring[ering->cursor];
63 ent->is_io = is_io;
64 ent->err_mask = err_mask;
65 ent->timestamp = get_jiffies_64();
66}
67
68static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
69{
70 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
71 if (!ent->err_mask)
72 return NULL;
73 return ent;
74}
75
76static int ata_ering_map(struct ata_ering *ering,
77 int (*map_fn)(struct ata_ering_entry *, void *),
78 void *arg)
79{
80 int idx, rc = 0;
81 struct ata_ering_entry *ent;
82
83 idx = ering->cursor;
84 do {
85 ent = &ering->ring[idx];
86 if (!ent->err_mask)
87 break;
88 rc = map_fn(ent, arg);
89 if (rc)
90 break;
91 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
92 } while (idx != ering->cursor);
93
94 return rc;
95}
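/*
 * Usage sketch (editor's illustration, not part of the original file):
 * counting ring entries with ata_ering_map().  The walk starts at the
 * newest entry and stops early if the callback returns non-zero.
 * my_count_cb is hypothetical.
 *
 *	static int my_count_cb(struct ata_ering_entry *ent, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	ata_ering_map(&dev->ering, my_count_cb, &n);
 */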
96
97static unsigned int ata_eh_dev_action(struct ata_device *dev)
98{
99 struct ata_eh_context *ehc = &dev->ap->eh_context;
100
101 return ehc->i.action | ehc->i.dev_action[dev->devno];
102}
103
104static void ata_eh_clear_action(struct ata_device *dev,
105 struct ata_eh_info *ehi, unsigned int action)
106{
107 int i;
108
109 if (!dev) {
110 ehi->action &= ~action;
111 for (i = 0; i < ATA_MAX_DEVICES; i++)
112 ehi->dev_action[i] &= ~action;
113 } else {
114 /* doesn't make sense for port-wide EH actions */
115 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
116
117 /* break ehi->action into ehi->dev_action */
118 if (ehi->action & action) {
119 for (i = 0; i < ATA_MAX_DEVICES; i++)
120 ehi->dev_action[i] |= ehi->action & action;
121 ehi->action &= ~action;
122 }
123
124 /* turn off the specified per-dev action */
125 ehi->dev_action[dev->devno] &= ~action;
126 }
127}
128
129/**
130 * ata_scsi_timed_out - SCSI layer time out callback
131 * @cmd: timed out SCSI command
132 *
133 * Handles SCSI layer timeout. We race with normal completion of
134 * the qc for @cmd. If the qc is already gone, we lose and let
135 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
136 * timed out and EH should be invoked. Prevent ata_qc_complete()
137 * from finishing it by setting EH_SCHEDULED and return
138 * EH_NOT_HANDLED.
139 *
140 * TODO: kill this function once old EH is gone.
141 *
142 * LOCKING:
143 * Called from timer context
144 *
145 * RETURNS:
146 * EH_HANDLED or EH_NOT_HANDLED
147 */
148enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
149{
150 struct Scsi_Host *host = cmd->device->host;
151 struct ata_port *ap = ata_shost_to_port(host);
152 unsigned long flags;
153 struct ata_queued_cmd *qc;
154 enum scsi_eh_timer_return ret;
155
156 DPRINTK("ENTER\n");
157
158 if (ap->ops->error_handler) {
159 ret = EH_NOT_HANDLED;
160 goto out;
161 }
162
163 ret = EH_HANDLED;
164 spin_lock_irqsave(ap->lock, flags);
165 qc = ata_qc_from_tag(ap, ap->active_tag);
166 if (qc) {
167 WARN_ON(qc->scsicmd != cmd);
168 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
169 qc->err_mask |= AC_ERR_TIMEOUT;
170 ret = EH_NOT_HANDLED;
171 }
172 spin_unlock_irqrestore(ap->lock, flags);
173
174 out:
175 DPRINTK("EXIT, ret=%d\n", ret);
176 return ret;
177}
178
179/**
180 * ata_scsi_error - SCSI layer error handler callback
181 * @host: SCSI host on which error occurred
182 *
183 * Handles SCSI-layer-thrown error events.
184 *
185 * LOCKING:
186 * Inherited from SCSI layer (none, can sleep)
187 *
188 * RETURNS:
189 * Zero.
190 */
191void ata_scsi_error(struct Scsi_Host *host)
192{
193 struct ata_port *ap = ata_shost_to_port(host);
194 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
195 unsigned long flags;
196
197 DPRINTK("ENTER\n");
198
199 /* synchronize with port task */
200 ata_port_flush_task(ap);
201
202 /* synchronize with host lock and sort out timeouts */
203
204 /* For new EH, all qcs are finished in one of three ways -
205 * normal completion, error completion, and SCSI timeout.
 206 * Both completions can race against SCSI timeout. When normal
207 * completion wins, the qc never reaches EH. When error
208 * completion wins, the qc has ATA_QCFLAG_FAILED set.
209 *
210 * When SCSI timeout wins, things are a bit more complex.
211 * Normal or error completion can occur after the timeout but
212 * before this point. In such cases, both types of
213 * completions are honored. A scmd is determined to have
214 * timed out iff its associated qc is active and not failed.
215 */
216 if (ap->ops->error_handler) {
217 struct scsi_cmnd *scmd, *tmp;
218 int nr_timedout = 0;
219
220 spin_lock_irqsave(ap->lock, flags);
221
222 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
223 struct ata_queued_cmd *qc;
224
225 for (i = 0; i < ATA_MAX_QUEUE; i++) {
226 qc = __ata_qc_from_tag(ap, i);
227 if (qc->flags & ATA_QCFLAG_ACTIVE &&
228 qc->scsicmd == scmd)
229 break;
230 }
231
232 if (i < ATA_MAX_QUEUE) {
233 /* the scmd has an associated qc */
234 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
235 /* which hasn't failed yet, timeout */
236 qc->err_mask |= AC_ERR_TIMEOUT;
237 qc->flags |= ATA_QCFLAG_FAILED;
238 nr_timedout++;
239 }
240 } else {
241 /* Normal completion occurred after
242 * SCSI timeout but before this point.
243 * Successfully complete it.
244 */
245 scmd->retries = scmd->allowed;
246 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
247 }
248 }
249
 250 /* If we have timed out qcs, they belong to EH from
251 * this point but the state of the controller is
252 * unknown. Freeze the port to make sure the IRQ
253 * handler doesn't diddle with those qcs. This must
254 * be done atomically w.r.t. setting QCFLAG_FAILED.
255 */
256 if (nr_timedout)
257 __ata_port_freeze(ap);
258
259 spin_unlock_irqrestore(ap->lock, flags);
260 } else
261 spin_unlock_wait(ap->lock);
262
263 repeat:
264 /* invoke error handler */
265 if (ap->ops->error_handler) {
266 /* process port resume request */
267 ata_eh_handle_port_resume(ap);
268
269 /* fetch & clear EH info */
270 spin_lock_irqsave(ap->lock, flags);
271
272 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
273 ap->eh_context.i = ap->eh_info;
274 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
275
276 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
277 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
278
279 spin_unlock_irqrestore(ap->lock, flags);
280
281 /* invoke EH, skip if unloading or suspended */
282 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
283 ap->ops->error_handler(ap);
284 else
285 ata_eh_finish(ap);
286
287 /* process port suspend request */
288 ata_eh_handle_port_suspend(ap);
289
 290 /* An exception might have happened after ->error_handler
291 * recovered the port but before this point. Repeat
292 * EH in such case.
293 */
294 spin_lock_irqsave(ap->lock, flags);
295
296 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
297 if (--repeat_cnt) {
298 ata_port_printk(ap, KERN_INFO,
299 "EH pending after completion, "
300 "repeating EH (cnt=%d)\n", repeat_cnt);
301 spin_unlock_irqrestore(ap->lock, flags);
302 goto repeat;
303 }
304 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
305 "tries, giving up\n", ATA_EH_MAX_REPEAT);
306 }
307
308 /* this run is complete, make sure EH info is clear */
309 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
310
311 /* Clear host_eh_scheduled while holding ap->lock such
312 * that if exception occurs after this point but
313 * before EH completion, SCSI midlayer will
314 * re-initiate EH.
315 */
316 host->host_eh_scheduled = 0;
317
318 spin_unlock_irqrestore(ap->lock, flags);
319 } else {
320 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
321 ap->ops->eng_timeout(ap);
322 }
323
324 /* finish or retry handled scmd's and clean up */
325 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
326
327 scsi_eh_flush_done_q(&ap->eh_done_q);
328
329 /* clean up */
330 spin_lock_irqsave(ap->lock, flags);
331
332 if (ap->pflags & ATA_PFLAG_LOADING)
333 ap->pflags &= ~ATA_PFLAG_LOADING;
334 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
335 queue_work(ata_aux_wq, &ap->hotplug_task);
336
337 if (ap->pflags & ATA_PFLAG_RECOVERED)
338 ata_port_printk(ap, KERN_INFO, "EH complete\n");
339
340 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
341
342 /* tell wait_eh that we're done */
343 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
344 wake_up_all(&ap->eh_wait_q);
345
346 spin_unlock_irqrestore(ap->lock, flags);
347
348 DPRINTK("EXIT\n");
349}
350
351/**
352 * ata_port_wait_eh - Wait for the currently pending EH to complete
353 * @ap: Port to wait EH for
354 *
355 * Wait until the currently pending EH is complete.
356 *
357 * LOCKING:
358 * Kernel thread context (may sleep).
359 */
360void ata_port_wait_eh(struct ata_port *ap)
361{
362 unsigned long flags;
363 DEFINE_WAIT(wait);
364
365 retry:
366 spin_lock_irqsave(ap->lock, flags);
367
368 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
369 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
370 spin_unlock_irqrestore(ap->lock, flags);
371 schedule();
372 spin_lock_irqsave(ap->lock, flags);
373 }
374 finish_wait(&ap->eh_wait_q, &wait);
375
376 spin_unlock_irqrestore(ap->lock, flags);
377
378 /* make sure SCSI EH is complete */
379 if (scsi_host_in_recovery(ap->scsi_host)) {
380 msleep(10);
381 goto retry;
382 }
383}
384
385/**
386 * ata_qc_timeout - Handle timeout of queued command
387 * @qc: Command that timed out
388 *
389 * Some part of the kernel (currently, only the SCSI layer)
390 * has noticed that the active command on port @ap has not
391 * completed after a specified length of time. Handle this
392 * condition by disabling DMA (if necessary) and completing
393 * transactions, with error if necessary.
394 *
395 * This also handles the case of the "lost interrupt", where
396 * for some reason (possibly hardware bug, possibly driver bug)
397 * an interrupt was not delivered to the driver, even though the
398 * transaction completed successfully.
399 *
400 * TODO: kill this function once old EH is gone.
401 *
402 * LOCKING:
403 * Inherited from SCSI layer (none, can sleep)
404 */
405static void ata_qc_timeout(struct ata_queued_cmd *qc)
406{
407 struct ata_port *ap = qc->ap;
408 u8 host_stat = 0, drv_stat;
409 unsigned long flags;
410
411 DPRINTK("ENTER\n");
412
413 ap->hsm_task_state = HSM_ST_IDLE;
414
415 spin_lock_irqsave(ap->lock, flags);
416
417 switch (qc->tf.protocol) {
418
419 case ATA_PROT_DMA:
420 case ATA_PROT_ATAPI_DMA:
421 host_stat = ap->ops->bmdma_status(ap);
422
423 /* before we do anything else, clear DMA-Start bit */
424 ap->ops->bmdma_stop(qc);
425
426 /* fall through */
427
428 default:
429 ata_altstatus(ap);
430 drv_stat = ata_chk_status(ap);
431
432 /* ack bmdma irq events */
433 ap->ops->irq_clear(ap);
434
435 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
436 "stat 0x%x host_stat 0x%x\n",
437 qc->tf.command, drv_stat, host_stat);
438
439 /* complete taskfile transaction */
440 qc->err_mask |= AC_ERR_TIMEOUT;
441 break;
442 }
443
444 spin_unlock_irqrestore(ap->lock, flags);
445
446 ata_eh_qc_complete(qc);
447
448 DPRINTK("EXIT\n");
449}
450
451/**
452 * ata_eng_timeout - Handle timeout of queued command
453 * @ap: Port on which timed-out command is active
454 *
455 * Some part of the kernel (currently, only the SCSI layer)
456 * has noticed that the active command on port @ap has not
457 * completed after a specified length of time. Handle this
458 * condition by disabling DMA (if necessary) and completing
459 * transactions, with error if necessary.
460 *
461 * This also handles the case of the "lost interrupt", where
462 * for some reason (possibly hardware bug, possibly driver bug)
463 * an interrupt was not delivered to the driver, even though the
464 * transaction completed successfully.
465 *
466 * TODO: kill this function once old EH is gone.
467 *
468 * LOCKING:
469 * Inherited from SCSI layer (none, can sleep)
470 */
471void ata_eng_timeout(struct ata_port *ap)
472{
473 DPRINTK("ENTER\n");
474
475 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
476
477 DPRINTK("EXIT\n");
478}
479
480/**
481 * ata_qc_schedule_eh - schedule qc for error handling
482 * @qc: command to schedule error handling for
483 *
484 * Schedule error handling for @qc. EH will kick in as soon as
485 * other commands are drained.
486 *
487 * LOCKING:
488 * spin_lock_irqsave(host lock)
489 */
490void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
491{
492 struct ata_port *ap = qc->ap;
493
494 WARN_ON(!ap->ops->error_handler);
495
496 qc->flags |= ATA_QCFLAG_FAILED;
497 qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
498
499 /* The following will fail if timeout has already expired.
500 * ata_scsi_error() takes care of such scmds on EH entry.
501 * Note that ATA_QCFLAG_FAILED is unconditionally set after
502 * this function completes.
503 */
504 scsi_req_abort_cmd(qc->scsicmd);
505}
506
507/**
508 * ata_port_schedule_eh - schedule error handling without a qc
509 * @ap: ATA port to schedule EH for
510 *
511 * Schedule error handling for @ap. EH will kick in as soon as
512 * all commands are drained.
513 *
514 * LOCKING:
515 * spin_lock_irqsave(host lock)
516 */
517void ata_port_schedule_eh(struct ata_port *ap)
518{
519 WARN_ON(!ap->ops->error_handler);
520
521 ap->pflags |= ATA_PFLAG_EH_PENDING;
522 scsi_schedule_eh(ap->scsi_host);
523
524 DPRINTK("port EH scheduled\n");
525}
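/*
 * Usage sketch (hypothetical caller, not from the original file):
 * forcing a full EH pass and waiting for it to finish.
 * ata_port_schedule_eh() must be called under the host lock;
 * ata_port_wait_eh() may sleep.
 *
 *	static void my_force_eh(struct ata_port *ap)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(ap->lock, flags);
 *		ata_port_schedule_eh(ap);
 *		spin_unlock_irqrestore(ap->lock, flags);
 *
 *		ata_port_wait_eh(ap);
 *	}
 */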
526
527/**
528 * ata_port_abort - abort all qc's on the port
529 * @ap: ATA port to abort qc's for
530 *
531 * Abort all active qc's of @ap and schedule EH.
532 *
533 * LOCKING:
534 * spin_lock_irqsave(host lock)
535 *
536 * RETURNS:
537 * Number of aborted qc's.
538 */
539int ata_port_abort(struct ata_port *ap)
540{
541 int tag, nr_aborted = 0;
542
543 WARN_ON(!ap->ops->error_handler);
544
545 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
546 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
547
548 if (qc) {
549 qc->flags |= ATA_QCFLAG_FAILED;
550 ata_qc_complete(qc);
551 nr_aborted++;
552 }
553 }
554
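	/* completing failed qcs schedules EH; do it explicitly if none were aborted */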
555 if (!nr_aborted)
556 ata_port_schedule_eh(ap);
557
558 return nr_aborted;
559}
560
561/**
562 * __ata_port_freeze - freeze port
563 * @ap: ATA port to freeze
564 *
565 * This function is called when HSM violation or some other
566 * condition disrupts normal operation of the port. Frozen port
567 * is not allowed to perform any operation until the port is
568 * thawed, which usually follows a successful reset.
569 *
570 * ap->ops->freeze() callback can be used for freezing the port
571 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
572 * port cannot be frozen hardware-wise, the interrupt handler
573 * must ack and clear interrupts unconditionally while the port
574 * is frozen.
575 *
576 * LOCKING:
577 * spin_lock_irqsave(host lock)
578 */
579static void __ata_port_freeze(struct ata_port *ap)
580{
581 WARN_ON(!ap->ops->error_handler);
582
583 if (ap->ops->freeze)
584 ap->ops->freeze(ap);
585
586 ap->pflags |= ATA_PFLAG_FROZEN;
587
588 DPRINTK("ata%u port frozen\n", ap->id);
589}
590
591/**
592 * ata_port_freeze - abort & freeze port
593 * @ap: ATA port to freeze
594 *
595 * Abort and freeze @ap.
596 *
597 * LOCKING:
598 * spin_lock_irqsave(host lock)
599 *
600 * RETURNS:
601 * Number of aborted commands.
602 */
603int ata_port_freeze(struct ata_port *ap)
604{
605 int nr_aborted;
606
607 WARN_ON(!ap->ops->error_handler);
608
609 nr_aborted = ata_port_abort(ap);
610 __ata_port_freeze(ap);
611
612 return nr_aborted;
613}
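/*
 * Illustration (hypothetical driver code, not from the original file):
 * an LLDD interrupt handler reacting to a fatal error.  Recording the
 * cause in ap->eh_info and freezing aborts all active qcs and keeps
 * the port quiet until EH resets and thaws it.  MY_IRQ_FATAL is a
 * made-up status bit.
 *
 *	if (irq_stat & MY_IRQ_FATAL) {
 *		ap->eh_info.err_mask |= AC_ERR_HSM;
 *		ata_port_freeze(ap);
 *		return IRQ_HANDLED;
 *	}
 */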
614
615/**
616 * ata_eh_freeze_port - EH helper to freeze port
617 * @ap: ATA port to freeze
618 *
619 * Freeze @ap.
620 *
621 * LOCKING:
622 * None.
623 */
624void ata_eh_freeze_port(struct ata_port *ap)
625{
626 unsigned long flags;
627
628 if (!ap->ops->error_handler)
629 return;
630
631 spin_lock_irqsave(ap->lock, flags);
632 __ata_port_freeze(ap);
633 spin_unlock_irqrestore(ap->lock, flags);
634}
635
636/**
 637 * ata_eh_thaw_port - EH helper to thaw port
638 * @ap: ATA port to thaw
639 *
640 * Thaw frozen port @ap.
641 *
642 * LOCKING:
643 * None.
644 */
645void ata_eh_thaw_port(struct ata_port *ap)
646{
647 unsigned long flags;
648
649 if (!ap->ops->error_handler)
650 return;
651
652 spin_lock_irqsave(ap->lock, flags);
653
654 ap->pflags &= ~ATA_PFLAG_FROZEN;
655
656 if (ap->ops->thaw)
657 ap->ops->thaw(ap);
658
659 spin_unlock_irqrestore(ap->lock, flags);
660
661 DPRINTK("ata%u port thawed\n", ap->id);
662}
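/*
 * Sketch of the ->freeze/->thaw pair a controller driver might supply
 * (register names are made up; the only real requirement is that a
 * frozen port raises no qc activity, typically by masking its IRQs):
 *
 *	static void my_freeze(struct ata_port *ap)
 *	{
 *		writel(0, my_port_base(ap) + MY_IRQ_MASK);
 *	}
 *
 *	static void my_thaw(struct ata_port *ap)
 *	{
 *		writel(~0, my_port_base(ap) + MY_IRQ_STAT);
 *		writel(MY_IRQ_DEFAULT, my_port_base(ap) + MY_IRQ_MASK);
 *	}
 */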
663
664static void ata_eh_scsidone(struct scsi_cmnd *scmd)
665{
666 /* nada */
667}
668
669static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
670{
671 struct ata_port *ap = qc->ap;
672 struct scsi_cmnd *scmd = qc->scsicmd;
673 unsigned long flags;
674
675 spin_lock_irqsave(ap->lock, flags);
676 qc->scsidone = ata_eh_scsidone;
677 __ata_qc_complete(qc);
678 WARN_ON(ata_tag_valid(qc->tag));
679 spin_unlock_irqrestore(ap->lock, flags);
680
681 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
682}
683
684/**
685 * ata_eh_qc_complete - Complete an active ATA command from EH
686 * @qc: Command to complete
687 *
688 * Indicate to the mid and upper layers that an ATA command has
689 * completed. To be used from EH.
690 */
691void ata_eh_qc_complete(struct ata_queued_cmd *qc)
692{
693 struct scsi_cmnd *scmd = qc->scsicmd;
694 scmd->retries = scmd->allowed;
695 __ata_eh_qc_complete(qc);
696}
697
698/**
699 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
700 * @qc: Command to retry
701 *
702 * Indicate to the mid and upper layers that an ATA command
703 * should be retried. To be used from EH.
704 *
705 * SCSI midlayer limits the number of retries to scmd->allowed.
706 * scmd->retries is decremented for commands which get retried
707 * due to unrelated failures (qc->err_mask is zero).
708 */
709void ata_eh_qc_retry(struct ata_queued_cmd *qc)
710{
711 struct scsi_cmnd *scmd = qc->scsicmd;
712 if (!qc->err_mask && scmd->retries)
713 scmd->retries--;
714 __ata_eh_qc_complete(qc);
715}
716
717/**
718 * ata_eh_detach_dev - detach ATA device
719 * @dev: ATA device to detach
720 *
721 * Detach @dev.
722 *
723 * LOCKING:
724 * None.
725 */
726static void ata_eh_detach_dev(struct ata_device *dev)
727{
728 struct ata_port *ap = dev->ap;
729 unsigned long flags;
730
731 ata_dev_disable(dev);
732
733 spin_lock_irqsave(ap->lock, flags);
734
735 dev->flags &= ~ATA_DFLAG_DETACH;
736
737 if (ata_scsi_offline_dev(dev)) {
738 dev->flags |= ATA_DFLAG_DETACHED;
739 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
740 }
741
742 /* clear per-dev EH actions */
743 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
744 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
745
746 spin_unlock_irqrestore(ap->lock, flags);
747}
748
749/**
750 * ata_eh_about_to_do - about to perform eh_action
751 * @ap: target ATA port
752 * @dev: target ATA dev for per-dev action (can be NULL)
753 * @action: action about to be performed
754 *
755 * Called just before performing EH actions to clear related bits
756 * in @ap->eh_info such that eh actions are not unnecessarily
757 * repeated.
758 *
759 * LOCKING:
760 * None.
761 */
762static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
763 unsigned int action)
764{
765 unsigned long flags;
766 struct ata_eh_info *ehi = &ap->eh_info;
767 struct ata_eh_context *ehc = &ap->eh_context;
768
769 spin_lock_irqsave(ap->lock, flags);
770
771 /* Reset is represented by combination of actions and EHI
772 * flags. Suck in all related bits before clearing eh_info to
773 * avoid losing requested action.
774 */
775 if (action & ATA_EH_RESET_MASK) {
776 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
777 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
778
779 /* make sure all reset actions are cleared & clear EHI flags */
780 action |= ATA_EH_RESET_MASK;
781 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
782 }
783
784 ata_eh_clear_action(dev, ehi, action);
785
786 if (!(ehc->i.flags & ATA_EHI_QUIET))
787 ap->pflags |= ATA_PFLAG_RECOVERED;
788
789 spin_unlock_irqrestore(ap->lock, flags);
790}
791
792/**
793 * ata_eh_done - EH action complete
794 * @ap: target ATA port
795 * @dev: target ATA dev for per-dev action (can be NULL)
796 * @action: action just completed
797 *
798 * Called right after performing EH actions to clear related bits
799 * in @ap->eh_context.
800 *
801 * LOCKING:
802 * None.
803 */
804static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
805 unsigned int action)
806{
807 /* if reset is complete, clear all reset actions & reset modifier */
808 if (action & ATA_EH_RESET_MASK) {
809 action |= ATA_EH_RESET_MASK;
810 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
811 }
812
813 ata_eh_clear_action(dev, &ap->eh_context.i, action);
814}
815
816/**
817 * ata_err_string - convert err_mask to descriptive string
818 * @err_mask: error mask to convert to string
819 *
820 * Convert @err_mask to descriptive string. Errors are
821 * prioritized according to severity and only the most severe
822 * error is reported.
823 *
824 * LOCKING:
825 * None.
826 *
827 * RETURNS:
828 * Descriptive string for @err_mask
829 */
830static const char * ata_err_string(unsigned int err_mask)
831{
832 if (err_mask & AC_ERR_HOST_BUS)
833 return "host bus error";
834 if (err_mask & AC_ERR_ATA_BUS)
835 return "ATA bus error";
836 if (err_mask & AC_ERR_TIMEOUT)
837 return "timeout";
838 if (err_mask & AC_ERR_HSM)
839 return "HSM violation";
840 if (err_mask & AC_ERR_SYSTEM)
841 return "internal error";
842 if (err_mask & AC_ERR_MEDIA)
843 return "media error";
844 if (err_mask & AC_ERR_INVALID)
845 return "invalid argument";
846 if (err_mask & AC_ERR_DEV)
847 return "device error";
848 return "unknown error";
849}
850
851/**
852 * ata_read_log_page - read a specific log page
853 * @dev: target device
854 * @page: page to read
855 * @buf: buffer to store read page
856 * @sectors: number of sectors to read
857 *
858 * Read log page using READ_LOG_EXT command.
859 *
860 * LOCKING:
861 * Kernel thread context (may sleep).
862 *
863 * RETURNS:
864 * 0 on success, AC_ERR_* mask otherwise.
865 */
866static unsigned int ata_read_log_page(struct ata_device *dev,
867 u8 page, void *buf, unsigned int sectors)
868{
869 struct ata_taskfile tf;
870 unsigned int err_mask;
871
872 DPRINTK("read log page - page %d\n", page);
873
874 ata_tf_init(dev, &tf);
875 tf.command = ATA_CMD_READ_LOG_EXT;
876 tf.lbal = page;
877 tf.nsect = sectors;
878 tf.hob_nsect = sectors >> 8;
879 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
880 tf.protocol = ATA_PROT_PIO;
881
882 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
883 buf, sectors * ATA_SECT_SIZE);
884
885 DPRINTK("EXIT, err_mask=%x\n", err_mask);
886 return err_mask;
887}
888
889/**
890 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
891 * @dev: Device to read log page 10h from
892 * @tag: Resulting tag of the failed command
893 * @tf: Resulting taskfile registers of the failed command
894 *
895 * Read log page 10h to obtain NCQ error details and clear error
896 * condition.
897 *
898 * LOCKING:
899 * Kernel thread context (may sleep).
900 *
901 * RETURNS:
902 * 0 on success, -errno otherwise.
903 */
904static int ata_eh_read_log_10h(struct ata_device *dev,
905 int *tag, struct ata_taskfile *tf)
906{
907 u8 *buf = dev->ap->sector_buf;
908 unsigned int err_mask;
909 u8 csum;
910 int i;
911
912 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
913 if (err_mask)
914 return -EIO;
915
916 csum = 0;
917 for (i = 0; i < ATA_SECT_SIZE; i++)
918 csum += buf[i];
919 if (csum)
920 ata_dev_printk(dev, KERN_WARNING,
921 "invalid checksum 0x%x on log page 10h\n", csum);
922
923 if (buf[0] & 0x80)
924 return -ENOENT;
925
926 *tag = buf[0] & 0x1f;
927
928 tf->command = buf[2];
929 tf->feature = buf[3];
930 tf->lbal = buf[4];
931 tf->lbam = buf[5];
932 tf->lbah = buf[6];
933 tf->device = buf[7];
934 tf->hob_lbal = buf[8];
935 tf->hob_lbam = buf[9];
936 tf->hob_lbah = buf[10];
937 tf->nsect = buf[12];
938 tf->hob_nsect = buf[13];
939
940 return 0;
941}
942
943/**
944 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
945 * @dev: device to perform REQUEST_SENSE to
946 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
947 *
948 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 949 * SENSE. This function is an EH helper.
950 *
951 * LOCKING:
952 * Kernel thread context (may sleep).
953 *
954 * RETURNS:
955 * 0 on success, AC_ERR_* mask on failure
956 */
957static unsigned int atapi_eh_request_sense(struct ata_device *dev,
958 unsigned char *sense_buf)
959{
960 struct ata_port *ap = dev->ap;
961 struct ata_taskfile tf;
962 u8 cdb[ATAPI_CDB_LEN];
963
964 DPRINTK("ATAPI request sense\n");
965
966 ata_tf_init(dev, &tf);
967
968 /* FIXME: is this needed? */
969 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
970
971 /* XXX: why tf_read here? */
972 ap->ops->tf_read(ap, &tf);
973
974 /* fill these in, for the case where they are -not- overwritten */
975 sense_buf[0] = 0x70;
976 sense_buf[2] = tf.feature >> 4;
977
978 memset(cdb, 0, ATAPI_CDB_LEN);
979 cdb[0] = REQUEST_SENSE;
980 cdb[4] = SCSI_SENSE_BUFFERSIZE;
981
982 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
983 tf.command = ATA_CMD_PACKET;
984
985 /* is it pointless to prefer PIO for "safety reasons"? */
986 if (ap->flags & ATA_FLAG_PIO_DMA) {
987 tf.protocol = ATA_PROT_ATAPI_DMA;
988 tf.feature |= ATAPI_PKT_DMA;
989 } else {
990 tf.protocol = ATA_PROT_ATAPI;
991 tf.lbam = (8 * 1024) & 0xff;
992 tf.lbah = (8 * 1024) >> 8;
993 }
994
995 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
996 sense_buf, SCSI_SENSE_BUFFERSIZE);
997}
998
999/**
1000 * ata_eh_analyze_serror - analyze SError for a failed port
1001 * @ap: ATA port to analyze SError for
1002 *
1003 * Analyze SError if available and further determine cause of
1004 * failure.
1005 *
1006 * LOCKING:
1007 * None.
1008 */
1009static void ata_eh_analyze_serror(struct ata_port *ap)
1010{
1011 struct ata_eh_context *ehc = &ap->eh_context;
1012 u32 serror = ehc->i.serror;
1013 unsigned int err_mask = 0, action = 0;
1014
1015 if (serror & SERR_PERSISTENT) {
1016 err_mask |= AC_ERR_ATA_BUS;
1017 action |= ATA_EH_HARDRESET;
1018 }
1019 if (serror &
1020 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
1021 err_mask |= AC_ERR_ATA_BUS;
1022 action |= ATA_EH_SOFTRESET;
1023 }
1024 if (serror & SERR_PROTOCOL) {
1025 err_mask |= AC_ERR_HSM;
1026 action |= ATA_EH_SOFTRESET;
1027 }
1028 if (serror & SERR_INTERNAL) {
1029 err_mask |= AC_ERR_SYSTEM;
1030 action |= ATA_EH_SOFTRESET;
1031 }
1032 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
1033 ata_ehi_hotplugged(&ehc->i);
1034
1035 ehc->i.err_mask |= err_mask;
1036 ehc->i.action |= action;
1037}
1038
1039/**
1040 * ata_eh_analyze_ncq_error - analyze NCQ error
1041 * @ap: ATA port to analyze NCQ error for
1042 *
1043 * Read log page 10h, determine the offending qc and acquire
 1044 * error status TF. For NCQ device errors, all an LLDD has to do
 1045 * is set AC_ERR_DEV in ehi->err_mask; this function takes
 1046 * care of the rest.
1047 *
1048 * LOCKING:
1049 * Kernel thread context (may sleep).
1050 */
1051static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1052{
1053 struct ata_eh_context *ehc = &ap->eh_context;
1054 struct ata_device *dev = ap->device;
1055 struct ata_queued_cmd *qc;
1056 struct ata_taskfile tf;
1057 int tag, rc;
1058
1059 /* if frozen, we can't do much */
1060 if (ap->pflags & ATA_PFLAG_FROZEN)
1061 return;
1062
1063 /* is it NCQ device error? */
1064 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1065 return;
1066
1067 /* has LLDD analyzed already? */
1068 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1069 qc = __ata_qc_from_tag(ap, tag);
1070
1071 if (!(qc->flags & ATA_QCFLAG_FAILED))
1072 continue;
1073
1074 if (qc->err_mask)
1075 return;
1076 }
1077
1078 /* okay, this error is ours */
1079 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1080 if (rc) {
1081 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1082 "(errno=%d)\n", rc);
1083 return;
1084 }
1085
1086 if (!(ap->sactive & (1 << tag))) {
1087 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1088 "inactive tag %d\n", tag);
1089 return;
1090 }
1091
1092 /* we've got the perpetrator, condemn it */
1093 qc = __ata_qc_from_tag(ap, tag);
1094 memcpy(&qc->result_tf, &tf, sizeof(tf));
1095 qc->err_mask |= AC_ERR_DEV;
1096 ehc->i.err_mask &= ~AC_ERR_DEV;
1097}
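/*
 * Illustration (hypothetical): on an NCQ device error the LLDD does
 * not have to identify the failing tag itself.  Flagging AC_ERR_DEV
 * and aborting is enough; the function above reads log page 10h and
 * pins the error on the offending qc.
 *
 *	if (irq_stat & MY_IRQ_DEV_ERR) {
 *		ap->eh_info.err_mask |= AC_ERR_DEV;
 *		ata_port_abort(ap);
 *	}
 */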
1098
1099/**
1100 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1101 * @qc: qc to analyze
1102 * @tf: Taskfile registers to analyze
1103 *
1104 * Analyze taskfile of @qc and further determine cause of
1105 * failure. This function also requests ATAPI sense data if
 1106 * available.
1107 *
1108 * LOCKING:
1109 * Kernel thread context (may sleep).
1110 *
1111 * RETURNS:
1112 * Determined recovery action
1113 */
1114static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1115 const struct ata_taskfile *tf)
1116{
1117 unsigned int tmp, action = 0;
1118 u8 stat = tf->command, err = tf->feature;
1119
1120 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1121 qc->err_mask |= AC_ERR_HSM;
1122 return ATA_EH_SOFTRESET;
1123 }
1124
1125 if (!(qc->err_mask & AC_ERR_DEV))
1126 return 0;
1127
1128 switch (qc->dev->class) {
1129 case ATA_DEV_ATA:
1130 if (err & ATA_ICRC)
1131 qc->err_mask |= AC_ERR_ATA_BUS;
1132 if (err & ATA_UNC)
1133 qc->err_mask |= AC_ERR_MEDIA;
1134 if (err & ATA_IDNF)
1135 qc->err_mask |= AC_ERR_INVALID;
1136 break;
1137
1138 case ATA_DEV_ATAPI:
1139 tmp = atapi_eh_request_sense(qc->dev,
1140 qc->scsicmd->sense_buffer);
1141 if (!tmp) {
1142 /* ATA_QCFLAG_SENSE_VALID is used to tell
1143 * atapi_qc_complete() that sense data is
1144 * already valid.
1145 *
1146 * TODO: interpret sense data and set
1147 * appropriate err_mask.
1148 */
1149 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1150 } else
1151 qc->err_mask |= tmp;
1152 }
1153
1154 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1155 action |= ATA_EH_SOFTRESET;
1156
1157 return action;
1158}
1159
1160static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1161{
1162 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1163 return 1;
1164
1165 if (ent->is_io) {
1166 if (ent->err_mask & AC_ERR_HSM)
1167 return 1;
1168 if ((ent->err_mask &
1169 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1170 return 2;
1171 }
1172
1173 return 0;
1174}
1175
1176struct speed_down_needed_arg {
1177 u64 since;
1178 int nr_errors[3];
1179};
1180
1181static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1182{
1183 struct speed_down_needed_arg *arg = void_arg;
1184
1185 if (ent->timestamp < arg->since)
1186 return -1;
1187
1188 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1189 return 0;
1190}
1191
1192/**
 1193 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1194 * @dev: Device of interest
1195 *
1196 * This function examines error ring of @dev and determines
1197 * whether speed down is necessary. Speed down is necessary if
 1198 * there have been more than 3 Cat-1 errors or more than 10 Cat-2
 1199 * errors during the last 15 minutes.
 1200 *
 1201 * Cat-1 errors are ATA_BUS or TIMEOUT for any command and HSM
 1202 * violations for known supported commands.
 1203 *
 1204 * Cat-2 errors are unclassified DEV errors for known supported
 1205 * commands.
1206 *
1207 * LOCKING:
1208 * Inherited from caller.
1209 *
1210 * RETURNS:
1211 * 1 if speed down is necessary, 0 otherwise
1212 */
1213static int ata_eh_speed_down_needed(struct ata_device *dev)
1214{
1215 const u64 interval = 15LLU * 60 * HZ;
1216 static const int err_limits[3] = { -1, 3, 10 };
1217 struct speed_down_needed_arg arg;
1218 struct ata_ering_entry *ent;
1219 int err_cat;
1220 u64 j64;
1221
1222 ent = ata_ering_top(&dev->ering);
1223 if (!ent)
1224 return 0;
1225
1226 err_cat = ata_eh_categorize_ering_entry(ent);
1227 if (err_cat == 0)
1228 return 0;
1229
1230 memset(&arg, 0, sizeof(arg));
1231
1232 j64 = get_jiffies_64();
1233 if (j64 >= interval)
1234 arg.since = j64 - interval;
1235 else
1236 arg.since = 0;
1237
1238 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1239
1240 return arg.nr_errors[err_cat] > err_limits[err_cat];
1241}
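/*
 * Worked example (derived from the code above): if the newest ring
 * entry is a timeout (Cat-1) and four Cat-1 errors are younger than
 * 15 minutes, nr_errors[1] == 4 exceeds err_limits[1] == 3 and speed
 * down is requested.
 */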
1242
1243/**
1244 * ata_eh_speed_down - record error and speed down if necessary
1245 * @dev: Failed device
1246 * @is_io: Did the device fail during normal IO?
1247 * @err_mask: err_mask of the error
1248 *
1249 * Record error and examine error history to determine whether
1250 * adjusting transmission speed is necessary. It also sets
1251 * transmission limits appropriately if such adjustment is
1252 * necessary.
1253 *
1254 * LOCKING:
1255 * Kernel thread context (may sleep).
1256 *
1257 * RETURNS:
1258 * 0 on success, -errno otherwise
1259 */
1260static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1261 unsigned int err_mask)
1262{
1263 if (!err_mask)
1264 return 0;
1265
1266 /* record error and determine whether speed down is necessary */
1267 ata_ering_record(&dev->ering, is_io, err_mask);
1268
1269 if (!ata_eh_speed_down_needed(dev))
1270 return 0;
1271
1272 /* speed down SATA link speed if possible */
1273 if (sata_down_spd_limit(dev->ap) == 0)
1274 return ATA_EH_HARDRESET;
1275
1276 /* lower transfer mode */
1277 if (ata_down_xfermask_limit(dev, 0) == 0)
1278 return ATA_EH_SOFTRESET;
1279
1280 ata_dev_printk(dev, KERN_ERR,
1281 "speed down requested but no transfer mode left\n");
1282 return 0;
1283}
1284
1285/**
1286 * ata_eh_autopsy - analyze error and determine recovery action
1287 * @ap: ATA port to perform autopsy on
1288 *
1289 * Analyze why @ap failed and determine which recovery action is
1290 * needed. This function also sets more detailed AC_ERR_* values
1291 * and fills sense data for ATAPI CHECK SENSE.
1292 *
1293 * LOCKING:
1294 * Kernel thread context (may sleep).
1295 */
1296static void ata_eh_autopsy(struct ata_port *ap)
1297{
1298 struct ata_eh_context *ehc = &ap->eh_context;
1299 unsigned int all_err_mask = 0;
1300 int tag, is_io = 0;
1301 u32 serror;
1302 int rc;
1303
1304 DPRINTK("ENTER\n");
1305
1306 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1307 return;
1308
1309 /* obtain and analyze SError */
1310 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1311 if (rc == 0) {
1312 ehc->i.serror |= serror;
1313 ata_eh_analyze_serror(ap);
1314 } else if (rc != -EOPNOTSUPP)
1315 ehc->i.action |= ATA_EH_HARDRESET;
1316
1317 /* analyze NCQ failure */
1318 ata_eh_analyze_ncq_error(ap);
1319
1320 /* any real error trumps AC_ERR_OTHER */
1321 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1322 ehc->i.err_mask &= ~AC_ERR_OTHER;
1323
1324 all_err_mask |= ehc->i.err_mask;
1325
1326 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1327 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1328
1329 if (!(qc->flags & ATA_QCFLAG_FAILED))
1330 continue;
1331
1332 /* inherit upper level err_mask */
1333 qc->err_mask |= ehc->i.err_mask;
1334
1335 /* analyze TF */
1336 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1337
1338 /* DEV errors are probably spurious in case of ATA_BUS error */
1339 if (qc->err_mask & AC_ERR_ATA_BUS)
1340 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1341 AC_ERR_INVALID);
1342
1343 /* any real error trumps unknown error */
1344 if (qc->err_mask & ~AC_ERR_OTHER)
1345 qc->err_mask &= ~AC_ERR_OTHER;
1346
1347 /* SENSE_VALID trumps dev/unknown error and revalidation */
1348 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1349 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1350 ehc->i.action &= ~ATA_EH_REVALIDATE;
1351 }
1352
1353 /* accumulate error info */
1354 ehc->i.dev = qc->dev;
1355 all_err_mask |= qc->err_mask;
1356 if (qc->flags & ATA_QCFLAG_IO)
1357 is_io = 1;
1358 }
1359
1360 /* enforce default EH actions */
1361 if (ap->pflags & ATA_PFLAG_FROZEN ||
1362 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1363 ehc->i.action |= ATA_EH_SOFTRESET;
1364 else if (all_err_mask)
1365 ehc->i.action |= ATA_EH_REVALIDATE;
1366
1367 /* if we have offending qcs and the associated failed device */
1368 if (ehc->i.dev) {
1369 /* speed down */
1370 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1371 all_err_mask);
1372
1373 /* perform per-dev EH action only on the offending device */
1374 ehc->i.dev_action[ehc->i.dev->devno] |=
1375 ehc->i.action & ATA_EH_PERDEV_MASK;
1376 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1377 }
1378
1379 DPRINTK("EXIT\n");
1380}
1381
1382/**
1383 * ata_eh_report - report error handling to user
1384 * @ap: ATA port EH is going on
1385 *
1386 * Report EH to user.
1387 *
1388 * LOCKING:
1389 * None.
1390 */
1391static void ata_eh_report(struct ata_port *ap)
1392{
1393 struct ata_eh_context *ehc = &ap->eh_context;
1394 const char *frozen, *desc;
1395 int tag, nr_failed = 0;
1396
1397 desc = NULL;
1398 if (ehc->i.desc[0] != '\0')
1399 desc = ehc->i.desc;
1400
1401 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1402 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1403
1404 if (!(qc->flags & ATA_QCFLAG_FAILED))
1405 continue;
1406 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1407 continue;
1408
1409 nr_failed++;
1410 }
1411
1412 if (!nr_failed && !ehc->i.err_mask)
1413 return;
1414
1415 frozen = "";
1416 if (ap->pflags & ATA_PFLAG_FROZEN)
1417 frozen = " frozen";
1418
1419 if (ehc->i.dev) {
1420 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1421 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1422 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1423 ehc->i.action, frozen);
1424 if (desc)
1425 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1426 } else {
1427 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1428 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1429 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1430 ehc->i.action, frozen);
1431 if (desc)
1432 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1433 }
1434
1435 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1437
1438 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1439 continue;
1440
1441 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1442 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1443 qc->tag, qc->tf.command, qc->err_mask,
1444 qc->result_tf.command, qc->result_tf.feature,
1445 ata_err_string(qc->err_mask));
1446 }
1447}
1448
1449static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1450 unsigned int *classes)
1451{
1452 int i, rc;
1453
1454 for (i = 0; i < ATA_MAX_DEVICES; i++)
1455 classes[i] = ATA_DEV_UNKNOWN;
1456
1457 rc = reset(ap, classes);
1458 if (rc)
1459 return rc;
1460
 1461 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
 1462 * complete and convert all ATA_DEV_UNKNOWN to
 1463 * ATA_DEV_NONE.
1464 */
1465 for (i = 0; i < ATA_MAX_DEVICES; i++)
1466 if (classes[i] != ATA_DEV_UNKNOWN)
1467 break;
1468
1469 if (i < ATA_MAX_DEVICES)
1470 for (i = 0; i < ATA_MAX_DEVICES; i++)
1471 if (classes[i] == ATA_DEV_UNKNOWN)
1472 classes[i] = ATA_DEV_NONE;
1473
1474 return 0;
1475}
1476
1477static int ata_eh_followup_srst_needed(int rc, int classify,
1478 const unsigned int *classes)
1479{
1480 if (rc == -EAGAIN)
1481 return 1;
1482 if (rc != 0)
1483 return 0;
1484 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1485 return 1;
1486 return 0;
1487}
1488
1489static int ata_eh_reset(struct ata_port *ap, int classify,
1490 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1491 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1492{
1493 struct ata_eh_context *ehc = &ap->eh_context;
1494 unsigned int *classes = ehc->classes;
1495 int tries = ATA_EH_RESET_TRIES;
1496 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1497 unsigned int action;
1498 ata_reset_fn_t reset;
1499 int i, did_followup_srst, rc;
1500
1501 /* about to reset */
1502 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1503
1504 /* Determine which reset to use and record in ehc->i.action.
1505 * prereset() may examine and modify it.
1506 */
1507 action = ehc->i.action;
1508 ehc->i.action &= ~ATA_EH_RESET_MASK;
1509 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1510 !(action & ATA_EH_HARDRESET))))
1511 ehc->i.action |= ATA_EH_SOFTRESET;
1512 else
1513 ehc->i.action |= ATA_EH_HARDRESET;
1514
1515 if (prereset) {
1516 rc = prereset(ap);
1517 if (rc) {
1518 ata_port_printk(ap, KERN_ERR,
1519 "prereset failed (errno=%d)\n", rc);
1520 return rc;
1521 }
1522 }
1523
1524 /* prereset() might have modified ehc->i.action */
1525 if (ehc->i.action & ATA_EH_HARDRESET)
1526 reset = hardreset;
1527 else if (ehc->i.action & ATA_EH_SOFTRESET)
1528 reset = softreset;
1529 else {
1530 /* prereset told us not to reset, bang classes and return */
1531 for (i = 0; i < ATA_MAX_DEVICES; i++)
1532 classes[i] = ATA_DEV_NONE;
1533 return 0;
1534 }
1535
1536 /* did prereset() screw up? if so, fix up to avoid oopsing */
1537 if (!reset) {
1538 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1539 "invalid reset type\n");
1540 if (softreset)
1541 reset = softreset;
1542 else
1543 reset = hardreset;
1544 }
1545
1546 retry:
1547 /* shut up during boot probing */
1548 if (verbose)
1549 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1550 reset == softreset ? "soft" : "hard");
1551
1552 /* mark that this EH session started with reset */
1553 ehc->i.flags |= ATA_EHI_DID_RESET;
1554
1555 rc = ata_do_reset(ap, reset, classes);
1556
1557 did_followup_srst = 0;
1558 if (reset == hardreset &&
1559 ata_eh_followup_srst_needed(rc, classify, classes)) {
1560 /* okay, let's do follow-up softreset */
1561 did_followup_srst = 1;
1562 reset = softreset;
1563
1564 if (!reset) {
1565 ata_port_printk(ap, KERN_ERR,
1566 "follow-up softreset required "
1567 "but no softreset avaliable\n");
1568 return -EINVAL;
1569 }
1570
1571 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1572 rc = ata_do_reset(ap, reset, classes);
1573
1574 if (rc == 0 && classify &&
1575 classes[0] == ATA_DEV_UNKNOWN) {
1576 ata_port_printk(ap, KERN_ERR,
1577 "classification failed\n");
1578 return -EINVAL;
1579 }
1580 }
1581
1582 if (rc && --tries) {
1583 const char *type;
1584
1585 if (reset == softreset) {
1586 if (did_followup_srst)
1587 type = "follow-up soft";
1588 else
1589 type = "soft";
1590 } else
1591 type = "hard";
1592
1593 ata_port_printk(ap, KERN_WARNING,
1594 "%sreset failed, retrying in 5 secs\n", type);
1595 ssleep(5);
1596
1597 if (reset == hardreset)
1598 sata_down_spd_limit(ap);
1599 if (hardreset)
1600 reset = hardreset;
1601 goto retry;
1602 }
1603
1604 if (rc == 0) {
1605 /* After the reset, the device state is PIO 0 and the
1606 * controller state is undefined. Record the mode.
1607 */
1608 for (i = 0; i < ATA_MAX_DEVICES; i++)
1609 ap->device[i].pio_mode = XFER_PIO_0;
1610
1611 if (postreset)
1612 postreset(ap, classes);
1613
1614 /* reset successful, schedule revalidation */
1615 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1616 ehc->i.action |= ATA_EH_REVALIDATE;
1617 }
1618
1619 return rc;
1620}
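/*
 * Sketch (hypothetical): a prereset method can steer the choice
 * recorded in ehc->i.action before the logic above picks a reset,
 * e.g. forcing hardreset when the PHY needs retraining.
 * my_phy_needs_retrain() is a made-up predicate.
 *
 *	static int my_prereset(struct ata_port *ap)
 *	{
 *		if (my_phy_needs_retrain(ap))
 *			ap->eh_context.i.action |= ATA_EH_HARDRESET;
 *		return ata_std_prereset(ap);
 *	}
 */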
1621
1622static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1623 struct ata_device **r_failed_dev)
1624{
1625 struct ata_eh_context *ehc = &ap->eh_context;
1626 struct ata_device *dev;
1627 unsigned long flags;
1628 int i, rc = 0;
1629
1630 DPRINTK("ENTER\n");
1631
1632 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1633 unsigned int action;
1634
1635 dev = &ap->device[i];
1636 action = ata_eh_dev_action(dev);
1637
1638 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1639 if (ata_port_offline(ap)) {
1640 rc = -EIO;
1641 break;
1642 }
1643
1644 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1645 rc = ata_dev_revalidate(dev,
1646 ehc->i.flags & ATA_EHI_DID_RESET);
1647 if (rc)
1648 break;
1649
1650 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1651
1652 /* schedule the scsi_rescan_device() here */
1653 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1654 } else if (dev->class == ATA_DEV_UNKNOWN &&
1655 ehc->tries[dev->devno] &&
1656 ata_class_enabled(ehc->classes[dev->devno])) {
1657 dev->class = ehc->classes[dev->devno];
1658
1659 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1660 if (rc == 0)
1661 rc = ata_dev_configure(dev, 1);
1662
1663 if (rc) {
1664 dev->class = ATA_DEV_UNKNOWN;
1665 break;
1666 }
1667
1668 spin_lock_irqsave(ap->lock, flags);
1669 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1670 spin_unlock_irqrestore(ap->lock, flags);
1671 }
1672 }
1673
1674 if (rc)
1675 *r_failed_dev = dev;
1676
1677 DPRINTK("EXIT\n");
1678 return rc;
1679}
1680
1681/**
1682 * ata_eh_suspend - handle suspend EH action
1683 * @ap: target host port
1684 * @r_failed_dev: result parameter to indicate failing device
1685 *
 1686 * Handle suspend EH action. Disk devices are spun down and
1687 * other types of devices are just marked suspended. Once
1688 * suspended, no EH action to the device is allowed until it is
1689 * resumed.
1690 *
1691 * LOCKING:
1692 * Kernel thread context (may sleep).
1693 *
1694 * RETURNS:
1695 * 0 on success, -errno otherwise
1696 */
1697static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1698{
1699 struct ata_device *dev;
1700 int i, rc = 0;
1701
1702 DPRINTK("ENTER\n");
1703
1704 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1705 unsigned long flags;
1706 unsigned int action, err_mask;
1707
1708 dev = &ap->device[i];
1709 action = ata_eh_dev_action(dev);
1710
1711 if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1712 continue;
1713
1714 WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1715
1716 ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1717
1718 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1719 /* flush cache */
1720 rc = ata_flush_cache(dev);
1721 if (rc)
1722 break;
1723
1724 /* spin down */
1725 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1726 if (err_mask) {
1727 ata_dev_printk(dev, KERN_ERR, "failed to "
1728 "spin down (err_mask=0x%x)\n",
1729 err_mask);
1730 rc = -EIO;
1731 break;
1732 }
1733 }
1734
1735 spin_lock_irqsave(ap->lock, flags);
1736 dev->flags |= ATA_DFLAG_SUSPENDED;
1737 spin_unlock_irqrestore(ap->lock, flags);
1738
1739 ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1740 }
1741
1742 if (rc)
1743 *r_failed_dev = dev;
1744
1745 DPRINTK("EXIT\n");
 1746 return rc;
1747}
1748
1749/**
1750 * ata_eh_prep_resume - prep for resume EH action
1751 * @ap: target host port
1752 *
1753 * Clear SUSPENDED in preparation for scheduled resume actions.
1754 * This allows other parts of EH to access the devices being
1755 * resumed.
1756 *
1757 * LOCKING:
1758 * Kernel thread context (may sleep).
1759 */
1760static void ata_eh_prep_resume(struct ata_port *ap)
1761{
1762 struct ata_device *dev;
1763 unsigned long flags;
1764 int i;
1765
1766 DPRINTK("ENTER\n");
1767
1768 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1769 unsigned int action;
1770
1771 dev = &ap->device[i];
1772 action = ata_eh_dev_action(dev);
1773
1774 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1775 continue;
1776
1777 spin_lock_irqsave(ap->lock, flags);
1778 dev->flags &= ~ATA_DFLAG_SUSPENDED;
1779 spin_unlock_irqrestore(ap->lock, flags);
1780 }
1781
1782 DPRINTK("EXIT\n");
1783}
1784
1785/**
1786 * ata_eh_resume - handle resume EH action
1787 * @ap: target host port
1788 * @r_failed_dev: result parameter to indicate failing device
1789 *
1790 * Handle resume EH action. Target devices are already reset and
1791 * revalidated. Spinning up is the only operation left.
1792 *
1793 * LOCKING:
1794 * Kernel thread context (may sleep).
1795 *
1796 * RETURNS:
1797 * 0 on success, -errno otherwise
1798 */
1799static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1800{
1801 struct ata_device *dev;
1802 int i, rc = 0;
1803
1804 DPRINTK("ENTER\n");
1805
1806 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1807 unsigned int action, err_mask;
1808
1809 dev = &ap->device[i];
1810 action = ata_eh_dev_action(dev);
1811
1812 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1813 continue;
1814
1815 ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1816
1817 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1818 err_mask = ata_do_simple_cmd(dev,
1819 ATA_CMD_IDLEIMMEDIATE);
1820 if (err_mask) {
1821 ata_dev_printk(dev, KERN_ERR, "failed to "
1822 "spin up (err_mask=0x%x)\n",
1823 err_mask);
1824 rc = -EIO;
1825 break;
1826 }
1827 }
1828
1829 ata_eh_done(ap, dev, ATA_EH_RESUME);
1830 }
1831
1832 if (rc)
1833 *r_failed_dev = dev;
1834
1835 DPRINTK("EXIT\n");
 1836 return rc;
1837}
1838
1839static int ata_port_nr_enabled(struct ata_port *ap)
1840{
1841 int i, cnt = 0;
1842
1843 for (i = 0; i < ATA_MAX_DEVICES; i++)
1844 if (ata_dev_enabled(&ap->device[i]))
1845 cnt++;
1846 return cnt;
1847}
1848
1849static int ata_port_nr_vacant(struct ata_port *ap)
1850{
1851 int i, cnt = 0;
1852
1853 for (i = 0; i < ATA_MAX_DEVICES; i++)
1854 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1855 cnt++;
1856 return cnt;
1857}
1858
1859static int ata_eh_skip_recovery(struct ata_port *ap)
1860{
1861 struct ata_eh_context *ehc = &ap->eh_context;
1862 int i;
1863
1864 /* skip if all possible devices are suspended */
1865 for (i = 0; i < ata_port_max_devices(ap); i++) {
1866 struct ata_device *dev = &ap->device[i];
1867
1868 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1869 break;
1870 }
1871
1872 if (i == ata_port_max_devices(ap))
1873 return 1;
1874
1875 /* thaw frozen port, resume link and recover failed devices */
1876 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1877 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1878 return 0;
1879
1880 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1881 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1882 struct ata_device *dev = &ap->device[i];
1883
1884 if (dev->class == ATA_DEV_UNKNOWN &&
1885 ehc->classes[dev->devno] != ATA_DEV_NONE)
1886 return 0;
1887 }
1888
1889 return 1;
1890}
1891
1892/**
1893 * ata_eh_recover - recover host port after error
1894 * @ap: host port to recover
1895 * @prereset: prereset method (can be NULL)
1896 * @softreset: softreset method (can be NULL)
1897 * @hardreset: hardreset method (can be NULL)
1898 * @postreset: postreset method (can be NULL)
1899 *
 1900 * This is the alpha and omega, yin and yang, heart and soul of
 1901 * libata exception handling. On entry, actions required to
 1902 * recover the port and hotplug requests are recorded in
 1903 * eh_context. This function executes all the operations with
 1904 * appropriate retries and fallbacks to resurrect failed
1905 * devices, detach goners and greet newcomers.
1906 *
1907 * LOCKING:
1908 * Kernel thread context (may sleep).
1909 *
1910 * RETURNS:
1911 * 0 on success, -errno on failure.
1912 */
1913static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1914 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1915 ata_postreset_fn_t postreset)
1916{
1917 struct ata_eh_context *ehc = &ap->eh_context;
1918 struct ata_device *dev;
1919 int down_xfermask, i, rc;
1920
1921 DPRINTK("ENTER\n");
1922
1923 /* prep for recovery */
1924 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1925 dev = &ap->device[i];
1926
1927 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1928
1929 /* process hotplug request */
1930 if (dev->flags & ATA_DFLAG_DETACH)
1931 ata_eh_detach_dev(dev);
1932
1933 if (!ata_dev_enabled(dev) &&
1934 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1935 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1936 ata_eh_detach_dev(dev);
1937 ata_dev_init(dev);
1938 ehc->did_probe_mask |= (1 << dev->devno);
1939 ehc->i.action |= ATA_EH_SOFTRESET;
1940 }
1941 }
1942
1943 retry:
1944 down_xfermask = 0;
1945 rc = 0;
1946
1947 /* if UNLOADING, finish immediately */
1948 if (ap->pflags & ATA_PFLAG_UNLOADING)
1949 goto out;
1950
1951 /* prep for resume */
1952 ata_eh_prep_resume(ap);
1953
1954 /* skip EH if possible. */
1955 if (ata_eh_skip_recovery(ap))
1956 ehc->i.action = 0;
1957
1958 for (i = 0; i < ATA_MAX_DEVICES; i++)
1959 ehc->classes[i] = ATA_DEV_UNKNOWN;
1960
1961 /* reset */
1962 if (ehc->i.action & ATA_EH_RESET_MASK) {
1963 ata_eh_freeze_port(ap);
1964
1965 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1966 softreset, hardreset, postreset);
1967 if (rc) {
1968 ata_port_printk(ap, KERN_ERR,
1969 "reset failed, giving up\n");
1970 goto out;
1971 }
1972
1973 ata_eh_thaw_port(ap);
1974 }
1975
1976 /* revalidate existing devices and attach new ones */
1977 rc = ata_eh_revalidate_and_attach(ap, &dev);
1978 if (rc)
1979 goto dev_fail;
1980
1981 /* resume devices */
1982 rc = ata_eh_resume(ap, &dev);
1983 if (rc)
1984 goto dev_fail;
1985
1986 /* configure transfer mode if the port has been reset */
1987 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1988 rc = ata_set_mode(ap, &dev);
1989 if (rc) {
1990 down_xfermask = 1;
1991 goto dev_fail;
1992 }
1993 }
1994
1995 /* suspend devices */
1996 rc = ata_eh_suspend(ap, &dev);
1997 if (rc)
1998 goto dev_fail;
1999
2000 goto out;
2001
2002 dev_fail:
2003 switch (rc) {
2004 case -ENODEV:
2005 /* device missing, schedule probing */
2006 ehc->i.probe_mask |= (1 << dev->devno);
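	/* fall through */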
2007 case -EINVAL:
2008 ehc->tries[dev->devno] = 0;
2009 break;
2010 case -EIO:
2011 sata_down_spd_limit(ap);
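	/* fall through */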
2012 default:
2013 ehc->tries[dev->devno]--;
2014 if (down_xfermask &&
2015 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
2016 ehc->tries[dev->devno] = 0;
2017 }
2018
2019 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
2020 /* disable device if it has used up all its chances */
2021 ata_dev_disable(dev);
2022
2023 /* detach if offline */
2024 if (ata_port_offline(ap))
2025 ata_eh_detach_dev(dev);
2026
2027 /* probe if requested */
2028 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
2029 !(ehc->did_probe_mask & (1 << dev->devno))) {
2030 ata_eh_detach_dev(dev);
2031 ata_dev_init(dev);
2032
2033 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2034 ehc->did_probe_mask |= (1 << dev->devno);
2035 ehc->i.action |= ATA_EH_SOFTRESET;
2036 }
2037 } else {
2038 /* soft didn't work? be haaaaard */
2039 if (ehc->i.flags & ATA_EHI_DID_RESET)
2040 ehc->i.action |= ATA_EH_HARDRESET;
2041 else
2042 ehc->i.action |= ATA_EH_SOFTRESET;
2043 }
2044
2045 if (ata_port_nr_enabled(ap)) {
2046 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
2047 "devices, retrying in 5 secs\n");
2048 ssleep(5);
2049 } else {
2050 /* no device left, repeat fast */
2051 msleep(500);
2052 }
2053
2054 goto retry;
2055
2056 out:
2057 if (rc) {
2058 for (i = 0; i < ATA_MAX_DEVICES; i++)
2059 ata_dev_disable(&ap->device[i]);
2060 }
2061
2062 DPRINTK("EXIT, rc=%d\n", rc);
2063 return rc;
2064}
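
The dev_fail policy above reduces to a small table: -ENODEV and -EINVAL exhaust the device's retries at once, -EIO lowers the SATA link speed before retrying, and any other error just burns one retry, giving up early if a transfer-mode downgrade has nothing slower to offer. A standalone sketch of that decision (the helper and counter are made-up stand-ins, not libata APIs):

#include <errno.h>
#include <stdio.h>

/* stand-in for ata_down_xfermask_limit(): pretend nothing slower exists */
static int downgrade_xfermask(int force_pio0) { (void)force_pio0; return -1; }

static int tries = 3;                           /* ATA_EH_DEV_TRIES stand-in */

static void on_recovery_failure(int rc, int down_xfermask)
{
        switch (rc) {
        case -ENODEV:           /* device vanished: give up, probe later */
        case -EINVAL:           /* translation bug: retrying is pointless */
                tries = 0;
                break;
        case -EIO:              /* bad link: sata_down_spd_limit() runs here */
                /* fall through */
        default:
                tries--;
                if (down_xfermask && downgrade_xfermask(tries == 1))
                        tries = 0;      /* nothing slower left: give up */
        }
}

int main(void)
{
        on_recovery_failure(-EIO, 0);
        printf("tries left after -EIO: %d\n", tries);   /* prints 2 */
        return 0;
}
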
2065
2066/**
2067 * ata_eh_finish - finish up EH
2068 * @ap: host port to finish EH for
2069 *
2070 * Recovery is complete. Clean up EH states and retry or finish
2071 * failed qcs.
2072 *
2073 * LOCKING:
2074 * None.
2075 */
2076static void ata_eh_finish(struct ata_port *ap)
2077{
2078 int tag;
2079
2080 /* retry or finish qcs */
2081 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2082 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2083
2084 if (!(qc->flags & ATA_QCFLAG_FAILED))
2085 continue;
2086
2087 if (qc->err_mask) {
2088 /* FIXME: Once EH migration is complete,
2089 * generate sense data in this function,
2090 * considering both err_mask and tf.
2091 */
2092 if (qc->err_mask & AC_ERR_INVALID)
2093 ata_eh_qc_complete(qc);
2094 else
2095 ata_eh_qc_retry(qc);
2096 } else {
2097 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
2098 ata_eh_qc_complete(qc);
2099 } else {
2100 /* feed zero TF to sense generation */
2101 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
2102 ata_eh_qc_retry(qc);
2103 }
2104 }
2105 }
2106}
2107
2108/**
2109 * ata_do_eh - do standard error handling
2110 * @ap: host port to handle error for
2111 * @prereset: prereset method (can be NULL)
2112 * @softreset: softreset method (can be NULL)
2113 * @hardreset: hardreset method (can be NULL)
2114 * @postreset: postreset method (can be NULL)
2115 *
2116 * Perform standard error handling sequence.
2117 *
2118 * LOCKING:
2119 * Kernel thread context (may sleep).
2120 */
2121void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2122 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2123 ata_postreset_fn_t postreset)
2124{
2125 ata_eh_autopsy(ap);
2126 ata_eh_report(ap);
2127 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
2128 ata_eh_finish(ap);
2129}
2130
2131/**
2132 * ata_eh_handle_port_suspend - perform port suspend operation
2133 * @ap: port to suspend
2134 *
2135 * Suspend @ap.
2136 *
2137 * LOCKING:
2138 * Kernel thread context (may sleep).
2139 */
2140static void ata_eh_handle_port_suspend(struct ata_port *ap)
2141{
2142 unsigned long flags;
2143 int rc = 0;
2144
2145 /* are we suspending? */
2146 spin_lock_irqsave(ap->lock, flags);
2147 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2148 ap->pm_mesg.event == PM_EVENT_ON) {
2149 spin_unlock_irqrestore(ap->lock, flags);
2150 return;
2151 }
2152 spin_unlock_irqrestore(ap->lock, flags);
2153
2154 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2155
2156 /* suspend */
2157 ata_eh_freeze_port(ap);
2158
2159 if (ap->ops->port_suspend)
2160 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2161
2162 /* report result */
2163 spin_lock_irqsave(ap->lock, flags);
2164
2165 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2166 if (rc == 0)
2167 ap->pflags |= ATA_PFLAG_SUSPENDED;
2168 else
2169 ata_port_schedule_eh(ap);
2170
2171 if (ap->pm_result) {
2172 *ap->pm_result = rc;
2173 ap->pm_result = NULL;
2174 }
2175
2176 spin_unlock_irqrestore(ap->lock, flags);
2177
2178 return;
2179}
2180
2181/**
2182 * ata_eh_handle_port_resume - perform port resume operation
2183 * @ap: port to resume
2184 *
2185 * Resume @ap.
2186 *
2187 * This function also waits up to one second until all devices
2188 * hanging off this port request the resume EH action. This is to
2189 * prevent invoking EH, and thus resetting, multiple times on resume.
2190 *
2191 * On DPM resume, where some of the devices might not be resumed
2192 * together, this may delay port resume by up to one second, but
2193 * such DPM resumes are rare and a one second delay isn't too bad.
2194 *
2195 * LOCKING:
2196 * Kernel thread context (may sleep).
2197 */
2198static void ata_eh_handle_port_resume(struct ata_port *ap)
2199{
2200 unsigned long timeout;
2201 unsigned long flags;
2202 int i, rc = 0;
2203
2204 /* are we resuming? */
2205 spin_lock_irqsave(ap->lock, flags);
2206 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2207 ap->pm_mesg.event != PM_EVENT_ON) {
2208 spin_unlock_irqrestore(ap->lock, flags);
2209 return;
2210 }
2211 spin_unlock_irqrestore(ap->lock, flags);
2212
2213 /* spurious? */
2214 if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
2215 goto done;
2216
2217 if (ap->ops->port_resume)
2218 rc = ap->ops->port_resume(ap);
2219
2220 /* give devices time to request EH */
2221 timeout = jiffies + HZ; /* 1s max */
2222 while (1) {
2223 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2224 struct ata_device *dev = &ap->device[i];
2225 unsigned int action = ata_eh_dev_action(dev);
2226
2227 if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
2228 !(action & ATA_EH_RESUME))
2229 break;
2230 }
2231
2232 if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
2233 break;
2234 msleep(10);
2235 }
2236
2237 done:
2238 spin_lock_irqsave(ap->lock, flags);
2239 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
2240 if (ap->pm_result) {
2241 *ap->pm_result = rc;
2242 ap->pm_result = NULL;
2243 }
2244 spin_unlock_irqrestore(ap->lock, flags);
2245}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
new file mode 100644
index 000000000000..3986ec8741b4
--- /dev/null
+++ b/drivers/ata/libata-scsi.c
@@ -0,0 +1,3322 @@
1/*
2 * libata-scsi.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <asm/uaccess.h>
49
50#include "libata.h"
51
52#define SECTOR_SIZE 512
53
54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
55
56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
57 const struct scsi_device *scsidev);
58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
63
64#define RW_RECOVERY_MPAGE 0x1
65#define RW_RECOVERY_MPAGE_LEN 12
66#define CACHE_MPAGE 0x8
67#define CACHE_MPAGE_LEN 20
68#define CONTROL_MPAGE 0xa
69#define CONTROL_MPAGE_LEN 12
70#define ALL_MPAGES 0x3f
71#define ALL_SUB_MPAGES 0xff
72
73
74static const u8 def_rw_recovery_mpage[] = {
75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2,
77 (1 << 7) | /* AWRE, sat-r06 says it shall be 0 */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */
80 0, 0, 0, 0,
81 0, /* write retry count */
82 0, 0, 0
83};
84
85static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
86 CACHE_MPAGE,
87 CACHE_MPAGE_LEN - 2,
88 0, /* contains WCE, needs to be 0 for logic */
89 0, 0, 0, 0, 0, 0, 0, 0, 0,
90 0, /* contains DRA, needs to be 0 for logic */
91 0, 0, 0, 0, 0, 0, 0
92};
93
94static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
95 CONTROL_MPAGE,
96 CONTROL_MPAGE_LEN - 2,
97 2, /* DSENSE=0, GLTSD=1 */
98 0, /* [QAM+QERR may be 1, see 05-359r1] */
99 0, 0, 0, 0, 0xff, 0xff,
100 0, 30 /* extended self test time, see 05-359r1 */
101};
102
103/*
104 * libata transport template. libata doesn't do real transport stuff.
105 * It just needs the eh_timed_out hook.
106 */
107struct scsi_transport_template ata_scsi_transport_template = {
108 .eh_strategy_handler = ata_scsi_error,
109 .eh_timed_out = ata_scsi_timed_out,
110 .user_scan = ata_scsi_user_scan,
111};
112
113
114static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
115 void (*done)(struct scsi_cmnd *))
116{
117 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
118 /* "Invalid field in CDB" */
119 done(cmd);
120}
121
122/**
123 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
124 * @sdev: SCSI device for which BIOS geometry is to be determined
125 * @bdev: block device associated with @sdev
126 * @capacity: capacity of SCSI device
127 * @geom: location to which geometry will be output
128 *
129 * Generic BIOS head/sector/cylinder calculator
130 * used by sd. Most BIOSes nowadays expect an XXX/255/63 (CHS)
131 * mapping, which is what this function reports. Some situations
132 * may arise where the disk is not bootable if this is not used.
133 *
134 * LOCKING:
135 * Defined by the SCSI layer. We don't really care.
136 *
137 * RETURNS:
138 * Zero.
139 */
140int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
141 sector_t capacity, int geom[])
142{
143 geom[0] = 255;
144 geom[1] = 63;
145 sector_div(capacity, 255*63);
146 geom[2] = capacity;
147
148 return 0;
149}
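
As a worked instance of the mapping above, a hypothetical disk of 78,125,000 sectors (roughly 40 GB) yields 78125000 / (255 * 63) = 4863, so the reported geometry is 4863/255/63:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t capacity = 78125000ULL;        /* hypothetical ~40 GB disk */
        unsigned int heads = 255, sectors = 63;
        unsigned int cylinders = capacity / (heads * sectors);

        /* prints "geometry: 4863/255/63" */
        printf("geometry: %u/%u/%u\n", cylinders, heads, sectors);
        return 0;
}
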
150
151/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{
165 int rc = 0;
166 u8 scsi_cmd[MAX_COMMAND_SIZE];
167 u8 args[4], *argbuf = NULL;
168 int argsize = 0;
169 struct scsi_sense_hdr sshdr;
170 enum dma_data_direction data_dir;
171
172 if (arg == NULL)
173 return -EINVAL;
174
175 if (copy_from_user(args, arg, sizeof(args)))
176 return -EFAULT;
177
178 memset(scsi_cmd, 0, sizeof(scsi_cmd));
179
180 if (args[3]) {
181 argsize = SECTOR_SIZE * args[3];
182 argbuf = kmalloc(argsize, GFP_KERNEL);
183 if (argbuf == NULL) {
184 rc = -ENOMEM;
185 goto error;
186 }
187
188 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
189 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
190 block count in sector count field */
191 data_dir = DMA_FROM_DEVICE;
192 } else {
193 scsi_cmd[1] = (3 << 1); /* Non-data */
194 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
195 data_dir = DMA_NONE;
196 }
197
198 scsi_cmd[0] = ATA_16;
199
200 scsi_cmd[4] = args[2];
201 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
202 scsi_cmd[6] = args[3];
203 scsi_cmd[8] = args[1];
204 scsi_cmd[10] = 0x4f;
205 scsi_cmd[12] = 0xc2;
206 } else {
207 scsi_cmd[6] = args[1];
208 }
209 scsi_cmd[14] = args[0];
210
211 /* Good values for timeout and retries? Values below
212 from scsi_ioctl_send_command() for default case... */
213 if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
214 &sshdr, (10*HZ), 5)) {
215 rc = -EIO;
216 goto error;
217 }
218
219 /* Need code to retrieve data from check condition? */
220
221 if ((argbuf)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT;
224error:
225 kfree(argbuf);
226 return rc;
227}
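
Seen from user space, the args[] header handled above is: args[0] = ATA command, args[1] = sector count register (sector number for SMART), args[2] = feature register, args[3] = number of 512-byte sectors to read back, with any returned data following the four header bytes. A minimal sketch issuing IDENTIFY DEVICE through this path (the device node is illustrative, and the caller needs CAP_SYS_ADMIN and CAP_SYS_RAWIO per the dispatcher below):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
        /* header: command, nsect, feature, data sectors; data follows */
        unsigned char args[4 + 512];
        int fd = open("/dev/sda", O_RDONLY);    /* illustrative path */

        if (fd < 0)
                return 1;

        memset(args, 0, sizeof(args));
        args[0] = WIN_IDENTIFY;                 /* 0xec, IDENTIFY DEVICE */
        args[3] = 1;                            /* one sector of data back */

        if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0)
                /* IDENTIFY data starts at args[4] */
                printf("identify word 0: 0x%02x%02x\n", args[5], args[4]);

        close(fd);
        return 0;
}
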
228
229/**
230 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
231 * @scsidev: Device to which we are issuing command
232 * @arg: User provided data for issuing command
233 *
234 * LOCKING:
235 * Defined by the SCSI layer. We don't really care.
236 *
237 * RETURNS:
238 * Zero on success, negative errno on error.
239 */
240int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
241{
242 int rc = 0;
243 u8 scsi_cmd[MAX_COMMAND_SIZE];
244 u8 args[7];
245 struct scsi_sense_hdr sshdr;
246
247 if (arg == NULL)
248 return -EINVAL;
249
250 if (copy_from_user(args, arg, sizeof(args)))
251 return -EFAULT;
252
253 memset(scsi_cmd, 0, sizeof(scsi_cmd));
254 scsi_cmd[0] = ATA_16;
255 scsi_cmd[1] = (3 << 1); /* Non-data */
256 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
257 scsi_cmd[4] = args[1];
258 scsi_cmd[6] = args[2];
259 scsi_cmd[8] = args[3];
260 scsi_cmd[10] = args[4];
261 scsi_cmd[12] = args[5];
262 scsi_cmd[14] = args[0];
263
264 /* Good values for timeout and retries? Values below
265 from scsi_ioctl_send_command() for default case... */
266 if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
267 (10*HZ), 5))
268 rc = -EIO;
269
270 /* Need code to retrieve data from check condition? */
271 return rc;
272}
273
274int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
275{
276 int val = -EINVAL, rc = -EINVAL;
277
278 switch (cmd) {
279 case ATA_IOC_GET_IO32:
280 val = 0;
281 if (copy_to_user(arg, &val, 1))
282 return -EFAULT;
283 return 0;
284
285 case ATA_IOC_SET_IO32:
286 val = (unsigned long) arg;
287 if (val != 0)
288 return -EINVAL;
289 return 0;
290
291 case HDIO_DRIVE_CMD:
292 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
293 return -EACCES;
294 return ata_cmd_ioctl(scsidev, arg);
295
296 case HDIO_DRIVE_TASK:
297 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
298 return -EACCES;
299 return ata_task_ioctl(scsidev, arg);
300
301 default:
302 rc = -ENOTTY;
303 break;
304 }
305
306 return rc;
307}
308
309/**
310 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
311 * @dev: ATA device to which the new command is attached
312 * @cmd: SCSI command that originated this ATA command
313 * @done: SCSI command completion function
314 *
315 * Obtain a reference to an unused ata_queued_cmd structure,
316 * which is the basic libata structure representing a single
317 * ATA command sent to the hardware.
318 *
319 * If a command was available, fill in the SCSI-specific
320 * portions of the structure with information on the
321 * current command.
322 *
323 * LOCKING:
324 * spin_lock_irqsave(host lock)
325 *
326 * RETURNS:
327 * Command allocated, or %NULL if none available.
328 */
329struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
330 struct scsi_cmnd *cmd,
331 void (*done)(struct scsi_cmnd *))
332{
333 struct ata_queued_cmd *qc;
334
335 qc = ata_qc_new_init(dev);
336 if (qc) {
337 qc->scsicmd = cmd;
338 qc->scsidone = done;
339
340 if (cmd->use_sg) {
341 qc->__sg = (struct scatterlist *) cmd->request_buffer;
342 qc->n_elem = cmd->use_sg;
343 } else {
344 qc->__sg = &qc->sgent;
345 qc->n_elem = 1;
346 }
347 } else {
348 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
349 done(cmd);
350 }
351
352 return qc;
353}
354
355/**
356 * ata_dump_status - user friendly display of error info
357 * @id: id of the port in question
358 * @tf: ptr to filled out taskfile
359 *
360 * Decode and dump the ATA error/status registers for the user so
361 * that they have some idea what really happened at the
362 * non-make-believe layer.
363 *
364 * LOCKING:
365 * inherited from caller
366 */
367void ata_dump_status(unsigned id, struct ata_taskfile *tf)
368{
369 u8 stat = tf->command, err = tf->feature;
370
371 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
372 if (stat & ATA_BUSY) {
373 printk("Busy }\n"); /* Data is not valid in this case */
374 } else {
375 if (stat & 0x40) printk("DriveReady ");
376 if (stat & 0x20) printk("DeviceFault ");
377 if (stat & 0x10) printk("SeekComplete ");
378 if (stat & 0x08) printk("DataRequest ");
379 if (stat & 0x04) printk("CorrectedError ");
380 if (stat & 0x02) printk("Index ");
381 if (stat & 0x01) printk("Error ");
382 printk("}\n");
383
384 if (err) {
385 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
386 if (err & 0x04) printk("DriveStatusError ");
387 if (err & 0x80) {
388 if (err & 0x04) printk("BadCRC ");
389 else printk("Sector ");
390 }
391 if (err & 0x40) printk("UncorrectableError ");
392 if (err & 0x10) printk("SectorIdNotFound ");
393 if (err & 0x02) printk("TrackZeroNotFound ");
394 if (err & 0x01) printk("AddrMarkNotFound ");
395 printk("}\n");
396 }
397 }
398}
399
400/**
401 * ata_scsi_device_suspend - suspend ATA device associated with sdev
402 * @sdev: the SCSI device to suspend
403 * @mesg: target power management message
404 *
405 * Request suspend EH action on the ATA device associated with
406 * @sdev and wait for the operation to complete.
407 *
408 * LOCKING:
409 * Kernel thread context (may sleep).
410 *
411 * RETURNS:
412 * 0 on success, -errno otherwise.
413 */
414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg)
415{
416 struct ata_port *ap = ata_shost_to_port(sdev->host);
417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
418 unsigned long flags;
419 unsigned int action;
420 int rc = 0;
421
422 if (!dev)
423 goto out;
424
425 spin_lock_irqsave(ap->lock, flags);
426
427 /* wait for the previous resume to complete */
428 while (dev->flags & ATA_DFLAG_SUSPENDED) {
429 spin_unlock_irqrestore(ap->lock, flags);
430 ata_port_wait_eh(ap);
431 spin_lock_irqsave(ap->lock, flags);
432 }
433
434 /* if @sdev is already detached, nothing to do */
435 if (sdev->sdev_state == SDEV_OFFLINE ||
436 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
437 goto out_unlock;
438
439 /* request suspend */
440 action = ATA_EH_SUSPEND;
441 if (mesg.event != PM_EVENT_SUSPEND)
442 action |= ATA_EH_PM_FREEZE;
443 ap->eh_info.dev_action[dev->devno] |= action;
444 ap->eh_info.flags |= ATA_EHI_QUIET;
445 ata_port_schedule_eh(ap);
446
447 spin_unlock_irqrestore(ap->lock, flags);
448
449 /* wait for EH to do the job */
450 ata_port_wait_eh(ap);
451
452 spin_lock_irqsave(ap->lock, flags);
453
454 /* If @sdev is still attached but the associated ATA device
455 * isn't suspended, the operation failed.
456 */
457 if (sdev->sdev_state != SDEV_OFFLINE &&
458 sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
459 !(dev->flags & ATA_DFLAG_SUSPENDED))
460 rc = -EIO;
461
462 out_unlock:
463 spin_unlock_irqrestore(ap->lock, flags);
464 out:
465 if (rc == 0)
466 sdev->sdev_gendev.power.power_state = mesg;
467 return rc;
468}
469
470/**
471 * ata_scsi_device_resume - resume ATA device associated with sdev
472 * @sdev: the SCSI device to resume
473 *
474 * Request resume EH action on the ATA device associated with
475 * @sdev and return immediately. This enables parallel
476 * wakeup/spinup of devices.
477 *
478 * LOCKING:
479 * Kernel thread context (may sleep).
480 *
481 * RETURNS:
482 * 0.
483 */
484int ata_scsi_device_resume(struct scsi_device *sdev)
485{
486 struct ata_port *ap = ata_shost_to_port(sdev->host);
487 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
488 struct ata_eh_info *ehi = &ap->eh_info;
489 unsigned long flags;
490 unsigned int action;
491
492 if (!dev)
493 goto out;
494
495 spin_lock_irqsave(ap->lock, flags);
496
497 /* if @sdev is already detached, nothing to do */
498 if (sdev->sdev_state == SDEV_OFFLINE ||
499 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
500 goto out_unlock;
501
502 /* request resume */
503 action = ATA_EH_RESUME;
504 if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
505 __ata_ehi_hotplugged(ehi);
506 else
507 action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
508 ehi->dev_action[dev->devno] |= action;
509
510 /* We don't want autopsy and verbose EH messages. Disable
511 * those if we're the only device on this link.
512 */
513 if (ata_port_max_devices(ap) == 1)
514 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
515
516 ata_port_schedule_eh(ap);
517
518 out_unlock:
519 spin_unlock_irqrestore(ap->lock, flags);
520 out:
521 sdev->sdev_gendev.power.power_state = PMSG_ON;
522 return 0;
523}
524
525/**
526 * ata_to_sense_error - convert ATA error to SCSI error
527 * @id: ATA device number
528 * @drv_stat: value contained in ATA status register
529 * @drv_err: value contained in ATA error register
530 * @sk: the sense key we'll fill out
531 * @asc: the additional sense code we'll fill out
532 * @ascq: the additional sense code qualifier we'll fill out
533 * @verbose: be verbose
534 *
535 * Converts an ATA error into a SCSI error. Fills out the pointed-to
536 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
537 * format sense blocks.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host lock)
541 */
542void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
543 u8 *ascq, int verbose)
544{
545 int i;
546
547 /* Based on the 3ware driver translation table */
548 static const unsigned char sense_table[][4] = {
549 /* BBD|ECC|ID|MAR */
550 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
551 /* BBD|ECC|ID */
552 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
553 /* ECC|MC|MARK */
554 {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
555 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
556 {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
557 /* MC|ID|ABRT|TRK0|MARK */
558 {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
559 /* MCR|MARK */
560 {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
561 /* Bad address mark */
562 {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
563 /* TRK0 */
564 {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
565 /* Abort & !ICRC */
566 {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
567 /* Media change request */
568 {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
569 /* SRV */
570 {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
571 /* Media change */
572 {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
573 /* ECC */
574 {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
575 /* BBD - block marked bad */
576 {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
577 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
578 };
579 static const unsigned char stat_table[][4] = {
580 /* Must be first because BUSY means no other bits valid */
581 {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
582 {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
583 {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
584 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
585 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
586 };
587
588 /*
589 * Is this an error we can process/parse?
590 */
591 if (drv_stat & ATA_BUSY) {
592 drv_err = 0; /* Ignore the err bits, they're invalid */
593 }
594
595 if (drv_err) {
596 /* Look for drv_err */
597 for (i = 0; sense_table[i][0] != 0xFF; i++) {
598 /* Look for best matches first */
599 if ((sense_table[i][0] & drv_err) ==
600 sense_table[i][0]) {
601 *sk = sense_table[i][1];
602 *asc = sense_table[i][2];
603 *ascq = sense_table[i][3];
604 goto translate_done;
605 }
606 }
607 /* No immediate match */
608 if (verbose)
609 printk(KERN_WARNING "ata%u: no sense translation for "
610 "error 0x%02x\n", id, drv_err);
611 }
612
613 /* Fall back to interpreting status bits */
614 for (i = 0; stat_table[i][0] != 0xFF; i++) {
615 if (stat_table[i][0] & drv_stat) {
616 *sk = stat_table[i][1];
617 *asc = stat_table[i][2];
618 *ascq = stat_table[i][3];
619 goto translate_done;
620 }
621 }
622 /* No error? Undecoded? */
623 if (verbose)
624 printk(KERN_WARNING "ata%u: no sense translation for "
625 "status: 0x%02x\n", id, drv_stat);
626
627 /* We need a sensible error return here, which is tricky, and one
628 that won't cause people to wrongly return a working disk */
629 *sk = ABORTED_COMMAND;
630 *asc = 0x00;
631 *ascq = 0x00;
632
633 translate_done:
634 if (verbose)
635 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
636 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
637 id, drv_stat, drv_err, *sk, *asc, *ascq);
638 return;
639}
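
For example, an uncorrectable read (error register 0x40, the UNC/ECC bit) matches the ECC row above and becomes MEDIUM_ERROR with ASC/ASCQ 0x11/0x04. A trimmed, standalone re-run of the same first-match lookup, with the SCSI sense-key constants written out as their byte values:

#include <stdio.h>

int main(void)
{
        /* two representative rows in the same format as sense_table above */
        static const unsigned char sense_table[][4] = {
                {0x84, 0x0b, 0x47, 0x00},       /* ICRC|ABRT -> ABORTED_COMMAND */
                {0x40, 0x03, 0x11, 0x04},       /* ECC -> MEDIUM_ERROR */
                {0xFF, 0xFF, 0xFF, 0xFF},       /* END mark */
        };
        unsigned char drv_err = 0x40;           /* UNC bit set */
        int i;

        for (i = 0; sense_table[i][0] != 0xFF; i++)
                if ((sense_table[i][0] & drv_err) == sense_table[i][0]) {
                        /* prints "SK/ASC/ASCQ = 03/11/04" */
                        printf("SK/ASC/ASCQ = %02x/%02x/%02x\n",
                               sense_table[i][1], sense_table[i][2],
                               sense_table[i][3]);
                        break;
                }
        return 0;
}
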
640
641/**
642 * ata_gen_ata_desc_sense - Generate check condition sense block.
643 * @qc: Command that completed.
644 *
645 * This function is specific to the ATA descriptor format sense
646 * block specified for the ATA pass-through commands. Regardless
647 * of whether the command errored or not, a sense block is
648 * returned, with all taskfile registers copied into it. The
649 * sense key, ASC & ASCQ are cleared if there is no error.
650 *
651 * LOCKING:
652 * spin_lock_irqsave(host lock)
653 */
654void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
655{
656 struct scsi_cmnd *cmd = qc->scsicmd;
657 struct ata_taskfile *tf = &qc->result_tf;
658 unsigned char *sb = cmd->sense_buffer;
659 unsigned char *desc = sb + 8;
660 int verbose = qc->ap->ops->error_handler == NULL;
661
662 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
663
664 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
665
666 /*
667 * Use ata_to_sense_error() to map status register bits
668 * onto sense key, asc & ascq.
669 */
670 if (qc->err_mask ||
671 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
672 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
673 &sb[1], &sb[2], &sb[3], verbose);
674 sb[1] &= 0x0f;
675 }
676
677 /*
678 * Sense data is current and format is descriptor.
679 */
680 sb[0] = 0x72;
681
682 desc[0] = 0x09;
683
684 /*
685 * Set length of additional sense data.
686 * Since we only populate descriptor 0, the total
687 * length is the same (fixed) length as descriptor 0.
688 */
689 desc[1] = sb[7] = 14;
690
691 /*
692 * Copy registers into sense buffer.
693 */
694 desc[2] = 0x00;
695 desc[3] = tf->feature; /* == error reg */
696 desc[5] = tf->nsect;
697 desc[7] = tf->lbal;
698 desc[9] = tf->lbam;
699 desc[11] = tf->lbah;
700 desc[12] = tf->device;
701 desc[13] = tf->command; /* == status reg */
702
703 /*
704 * Fill in Extend bit, and the high order bytes
705 * if applicable.
706 */
707 if (tf->flags & ATA_TFLAG_LBA48) {
708 desc[2] |= 0x01;
709 desc[4] = tf->hob_nsect;
710 desc[6] = tf->hob_lbal;
711 desc[8] = tf->hob_lbam;
712 desc[10] = tf->hob_lbah;
713 }
714}
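
The resulting 8-byte header plus 14-byte descriptor has a fixed shape; a purely descriptive sketch of it as a C struct (not a type libata defines):

#include <stdint.h>
#include <stdio.h>

struct ata_desc_sense_sketch {
        uint8_t response_code;  /* 0x72: current error, descriptor format */
        uint8_t sense_key;      /* low nibble from ata_to_sense_error() */
        uint8_t asc;
        uint8_t ascq;
        uint8_t reserved[3];
        uint8_t add_sense_len;  /* 14: only descriptor 0 follows */
        struct {
                uint8_t code;           /* 0x09: ATA status return */
                uint8_t len;            /* 14, as set above */
                uint8_t extend;         /* bit 0 set for LBA48 results */
                uint8_t error;          /* tf->feature */
                uint8_t hob_nsect, nsect;
                uint8_t hob_lbal, lbal;
                uint8_t hob_lbam, lbam;
                uint8_t hob_lbah, lbah;
                uint8_t device;
                uint8_t status;         /* tf->command */
        } desc0;
};

int main(void)
{
        /* prints "sense block size: 22" (8 header + 14 descriptor bytes) */
        printf("sense block size: %zu\n",
               sizeof(struct ata_desc_sense_sketch));
        return 0;
}
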
715
716/**
717 * ata_gen_fixed_sense - generate a SCSI fixed sense block
718 * @qc: Command that we are erroring out
719 *
720 * Leverage ata_to_sense_error() to give us the codes. Fit our
721 * LBA in here if there's room.
722 *
723 * LOCKING:
724 * inherited from caller
725 */
726void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
727{
728 struct scsi_cmnd *cmd = qc->scsicmd;
729 struct ata_taskfile *tf = &qc->result_tf;
730 unsigned char *sb = cmd->sense_buffer;
731 int verbose = qc->ap->ops->error_handler == NULL;
732
733 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
734
735 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
736
737 /*
738 * Use ata_to_sense_error() to map status register bits
739 * onto sense key, asc & ascq.
740 */
741 if (qc->err_mask ||
742 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
743 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
744 &sb[2], &sb[12], &sb[13], verbose);
745 sb[2] &= 0x0f;
746 }
747
748 sb[0] = 0x70;
749 sb[7] = 0x0a;
750
751 if (tf->flags & ATA_TFLAG_LBA48) {
752 /* TODO: find solution for LBA48 descriptors */
753 }
754
755 else if (tf->flags & ATA_TFLAG_LBA) {
756 /* A small (28b) LBA will fit in the 32b info field */
757 sb[0] |= 0x80; /* set valid bit */
758 sb[3] = tf->device & 0x0f;
759 sb[4] = tf->lbah;
760 sb[5] = tf->lbam;
761 sb[6] = tf->lbal;
762 }
763
764 else {
765 /* TODO: C/H/S */
766 }
767}
768
769static void ata_scsi_sdev_config(struct scsi_device *sdev)
770{
771 sdev->use_10_for_rw = 1;
772 sdev->use_10_for_ms = 1;
773}
774
775static void ata_scsi_dev_config(struct scsi_device *sdev,
776 struct ata_device *dev)
777{
778 unsigned int max_sectors;
779
780 /* TODO: 2048 is an arbitrary number, not the
781 * hardware maximum. This should be increased to
782 * 65534 when Jens Axboe's patch for dynamically
783 * determining max_sectors is merged.
784 */
785 max_sectors = ATA_MAX_SECTORS;
786 if (dev->flags & ATA_DFLAG_LBA48)
787 max_sectors = ATA_MAX_SECTORS_LBA48;
788 if (dev->max_sectors)
789 max_sectors = dev->max_sectors;
790
791 blk_queue_max_sectors(sdev->request_queue, max_sectors);
792
793 /*
794 * SATA DMA transfers must be multiples of 4 byte, so
795 * we need to pad ATAPI transfers using an extra sg.
796 * Decrement max hw segments accordingly.
797 */
798 if (dev->class == ATA_DEV_ATAPI) {
799 request_queue_t *q = sdev->request_queue;
800 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
801 }
802
803 if (dev->flags & ATA_DFLAG_NCQ) {
804 int depth;
805
806 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
807 depth = min(ATA_MAX_QUEUE - 1, depth);
808 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
809 }
810}
811
812/**
813 * ata_scsi_slave_config - Set SCSI device attributes
814 * @sdev: SCSI device to examine
815 *
816 * This is called before we actually start reading
817 * and writing to the device, to configure certain
818 * SCSI mid-layer behaviors.
819 *
820 * LOCKING:
821 * Defined by SCSI layer. We don't really care.
822 */
823
824int ata_scsi_slave_config(struct scsi_device *sdev)
825{
826 struct ata_port *ap = ata_shost_to_port(sdev->host);
827 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
828
829 ata_scsi_sdev_config(sdev);
830
831 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
832
833 if (dev)
834 ata_scsi_dev_config(sdev, dev);
835
836 return 0; /* scsi layer doesn't check return value, sigh */
837}
838
839/**
840 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
841 * @sdev: SCSI device to be destroyed
842 *
843 * @sdev is about to be destroyed for hot/warm unplugging. If
844 * this unplugging was initiated by libata as indicated by NULL
845 * dev->sdev, this function doesn't have to do anything.
846 * Otherwise, SCSI layer initiated warm-unplug is in progress.
847 * Clear dev->sdev, schedule the device for ATA detach and invoke
848 * EH.
849 *
850 * LOCKING:
851 * Defined by SCSI layer. We don't really care.
852 */
853void ata_scsi_slave_destroy(struct scsi_device *sdev)
854{
855 struct ata_port *ap = ata_shost_to_port(sdev->host);
856 unsigned long flags;
857 struct ata_device *dev;
858
859 if (!ap->ops->error_handler)
860 return;
861
862 spin_lock_irqsave(ap->lock, flags);
863 dev = __ata_scsi_find_dev(ap, sdev);
864 if (dev && dev->sdev) {
865 /* SCSI device already in CANCEL state, no need to offline it */
866 dev->sdev = NULL;
867 dev->flags |= ATA_DFLAG_DETACH;
868 ata_port_schedule_eh(ap);
869 }
870 spin_unlock_irqrestore(ap->lock, flags);
871}
872
873/**
874 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
875 * @sdev: SCSI device to configure queue depth for
876 * @queue_depth: new queue depth
877 *
878 * This is the libata standard hostt->change_queue_depth callback.
879 * SCSI will call into this callback when the user tries to set
880 * the queue depth via sysfs.
881 *
882 * LOCKING:
883 * SCSI layer (we don't care)
884 *
885 * RETURNS:
886 * Newly configured queue depth.
887 */
888int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
889{
890 struct ata_port *ap = ata_shost_to_port(sdev->host);
891 struct ata_device *dev;
892 int max_depth;
893
894 if (queue_depth < 1)
895 return sdev->queue_depth;
896
897 dev = ata_scsi_find_dev(ap, sdev);
898 if (!dev || !ata_dev_enabled(dev))
899 return sdev->queue_depth;
900
901 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
902 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
903 if (queue_depth > max_depth)
904 queue_depth = max_depth;
905
906 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
907 return queue_depth;
908}
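
The clamping above is three nested minimums: the host's can_queue, the depth advertised in the drive's IDENTIFY data, and ATA_MAX_QUEUE - 1. With illustrative numbers (host limit 31, drive depth 32, ATA_MAX_QUEUE assumed 32), a sysfs request for 64 settles at 31:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int can_queue = 31;             /* host limit (illustrative) */
        int id_depth = 32;              /* drive's IDENTIFY queue depth */
        int ata_max_queue = 32;         /* ATA_MAX_QUEUE (assumed) */
        int requested = 64;             /* user's sysfs request */

        int max_depth = min_int(can_queue, id_depth);
        max_depth = min_int(ata_max_queue - 1, max_depth);

        /* prints "queue depth = 31" */
        printf("queue depth = %d\n", min_int(requested, max_depth));
        return 0;
}
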
909
910/**
911 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
912 * @qc: Storage for translated ATA taskfile
913 * @scsicmd: SCSI command to translate
914 *
915 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
916 * (to start). Perhaps these commands should be preceded by
917 * CHECK POWER MODE to see what power mode the device is already in.
918 * [See SAT revision 5 at www.t10.org]
919 *
920 * LOCKING:
921 * spin_lock_irqsave(host lock)
922 *
923 * RETURNS:
924 * Zero on success, non-zero on error.
925 */
926
927static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
928 const u8 *scsicmd)
929{
930 struct ata_taskfile *tf = &qc->tf;
931
932 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
933 tf->protocol = ATA_PROT_NODATA;
934 if (scsicmd[1] & 0x1) {
935 ; /* ignore IMMED bit, violates sat-r05 */
936 }
937 if (scsicmd[4] & 0x2)
938 goto invalid_fld; /* LOEJ bit set not supported */
939 if (((scsicmd[4] >> 4) & 0xf) != 0)
940 goto invalid_fld; /* power conditions not supported */
941 if (scsicmd[4] & 0x1) {
942 tf->nsect = 1; /* 1 sector, lba=0 */
943
944 if (qc->dev->flags & ATA_DFLAG_LBA) {
945 tf->flags |= ATA_TFLAG_LBA;
946
947 tf->lbah = 0x0;
948 tf->lbam = 0x0;
949 tf->lbal = 0x0;
950 tf->device |= ATA_LBA;
951 } else {
952 /* CHS */
953 tf->lbal = 0x1; /* sect */
954 tf->lbam = 0x0; /* cyl low */
955 tf->lbah = 0x0; /* cyl high */
956 }
957
958 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
959 } else {
960 tf->nsect = 0; /* time period value (0 implies now) */
961 tf->command = ATA_CMD_STANDBY;
962 /* Consider: ATA STANDBY IMMEDIATE command */
963 }
964 /*
965 * Standby and Idle condition timers could be implemented but that
966 * would require libata to implement the Power condition mode page
967 * and allow the user to change it. Changing mode pages requires
968 * MODE SELECT to be implemented.
969 */
970
971 return 0;
972
973invalid_fld:
974 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
975 /* "Invalid field in CDB" */
976 return 1;
977}
978
979
980/**
981 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
982 * @qc: Storage for translated ATA taskfile
983 * @scsicmd: SCSI command to translate (ignored)
984 *
985 * Sets up an ATA taskfile to issue FLUSH CACHE or
986 * FLUSH CACHE EXT.
987 *
988 * LOCKING:
989 * spin_lock_irqsave(host lock)
990 *
991 * RETURNS:
992 * Zero on success, non-zero on error.
993 */
994
995static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
996{
997 struct ata_taskfile *tf = &qc->tf;
998
999 tf->flags |= ATA_TFLAG_DEVICE;
1000 tf->protocol = ATA_PROT_NODATA;
1001
1002 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
1003 (ata_id_has_flush_ext(qc->dev->id)))
1004 tf->command = ATA_CMD_FLUSH_EXT;
1005 else
1006 tf->command = ATA_CMD_FLUSH;
1007
1008 return 0;
1009}
1010
1011/**
1012 * scsi_6_lba_len - Get LBA and transfer length
1013 * @scsicmd: SCSI command to translate
1014 *
1015 * Calculate LBA and transfer length for 6-byte commands.
1016 *
1017 * RETURNS:
1018 * @plba: the LBA
1019 * @plen: the transfer length
1020 */
1021
1022static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1023{
1024 u64 lba = 0;
1025 u32 len = 0;
1026
1027 VPRINTK("six-byte command\n");
1028
1029 lba |= ((u64)scsicmd[2]) << 8;
1030 lba |= ((u64)scsicmd[3]);
1031
1032 len |= ((u32)scsicmd[4]);
1033
1034 *plba = lba;
1035 *plen = len;
1036}
1037
1038/**
1039 * scsi_10_lba_len - Get LBA and transfer length
1040 * @scsicmd: SCSI command to translate
1041 *
1042 * Calculate LBA and transfer length for 10-byte commands.
1043 *
1044 * RETURNS:
1045 * @plba: the LBA
1046 * @plen: the transfer length
1047 */
1048
1049static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1050{
1051 u64 lba = 0;
1052 u32 len = 0;
1053
1054 VPRINTK("ten-byte command\n");
1055
1056 lba |= ((u64)scsicmd[2]) << 24;
1057 lba |= ((u64)scsicmd[3]) << 16;
1058 lba |= ((u64)scsicmd[4]) << 8;
1059 lba |= ((u64)scsicmd[5]);
1060
1061 len |= ((u32)scsicmd[7]) << 8;
1062 len |= ((u32)scsicmd[8]);
1063
1064 *plba = lba;
1065 *plen = len;
1066}
1067
1068/**
1069 * scsi_16_lba_len - Get LBA and transfer length
1070 * @scsicmd: SCSI command to translate
1071 *
1072 * Calculate LBA and transfer length for 16-byte commands.
1073 *
1074 * RETURNS:
1075 * @plba: the LBA
1076 * @plen: the transfer length
1077 */
1078
1079static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1080{
1081 u64 lba = 0;
1082 u32 len = 0;
1083
1084 VPRINTK("sixteen-byte command\n");
1085
1086 lba |= ((u64)scsicmd[2]) << 56;
1087 lba |= ((u64)scsicmd[3]) << 48;
1088 lba |= ((u64)scsicmd[4]) << 40;
1089 lba |= ((u64)scsicmd[5]) << 32;
1090 lba |= ((u64)scsicmd[6]) << 24;
1091 lba |= ((u64)scsicmd[7]) << 16;
1092 lba |= ((u64)scsicmd[8]) << 8;
1093 lba |= ((u64)scsicmd[9]);
1094
1095 len |= ((u32)scsicmd[10]) << 24;
1096 len |= ((u32)scsicmd[11]) << 16;
1097 len |= ((u32)scsicmd[12]) << 8;
1098 len |= ((u32)scsicmd[13]);
1099
1100 *plba = lba;
1101 *plen = len;
1102}
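
All three helpers are plain big-endian field extraction from the CDB. For a READ(10) CDB addressing LBA 0x12345678 with a 16-block transfer length, the same shifts give (bytes illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* READ(10): opcode, flags, LBA[4], group, length[2], control */
        const uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78,
                                  0, 0x00, 0x10, 0 };
        uint64_t lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
                       ((uint64_t)cdb[4] << 8) | cdb[5];
        uint32_t len = ((uint32_t)cdb[7] << 8) | cdb[8];

        /* prints "lba=0x12345678 len=16" */
        printf("lba=0x%llx len=%u\n",
               (unsigned long long)lba, (unsigned int)len);
        return 0;
}
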
1103
1104/**
1105 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1106 * @qc: Storage for translated ATA taskfile
1107 * @scsicmd: SCSI command to translate
1108 *
1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1110 *
1111 * LOCKING:
1112 * spin_lock_irqsave(host lock)
1113 *
1114 * RETURNS:
1115 * Zero on success, non-zero on error.
1116 */
1117
1118static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1119{
1120 struct ata_taskfile *tf = &qc->tf;
1121 struct ata_device *dev = qc->dev;
1122 u64 dev_sectors = qc->dev->n_sectors;
1123 u64 block;
1124 u32 n_block;
1125
1126 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1127 tf->protocol = ATA_PROT_NODATA;
1128
1129 if (scsicmd[0] == VERIFY)
1130 scsi_10_lba_len(scsicmd, &block, &n_block);
1131 else if (scsicmd[0] == VERIFY_16)
1132 scsi_16_lba_len(scsicmd, &block, &n_block);
1133 else
1134 goto invalid_fld;
1135
1136 if (!n_block)
1137 goto nothing_to_do;
1138 if (block >= dev_sectors)
1139 goto out_of_range;
1140 if ((block + n_block) > dev_sectors)
1141 goto out_of_range;
1142
1143 if (dev->flags & ATA_DFLAG_LBA) {
1144 tf->flags |= ATA_TFLAG_LBA;
1145
1146 if (lba_28_ok(block, n_block)) {
1147 /* use LBA28 */
1148 tf->command = ATA_CMD_VERIFY;
1149 tf->device |= (block >> 24) & 0xf;
1150 } else if (lba_48_ok(block, n_block)) {
1151 if (!(dev->flags & ATA_DFLAG_LBA48))
1152 goto out_of_range;
1153
1154 /* use LBA48 */
1155 tf->flags |= ATA_TFLAG_LBA48;
1156 tf->command = ATA_CMD_VERIFY_EXT;
1157
1158 tf->hob_nsect = (n_block >> 8) & 0xff;
1159
1160 tf->hob_lbah = (block >> 40) & 0xff;
1161 tf->hob_lbam = (block >> 32) & 0xff;
1162 tf->hob_lbal = (block >> 24) & 0xff;
1163 } else
1164 /* request too large even for LBA48 */
1165 goto out_of_range;
1166
1167 tf->nsect = n_block & 0xff;
1168
1169 tf->lbah = (block >> 16) & 0xff;
1170 tf->lbam = (block >> 8) & 0xff;
1171 tf->lbal = block & 0xff;
1172
1173 tf->device |= ATA_LBA;
1174 } else {
1175 /* CHS */
1176 u32 sect, head, cyl, track;
1177
1178 if (!lba_28_ok(block, n_block))
1179 goto out_of_range;
1180
1181 /* Convert LBA to CHS */
1182 track = (u32)block / dev->sectors;
1183 cyl = track / dev->heads;
1184 head = track % dev->heads;
1185 sect = (u32)block % dev->sectors + 1;
1186
1187 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1188 (u32)block, track, cyl, head, sect);
1189
1190 /* Check whether the converted CHS can fit.
1191 Cylinder: 0-65535
1192 Head: 0-15
1193 Sector: 1-255 */
1194 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1195 goto out_of_range;
1196
1197 tf->command = ATA_CMD_VERIFY;
1198 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1199 tf->lbal = sect;
1200 tf->lbam = cyl;
1201 tf->lbah = cyl >> 8;
1202 tf->device |= head;
1203 }
1204
1205 return 0;
1206
1207invalid_fld:
1208 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1209 /* "Invalid field in CDB" */
1210 return 1;
1211
1212out_of_range:
1213 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1214 /* "Logical Block Address out of range" */
1215 return 1;
1216
1217nothing_to_do:
1218 qc->scsicmd->result = SAM_STAT_GOOD;
1219 return 1;
1220}
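
As a numeric check of the LBA-to-CHS conversion above, take block 12345 on a drive with 16 heads and 63 sectors per track (an illustrative geometry):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t block = 12345, heads = 16, sectors = 63;

        uint32_t track = block / sectors;       /* 195 */
        uint32_t cyl = track / heads;           /* 12 */
        uint32_t head = track % heads;          /* 3 */
        uint32_t sect = block % sectors + 1;    /* 61: ATA sectors are 1-based */

        /* prints "C/H/S = 12/3/61" */
        printf("C/H/S = %u/%u/%u\n", cyl, head, sect);
        return 0;
}
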
1221
1222/**
1223 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1224 * @qc: Storage for translated ATA taskfile
1225 * @scsicmd: SCSI command to translate
1226 *
1227 * Converts any of six SCSI read/write commands into its
1228 * ATA counterpart, carrying over the starting sector (LBA)
1229 * and sector count, and taking into account the device's
1230 * LBA48 support.
1231 *
1232 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
1233 * %WRITE_16 are currently supported.
1234 *
1235 * LOCKING:
1236 * spin_lock_irqsave(host lock)
1237 *
1238 * RETURNS:
1239 * Zero on success, non-zero on error.
1240 */
1241
1242static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1243{
1244 struct ata_taskfile *tf = &qc->tf;
1245 struct ata_device *dev = qc->dev;
1246 u64 block;
1247 u32 n_block;
1248
1249 qc->flags |= ATA_QCFLAG_IO;
1250 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1251
1252 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1253 scsicmd[0] == WRITE_16)
1254 tf->flags |= ATA_TFLAG_WRITE;
1255
1256 /* Calculate the SCSI LBA, transfer length and FUA. */
1257 switch (scsicmd[0]) {
1258 case READ_10:
1259 case WRITE_10:
1260 scsi_10_lba_len(scsicmd, &block, &n_block);
1261 if (unlikely(scsicmd[1] & (1 << 3)))
1262 tf->flags |= ATA_TFLAG_FUA;
1263 break;
1264 case READ_6:
1265 case WRITE_6:
1266 scsi_6_lba_len(scsicmd, &block, &n_block);
1267
1268 /* for 6-byte r/w commands, transfer length 0
1269 * means 256 blocks of data, not 0 blocks.
1270 */
1271 if (!n_block)
1272 n_block = 256;
1273 break;
1274 case READ_16:
1275 case WRITE_16:
1276 scsi_16_lba_len(scsicmd, &block, &n_block);
1277 if (unlikely(scsicmd[1] & (1 << 3)))
1278 tf->flags |= ATA_TFLAG_FUA;
1279 break;
1280 default:
1281 DPRINTK("no-byte command\n");
1282 goto invalid_fld;
1283 }
1284
1285 /* Check and compose ATA command */
1286 if (!n_block)
1287 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1288 * length 0 means transfer 0 blocks of data.
1289 * However, for ATA R/W commands, sector count 0 means
1290 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1291 *
1292 * WARNING: one or two older ATA drives treat 0 as 0...
1293 */
1294 goto nothing_to_do;
1295
1296 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1297 /* yay, NCQ */
1298 if (!lba_48_ok(block, n_block))
1299 goto out_of_range;
1300
1301 tf->protocol = ATA_PROT_NCQ;
1302 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1303
1304 if (tf->flags & ATA_TFLAG_WRITE)
1305 tf->command = ATA_CMD_FPDMA_WRITE;
1306 else
1307 tf->command = ATA_CMD_FPDMA_READ;
1308
1309 qc->nsect = n_block;
1310
1311 tf->nsect = qc->tag << 3;
1312 tf->hob_feature = (n_block >> 8) & 0xff;
1313 tf->feature = n_block & 0xff;
1314
1315 tf->hob_lbah = (block >> 40) & 0xff;
1316 tf->hob_lbam = (block >> 32) & 0xff;
1317 tf->hob_lbal = (block >> 24) & 0xff;
1318 tf->lbah = (block >> 16) & 0xff;
1319 tf->lbam = (block >> 8) & 0xff;
1320 tf->lbal = block & 0xff;
1321
1322 tf->device = 1 << 6; /* LBA bit, i.e. ATA_LBA */
1323 if (tf->flags & ATA_TFLAG_FUA)
1324 tf->device |= 1 << 7;
1325 } else if (dev->flags & ATA_DFLAG_LBA) {
1326 tf->flags |= ATA_TFLAG_LBA;
1327
1328 if (lba_28_ok(block, n_block)) {
1329 /* use LBA28 */
1330 tf->device |= (block >> 24) & 0xf;
1331 } else if (lba_48_ok(block, n_block)) {
1332 if (!(dev->flags & ATA_DFLAG_LBA48))
1333 goto out_of_range;
1334
1335 /* use LBA48 */
1336 tf->flags |= ATA_TFLAG_LBA48;
1337
1338 tf->hob_nsect = (n_block >> 8) & 0xff;
1339
1340 tf->hob_lbah = (block >> 40) & 0xff;
1341 tf->hob_lbam = (block >> 32) & 0xff;
1342 tf->hob_lbal = (block >> 24) & 0xff;
1343 } else
1344 /* request too large even for LBA48 */
1345 goto out_of_range;
1346
1347 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1348 goto invalid_fld;
1349
1350 qc->nsect = n_block;
1351 tf->nsect = n_block & 0xff;
1352
1353 tf->lbah = (block >> 16) & 0xff;
1354 tf->lbam = (block >> 8) & 0xff;
1355 tf->lbal = block & 0xff;
1356
1357 tf->device |= ATA_LBA;
1358 } else {
1359 /* CHS */
1360 u32 sect, head, cyl, track;
1361
1362 /* The request -may- be too large for CHS addressing. */
1363 if (!lba_28_ok(block, n_block))
1364 goto out_of_range;
1365
1366 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1367 goto invalid_fld;
1368
1369 /* Convert LBA to CHS */
1370 track = (u32)block / dev->sectors;
1371 cyl = track / dev->heads;
1372 head = track % dev->heads;
1373 sect = (u32)block % dev->sectors + 1;
1374
1375 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1376 (u32)block, track, cyl, head, sect);
1377
1378 /* Check whether the converted CHS can fit.
1379 Cylinder: 0-65535
1380 Head: 0-15
1381 Sector: 1-255 */
1382 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1383 goto out_of_range;
1384
1385 qc->nsect = n_block;
1386 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1387 tf->lbal = sect;
1388 tf->lbam = cyl;
1389 tf->lbah = cyl >> 8;
1390 tf->device |= head;
1391 }
1392
1393 return 0;
1394
1395invalid_fld:
1396 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1397 /* "Invalid field in CDB" */
1398 return 1;
1399
1400out_of_range:
1401 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1402 /* "Logical Block Address out of range" */
1403 return 1;
1404
1405nothing_to_do:
1406 qc->scsicmd->result = SAM_STAT_GOOD;
1407 return 1;
1408}
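
Note how the NCQ branch repacks the taskfile relative to plain LBA48: the queue tag occupies bits 7:3 of the sector count register, and the block count moves into the feature register pair. A sketch of that packing for tag 5 and 1024 blocks (values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int tag = 5;
        uint32_t n_block = 1024;

        uint8_t nsect = tag << 3;                       /* tag in bits 7:3 */
        uint8_t feature = n_block & 0xff;               /* count, low byte */
        uint8_t hob_feature = (n_block >> 8) & 0xff;    /* count, high byte */

        /* prints "nsect=0x28 count=0x04,0x00" */
        printf("nsect=0x%02x count=0x%02x,0x%02x\n",
               nsect, hob_feature, feature);
        return 0;
}
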
1409
1410static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1411{
1412 struct scsi_cmnd *cmd = qc->scsicmd;
1413 u8 *cdb = cmd->cmnd;
1414 int need_sense = (qc->err_mask != 0);
1415
1416 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
1417 * schedule an EH_REVALIDATE operation to update the cached
1418 * IDENTIFY DEVICE data.
1419 */
1420 if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
1421 ((qc->tf.feature == SETFEATURES_WC_ON) ||
1422 (qc->tf.feature == SETFEATURES_WC_OFF))) {
1423 qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
1424 ata_port_schedule_eh(qc->ap);
1425 }
1426
1427 /* For ATA pass-through (SAT) commands, generate a sense block if
1428 * the user mandated it or if there's an error. Note that if we
1429 * generate one because the user forced us to, a CHECK CONDITION
1430 * is reported and the ATA register values are returned
1431 * whether the command completed successfully or not. If there
1432 * was no error, SK, ASC and ASCQ will all be zero.
1433 */
1434 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1435 ((cdb[2] & 0x20) || need_sense)) {
1436 ata_gen_ata_desc_sense(qc);
1437 } else {
1438 if (!need_sense) {
1439 cmd->result = SAM_STAT_GOOD;
1440 } else {
1441 /* TODO: decide which descriptor format to use
1442 * for 48b LBA devices and call that here
1443 * instead of the fixed desc, which is only
1444 * good for smaller LBA (and maybe CHS?)
1445 * devices.
1446 */
1447 ata_gen_fixed_sense(qc);
1448 }
1449 }
1450
1451 if (need_sense && !qc->ap->ops->error_handler)
1452 ata_dump_status(qc->ap->id, &qc->result_tf);
1453
1454 qc->scsidone(cmd);
1455
1456 ata_qc_free(qc);
1457}
1458
1459/**
1460 * ata_scmd_need_defer - Check whether we need to defer scmd
1461 * @dev: ATA device to which the command is addressed
1462 * @is_io: Is the command IO (and thus possibly NCQ)?
1463 *
1464 * NCQ and non-NCQ commands cannot run together. As the upper layer
1465 * only knows the queue depth, we are responsible for maintaining
1466 * exclusion. This function checks whether a new command can be
1467 * issued to @dev.
1468 *
1469 * LOCKING:
1470 * spin_lock_irqsave(host lock)
1471 *
1472 * RETURNS:
1473 * 1 if deferring is needed, 0 otherwise.
1474 */
1475static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1476{
1477 struct ata_port *ap = dev->ap;
1478
1479 if (!(dev->flags & ATA_DFLAG_NCQ))
1480 return 0;
1481
1482 if (is_io) {
1483 if (!ata_tag_valid(ap->active_tag))
1484 return 0;
1485 } else {
1486 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1487 return 0;
1488 }
1489 return 1;
1490}
1491
1492/**
1493 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1494 * @dev: ATA device to which the command is addressed
1495 * @cmd: SCSI command to execute
1496 * @done: SCSI command completion function
1497 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1498 *
1499 * Our ->queuecommand() function has decided that the SCSI
1500 * command issued can be directly translated into an ATA
1501 * command, rather than handled internally.
1502 *
1503 * This function sets up an ata_queued_cmd structure for the
1504 * SCSI command, and sends that ata_queued_cmd to the hardware.
1505 *
1506 * The xlat_func argument (actor) returns 0 if ready to execute
1507 * ATA command, else 1 to finish translation. If 1 is returned
1508 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1509 * to be set reflecting an error condition or clean (early)
1510 * termination.
1511 *
1512 * LOCKING:
1513 * spin_lock_irqsave(host lock)
1514 *
1515 * RETURNS:
1516 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1517 * needs to be deferred.
1518 */
1519static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1520 void (*done)(struct scsi_cmnd *),
1521 ata_xlat_func_t xlat_func)
1522{
1523 struct ata_queued_cmd *qc;
1524 u8 *scsicmd = cmd->cmnd;
1525 int is_io = xlat_func == ata_scsi_rw_xlat;
1526
1527 VPRINTK("ENTER\n");
1528
1529 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1530 goto defer;
1531
1532 qc = ata_scsi_qc_new(dev, cmd, done);
1533 if (!qc)
1534 goto err_mem;
1535
1536 /* data is present; dma-map it */
1537 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1538 cmd->sc_data_direction == DMA_TO_DEVICE) {
1539 if (unlikely(cmd->request_bufflen < 1)) {
1540 ata_dev_printk(dev, KERN_WARNING,
1541 "WARNING: zero len r/w req\n");
1542 goto err_did;
1543 }
1544
1545 if (cmd->use_sg)
1546 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1547 else
1548 ata_sg_init_one(qc, cmd->request_buffer,
1549 cmd->request_bufflen);
1550
1551 qc->dma_dir = cmd->sc_data_direction;
1552 }
1553
1554 qc->complete_fn = ata_scsi_qc_complete;
1555
1556 if (xlat_func(qc, scsicmd))
1557 goto early_finish;
1558
1559 /* select device, send command to hardware */
1560 ata_qc_issue(qc);
1561
1562 VPRINTK("EXIT\n");
1563 return 0;
1564
1565early_finish:
1566 ata_qc_free(qc);
1567 done(cmd);
1568 DPRINTK("EXIT - early finish (good or error)\n");
1569 return 0;
1570
1571err_did:
1572 ata_qc_free(qc);
1573err_mem:
1574 cmd->result = (DID_ERROR << 16);
1575 done(cmd);
1576 DPRINTK("EXIT - internal\n");
1577 return 0;
1578
1579defer:
1580 DPRINTK("EXIT - defer\n");
1581 return SCSI_MLQUEUE_DEVICE_BUSY;
1582}
1583
1584/**
1585 * ata_scsi_rbuf_get - Map response buffer.
1586 * @cmd: SCSI command containing buffer to be mapped.
1587 * @buf_out: Pointer to mapped area.
1588 *
1589 * Maps buffer contained within SCSI command @cmd.
1590 *
1591 * LOCKING:
1592 * spin_lock_irqsave(host lock)
1593 *
1594 * RETURNS:
1595 * Length of response buffer.
1596 */
1597
1598static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1599{
1600 u8 *buf;
1601 unsigned int buflen;
1602
1603 if (cmd->use_sg) {
1604 struct scatterlist *sg;
1605
1606 sg = (struct scatterlist *) cmd->request_buffer;
1607 buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
1608 buflen = sg->length;
1609 } else {
1610 buf = cmd->request_buffer;
1611 buflen = cmd->request_bufflen;
1612 }
1613
1614 *buf_out = buf;
1615 return buflen;
1616}
1617
1618/**
1619 * ata_scsi_rbuf_put - Unmap response buffer.
1620 * @cmd: SCSI command containing buffer to be unmapped.
1621 * @buf: buffer to unmap
1622 *
1623 * Unmaps response buffer contained within @cmd.
1624 *
1625 * LOCKING:
1626 * spin_lock_irqsave(host lock)
1627 */
1628
1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1630{
1631 if (cmd->use_sg) {
1632 struct scatterlist *sg;
1633
1634 sg = (struct scatterlist *) cmd->request_buffer;
1635 kunmap_atomic(buf - sg->offset, KM_USER0);
1636 }
1637}
1638
1639/**
1640 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1641 * @args: device IDENTIFY data / SCSI command of interest.
1642 * @actor: Callback hook for desired SCSI command simulator
1643 *
1644 * Takes care of the hard work of simulating a SCSI command...
1645 * Mapping the response buffer, calling the command's handler,
1646 * and handling the handler's return value. This return value
1647 * indicates whether the handler wishes the SCSI command to be
1648 * completed successfully (0), or not (in which case cmd->result
1649 * and sense buffer are assumed to be set).
1650 *
1651 * LOCKING:
1652 * spin_lock_irqsave(host lock)
1653 */
1654
1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1656 unsigned int (*actor) (struct ata_scsi_args *args,
1657 u8 *rbuf, unsigned int buflen))
1658{
1659 u8 *rbuf;
1660 unsigned int buflen, rc;
1661 struct scsi_cmnd *cmd = args->cmd;
1662
1663 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1664 memset(rbuf, 0, buflen);
1665 rc = actor(args, rbuf, buflen);
1666 ata_scsi_rbuf_put(cmd, rbuf);
1667
1668 if (rc == 0)
1669 cmd->result = SAM_STAT_GOOD;
1670 args->done(cmd);
1671}
1672
1673/**
1674 * ata_scsiop_inq_std - Simulate INQUIRY command
1675 * @args: device IDENTIFY data / SCSI command of interest.
1676 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1677 * @buflen: Response buffer length.
1678 *
1679 * Returns standard device identification data associated
1680 * with non-VPD INQUIRY command output.
1681 *
1682 * LOCKING:
1683 * spin_lock_irqsave(host lock)
1684 */
1685
1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1687 unsigned int buflen)
1688{
1689 u8 hdr[] = {
1690 TYPE_DISK,
1691 0,
1692 0x5, /* claim SPC-3 version compatibility */
1693 2,
1694 95 - 4
1695 };
1696
697 /* set SCSI removable (RMB) bit per ATA bit */
1698 if (ata_id_removeable(args->id))
1699 hdr[1] |= (1 << 7);
1700
1701 VPRINTK("ENTER\n");
1702
1703 memcpy(rbuf, hdr, sizeof(hdr));
1704
1705 if (buflen > 35) {
1706 memcpy(&rbuf[8], "ATA     ", 8);
1707 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1708 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1709 if (rbuf[32] == 0 || rbuf[32] == ' ')
1710 memcpy(&rbuf[32], "n/a ", 4);
1711 }
1712
1713 if (buflen > 63) {
1714 const u8 versions[] = {
1715 0x60, /* SAM-3 (no version claimed) */
1716
1717 0x03,
1718 0x20, /* SBC-2 (no version claimed) */
1719
1720 0x02,
1721 0x60 /* SPC-3 (no version claimed) */
1722 };
1723
1724 memcpy(rbuf + 59, versions, sizeof(versions));
1725 }
1726
1727 return 0;
1728}
1729
1730/**
1731 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1732 * @args: device IDENTIFY data / SCSI command of interest.
1733 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1734 * @buflen: Response buffer length.
1735 *
1736 * Returns list of inquiry VPD pages available.
1737 *
1738 * LOCKING:
1739 * spin_lock_irqsave(host lock)
1740 */
1741
1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1743 unsigned int buflen)
1744{
1745 const u8 pages[] = {
1746 0x00, /* page 0x00, this page */
1747 0x80, /* page 0x80, unit serial no page */
1748 0x83 /* page 0x83, device ident page */
1749 };
1750 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1751
1752 if (buflen > 6)
1753 memcpy(rbuf + 4, pages, sizeof(pages));
1754
1755 return 0;
1756}
1757
1758/**
1759 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1760 * @args: device IDENTIFY data / SCSI command of interest.
1761 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1762 * @buflen: Response buffer length.
1763 *
1764 * Returns ATA device serial number.
1765 *
1766 * LOCKING:
1767 * spin_lock_irqsave(host lock)
1768 */
1769
1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1771 unsigned int buflen)
1772{
1773 const u8 hdr[] = {
1774 0,
1775 0x80, /* this page code */
1776 0,
1777 ATA_SERNO_LEN, /* page len */
1778 };
1779 memcpy(rbuf, hdr, sizeof(hdr));
1780
1781 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1782 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1783 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1784
1785 return 0;
1786}
1787
1788/**
1789 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1790 * @args: device IDENTIFY data / SCSI command of interest.
1791 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1792 * @buflen: Response buffer length.
1793 *
1794 * Yields two logical unit device identification designators:
1795 * - vendor specific ASCII containing the ATA serial number
1796 * - SAT defined "t10 vendor id based" containing ASCII vendor
1797 * name ("ATA "), model and serial numbers.
1798 *
1799 * LOCKING:
1800 * spin_lock_irqsave(host lock)
1801 */
1802
1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1804 unsigned int buflen)
1805{
1806 int num;
1807 const int sat_model_serial_desc_len = 68;
1808 const int ata_model_byte_len = 40;
1809
1810 rbuf[1] = 0x83; /* this page code */
1811 num = 4;
1812
1813 if (buflen > (ATA_SERNO_LEN + num + 3)) {
1814		/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
1815 rbuf[num + 0] = 2;
1816 rbuf[num + 3] = ATA_SERNO_LEN;
1817 num += 4;
1818 ata_id_string(args->id, (unsigned char *) rbuf + num,
1819 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1820 num += ATA_SERNO_LEN;
1821 }
1822 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1823 /* SAT defined lu model and serial numbers descriptor */
1824		/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
1825 rbuf[num + 0] = 2;
1826 rbuf[num + 1] = 1;
1827 rbuf[num + 3] = sat_model_serial_desc_len;
1828 num += 4;
1829 memcpy(rbuf + num, "ATA ", 8);
1830 num += 8;
1831 ata_id_string(args->id, (unsigned char *) rbuf + num,
1832 ATA_ID_PROD_OFS, ata_model_byte_len);
1833 num += ata_model_byte_len;
1834 ata_id_string(args->id, (unsigned char *) rbuf + num,
1835 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1836 num += ATA_SERNO_LEN;
1837 }
1838 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1839 return 0;
1840}
1841
1842/**
1843 * ata_scsiop_noop - Command handler that simply returns success.
1844 * @args: device IDENTIFY data / SCSI command of interest.
1845 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1846 * @buflen: Response buffer length.
1847 *
1848 * No operation. Simply returns success to caller, to indicate
1849 * that the caller should successfully complete this SCSI command.
1850 *
1851 * LOCKING:
1852 * spin_lock_irqsave(host lock)
1853 */
1854
1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1856 unsigned int buflen)
1857{
1858 VPRINTK("ENTER\n");
1859 return 0;
1860}
1861
1862/**
1863 * ata_msense_push - Push data onto MODE SENSE data output buffer
1864 * @ptr_io: (input/output) Location to store more output data
1865 * @last: End of output data buffer
1866 * @buf: Pointer to BLOB being added to output buffer
1867 * @buflen: Length of BLOB
1868 *
1869 * Store MODE SENSE data on an output buffer.
1870 *
1871 * LOCKING:
1872 * None.
1873 */
1874
1875static void ata_msense_push(u8 **ptr_io, const u8 *last,
1876 const u8 *buf, unsigned int buflen)
1877{
1878 u8 *ptr = *ptr_io;
1879
1880 if ((ptr + buflen - 1) > last)
1881 return;
1882
1883 memcpy(ptr, buf, buflen);
1884
1885 ptr += buflen;
1886
1887 *ptr_io = ptr;
1888}
1889
1890/**
1891 * ata_msense_caching - Simulate MODE SENSE caching info page
1892 * @id: device IDENTIFY data
1893 * @ptr_io: (input/output) Location to store more output data
1894 * @last: End of output data buffer
1895 *
1896 * Generate a caching info page, which conditionally indicates
1897 * write caching to the SCSI layer, depending on device
1898 * capabilities.
1899 *
1900 * LOCKING:
1901 * None.
1902 */
1903
1904static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1905 const u8 *last)
1906{
1907 u8 page[CACHE_MPAGE_LEN];
1908
1909 memcpy(page, def_cache_mpage, sizeof(page));
1910 if (ata_id_wcache_enabled(id))
1911 page[2] |= (1 << 2); /* write cache enable */
1912 if (!ata_id_rahead_enabled(id))
1913 page[12] |= (1 << 5); /* disable read ahead */
1914
1915 ata_msense_push(ptr_io, last, page, sizeof(page));
1916 return sizeof(page);
1917}
1918
1919/**
1920 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
1922 * @ptr_io: (input/output) Location to store more output data
1923 * @last: End of output data buffer
1924 *
1925 * Generate a generic MODE SENSE control mode page.
1926 *
1927 * LOCKING:
1928 * None.
1929 */
1930
1931static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1932{
1933 ata_msense_push(ptr_io, last, def_control_mpage,
1934 sizeof(def_control_mpage));
1935 return sizeof(def_control_mpage);
1936}
1937
1938/**
1939 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
1941 * @ptr_io: (input/output) Location to store more output data
1942 * @last: End of output data buffer
1943 *
1944 * Generate a generic MODE SENSE r/w error recovery page.
1945 *
1946 * LOCKING:
1947 * None.
1948 */
1949
1950static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1951{
1952
1953 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1954 sizeof(def_rw_recovery_mpage));
1955 return sizeof(def_rw_recovery_mpage);
1956}
1957
1958/*
1959 * We can turn this into a real blacklist if it's needed; for now, just
1960 * blacklist any Maxtor BANC1G10 revision firmware.
1961 */
1962static int ata_dev_supports_fua(u16 *id)
1963{
1964 unsigned char model[41], fw[9];
1965
1966 if (!libata_fua)
1967 return 0;
1968 if (!ata_id_has_fua(id))
1969 return 0;
1970
1971 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1972 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1973
1974 if (strcmp(model, "Maxtor"))
1975 return 1;
1976 if (strcmp(fw, "BANC1G10"))
1977 return 1;
1978
1979 return 0; /* blacklisted */
1980}
1981
1982/**
1983 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
1984 * @args: device IDENTIFY data / SCSI command of interest.
1985 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1986 * @buflen: Response buffer length.
1987 *
1988 * Simulate MODE SENSE commands. Assume this is invoked for direct
1989 * access devices (e.g. disks) only. There should be no block
1990 * descriptor for other device types.
1991 *
1992 * LOCKING:
1993 * spin_lock_irqsave(host lock)
1994 */
1995
1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1997 unsigned int buflen)
1998{
1999 struct ata_device *dev = args->dev;
2000 u8 *scsicmd = args->cmd->cmnd, *p, *last;
2001 const u8 sat_blk_desc[] = {
2002 0, 0, 0, 0, /* number of blocks: sat unspecified */
2003 0,
2004 0, 0x2, 0x0 /* block length: 512 bytes */
2005 };
2006 u8 pg, spg;
2007 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
2008 u8 dpofua;
2009
2010 VPRINTK("ENTER\n");
2011
2012 six_byte = (scsicmd[0] == MODE_SENSE);
2013	ebd = !(scsicmd[1] & 0x8);      /* dbd bit inverted == ebd */
2014 /*
2015 * LLBA bit in msense(10) ignored (compliant)
2016 */
2017
2018 page_control = scsicmd[2] >> 6;
2019 switch (page_control) {
2020 case 0: /* current */
2021 break; /* supported */
2022 case 3: /* saved */
2023 goto saving_not_supp;
2024 case 1: /* changeable */
2025 case 2: /* defaults */
2026 default:
2027 goto invalid_fld;
2028 }
2029
2030 if (six_byte) {
2031 output_len = 4 + (ebd ? 8 : 0);
2032 alloc_len = scsicmd[4];
2033 } else {
2034 output_len = 8 + (ebd ? 8 : 0);
2035 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2036 }
2037 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2038
2039 p = rbuf + output_len;
2040 last = rbuf + minlen - 1;
2041
2042 pg = scsicmd[2] & 0x3f;
2043 spg = scsicmd[3];
2044 /*
2045 * No mode subpages supported (yet) but asking for _all_
2046 * subpages may be valid
2047 */
2048 if (spg && (spg != ALL_SUB_MPAGES))
2049 goto invalid_fld;
2050
2051 switch(pg) {
2052 case RW_RECOVERY_MPAGE:
2053 output_len += ata_msense_rw_recovery(&p, last);
2054 break;
2055
2056 case CACHE_MPAGE:
2057 output_len += ata_msense_caching(args->id, &p, last);
2058 break;
2059
2060 case CONTROL_MPAGE: {
2061 output_len += ata_msense_ctl_mode(&p, last);
2062 break;
2063 }
2064
2065 case ALL_MPAGES:
2066 output_len += ata_msense_rw_recovery(&p, last);
2067 output_len += ata_msense_caching(args->id, &p, last);
2068 output_len += ata_msense_ctl_mode(&p, last);
2069 break;
2070
2071 default: /* invalid page code */
2072 goto invalid_fld;
2073 }
2074
2075 if (minlen < 1)
2076 return 0;
2077
2078 dpofua = 0;
2079 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2080 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2081 dpofua = 1 << 4;
2082
2083 if (six_byte) {
2084 output_len--;
2085 rbuf[0] = output_len;
2086 if (minlen > 2)
2087 rbuf[2] |= dpofua;
2088 if (ebd) {
2089 if (minlen > 3)
2090 rbuf[3] = sizeof(sat_blk_desc);
2091 if (minlen > 11)
2092 memcpy(rbuf + 4, sat_blk_desc,
2093 sizeof(sat_blk_desc));
2094 }
2095 } else {
2096 output_len -= 2;
2097 rbuf[0] = output_len >> 8;
2098 if (minlen > 1)
2099 rbuf[1] = output_len;
2100 if (minlen > 3)
2101 rbuf[3] |= dpofua;
2102 if (ebd) {
2103 if (minlen > 7)
2104 rbuf[7] = sizeof(sat_blk_desc);
2105 if (minlen > 15)
2106 memcpy(rbuf + 8, sat_blk_desc,
2107 sizeof(sat_blk_desc));
2108 }
2109 }
2110 return 0;
2111
2112invalid_fld:
2113 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2114	/* "Invalid field in cdb" */
2115 return 1;
2116
2117saving_not_supp:
2118 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2119 /* "Saving parameters not supported" */
2120 return 1;
2121}
2122
2123/**
2124 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2125 * @args: device IDENTIFY data / SCSI command of interest.
2126 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2127 * @buflen: Response buffer length.
2128 *
2129 * Simulate READ CAPACITY commands.
2130 *
2131 * LOCKING:
2132 * spin_lock_irqsave(host lock)
2133 */
2134
2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2136 unsigned int buflen)
2137{
2138 u64 n_sectors;
2139 u32 tmp;
2140
2141 VPRINTK("ENTER\n");
2142
2143 if (ata_id_has_lba(args->id)) {
2144 if (ata_id_has_lba48(args->id))
2145 n_sectors = ata_id_u64(args->id, 100);
2146 else
2147 n_sectors = ata_id_u32(args->id, 60);
2148 } else {
2149 /* CHS default translation */
2150 n_sectors = args->id[1] * args->id[3] * args->id[6];
2151
2152 if (ata_id_current_chs_valid(args->id))
2153 /* CHS current translation */
2154 n_sectors = ata_id_u32(args->id, 57);
2155 }
2156
2157 n_sectors--; /* ATA TotalUserSectors - 1 */
2158
2159 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2160		if (n_sectors >= 0xffffffffULL)
2161			tmp = 0xffffffff;	/* Return max count on overflow */
2162		else
2163			tmp = n_sectors;
2164
2165 /* sector count, 32-bit */
2166 rbuf[0] = tmp >> (8 * 3);
2167 rbuf[1] = tmp >> (8 * 2);
2168 rbuf[2] = tmp >> (8 * 1);
2169 rbuf[3] = tmp;
2170
2171 /* sector size */
2172 tmp = ATA_SECT_SIZE;
2173 rbuf[6] = tmp >> 8;
2174 rbuf[7] = tmp;
2175
2176 } else {
2177 /* sector count, 64-bit */
2178 tmp = n_sectors >> (8 * 4);
2179 rbuf[2] = tmp >> (8 * 3);
2180 rbuf[3] = tmp >> (8 * 2);
2181 rbuf[4] = tmp >> (8 * 1);
2182 rbuf[5] = tmp;
2183 tmp = n_sectors;
2184 rbuf[6] = tmp >> (8 * 3);
2185 rbuf[7] = tmp >> (8 * 2);
2186 rbuf[8] = tmp >> (8 * 1);
2187 rbuf[9] = tmp;
2188
2189 /* sector size */
2190 tmp = ATA_SECT_SIZE;
2191 rbuf[12] = tmp >> 8;
2192 rbuf[13] = tmp;
2193 }
2194
2195 return 0;
2196}
2197
2198/**
2199 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2200 * @args: device IDENTIFY data / SCSI command of interest.
2201 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2202 * @buflen: Response buffer length.
2203 *
2204 * Simulate REPORT LUNS command.
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host lock)
2208 */
2209
2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2211 unsigned int buflen)
2212{
2213 VPRINTK("ENTER\n");
2214 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
2215
2216 return 0;
2217}
2218
2219/**
2220 * ata_scsi_set_sense - Set SCSI sense data and status
2221 * @cmd: SCSI request to be handled
2222 * @sk: SCSI-defined sense key
2223 * @asc: SCSI-defined additional sense code
2224 * @ascq: SCSI-defined additional sense code qualifier
2225 *
2226 * Helper function that builds a valid fixed format, current
2227 * response code and the given sense key (sk), additional sense
2228 * code (asc) and additional sense code qualifier (ascq) with
2229 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2230 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
2231 *
2232 * LOCKING:
2233 * Not required
2234 */
2235
2236void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2237{
2238 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2239
2240 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
2241 cmd->sense_buffer[2] = sk;
2242 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
2243 cmd->sense_buffer[12] = asc;
2244 cmd->sense_buffer[13] = ascq;
2245}
2246
2247/**
2248 * ata_scsi_badcmd - End a SCSI request with an error
2249 * @cmd: SCSI request to be handled
2250 * @done: SCSI command completion function
2251 * @asc: SCSI-defined additional sense code
2252 * @ascq: SCSI-defined additional sense code qualifier
2253 *
2254 * Helper function that completes a SCSI command with
2255 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
2256 * and the specified additional sense codes.
2257 *
2258 * LOCKING:
2259 * spin_lock_irqsave(host lock)
2260 */
2261
2262void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
2263{
2264 DPRINTK("ENTER\n");
2265 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
2266
2267 done(cmd);
2268}
2269
2270static void atapi_sense_complete(struct ata_queued_cmd *qc)
2271{
2272 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2273 /* FIXME: not quite right; we don't want the
2274		 * translation of taskfile registers into
2275		 * sense descriptors, since that's only
2276 * correct for ATA, not ATAPI
2277 */
2278 ata_gen_ata_desc_sense(qc);
2279 }
2280
2281 qc->scsidone(qc->scsicmd);
2282 ata_qc_free(qc);
2283}
2284
2285/* is it pointless to prefer PIO for "safety reasons"? */
2286static inline int ata_pio_use_silly(struct ata_port *ap)
2287{
2288 return (ap->flags & ATA_FLAG_PIO_DMA);
2289}
2290
2291static void atapi_request_sense(struct ata_queued_cmd *qc)
2292{
2293 struct ata_port *ap = qc->ap;
2294 struct scsi_cmnd *cmd = qc->scsicmd;
2295
2296 DPRINTK("ATAPI request sense\n");
2297
2298 /* FIXME: is this needed? */
2299 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2300
2301 ap->ops->tf_read(ap, &qc->tf);
2302
2303 /* fill these in, for the case where they are -not- overwritten */
2304 cmd->sense_buffer[0] = 0x70;
2305 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2306
2307 ata_qc_reinit(qc);
2308
2309 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2310 qc->dma_dir = DMA_FROM_DEVICE;
2311
2312 memset(&qc->cdb, 0, qc->dev->cdb_len);
2313 qc->cdb[0] = REQUEST_SENSE;
2314 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2315
2316 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2317 qc->tf.command = ATA_CMD_PACKET;
2318
2319 if (ata_pio_use_silly(ap)) {
2320 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2321 qc->tf.feature |= ATAPI_PKT_DMA;
2322 } else {
2323 qc->tf.protocol = ATA_PROT_ATAPI;
2324 qc->tf.lbam = (8 * 1024) & 0xff;
2325 qc->tf.lbah = (8 * 1024) >> 8;
2326 }
2327 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2328
2329 qc->complete_fn = atapi_sense_complete;
2330
2331 ata_qc_issue(qc);
2332
2333 DPRINTK("EXIT\n");
2334}
2335
2336static void atapi_qc_complete(struct ata_queued_cmd *qc)
2337{
2338 struct scsi_cmnd *cmd = qc->scsicmd;
2339 unsigned int err_mask = qc->err_mask;
2340
2341 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2342
2343 /* handle completion from new EH */
2344 if (unlikely(qc->ap->ops->error_handler &&
2345 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2346
2347 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2348 /* FIXME: not quite right; we don't want the
2349		 * translation of taskfile registers into
2350		 * sense descriptors, since that's only
2351 * correct for ATA, not ATAPI
2352 */
2353 ata_gen_ata_desc_sense(qc);
2354 }
2355
2356 /* SCSI EH automatically locks door if sdev->locked is
2357 * set. Sometimes door lock request continues to
2358 * fail, for example, when no media is present. This
2359 * creates a loop - SCSI EH issues door lock which
2360 * fails and gets invoked again to acquire sense data
2361 * for the failed command.
2362 *
2363 * If door lock fails, always clear sdev->locked to
2364 * avoid this infinite loop.
2365 */
2366 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2367 qc->dev->sdev->locked = 0;
2368
2369 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2370 qc->scsidone(cmd);
2371 ata_qc_free(qc);
2372 return;
2373 }
2374
2375 /* successful completion or old EH failure path */
2376 if (unlikely(err_mask & AC_ERR_DEV)) {
2377 cmd->result = SAM_STAT_CHECK_CONDITION;
2378 atapi_request_sense(qc);
2379 return;
2380 } else if (unlikely(err_mask)) {
2381 /* FIXME: not quite right; we don't want the
2382 * translation of taskfile registers into
2383		 * sense descriptors, since that's only
2384 * correct for ATA, not ATAPI
2385 */
2386 ata_gen_ata_desc_sense(qc);
2387 } else {
2388 u8 *scsicmd = cmd->cmnd;
2389
2390 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2391 u8 *buf = NULL;
2392 unsigned int buflen;
2393
2394 buflen = ata_scsi_rbuf_get(cmd, &buf);
2395
2396 /* ATAPI devices typically report zero for their SCSI version,
2397 * and sometimes deviate from the spec WRT response data
2398 * format. If SCSI version is reported as zero like normal,
2399 * then we make the following fixups: 1) Fake MMC-5 version,
2400 * to indicate to the Linux scsi midlayer this is a modern
2401 * device. 2) Ensure response data format / ATAPI information
2402 * are always correct.
2403 */
2404 if (buf[2] == 0) {
2405 buf[2] = 0x5;
2406 buf[3] = 0x32;
2407 }
2408
2409 ata_scsi_rbuf_put(cmd, buf);
2410 }
2411
2412 cmd->result = SAM_STAT_GOOD;
2413 }
2414
2415 qc->scsidone(cmd);
2416 ata_qc_free(qc);
2417}
2418/**
2419 * atapi_xlat - Initialize PACKET taskfile
2420 * @qc: command structure to be initialized
2421 * @scsicmd: SCSI CDB associated with this PACKET command
2422 *
2423 * LOCKING:
2424 * spin_lock_irqsave(host lock)
2425 *
2426 * RETURNS:
2427 * Zero on success, non-zero on failure.
2428 */
2429
2430static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2431{
2432 struct scsi_cmnd *cmd = qc->scsicmd;
2433 struct ata_device *dev = qc->dev;
2434 int using_pio = (dev->flags & ATA_DFLAG_PIO);
2435 int nodata = (cmd->sc_data_direction == DMA_NONE);
2436
2437 if (!using_pio)
2438 /* Check whether ATAPI DMA is safe */
2439 if (ata_check_atapi_dma(qc))
2440 using_pio = 1;
2441
2442 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2443
2444 qc->complete_fn = atapi_qc_complete;
2445
2446 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2447 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2448 qc->tf.flags |= ATA_TFLAG_WRITE;
2449 DPRINTK("direction: write\n");
2450 }
2451
2452 qc->tf.command = ATA_CMD_PACKET;
2453
2454 /* no data, or PIO data xfer */
2455 if (using_pio || nodata) {
2456 if (nodata)
2457 qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
2458 else
2459 qc->tf.protocol = ATA_PROT_ATAPI;
2460 qc->tf.lbam = (8 * 1024) & 0xff;
2461 qc->tf.lbah = (8 * 1024) >> 8;
2462 }
2463
2464 /* DMA data xfer */
2465 else {
2466 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2467 qc->tf.feature |= ATAPI_PKT_DMA;
2468
2469 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2470 /* some SATA bridges need us to indicate data xfer direction */
2471 qc->tf.feature |= ATAPI_DMADIR;
2472 }
2473
2474 qc->nbytes = cmd->request_bufflen;
2475
2476 return 0;
2477}
2478
2479static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2480{
2481 if (likely(id < ATA_MAX_DEVICES))
2482 return &ap->device[id];
2483 return NULL;
2484}
2485
2486static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2487 const struct scsi_device *scsidev)
2488{
2489 /* skip commands not addressed to targets we simulate */
2490 if (unlikely(scsidev->channel || scsidev->lun))
2491 return NULL;
2492
2493 return ata_find_dev(ap, scsidev->id);
2494}
2495
2496/**
2497 * ata_scsi_dev_enabled - determine if device is enabled
2498 * @dev: ATA device
2499 *
2500 * Determine if commands should be sent to the specified device.
2501 *
2502 * LOCKING:
2503 * spin_lock_irqsave(host lock)
2504 *
2505 * RETURNS:
2506 * 0 if commands are not allowed / 1 if commands are allowed
2507 */
2508
2509static int ata_scsi_dev_enabled(struct ata_device *dev)
2510{
2511 if (unlikely(!ata_dev_enabled(dev)))
2512 return 0;
2513
2514 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2515 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2516 ata_dev_printk(dev, KERN_WARNING,
2517 "WARNING: ATAPI is %s, device ignored.\n",
2518 atapi_enabled ? "not supported with this driver" : "disabled");
2519 return 0;
2520 }
2521 }
2522
2523 return 1;
2524}
2525
2526/**
2527 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2528 * @ap: ATA port to which the device is attached
2529 * @scsidev: SCSI device from which we derive the ATA device
2530 *
2531 * Given various information provided in struct scsi_cmnd,
2532 * map that onto an ATA bus, and using that mapping
2533 * determine which ata_device is associated with the
2534 * SCSI command to be sent.
2535 *
2536 * LOCKING:
2537 * spin_lock_irqsave(host lock)
2538 *
2539 * RETURNS:
2540 * Associated ATA device, or %NULL if not found.
2541 */
2542static struct ata_device *
2543ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2544{
2545 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2546
2547 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2548 return NULL;
2549
2550 return dev;
2551}
2552
2553/*
2554 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2555 * @byte1: Byte 1 from pass-thru CDB.
2556 *
2557 * RETURNS:
2558 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2559 */
2560static u8
2561ata_scsi_map_proto(u8 byte1)
2562{
2563 switch((byte1 & 0x1e) >> 1) {
2564 case 3: /* Non-data */
2565 return ATA_PROT_NODATA;
2566
2567 case 6: /* DMA */
2568 return ATA_PROT_DMA;
2569
2570 case 4: /* PIO Data-in */
2571 case 5: /* PIO Data-out */
2572 return ATA_PROT_PIO;
2573
2574 case 10: /* Device Reset */
2575 case 0: /* Hard Reset */
2576 case 1: /* SRST */
2577 case 2: /* Bus Idle */
2578 case 7: /* Packet */
2579 case 8: /* DMA Queued */
2580 case 9: /* Device Diagnostic */
2581 case 11: /* UDMA Data-in */
2582 case 12: /* UDMA Data-Out */
2583 case 13: /* FPDMA */
2584 default: /* Reserved */
2585 break;
2586 }
2587
2588 return ATA_PROT_UNKNOWN;
2589}
2590
2591/**
2592 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2593 * @qc: command structure to be initialized
2594 * @scsicmd: SCSI command to convert
2595 *
2596 * Handles either 12 or 16-byte versions of the CDB.
2597 *
2598 * RETURNS:
2599 * Zero on success, non-zero on failure.
2600 */
2601static unsigned int
2602ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2603{
2604 struct ata_taskfile *tf = &(qc->tf);
2605 struct scsi_cmnd *cmd = qc->scsicmd;
2606 struct ata_device *dev = qc->dev;
2607
2608 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2609 goto invalid_fld;
2610
2611 /* We may not issue DMA commands if no DMA mode is set */
2612 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2613 goto invalid_fld;
2614
2615 if (scsicmd[1] & 0xe0)
2616 /* PIO multi not supported yet */
2617 goto invalid_fld;
2618
2619 /*
2620 * 12 and 16 byte CDBs use different offsets to
2621 * provide the various register values.
2622 */
2623 if (scsicmd[0] == ATA_16) {
2624 /*
2625 * 16-byte CDB - may contain extended commands.
2626 *
2627 * If that is the case, copy the upper byte register values.
2628 */
2629 if (scsicmd[1] & 0x01) {
2630 tf->hob_feature = scsicmd[3];
2631 tf->hob_nsect = scsicmd[5];
2632 tf->hob_lbal = scsicmd[7];
2633 tf->hob_lbam = scsicmd[9];
2634 tf->hob_lbah = scsicmd[11];
2635 tf->flags |= ATA_TFLAG_LBA48;
2636 } else
2637 tf->flags &= ~ATA_TFLAG_LBA48;
2638
2639 /*
2640 * Always copy low byte, device and command registers.
2641 */
2642 tf->feature = scsicmd[4];
2643 tf->nsect = scsicmd[6];
2644 tf->lbal = scsicmd[8];
2645 tf->lbam = scsicmd[10];
2646 tf->lbah = scsicmd[12];
2647 tf->device = scsicmd[13];
2648 tf->command = scsicmd[14];
2649 } else {
2650 /*
2651 * 12-byte CDB - incapable of extended commands.
2652 */
2653 tf->flags &= ~ATA_TFLAG_LBA48;
2654
2655 tf->feature = scsicmd[3];
2656 tf->nsect = scsicmd[4];
2657 tf->lbal = scsicmd[5];
2658 tf->lbam = scsicmd[6];
2659 tf->lbah = scsicmd[7];
2660 tf->device = scsicmd[8];
2661 tf->command = scsicmd[9];
2662 }
2663 /*
2664 * If slave is possible, enforce correct master/slave bit
2665 */
2666 if (qc->ap->flags & ATA_FLAG_SLAVE_POSS)
2667 tf->device = qc->dev->devno ?
2668 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2669
2670 /*
2671 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2672 * SET_FEATURES - XFER MODE must be preceded/succeeded
2673 * by an update to hardware-specific registers for each
2674 * controller (i.e. the reason for ->set_piomode(),
2675 * ->set_dmamode(), and ->post_set_mode() hooks).
2676 */
2677 if ((tf->command == ATA_CMD_SET_FEATURES)
2678 && (tf->feature == SETFEATURES_XFER))
2679 goto invalid_fld;
2680
2681 /*
2682 * Set flags so that all registers will be written,
2683 * and pass on write indication (used for PIO/DMA
2684 * setup.)
2685 */
2686 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
2687
2688 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2689 tf->flags |= ATA_TFLAG_WRITE;
2690
2691 /*
2692 * Set transfer length.
2693 *
2694 * TODO: find out if we need to do more here to
2695 * cover scatter/gather case.
2696 */
2697 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
2698
2699 /* request result TF */
2700 qc->flags |= ATA_QCFLAG_RESULT_TF;
2701
2702 return 0;
2703
2704 invalid_fld:
2705 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
2706 /* "Invalid field in cdb" */
2707 return 1;
2708}
2709
2710/**
2711 * ata_get_xlat_func - check if SCSI to ATA translation is possible
2712 * @dev: ATA device
2713 * @cmd: SCSI command opcode to consider
2714 *
2715 * Look up the SCSI command given, and determine whether the
2716 * SCSI command is to be translated or simulated.
2717 *
2718 * RETURNS:
2719 * Pointer to translation function if possible, %NULL if not.
2720 */
2721
2722static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
2723{
2724 switch (cmd) {
2725 case READ_6:
2726 case READ_10:
2727 case READ_16:
2728
2729 case WRITE_6:
2730 case WRITE_10:
2731 case WRITE_16:
2732 return ata_scsi_rw_xlat;
2733
2734 case SYNCHRONIZE_CACHE:
2735 if (ata_try_flush_cache(dev))
2736 return ata_scsi_flush_xlat;
2737 break;
2738
2739 case VERIFY:
2740 case VERIFY_16:
2741 return ata_scsi_verify_xlat;
2742
2743 case ATA_12:
2744 case ATA_16:
2745 return ata_scsi_pass_thru;
2746
2747 case START_STOP:
2748 return ata_scsi_start_stop_xlat;
2749 }
2750
2751 return NULL;
2752}
2753
2754/**
2755 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
2756 * @ap: ATA port to which the command was being sent
2757 * @cmd: SCSI command to dump
2758 *
2759 * Prints the contents of a SCSI command via printk().
2760 */
2761
2762static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2763 struct scsi_cmnd *cmd)
2764{
2765#ifdef ATA_DEBUG
2766 struct scsi_device *scsidev = cmd->device;
2767 u8 *scsicmd = cmd->cmnd;
2768
2769 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
2770 ap->id,
2771 scsidev->channel, scsidev->id, scsidev->lun,
2772 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
2773 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
2774 scsicmd[8]);
2775#endif
2776}
2777
2778static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2779 void (*done)(struct scsi_cmnd *),
2780 struct ata_device *dev)
2781{
2782 int rc = 0;
2783
2784 if (dev->class == ATA_DEV_ATA) {
2785 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2786 cmd->cmnd[0]);
2787
2788 if (xlat_func)
2789 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2790 else
2791 ata_scsi_simulate(dev, cmd, done);
2792 } else
2793 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2794
2795 return rc;
2796}
2797
2798/**
2799 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
2800 * @cmd: SCSI command to be sent
2801 * @done: Completion function, called when command is complete
2802 *
2803 * In some cases, this function translates SCSI commands into
2804 * ATA taskfiles, and queues the taskfiles to be sent to
2805 * hardware. In other cases, this function simulates a
2806 * SCSI device by evaluating and responding to certain
2807 * SCSI commands. This creates the overall effect of
2808 * ATA and ATAPI devices appearing as SCSI devices.
2809 *
2810 * LOCKING:
2811 * Releases scsi-layer-held lock, and obtains host lock.
2812 *
2813 * RETURNS:
2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2815 * 0 otherwise.
2816 */
2817int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2818{
2819 struct ata_port *ap;
2820 struct ata_device *dev;
2821 struct scsi_device *scsidev = cmd->device;
2822 struct Scsi_Host *shost = scsidev->host;
2823 int rc = 0;
2824
2825 ap = ata_shost_to_port(shost);
2826
2827 spin_unlock(shost->host_lock);
2828 spin_lock(ap->lock);
2829
2830 ata_scsi_dump_cdb(ap, cmd);
2831
2832 dev = ata_scsi_find_dev(ap, scsidev);
2833 if (likely(dev))
2834 rc = __ata_scsi_queuecmd(cmd, done, dev);
2835 else {
2836 cmd->result = (DID_BAD_TARGET << 16);
2837 done(cmd);
2838 }
2839
2840 spin_unlock(ap->lock);
2841 spin_lock(shost->host_lock);
2842 return rc;
2843}
2844
2845/**
2846 * ata_scsi_simulate - simulate SCSI command on ATA device
2847 * @dev: the target device
2848 * @cmd: SCSI command being sent to device.
2849 * @done: SCSI command completion function.
2850 *
2851 * Interprets and directly executes a select list of SCSI commands
2852 * that can be handled internally.
2853 *
2854 * LOCKING:
2855 * spin_lock_irqsave(host lock)
2856 */
2857
2858void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2859 void (*done)(struct scsi_cmnd *))
2860{
2861 struct ata_scsi_args args;
2862 const u8 *scsicmd = cmd->cmnd;
2863
2864 args.dev = dev;
2865 args.id = dev->id;
2866 args.cmd = cmd;
2867 args.done = done;
2868
2869 switch(scsicmd[0]) {
2870 /* no-op's, complete with success */
2871 case SYNCHRONIZE_CACHE:
2872 case REZERO_UNIT:
2873 case SEEK_6:
2874 case SEEK_10:
2875 case TEST_UNIT_READY:
2876 case FORMAT_UNIT: /* FIXME: correct? */
2877 case SEND_DIAGNOSTIC: /* FIXME: correct? */
2878 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2879 break;
2880
2881 case INQUIRY:
2882 if (scsicmd[1] & 2) /* is CmdDt set? */
2883 ata_scsi_invalid_field(cmd, done);
2884 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
2885 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
2886 else if (scsicmd[2] == 0x00)
2887 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
2888 else if (scsicmd[2] == 0x80)
2889 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
2890 else if (scsicmd[2] == 0x83)
2891 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
2892 else
2893 ata_scsi_invalid_field(cmd, done);
2894 break;
2895
2896 case MODE_SENSE:
2897 case MODE_SENSE_10:
2898 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
2899 break;
2900
2901 case MODE_SELECT: /* unconditionally return */
2902 case MODE_SELECT_10: /* bad-field-in-cdb */
2903 ata_scsi_invalid_field(cmd, done);
2904 break;
2905
2906 case READ_CAPACITY:
2907 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2908 break;
2909
2910 case SERVICE_ACTION_IN:
2911 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
2912 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2913 else
2914 ata_scsi_invalid_field(cmd, done);
2915 break;
2916
2917 case REPORT_LUNS:
2918 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
2919 break;
2920
2921 /* mandatory commands we haven't implemented yet */
2922 case REQUEST_SENSE:
2923
2924 /* all other commands */
2925 default:
2926 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
2927 /* "Invalid command operation code" */
2928 done(cmd);
2929 break;
2930 }
2931}
2932
2933void ata_scsi_scan_host(struct ata_port *ap)
2934{
2935 unsigned int i;
2936
2937 if (ap->flags & ATA_FLAG_DISABLED)
2938 return;
2939
2940 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2941 struct ata_device *dev = &ap->device[i];
2942 struct scsi_device *sdev;
2943
2944 if (!ata_dev_enabled(dev) || dev->sdev)
2945 continue;
2946
2947 sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
2948 if (!IS_ERR(sdev)) {
2949 dev->sdev = sdev;
2950 scsi_device_put(sdev);
2951 }
2952 }
2953}
2954
2955/**
2956 * ata_scsi_offline_dev - offline attached SCSI device
2957 * @dev: ATA device to offline attached SCSI device for
2958 *
2959 * This function is called from ata_eh_hotplug() and is responsible
2960 * for taking the SCSI device attached to @dev offline. This
2961 * function is called with the host lock held, which protects dev->sdev
2962 * against clearing.
2963 *
2964 * LOCKING:
2965 * spin_lock_irqsave(host lock)
2966 *
2967 * RETURNS:
2968 * 1 if attached SCSI device exists, 0 otherwise.
2969 */
2970int ata_scsi_offline_dev(struct ata_device *dev)
2971{
2972 if (dev->sdev) {
2973 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2974 return 1;
2975 }
2976 return 0;
2977}
2978
2979/**
2980 * ata_scsi_remove_dev - remove attached SCSI device
2981 * @dev: ATA device to remove attached SCSI device for
2982 *
2983 * This function is called from ata_eh_scsi_hotplug() and is
2984 * responsible for removing the SCSI device attached to @dev.
2985 *
2986 * LOCKING:
2987 * Kernel thread context (may sleep).
2988 */
2989static void ata_scsi_remove_dev(struct ata_device *dev)
2990{
2991 struct ata_port *ap = dev->ap;
2992 struct scsi_device *sdev;
2993 unsigned long flags;
2994
2995 /* Alas, we need to grab scan_mutex to ensure SCSI device
2996 * state doesn't change underneath us and thus
2997 * scsi_device_get() always succeeds. The mutex locking can
2998	 * be removed if there is a __scsi_device_get() interface which
2999 * increments reference counts regardless of device state.
3000 */
3001 mutex_lock(&ap->scsi_host->scan_mutex);
3002 spin_lock_irqsave(ap->lock, flags);
3003
3004 /* clearing dev->sdev is protected by host lock */
3005 sdev = dev->sdev;
3006 dev->sdev = NULL;
3007
3008 if (sdev) {
3009 /* If user initiated unplug races with us, sdev can go
3010 * away underneath us after the host lock and
3011 * scan_mutex are released. Hold onto it.
3012 */
3013 if (scsi_device_get(sdev) == 0) {
3014 /* The following ensures the attached sdev is
3015 * offline on return from ata_scsi_offline_dev()
3016		 * regardless of whether it wins or loses the race
3017 * against this function.
3018 */
3019 scsi_device_set_state(sdev, SDEV_OFFLINE);
3020 } else {
3021 WARN_ON(1);
3022 sdev = NULL;
3023 }
3024 }
3025
3026 spin_unlock_irqrestore(ap->lock, flags);
3027 mutex_unlock(&ap->scsi_host->scan_mutex);
3028
3029 if (sdev) {
3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
3031 sdev->sdev_gendev.bus_id);
3032
3033 scsi_remove_device(sdev);
3034 scsi_device_put(sdev);
3035 }
3036}
3037
3038/**
3039 * ata_scsi_hotplug - SCSI part of hotplug
3040 * @data: Pointer to ATA port to perform SCSI hotplug on
3041 *
3042 * Perform SCSI part of hotplug. It's executed from a separate
3043 * workqueue after EH completes. This is necessary because SCSI
3044 * hot plugging requires working EH and hot unplugging is
3045 * synchronized with hot plugging via a mutex.
3046 *
3047 * LOCKING:
3048 * Kernel thread context (may sleep).
3049 */
3050void ata_scsi_hotplug(void *data)
3051{
3052 struct ata_port *ap = data;
3053 int i;
3054
3055 if (ap->pflags & ATA_PFLAG_UNLOADING) {
3056 DPRINTK("ENTER/EXIT - unloading\n");
3057 return;
3058 }
3059
3060 DPRINTK("ENTER\n");
3061
3062 /* unplug detached devices */
3063 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3064 struct ata_device *dev = &ap->device[i];
3065 unsigned long flags;
3066
3067 if (!(dev->flags & ATA_DFLAG_DETACHED))
3068 continue;
3069
3070 spin_lock_irqsave(ap->lock, flags);
3071 dev->flags &= ~ATA_DFLAG_DETACHED;
3072 spin_unlock_irqrestore(ap->lock, flags);
3073
3074 ata_scsi_remove_dev(dev);
3075 }
3076
3077 /* scan for new ones */
3078 ata_scsi_scan_host(ap);
3079
3080 /* If we scanned while EH was in progress, scan would have
3081 * failed silently. Requeue if there are enabled but
3082 * unattached devices.
3083 */
3084 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3085 struct ata_device *dev = &ap->device[i];
3086 if (ata_dev_enabled(dev) && !dev->sdev) {
3087 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
3088 break;
3089 }
3090 }
3091
3092 DPRINTK("EXIT\n");
3093}
3094
3095/**
3096 * ata_scsi_user_scan - indication for user-initiated bus scan
3097 * @shost: SCSI host to scan
3098 * @channel: Channel to scan
3099 * @id: ID to scan
3100 * @lun: LUN to scan
3101 *
3102 * This function is called when the user explicitly requests a bus
3103 * scan. It sets the probe pending flag and invokes EH.
3104 *
3105 * LOCKING:
3106 * SCSI layer (we don't care)
3107 *
3108 * RETURNS:
3109 * Zero.
3110 */
3111static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3112 unsigned int id, unsigned int lun)
3113{
3114 struct ata_port *ap = ata_shost_to_port(shost);
3115 unsigned long flags;
3116 int rc = 0;
3117
3118 if (!ap->ops->error_handler)
3119 return -EOPNOTSUPP;
3120
3121 if ((channel != SCAN_WILD_CARD && channel != 0) ||
3122 (lun != SCAN_WILD_CARD && lun != 0))
3123 return -EINVAL;
3124
3125 spin_lock_irqsave(ap->lock, flags);
3126
3127 if (id == SCAN_WILD_CARD) {
3128 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
3129 ap->eh_info.action |= ATA_EH_SOFTRESET;
3130 } else {
3131 struct ata_device *dev = ata_find_dev(ap, id);
3132
3133 if (dev) {
3134 ap->eh_info.probe_mask |= 1 << dev->devno;
3135 ap->eh_info.action |= ATA_EH_SOFTRESET;
3136 ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
3137 } else
3138 rc = -EINVAL;
3139 }
3140
3141 if (rc == 0)
3142 ata_port_schedule_eh(ap);
3143
3144 spin_unlock_irqrestore(ap->lock, flags);
3145
3146 return rc;
3147}
3148
3149/**
3150 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3151 * @data: Pointer to ATA port to perform scsi_rescan_device()
3152 *
3153 * After ATA pass thru (SAT) commands are executed successfully,
3154 * libata needs to propagate the changes to the SCSI layer. This
3155 * function must be executed from ata_aux_wq such that sdev
3156 * attach/detach don't race with rescan.
3157 *
3158 * LOCKING:
3159 * Kernel thread context (may sleep).
3160 */
3161void ata_scsi_dev_rescan(void *data)
3162{
3163 struct ata_port *ap = data;
3164 struct ata_device *dev;
3165 unsigned int i;
3166
3167 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3168 dev = &ap->device[i];
3169
3170 if (ata_dev_enabled(dev) && dev->sdev)
3171 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3172 }
3173}
3174
3175/**
3176 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
3177 * @host: ATA host that the allocated port belongs to
3178 * @port_info: Information from low-level host driver
3179 * @shost: SCSI host that the scsi device is attached to
3180 *
3181 * LOCKING:
3182 * PCI/etc. bus probe sem.
3183 *
3184 * RETURNS:
3185 * ata_port pointer on success / NULL on failure.
3186 */
3187
3188struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3189 struct ata_port_info *port_info,
3190 struct Scsi_Host *shost)
3191{
3192 struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
3193 struct ata_probe_ent *ent;
3194
3195 if (!ap)
3196 return NULL;
3197
3198 ent = ata_probe_ent_alloc(host->dev, port_info);
3199 if (!ent) {
3200 kfree(ap);
3201 return NULL;
3202 }
3203
3204 ata_port_init(ap, host, ent, 0);
3205 ap->lock = shost->host_lock;
3206 kfree(ent);
3207 return ap;
3208}
3209EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3210
3211/**
3212 * ata_sas_port_start - Set port up for dma.
3213 * @ap: Port to initialize
3214 *
3215 * Called just after data structures for each port are
3216 * initialized. Allocates DMA pad.
3217 *
3218 * May be used as the port_start() entry in ata_port_operations.
3219 *
3220 * LOCKING:
3221 * Inherited from caller.
3222 */
3223int ata_sas_port_start(struct ata_port *ap)
3224{
3225 return ata_pad_alloc(ap, ap->dev);
3226}
3227EXPORT_SYMBOL_GPL(ata_sas_port_start);
3228
3229/**
3230 * ata_sas_port_stop - Undo ata_sas_port_start()
3231 * @ap: Port to shut down
3232 *
3233 * Frees the DMA pad.
3234 *
3235 * May be used as the port_stop() entry in ata_port_operations.
3236 *
3237 * LOCKING:
3238 * Inherited from caller.
3239 */
3240
3241void ata_sas_port_stop(struct ata_port *ap)
3242{
3243 ata_pad_free(ap, ap->dev);
3244}
3245EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3246
3247/**
3248 * ata_sas_port_init - Initialize a SATA device
3249 * @ap: SATA port to initialize
3250 *
3251 * LOCKING:
3252 * PCI/etc. bus probe sem.
3253 *
3254 * RETURNS:
3255 * Zero on success, non-zero on error.
3256 */
3257
3258int ata_sas_port_init(struct ata_port *ap)
3259{
3260 int rc = ap->ops->port_start(ap);
3261
3262 if (!rc)
3263 rc = ata_bus_probe(ap);
3264
3265 return rc;
3266}
3267EXPORT_SYMBOL_GPL(ata_sas_port_init);
3268
3269/**
3270 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
3271 * @ap: SATA port to destroy
3272 *
3273 */
3274
3275void ata_sas_port_destroy(struct ata_port *ap)
3276{
3277 ap->ops->port_stop(ap);
3278 kfree(ap);
3279}
3280EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3281
3282/**
3283 * ata_sas_slave_configure - Default slave_config routine for libata devices
3284 * @sdev: SCSI device to configure
3285 * @ap: ATA port to which SCSI device is attached
3286 *
3287 * RETURNS:
3288 * Zero.
3289 */
3290
3291int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3292{
3293 ata_scsi_sdev_config(sdev);
3294 ata_scsi_dev_config(sdev, ap->device);
3295 return 0;
3296}
3297EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3298
3299/**
3300 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3301 * @cmd: SCSI command to be sent
3302 * @done: Completion function, called when command is complete
3303 * @ap: ATA port to which the command is being sent
3304 *
3305 * RETURNS:
3306 * Zero.
3307 */
3308
3309int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3310 struct ata_port *ap)
3311{
3312 ata_scsi_dump_cdb(ap, cmd);
3313
3314 if (likely(ata_scsi_dev_enabled(ap->device)))
3315 __ata_scsi_queuecmd(cmd, done, ap->device);
3316 else {
3317 cmd->result = (DID_BAD_TARGET << 16);
3318 done(cmd);
3319 }
3320 return 0;
3321}
3322EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
new file mode 100644
index 000000000000..688bb55e197a
--- /dev/null
+++ b/drivers/ata/libata-sff.c
@@ -0,0 +1,1121 @@
1/*
2 *  libata-sff.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/pci.h>
37#include <linux/libata.h>
38
39#include "libata.h"
40
41/**
42 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set
45 *
46 * Outputs ATA taskfile to standard ATA host controller.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51
52static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
53{
54 struct ata_ioports *ioaddr = &ap->ioaddr;
55 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
56
57 if (tf->ctl != ap->last_ctl) {
58 outb(tf->ctl, ioaddr->ctl_addr);
59 ap->last_ctl = tf->ctl;
60 ata_wait_idle(ap);
61 }
62
63 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
64 outb(tf->hob_feature, ioaddr->feature_addr);
65 outb(tf->hob_nsect, ioaddr->nsect_addr);
66 outb(tf->hob_lbal, ioaddr->lbal_addr);
67 outb(tf->hob_lbam, ioaddr->lbam_addr);
68 outb(tf->hob_lbah, ioaddr->lbah_addr);
69 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
70 tf->hob_feature,
71 tf->hob_nsect,
72 tf->hob_lbal,
73 tf->hob_lbam,
74 tf->hob_lbah);
75 }
76
77 if (is_addr) {
78 outb(tf->feature, ioaddr->feature_addr);
79 outb(tf->nsect, ioaddr->nsect_addr);
80 outb(tf->lbal, ioaddr->lbal_addr);
81 outb(tf->lbam, ioaddr->lbam_addr);
82 outb(tf->lbah, ioaddr->lbah_addr);
83 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
84 tf->feature,
85 tf->nsect,
86 tf->lbal,
87 tf->lbam,
88 tf->lbah);
89 }
90
91 if (tf->flags & ATA_TFLAG_DEVICE) {
92 outb(tf->device, ioaddr->device_addr);
93 VPRINTK("device 0x%X\n", tf->device);
94 }
95
96 ata_wait_idle(ap);
97}
98
99/**
100 * ata_tf_load_mmio - send taskfile registers to host controller
101 * @ap: Port to which output is sent
102 * @tf: ATA taskfile register set
103 *
104 * Outputs ATA taskfile to standard ATA host controller using MMIO.
105 *
106 * LOCKING:
107 * Inherited from caller.
108 */
109
110static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
111{
112 struct ata_ioports *ioaddr = &ap->ioaddr;
113 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
114
115 if (tf->ctl != ap->last_ctl) {
116 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
117 ap->last_ctl = tf->ctl;
118 ata_wait_idle(ap);
119 }
120
121 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
122 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
123 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
124 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
125 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
126 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
127 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
128 tf->hob_feature,
129 tf->hob_nsect,
130 tf->hob_lbal,
131 tf->hob_lbam,
132 tf->hob_lbah);
133 }
134
135 if (is_addr) {
136 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
137 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
138 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
139 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
140 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
141 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
142 tf->feature,
143 tf->nsect,
144 tf->lbal,
145 tf->lbam,
146 tf->lbah);
147 }
148
149 if (tf->flags & ATA_TFLAG_DEVICE) {
150 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
151 VPRINTK("device 0x%X\n", tf->device);
152 }
153
154 ata_wait_idle(ap);
155}
156
157
158/**
159 * ata_tf_load - send taskfile registers to host controller
160 * @ap: Port to which output is sent
161 * @tf: ATA taskfile register set
162 *
163 * Outputs ATA taskfile to standard ATA host controller using MMIO
164 * or PIO as indicated by the ATA_FLAG_MMIO flag.
165 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
166 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
167 * hob_lbal, hob_lbam, and hob_lbah.
168 *
169 * This function waits for idle (!BUSY and !DRQ) after writing
170 * registers. If the control register has a new value, this
171 * function also waits for idle after writing control and before
172 * writing the remaining registers.
173 *
174 * May be used as the tf_load() entry in ata_port_operations.
175 *
176 * LOCKING:
177 * Inherited from caller.
178 */
179void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
180{
181 if (ap->flags & ATA_FLAG_MMIO)
182 ata_tf_load_mmio(ap, tf);
183 else
184 ata_tf_load_pio(ap, tf);
185}
186
187/**
188 * ata_exec_command_pio - issue ATA command to host controller
189 * @ap: port to which command is being issued
190 * @tf: ATA taskfile register set
191 *
192 * Issues PIO write to ATA command register, with proper
193 * synchronization with interrupt handler / other threads.
194 *
195 * LOCKING:
196 * spin_lock_irqsave(host lock)
197 */
198
199static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
200{
201 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
202
203 outb(tf->command, ap->ioaddr.command_addr);
204 ata_pause(ap);
205}
206
207
208/**
209 * ata_exec_command_mmio - issue ATA command to host controller
210 * @ap: port to which command is being issued
211 * @tf: ATA taskfile register set
212 *
213 * Issues MMIO write to ATA command register, with proper
214 * synchronization with interrupt handler / other threads.
215 *
216 * FIXME: missing write posting for 400nS delay enforcement
217 *
218 * LOCKING:
219 * spin_lock_irqsave(host lock)
220 */
221
222static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
223{
224 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
225
226 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
227 ata_pause(ap);
228}
229
230
231/**
232 * ata_exec_command - issue ATA command to host controller
233 * @ap: port to which command is being issued
234 * @tf: ATA taskfile register set
235 *
236 * Issues PIO/MMIO write to ATA command register, with proper
237 * synchronization with interrupt handler / other threads.
238 *
239 * LOCKING:
240 * spin_lock_irqsave(host lock)
241 */
242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243{
244 if (ap->flags & ATA_FLAG_MMIO)
245 ata_exec_command_mmio(ap, tf);
246 else
247 ata_exec_command_pio(ap, tf);
248}
249
250/**
251 * ata_tf_read_pio - input device's ATA taskfile shadow registers
252 * @ap: Port from which input is read
253 * @tf: ATA taskfile register set for storing input
254 *
255 * Reads ATA taskfile registers for currently-selected device
256 * into @tf.
257 *
258 * LOCKING:
259 * Inherited from caller.
260 */
261
262static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
263{
264 struct ata_ioports *ioaddr = &ap->ioaddr;
265
266 tf->command = ata_check_status(ap);
267 tf->feature = inb(ioaddr->error_addr);
268 tf->nsect = inb(ioaddr->nsect_addr);
269 tf->lbal = inb(ioaddr->lbal_addr);
270 tf->lbam = inb(ioaddr->lbam_addr);
271 tf->lbah = inb(ioaddr->lbah_addr);
272 tf->device = inb(ioaddr->device_addr);
273
274 if (tf->flags & ATA_TFLAG_LBA48) {
275 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
276 tf->hob_feature = inb(ioaddr->error_addr);
277 tf->hob_nsect = inb(ioaddr->nsect_addr);
278 tf->hob_lbal = inb(ioaddr->lbal_addr);
279 tf->hob_lbam = inb(ioaddr->lbam_addr);
280 tf->hob_lbah = inb(ioaddr->lbah_addr);
281 }
282}
283
284/**
285 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
286 * @ap: Port from which input is read
287 * @tf: ATA taskfile register set for storing input
288 *
289 * Reads ATA taskfile registers for currently-selected device
290 * into @tf via MMIO.
291 *
292 * LOCKING:
293 * Inherited from caller.
294 */
295
296static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
297{
298 struct ata_ioports *ioaddr = &ap->ioaddr;
299
300 tf->command = ata_check_status(ap);
301 tf->feature = readb((void __iomem *)ioaddr->error_addr);
302 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
303 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
304 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
305 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
306 tf->device = readb((void __iomem *)ioaddr->device_addr);
307
308 if (tf->flags & ATA_TFLAG_LBA48) {
309 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
310 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
311 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
312 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
313 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
314 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
315 }
316}
317
318
319/**
320 * ata_tf_read - input device's ATA taskfile shadow registers
321 * @ap: Port from which input is read
322 * @tf: ATA taskfile register set for storing input
323 *
324 * Reads ATA taskfile registers for currently-selected device
325 * into @tf.
326 *
327 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
328 * is set, also reads the hob registers.
329 *
330 * May be used as the tf_read() entry in ata_port_operations.
331 *
332 * LOCKING:
333 * Inherited from caller.
334 */
335void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
336{
337 if (ap->flags & ATA_FLAG_MMIO)
338 ata_tf_read_mmio(ap, tf);
339 else
340 ata_tf_read_pio(ap, tf);
341}
342
343/**
344 * ata_check_status_pio - Read device status reg & clear interrupt
345 * @ap: port where the device is
346 *
347 * Reads ATA taskfile status register for currently-selected device
348 *	and returns its value. This also clears pending interrupts
349 *      from this device.
350 *
351 * LOCKING:
352 * Inherited from caller.
353 */
354static u8 ata_check_status_pio(struct ata_port *ap)
355{
356 return inb(ap->ioaddr.status_addr);
357}
358
359/**
360 * ata_check_status_mmio - Read device status reg & clear interrupt
361 * @ap: port where the device is
362 *
363 * Reads ATA taskfile status register for currently-selected device
364 *	via MMIO and returns its value. This also clears pending interrupts
365 *      from this device.
366 *
367 * LOCKING:
368 * Inherited from caller.
369 */
370static u8 ata_check_status_mmio(struct ata_port *ap)
371{
372 return readb((void __iomem *) ap->ioaddr.status_addr);
373}
374
375
376/**
377 * ata_check_status - Read device status reg & clear interrupt
378 * @ap: port where the device is
379 *
380 * Reads ATA taskfile status register for currently-selected device
381 *	and returns its value. This also clears pending interrupts
382 *      from this device.
383 *
384 * May be used as the check_status() entry in ata_port_operations.
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389u8 ata_check_status(struct ata_port *ap)
390{
391 if (ap->flags & ATA_FLAG_MMIO)
392 return ata_check_status_mmio(ap);
393 return ata_check_status_pio(ap);
394}
395
396
397/**
398 * ata_altstatus - Read device alternate status reg
399 * @ap: port where the device is
400 *
401 * Reads ATA taskfile alternate status register for
402 *	currently-selected device and returns its value.
403 *
404 * Note: may NOT be used as the check_altstatus() entry in
405 * ata_port_operations.
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410u8 ata_altstatus(struct ata_port *ap)
411{
412 if (ap->ops->check_altstatus)
413 return ap->ops->check_altstatus(ap);
414
415 if (ap->flags & ATA_FLAG_MMIO)
416 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
417 return inb(ap->ioaddr.altstatus_addr);
418}
419
420/**
421 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
422 * @qc: Info associated with this ATA transaction.
423 *
424 * LOCKING:
425 * spin_lock_irqsave(host lock)
426 */
427
428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
429{
430 struct ata_port *ap = qc->ap;
431 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
432 u8 dmactl;
433 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
434
435 /* load PRD table addr. */
436 mb(); /* make sure PRD table writes are visible to controller */
437 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
438
439 /* specify data direction, triple-check start bit is clear */
440 dmactl = readb(mmio + ATA_DMA_CMD);
441 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
442 if (!rw)
443 dmactl |= ATA_DMA_WR;
444 writeb(dmactl, mmio + ATA_DMA_CMD);
445
446 /* issue r/w command */
447 ap->ops->exec_command(ap, &qc->tf);
448}
449
450/**
451 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
452 * @qc: Info associated with this ATA transaction.
453 *
454 * LOCKING:
455 * spin_lock_irqsave(host lock)
456 */
457
458static void ata_bmdma_start_mmio(struct ata_queued_cmd *qc)
459{
460 struct ata_port *ap = qc->ap;
461 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
462 u8 dmactl;
463
464 /* start host DMA transaction */
465 dmactl = readb(mmio + ATA_DMA_CMD);
466 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
467
468 /* Strictly, one may wish to issue a readb() here, to
469 * flush the mmio write. However, control also passes
470 * to the hardware at this point, and it will interrupt
471 * us when we are to resume control. So, in effect,
472 * we don't care when the mmio write flushes.
473 * Further, a read of the DMA status register _immediately_
474 * following the write may not be what certain flaky hardware
475 * is expecting, so I think it is best not to add a readb()
476 * without first testing all the MMIO ATA cards/mobos.
477 * Or maybe I'm just being paranoid.
478 */
479}
480
481/**
482 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
483 * @qc: Info associated with this ATA transaction.
484 *
485 * LOCKING:
486 * spin_lock_irqsave(host lock)
487 */
488
489static void ata_bmdma_setup_pio(struct ata_queued_cmd *qc)
490{
491 struct ata_port *ap = qc->ap;
492 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
493 u8 dmactl;
494
495 /* load PRD table addr. */
496 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
497
498 /* specify data direction, triple-check start bit is clear */
499 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
500 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
501 if (!rw)
502 dmactl |= ATA_DMA_WR;
503 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
504
505 /* issue r/w command */
506 ap->ops->exec_command(ap, &qc->tf);
507}
508
509/**
510 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
511 * @qc: Info associated with this ATA transaction.
512 *
513 * LOCKING:
514 * spin_lock_irqsave(host lock)
515 */
516
517static void ata_bmdma_start_pio(struct ata_queued_cmd *qc)
518{
519 struct ata_port *ap = qc->ap;
520 u8 dmactl;
521
522 /* start host DMA transaction */
523 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
524 outb(dmactl | ATA_DMA_START,
525 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
526}
527
528
529/**
530 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
531 * @qc: Info associated with this ATA transaction.
532 *
533 * Writes the ATA_DMA_START flag to the DMA command register.
534 *
535 * May be used as the bmdma_start() entry in ata_port_operations.
536 *
537 * LOCKING:
538 * spin_lock_irqsave(host lock)
539 */
540void ata_bmdma_start(struct ata_queued_cmd *qc)
541{
542 if (qc->ap->flags & ATA_FLAG_MMIO)
543 ata_bmdma_start_mmio(qc);
544 else
545 ata_bmdma_start_pio(qc);
546}
547
548
549/**
550 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
551 * @qc: Info associated with this ATA transaction.
552 *
553 * Writes address of PRD table to device's PRD Table Address
554 * register, sets the DMA control register, and calls
555 * ops->exec_command() to start the transfer.
556 *
557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 *
559 * LOCKING:
560 * spin_lock_irqsave(host lock)
561 */
562void ata_bmdma_setup(struct ata_queued_cmd *qc)
563{
564 if (qc->ap->flags & ATA_FLAG_MMIO)
565 ata_bmdma_setup_mmio(qc);
566 else
567 ata_bmdma_setup_pio(qc);
568}
569
570
571/**
572 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
573 * @ap: Port associated with this ATA transaction.
574 *
575 * Clear interrupt and error flags in DMA status register.
576 *
577 * May be used as the irq_clear() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * spin_lock_irqsave(host lock)
581 */
582
583void ata_bmdma_irq_clear(struct ata_port *ap)
584{
585 if (!ap->ioaddr.bmdma_addr)
586 return;
587
588 if (ap->flags & ATA_FLAG_MMIO) {
589 void __iomem *mmio =
590 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
591 writeb(readb(mmio), mmio);
592 } else {
593 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
594 outb(inb(addr), addr);
595 }
596}
597
598
599/**
600 * ata_bmdma_status - Read PCI IDE BMDMA status
601 * @ap: Port associated with this ATA transaction.
602 *
603 * Read and return BMDMA status register.
604 *
605 * May be used as the bmdma_status() entry in ata_port_operations.
606 *
607 * LOCKING:
608 * spin_lock_irqsave(host lock)
609 */
610
611u8 ata_bmdma_status(struct ata_port *ap)
612{
613 u8 host_stat;
614 if (ap->flags & ATA_FLAG_MMIO) {
615 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
616 host_stat = readb(mmio + ATA_DMA_STATUS);
617 } else
618 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
619 return host_stat;
620}
621
622
623/**
624 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
625 * @qc: Command we are ending DMA for
626 *
627 * Clears the ATA_DMA_START flag in the dma control register
628 *
629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 *
631 * LOCKING:
632 * spin_lock_irqsave(host lock)
633 */
634
635void ata_bmdma_stop(struct ata_queued_cmd *qc)
636{
637 struct ata_port *ap = qc->ap;
638 if (ap->flags & ATA_FLAG_MMIO) {
639 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
640
641 /* clear start/stop bit */
642 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
643 mmio + ATA_DMA_CMD);
644 } else {
645 /* clear start/stop bit */
646 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
647 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
648 }
649
650 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
651 ata_altstatus(ap); /* dummy read */
652}
653
654/**
655 * ata_bmdma_freeze - Freeze BMDMA controller port
656 * @ap: port to freeze
657 *
658 * Freeze BMDMA controller port by masking device interrupts (ATA_NIEN).
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663void ata_bmdma_freeze(struct ata_port *ap)
664{
665 struct ata_ioports *ioaddr = &ap->ioaddr;
666
667 ap->ctl |= ATA_NIEN;
668 ap->last_ctl = ap->ctl;
669
670 if (ap->flags & ATA_FLAG_MMIO)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else
673 outb(ap->ctl, ioaddr->ctl_addr);
674}
675
676/**
677 * ata_bmdma_thaw - Thaw BMDMA controller port
678 * @ap: port to thaw
679 *
680 * Thaw BMDMA controller port: clear stale interrupts and re-enable them.
681 *
682 * LOCKING:
683 * Inherited from caller.
684 */
685void ata_bmdma_thaw(struct ata_port *ap)
686{
687 /* clear & re-enable interrupts */
688 ata_chk_status(ap);
689 ap->ops->irq_clear(ap);
690 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
691 ata_irq_on(ap);
692}
693
694/**
695 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
696 * @ap: port to handle error for
697 * @prereset: prereset method (can be NULL)
698 * @softreset: softreset method (can be NULL)
699 * @hardreset: hardreset method (can be NULL)
700 * @postreset: postreset method (can be NULL)
701 *
702 * Handle error for ATA BMDMA controller. It can handle both
703 * PATA and SATA controllers. Many controllers should be able to
704 * use this EH as-is or with some added handling before and
705 * after.
706 *
707 * This function is intended to be used for constructing
708 * the ->error_handler callback of low-level drivers.
709 *
710 * LOCKING:
711 * Kernel thread context (may sleep)
712 */
713void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset)
716{
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc;
719 unsigned long flags;
720 int thaw = 0;
721
722 qc = __ata_qc_from_tag(ap, ap->active_tag);
723 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
724 qc = NULL;
725
726 /* reset PIO HSM and stop DMA engine */
727 spin_lock_irqsave(ap->lock, flags);
728
729 ap->hsm_task_state = HSM_ST_IDLE;
730
731 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat;
734
735 host_stat = ata_bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738
739 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't
741 * really a timeout event, adjust error mask and
742 * cancel frozen state.
743 */
744 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
745 qc->err_mask = AC_ERR_HOST_BUS;
746 thaw = 1;
747 }
748
749 ap->ops->bmdma_stop(qc);
750 }
751
752 ata_altstatus(ap);
753 ata_chk_status(ap);
754 ap->ops->irq_clear(ap);
755
756 spin_unlock_irqrestore(ap->lock, flags);
757
758 if (thaw)
759 ata_eh_thaw_port(ap);
760
761 /* PIO and DMA engines have been stopped, perform recovery */
762 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
763}
764
765/**
766 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
767 * @ap: port to handle error for
768 *
769 * Stock error handler for BMDMA controller.
770 *
771 * LOCKING:
772 * Kernel thread context (may sleep)
773 */
774void ata_bmdma_error_handler(struct ata_port *ap)
775{
776 ata_reset_fn_t hardreset;
777
778 hardreset = NULL;
779 if (sata_scr_valid(ap))
780 hardreset = sata_std_hardreset;
781
782 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
783 ata_std_postreset);
784}
785
786/**
787 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
788 * BMDMA controller
789 * @qc: internal command to clean up
790 *
791 * LOCKING:
792 * Kernel thread context (may sleep)
793 */
794void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
795{
796 ata_bmdma_stop(qc);
797}
798
799#ifdef CONFIG_PCI
800/**
801 * ata_pci_init_native_mode - Initialize native-mode driver
802 * @pdev: pci device to be initialized
803 * @port: array[2] of pointers to port info structures.
804 * @ports: bitmap of ports present
805 *
806 * Utility function which allocates and initializes an
807 * ata_probe_ent structure for a standard dual-port
808 * PIO-based IDE controller. The returned ata_probe_ent
809 * structure can be passed to ata_device_add(), after which it
810 * should be freed with kfree().
811 *
812 * The caller need only pass the address of the primary port; the
813 * secondary will be deduced automatically. If the device has
814 * non-standard secondary port mappings this function can be called twice,
815 * once for each interface.
816 */
817
818struct ata_probe_ent *
819ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
820{
821 struct ata_probe_ent *probe_ent =
822 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
823 int p = 0;
824 unsigned long bmdma;
825
826 if (!probe_ent)
827 return NULL;
828
829 probe_ent->irq = pdev->irq;
830 probe_ent->irq_flags = IRQF_SHARED;
831 probe_ent->private_data = port[0]->private_data;
832
833 if (ports & ATA_PORT_PRIMARY) {
834 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
835 probe_ent->port[p].altstatus_addr =
836 probe_ent->port[p].ctl_addr =
837 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
838 bmdma = pci_resource_start(pdev, 4);
839 if (bmdma) {
840 if (inb(bmdma + 2) & 0x80)
841 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
842 probe_ent->port[p].bmdma_addr = bmdma;
843 }
844 ata_std_ports(&probe_ent->port[p]);
845 p++;
846 }
847
848 if (ports & ATA_PORT_SECONDARY) {
849 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
850 probe_ent->port[p].altstatus_addr =
851 probe_ent->port[p].ctl_addr =
852 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
853 bmdma = pci_resource_start(pdev, 4);
854 if (bmdma) {
855 bmdma += 8;
856 if (inb(bmdma + 2) & 0x80)
857 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
858 probe_ent->port[p].bmdma_addr = bmdma;
859 }
860 ata_std_ports(&probe_ent->port[p]);
861 probe_ent->pinfo2 = port[1];
862 p++;
863 }
864
865 probe_ent->n_ports = p;
866 return probe_ent;
867}
868
869
870static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
871 struct ata_port_info **port, int port_mask)
872{
873 struct ata_probe_ent *probe_ent;
874 unsigned long bmdma = pci_resource_start(pdev, 4);
875
876 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
877 if (!probe_ent)
878 return NULL;
879
880 probe_ent->n_ports = 2;
881 probe_ent->private_data = port[0]->private_data;
882
883 if (port_mask & ATA_PORT_PRIMARY) {
884 probe_ent->irq = 14;
885 probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
886 probe_ent->port[0].altstatus_addr =
887 probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
888 if (bmdma) {
889 probe_ent->port[0].bmdma_addr = bmdma;
890 if (inb(bmdma + 2) & 0x80)
891 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
892 }
893 ata_std_ports(&probe_ent->port[0]);
894 } else
895 probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
896
897 if (port_mask & ATA_PORT_SECONDARY) {
898 if (probe_ent->irq)
899 probe_ent->irq2 = 15;
900 else
901 probe_ent->irq = 15;
902 probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
903 probe_ent->port[1].altstatus_addr =
904 probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
905 if (bmdma) {
906 probe_ent->port[1].bmdma_addr = bmdma + 8;
907 if (inb(bmdma + 10) & 0x80)
908 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
909 }
910 ata_std_ports(&probe_ent->port[1]);
911 probe_ent->pinfo2 = port[1];
912 } else
913 probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
914
915 return probe_ent;
916}
917
918
919/**
920 * ata_pci_init_one - Initialize/register PCI IDE host controller
921 * @pdev: Controller to be initialized
922 * @port_info: Information from low-level host driver
923 * @n_ports: Number of ports attached to host controller
924 *
925 * This is a helper function which can be called from a driver's
926 * xxx_init_one() probe function if the hardware uses traditional
927 * IDE taskfile registers.
928 *
929 * This function calls pci_enable_device(), reserves its register
930 * regions, sets the dma mask, enables bus master mode, and calls
931 * ata_device_add()
932 *
933 * ASSUMPTION:
934 * Nobody makes a single channel controller that appears solely as
935 * the secondary legacy port on PCI.
936 *
937 * LOCKING:
938 * Inherited from PCI layer (may sleep).
939 *
940 * RETURNS:
941 * Zero on success, negative errno-based value on error.
942 */
943
944int ata_pci_init_one(struct pci_dev *pdev, struct ata_port_info **port_info,
945 unsigned int n_ports)
946{
947 struct ata_probe_ent *probe_ent = NULL;
948 struct ata_port_info *port[2];
949 u8 tmp8, mask;
950 unsigned int legacy_mode = 0;
951 int disable_dev_on_err = 1;
952 int rc;
953
954 DPRINTK("ENTER\n");
955
956 port[0] = port_info[0];
957 if (n_ports > 1)
958 port[1] = port_info[1];
959 else
960 port[1] = port[0];
961
962 if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
963 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
964 /* TODO: What if one channel is in native mode ... */
965 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
966 mask = (1 << 2) | (1 << 0);
967 if ((tmp8 & mask) != mask)
968 legacy_mode = (1 << 3);
969 }
970
971 /* FIXME... */
972 if ((!legacy_mode) && (n_ports > 2)) {
973 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
974 n_ports = 2;
975 /* For now */
976 }
977
978 /* FIXME: Really for ATA it isn't safe because the device may be
979 multi-purpose and we want to leave it alone if it was already
980 enabled. Secondly, for shared use, as Arjan says we want refcounting.
981
982 Checking dev->is_enabled is insufficient as this is not set at
983 boot for the primary video, which is BIOS-enabled.
984 */
985
986 rc = pci_enable_device(pdev);
987 if (rc)
988 return rc;
989
990 rc = pci_request_regions(pdev, DRV_NAME);
991 if (rc) {
992 disable_dev_on_err = 0;
993 goto err_out;
994 }
995
996 if (legacy_mode) {
997 if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
998 struct resource *conflict, res;
999 res.start = ATA_PRIMARY_CMD;
1000 res.end = ATA_PRIMARY_CMD + 8 - 1;
1001 conflict = ____request_resource(&ioport_resource, &res);
1002 while (conflict->child)
1003 conflict = ____request_resource(conflict, &res);
1004 if (!strcmp(conflict->name, "libata"))
1005 legacy_mode |= ATA_PORT_PRIMARY;
1006 else {
1007 disable_dev_on_err = 0;
1008 printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
1009 "ata: conflict with %s\n",
1010 ATA_PRIMARY_CMD,
1011 conflict->name);
1012 }
1013 } else
1014 legacy_mode |= ATA_PORT_PRIMARY;
1015
1016 if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
1017 struct resource *conflict, res;
1018 res.start = ATA_SECONDARY_CMD;
1019 res.end = ATA_SECONDARY_CMD + 8 - 1;
1020 conflict = ____request_resource(&ioport_resource, &res);
1021 while (conflict->child)
1022 conflict = ____request_resource(conflict, &res);
1023 if (!strcmp(conflict->name, "libata"))
1024 legacy_mode |= ATA_PORT_SECONDARY;
1025 else {
1026 disable_dev_on_err = 0;
1027 printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
1028 "ata: conflict with %s\n",
1029 ATA_SECONDARY_CMD,
1030 conflict->name);
1031 }
1032 } else
1033 legacy_mode |= ATA_PORT_SECONDARY;
1034 }
1035
1036 /* we have legacy mode, but all ports are unavailable */
1037 if (legacy_mode == (1 << 3)) {
1038 rc = -EBUSY;
1039 goto err_out_regions;
1040 }
1041
1042 /* FIXME: If we get no DMA mask we should fall back to PIO */
1043 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1044 if (rc)
1045 goto err_out_regions;
1046 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1047 if (rc)
1048 goto err_out_regions;
1049
1050 if (legacy_mode) {
1051 probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
1052 } else {
1053 if (n_ports == 2)
1054 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1055 else
1056 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1057 }
1058 if (!probe_ent) {
1059 rc = -ENOMEM;
1060 goto err_out_regions;
1061 }
1062
1063 pci_set_master(pdev);
1064
1065 /* FIXME: check ata_device_add return */
1066 ata_device_add(probe_ent);
1067
1068 kfree(probe_ent);
1069
1070 return 0;
1071
1072err_out_regions:
1073 if (legacy_mode & ATA_PORT_PRIMARY)
1074 release_region(ATA_PRIMARY_CMD, 8);
1075 if (legacy_mode & ATA_PORT_SECONDARY)
1076 release_region(ATA_SECONDARY_CMD, 8);
1077 pci_release_regions(pdev);
1078err_out:
1079 if (disable_dev_on_err)
1080 pci_disable_device(pdev);
1081 return rc;
1082}
1083
1084/**
1085 * ata_pci_clear_simplex - attempt to kick device out of simplex
1086 * @pdev: PCI device
1087 *
1088 * Some PCI ATA devices report simplex mode but in fact can be told to
1089 * enter non-simplex mode. This implements the necessary logic to
1090 * perform the task on such devices. Calling it on other devices will
1091 * have -undefined- behaviour.
1092 */
1093
1094int ata_pci_clear_simplex(struct pci_dev *pdev)
1095{
1096 unsigned long bmdma = pci_resource_start(pdev, 4);
1097 u8 simplex;
1098
1099 if (bmdma == 0)
1100 return -ENOENT;
1101
1102 simplex = inb(bmdma + 0x02);
1103 outb(simplex & 0x60, bmdma + 0x02);
1104 simplex = inb(bmdma + 0x02);
1105 if (simplex & 0x80)
1106 return -EOPNOTSUPP;
1107 return 0;
1108}
1109
1110unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1111{
1112 /* Filter out DMA modes if the device has been configured by
1113 the BIOS as PIO only */
1114
1115 if (ap->ioaddr.bmdma_addr == 0)
1116 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1117 return xfer_mask;
1118}
1119
1120#endif /* CONFIG_PCI */
1121
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
new file mode 100644
index 000000000000..a5ecb71390a9
--- /dev/null
+++ b/drivers/ata/libata.h
@@ -0,0 +1,122 @@
1/*
2 * libata.h - helper library for ATA
3 *
4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 * Copyright 2003-2004 Jeff Garzik
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 */
27
28#ifndef __LIBATA_H__
29#define __LIBATA_H__
30
31#define DRV_NAME "libata"
32#define DRV_VERSION "2.00" /* must be exactly four chars */
33
34struct ata_scsi_args {
35 struct ata_device *dev;
36 u16 *id;
37 struct scsi_cmnd *cmd;
38 void (*done)(struct scsi_cmnd *);
39};
40
41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled;
44extern int atapi_dmadir;
45extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id);
56extern int ata_dev_configure(struct ata_device *dev, int print_info);
57extern int sata_down_spd_limit(struct ata_port *ap);
58extern int sata_set_spd_needed(struct ata_port *ap);
59extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
60extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
61extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc);
63extern void __ata_qc_complete(struct ata_queued_cmd *qc);
64extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
65extern void ata_dev_select(struct ata_port *ap, unsigned int device,
66 unsigned int wait, unsigned int can_sleep);
67extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
68extern int ata_flush_cache(struct ata_device *dev);
69extern void ata_dev_init(struct ata_device *dev);
70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
72extern void ata_port_init(struct ata_port *ap, struct ata_host *host,
73 const struct ata_probe_ent *ent, unsigned int port_no);
74extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
75 const struct ata_port_info *port);
76
77
78/* libata-scsi.c */
79extern struct scsi_transport_template ata_scsi_transport_template;
80
81extern void ata_scsi_scan_host(struct ata_port *ap);
82extern int ata_scsi_offline_dev(struct ata_device *dev);
83extern void ata_scsi_hotplug(void *data);
84extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
85 unsigned int buflen);
86
87extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
88 unsigned int buflen);
89
90extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
91 unsigned int buflen);
92extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
93 unsigned int buflen);
94extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
95 unsigned int buflen);
96extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
97 unsigned int buflen);
98extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
99 unsigned int buflen);
100extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
101 unsigned int buflen);
102extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
103 unsigned int buflen);
104extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
105 void (*done)(struct scsi_cmnd *),
106 u8 asc, u8 ascq);
107extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
108 u8 sk, u8 asc, u8 ascq);
109extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
110 unsigned int (*actor) (struct ata_scsi_args *args,
111 u8 *rbuf, unsigned int buflen));
112extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
113extern void ata_scsi_dev_rescan(void *data);
114extern int ata_bus_probe(struct ata_port *ap);
115
116/* libata-eh.c */
117extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
118extern void ata_scsi_error(struct Scsi_Host *host);
119extern void ata_port_wait_eh(struct ata_port *ap);
120extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
121
122#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
new file mode 100644
index 000000000000..8448ee6e0eed
--- /dev/null
+++ b/drivers/ata/pata_ali.c
@@ -0,0 +1,679 @@
1/*
2 * pata_ali.c - ALI 15x3 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based in part upon
7 * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
8 *
9 * Copyright (C) 1998-2000 Michel Aubry, Maintainer
10 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
11 * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
12 *
13 * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
14 * May be copied or modified under the terms of the GNU General Public License
15 * Copyright (C) 2002 Alan Cox <alan@redhat.com>
16 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
17 *
18 * Documentation
19 * Chipset documentation available under NDA only
20 *
21 * TODO/CHECK
22 * Cannot have ATAPI on both master & slave for rev < c2 (???) but
23 * otherwise should do atapi DMA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34#include <linux/dmi.h>
35
36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.6.5"
38
39/*
40 * Cable special cases
41 */
42
43static struct dmi_system_id cable_dmi_table[] = {
44 {
45 .ident = "HP Pavilion N5430",
46 .matches = {
47 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
48 DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
49 },
50 },
51 { }
52};
53
54static int ali_cable_override(struct pci_dev *pdev)
55{
56 /* Fujitsu P2000 */
57 if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
58 return 1;
59 /* Systems by DMI */
60 if (dmi_check_system(cable_dmi_table))
61 return 1;
62 return 0;
63}
64
65/**
66 * ali_c2_cable_detect - cable detection
67 * @ap: ATA port
68 *
69 * Perform cable detection for C2 and later revisions
70 */
71
72static int ali_c2_cable_detect(struct ata_port *ap)
73{
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 u8 ata66;
76
77 /* Certain laptops use short but suitable cables and don't
78 implement the detect logic */
79
80 if (ali_cable_override(pdev))
81 return ATA_CBL_PATA80;
82
83 /* Host view cable detect: 0x4A bit 0 = primary, bit 1 = secondary.
84 Bit set for 40 pin */
85 pci_read_config_byte(pdev, 0x4A, &ata66);
86 if (ata66 & (1 << ap->port_no))
87 return ATA_CBL_PATA40;
88 else
89 return ATA_CBL_PATA80;
90}
91
92/**
93 * ali_c2_pre_reset - reset for C2 and later chips
94 * @ap: ATA port
95 *
96 * Handle the reset callback for the later chips with cable detect
97 */
98
99static int ali_c2_pre_reset(struct ata_port *ap)
100{
101 ap->cbl = ali_c2_cable_detect(ap);
102 return ata_std_prereset(ap);
103}
104
105static void ali_c2_error_handler(struct ata_port *ap)
106{
107 ata_bmdma_drive_eh(ap, ali_c2_pre_reset,
108 ata_std_softreset, NULL,
109 ata_std_postreset);
110}
111
112/**
113 * ali_early_cable_detect - cable detection
114 * @ap: ATA port
115 *
116 * Perform cable detection for older chipsets. This turns out to be
117 * rather easy to implement
118 */
119
120static int ali_early_cable_detect(struct ata_port *ap)
121{
122 return ATA_CBL_PATA40;
123}
124
125/**
126 * ali_early_pre_reset - reset for early chip
127 * @ap: ATA port
128 *
129 * Handle the reset callback for the early (pre cable detect) chips.
130 */
131
132static int ali_early_pre_reset(struct ata_port *ap)
133{
134 ap->cbl = ali_early_cable_detect(ap);
135 return ata_std_prereset(ap);
136}
137
138static void ali_early_error_handler(struct ata_port *ap)
139{
140 return ata_bmdma_drive_eh(ap, ali_early_pre_reset,
141 ata_std_softreset, NULL,
142 ata_std_postreset);
143}
144
145/**
146 * ali_20_filter - filter for earlier ALI DMA
147 * @ap: ALi ATA port
148 * @adev: attached device
149 *
150 * Ensure that we do not do DMA on CD devices. We may be able to
151 * fix that later on. Also ensure we do not do UDMA on WDC drives.
152 */
153
154static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
155{
156 char model_num[40];
157 /* No DMA on anything but a disk for now */
158 if (adev->class != ATA_DEV_ATA)
159 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
160 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
161 if (strstr(model_num, "WDC"))
162 return mask & ~ATA_MASK_UDMA;
163 return ata_pci_default_filter(ap, adev, mask);
164}
165
166/**
167 * ali_fifo_control - FIFO manager
168 * @ap: ALi channel to control
169 * @adev: device for FIFO control
170 * @on: 0 for off 1 for on
171 *
172 * Enable or disable the FIFO on a given device. Because of the way the
173 * ALi FIFO works, it provides a boost on ATA disks but can be confused by
174 * ATAPI and we must therefore manage it.
175 */
176
177static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
178{
179 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
180 int pio_fifo = 0x54 + ap->port_no;
181 u8 fifo;
182 int shift = 4 * adev->devno;
183
184 /* ATA - FIFO on, set nibble to 0x05; ATAPI - FIFO off, set nibble to
185 0x00. Not all the docs agree but the behaviour we now use is the
186 one stated in the BIOS Programming Guide */
187
188 pci_read_config_byte(pdev, pio_fifo, &fifo);
189 fifo &= ~(0x0F << shift);
190 if (on)
191 fifo |= (on << shift);
192 pci_write_config_byte(pdev, pio_fifo, fifo);
193}
194
195/**
196 * ali_program_modes - load mode registers
197 * @ap: ALi channel to load
198 * @adev: Device the timing is for
199 * @cmd: Command timing
200 * @data: Data timing
201 * @ultra: UDMA timing or zero for off
202 *
203 * Loads the timing registers for cmd/data and disables UDMA if
204 * ultra is zero. If ultra is set then it loads and enables the UDMA
205 * timing but does not touch the command/data timing.
206 */
207
208static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
209{
210 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
211 int cas = 0x58 + 4 * ap->port_no; /* Command timing */
212 int cbt = 0x59 + 4 * ap->port_no; /* 8bit command timing */
213 int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
214 int udmat = 0x56 + ap->port_no; /* UDMA timing */
215 int shift = 4 * adev->devno;
216 u8 udma;
217
218 if (t != NULL) {
219 t->setup = FIT(t->setup, 1, 8) & 7;
220 t->act8b = FIT(t->act8b, 1, 8) & 7;
221 t->rec8b = FIT(t->rec8b, 1, 16) & 15;
222 t->active = FIT(t->active, 1, 8) & 7;
223 t->recover = FIT(t->recover, 1, 16) & 15;
224
225 pci_write_config_byte(pdev, cas, t->setup);
226 pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
227 pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
228 }
229
230 /* Set up the UDMA enable */
231 pci_read_config_byte(pdev, udmat, &udma);
232 udma &= ~(0x0F << shift);
233 udma |= ultra << shift;
234 pci_write_config_byte(pdev, udmat, udma);
235}
236
237/**
238 * ali_set_piomode - set initial PIO mode data
239 * @ap: ATA interface
240 * @adev: ATA device
241 *
242 * Program the ALi registers for PIO mode. FIXME: add timings for
243 * PIO5.
244 */
245
246static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
247{
248 struct ata_device *pair = ata_dev_pair(adev);
249 struct ata_timing t;
250 unsigned long T = 1000000000 / 33333; /* PCI clock based */
251
252 ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
253 if (pair) {
254 struct ata_timing p;
255 ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
256 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
257 if (pair->dma_mode) {
258 ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
259 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
260 }
261 }
262
263 /* PIO FIFO is only permitted on ATA disk */
264 if (adev->class != ATA_DEV_ATA)
265 ali_fifo_control(ap, adev, 0x00);
266 ali_program_modes(ap, adev, &t, 0);
267 if (adev->class == ATA_DEV_ATA)
268 ali_fifo_control(ap, adev, 0x05);
269
270}
271
272/**
273 * ali_set_dmamode - set initial DMA mode data
274 * @ap: ATA interface
275 * @adev: ATA device
276 *
277 * FIXME: MWDMA timings
278 */
279
280static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
281{
282 static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
283 struct ata_device *pair = ata_dev_pair(adev);
284 struct ata_timing t;
285 unsigned long T = 1000000000 / 33333; /* PCI clock based */
286 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
287
288
289 if (adev->class == ATA_DEV_ATA)
290 ali_fifo_control(ap, adev, 0x08);
291
292 if (adev->dma_mode >= XFER_UDMA_0) {
293 ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
294 if (adev->dma_mode >= XFER_UDMA_3) {
295 u8 reg4b;
296 pci_read_config_byte(pdev, 0x4B, &reg4b);
297 reg4b |= 1;
298 pci_write_config_byte(pdev, 0x4B, reg4b);
299 }
300 } else {
301 ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
302 if (pair) {
303 struct ata_timing p;
304 ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
305 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
306 if (pair->dma_mode) {
307 ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
308 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
309 }
310 }
311 ali_program_modes(ap, adev, &t, 0);
312 }
313}
314
315/**
316 * ali_lock_sectors - Keep older devices to 255 sector mode
317 * @ap: ATA port
318 * @adev: Device
319 *
320 * Called during the bus probe for each device that is found. We use
321 * this call to lock the sector count of the device to 255 or less on
322 * older ALi controllers. If we didn't do this then large I/O's would
323 * require LBA48 commands, which the older ALi chips require to be
324 * issued via slower PIO methods.
325 */
326
327static void ali_lock_sectors(struct ata_port *ap, struct ata_device *adev)
328{
329 adev->max_sectors = 255;
330}
331
332static struct scsi_host_template ali_sht = {
333 .module = THIS_MODULE,
334 .name = DRV_NAME,
335 .ioctl = ata_scsi_ioctl,
336 .queuecommand = ata_scsi_queuecmd,
337 .can_queue = ATA_DEF_QUEUE,
338 .this_id = ATA_SHT_THIS_ID,
339 .sg_tablesize = LIBATA_MAX_PRD,
340 /* Keep LBA28 counts so large I/O's don't turn into LBA48 (and thus
341 PIO) with older controllers. Not locked, so will grow on C5 or later */
342 .max_sectors = 255,
343 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
344 .emulated = ATA_SHT_EMULATED,
345 .use_clustering = ATA_SHT_USE_CLUSTERING,
346 .proc_name = DRV_NAME,
347 .dma_boundary = ATA_DMA_BOUNDARY,
348 .slave_configure = ata_scsi_slave_config,
349 .bios_param = ata_std_bios_param,
350};
351
352/*
353 * Port operations for PIO only ALi
354 */
355
356static struct ata_port_operations ali_early_port_ops = {
357 .port_disable = ata_port_disable,
358 .set_piomode = ali_set_piomode,
359 .tf_load = ata_tf_load,
360 .tf_read = ata_tf_read,
361 .check_status = ata_check_status,
362 .exec_command = ata_exec_command,
363 .dev_select = ata_std_dev_select,
364
365 .freeze = ata_bmdma_freeze,
366 .thaw = ata_bmdma_thaw,
367 .error_handler = ali_early_error_handler,
368 .post_internal_cmd = ata_bmdma_post_internal_cmd,
369
370 .qc_prep = ata_qc_prep,
371 .qc_issue = ata_qc_issue_prot,
372 .eng_timeout = ata_eng_timeout,
373 .data_xfer = ata_pio_data_xfer,
374
375 .irq_handler = ata_interrupt,
376 .irq_clear = ata_bmdma_irq_clear,
377
378 .port_start = ata_port_start,
379 .port_stop = ata_port_stop,
380 .host_stop = ata_host_stop
381};
382
383/*
384 * Port operations for DMA capable ALi without cable
385 * detect
386 */
387static struct ata_port_operations ali_20_port_ops = {
388 .port_disable = ata_port_disable,
389
390 .set_piomode = ali_set_piomode,
391 .set_dmamode = ali_set_dmamode,
392 .mode_filter = ali_20_filter,
393
394 .tf_load = ata_tf_load,
395 .tf_read = ata_tf_read,
396 .check_status = ata_check_status,
397 .exec_command = ata_exec_command,
398 .dev_select = ata_std_dev_select,
399 .dev_config = ali_lock_sectors,
400
401 .freeze = ata_bmdma_freeze,
402 .thaw = ata_bmdma_thaw,
403 .error_handler = ali_early_error_handler,
404 .post_internal_cmd = ata_bmdma_post_internal_cmd,
405
406 .bmdma_setup = ata_bmdma_setup,
407 .bmdma_start = ata_bmdma_start,
408 .bmdma_stop = ata_bmdma_stop,
409 .bmdma_status = ata_bmdma_status,
410
411 .qc_prep = ata_qc_prep,
412 .qc_issue = ata_qc_issue_prot,
413 .eng_timeout = ata_eng_timeout,
414 .data_xfer = ata_pio_data_xfer,
415
416 .irq_handler = ata_interrupt,
417 .irq_clear = ata_bmdma_irq_clear,
418
419 .port_start = ata_port_start,
420 .port_stop = ata_port_stop,
421 .host_stop = ata_host_stop
422};
423
424/*
425 * Port operations for DMA capable ALi with cable detect
426 */
427static struct ata_port_operations ali_c2_port_ops = {
428 .port_disable = ata_port_disable,
429 .set_piomode = ali_set_piomode,
430 .set_dmamode = ali_set_dmamode,
431 .mode_filter = ata_pci_default_filter,
432 .tf_load = ata_tf_load,
433 .tf_read = ata_tf_read,
434 .check_status = ata_check_status,
435 .exec_command = ata_exec_command,
436 .dev_select = ata_std_dev_select,
437 .dev_config = ali_lock_sectors,
438
439 .freeze = ata_bmdma_freeze,
440 .thaw = ata_bmdma_thaw,
441 .error_handler = ali_c2_error_handler,
442 .post_internal_cmd = ata_bmdma_post_internal_cmd,
443
444 .bmdma_setup = ata_bmdma_setup,
445 .bmdma_start = ata_bmdma_start,
446 .bmdma_stop = ata_bmdma_stop,
447 .bmdma_status = ata_bmdma_status,
448
449 .qc_prep = ata_qc_prep,
450 .qc_issue = ata_qc_issue_prot,
451 .eng_timeout = ata_eng_timeout,
452 .data_xfer = ata_pio_data_xfer,
453
454 .irq_handler = ata_interrupt,
455 .irq_clear = ata_bmdma_irq_clear,
456
457 .port_start = ata_port_start,
458 .port_stop = ata_port_stop,
459 .host_stop = ata_host_stop
460};
461
462/*
463 * Port operations for DMA capable ALi with cable detect and LBA48
464 */
465static struct ata_port_operations ali_c5_port_ops = {
466 .port_disable = ata_port_disable,
467 .set_piomode = ali_set_piomode,
468 .set_dmamode = ali_set_dmamode,
469 .mode_filter = ata_pci_default_filter,
470 .tf_load = ata_tf_load,
471 .tf_read = ata_tf_read,
472 .check_status = ata_check_status,
473 .exec_command = ata_exec_command,
474 .dev_select = ata_std_dev_select,
475
476 .freeze = ata_bmdma_freeze,
477 .thaw = ata_bmdma_thaw,
478 .error_handler = ali_c2_error_handler,
479 .post_internal_cmd = ata_bmdma_post_internal_cmd,
480
481 .bmdma_setup = ata_bmdma_setup,
482 .bmdma_start = ata_bmdma_start,
483 .bmdma_stop = ata_bmdma_stop,
484 .bmdma_status = ata_bmdma_status,
485
486 .qc_prep = ata_qc_prep,
487 .qc_issue = ata_qc_issue_prot,
488 .eng_timeout = ata_eng_timeout,
489 .data_xfer = ata_pio_data_xfer,
490
491 .irq_handler = ata_interrupt,
492 .irq_clear = ata_bmdma_irq_clear,
493
494 .port_start = ata_port_start,
495 .port_stop = ata_port_stop,
496 .host_stop = ata_host_stop
497};
498
499/**
500 * ali_init_one - discovery callback
501 * @pdev: PCI device to set up
502 * @id: PCI table info
503 *
504 * An ALi IDE interface has been discovered. Figure out what revision
505 * it is and perform configuration work before handing it to the ATA layer.
506 */
507
508static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
509{
510 static struct ata_port_info info_early = {
511 .sht = &ali_sht,
512 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
513 .pio_mask = 0x1f,
514 .port_ops = &ali_early_port_ops
515 };
516 /* Revision 0x20 added DMA */
517 static struct ata_port_info info_20 = {
518 .sht = &ali_sht,
519 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
520 .pio_mask = 0x1f,
521 .mwdma_mask = 0x07,
522 .port_ops = &ali_20_port_ops
523 };
524 /* Revision 0x20 with support logic added UDMA */
525 static struct ata_port_info info_20_udma = {
526 .sht = &ali_sht,
527 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
528 .pio_mask = 0x1f,
529 .mwdma_mask = 0x07,
530 .udma_mask = 0x07, /* UDMA33 */
531 .port_ops = &ali_20_port_ops
532 };
533 /* Revision 0xC2 adds UDMA66 */
534 static struct ata_port_info info_c2 = {
535 .sht = &ali_sht,
536 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
537 .pio_mask = 0x1f,
538 .mwdma_mask = 0x07,
539 .udma_mask = 0x1f,
540 .port_ops = &ali_c2_port_ops
541 };
542 /* Revision 0xC3 is UDMA100 */
543 static struct ata_port_info info_c3 = {
544 .sht = &ali_sht,
545 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
546 .pio_mask = 0x1f,
547 .mwdma_mask = 0x07,
548 .udma_mask = 0x3f,
549 .port_ops = &ali_c2_port_ops
550 };
551 /* Revision 0xC4 is UDMA133 */
552 static struct ata_port_info info_c4 = {
553 .sht = &ali_sht,
554 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
555 .pio_mask = 0x1f,
556 .mwdma_mask = 0x07,
557 .udma_mask = 0x7f,
558 .port_ops = &ali_c2_port_ops
559 };
560 /* Revision 0xC5 is UDMA133 with LBA48 DMA */
561 static struct ata_port_info info_c5 = {
562 .sht = &ali_sht,
563 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
564 .pio_mask = 0x1f,
565 .mwdma_mask = 0x07,
566 .udma_mask = 0x7f,
567 .port_ops = &ali_c5_port_ops
568 };
569
570 static struct ata_port_info *port_info[2];
571 u8 rev, tmp;
572 struct pci_dev *north, *isa_bridge;
573
574 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
575
576 /*
577 * The chipset revision selects the driver operations and
578 * mode data.
579 */
580
581 if (rev < 0x20) {
582 port_info[0] = port_info[1] = &info_early;
583 } else if (rev < 0xC2) {
584 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
585 pci_read_config_byte(pdev, 0x4B, &tmp);
586 /* Clear CD-ROM DMA write bit */
587 tmp &= 0x7F;
588 pci_write_config_byte(pdev, 0x4B, tmp);
589 port_info[0] = port_info[1] = &info_20;
590 } else if (rev == 0xC2) {
591 port_info[0] = port_info[1] = &info_c2;
592 } else if (rev == 0xC3) {
593 port_info[0] = port_info[1] = &info_c3;
594 } else if (rev == 0xC4) {
595 port_info[0] = port_info[1] = &info_c4;
596 } else
597 port_info[0] = port_info[1] = &info_c5;
598
599 if (rev >= 0xC2) {
600 /* Enable cable detection logic */
601 pci_read_config_byte(pdev, 0x4B, &tmp);
602 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
603 }
604
605 north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
606 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
607
608 if (north && north->vendor == PCI_VENDOR_ID_AL) {
609 /* Configure the ALi bridge logic. For non-ALi rely on the BIOS.
610 Set the south bridge enable bit */
611 pci_read_config_byte(isa_bridge, 0x79, &tmp);
612 if (rev == 0xC2)
613 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
614 else if (rev > 0xC2)
615 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
616 }
617
618 if (rev >= 0x20) {
619 if (rev < 0xC2) {
620 /* Are we paired with a UDMA-capable chip? */
621 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
622 if ((tmp & 0x1E) == 0x12)
623 port_info[0] = port_info[1] = &info_20_udma;
624 }
625 /*
626 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
627 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
628 * via 0x54/55.
629 */
630 pci_read_config_byte(pdev, 0x53, &tmp);
631 if (rev <= 0x20)
632 tmp &= ~0x02;
633 if (rev == 0xc7)
634 tmp |= 0x03;
635 else
636 tmp |= 0x01; /* CD_ROM enable for DMA */
637 pci_write_config_byte(pdev, 0x53, tmp);
638 }
639
640 pci_dev_put(isa_bridge);
641 pci_dev_put(north);
642
643 ata_pci_clear_simplex(pdev);
644 return ata_pci_init_one(pdev, port_info, 2);
645}
646
647static struct pci_device_id ali[] = {
648 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5228), },
649 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229), },
650 { 0, },
651};
652
653static struct pci_driver ali_pci_driver = {
654 .name = DRV_NAME,
655 .id_table = ali,
656 .probe = ali_init_one,
657 .remove = ata_pci_remove_one
658};
659
660static int __init ali_init(void)
661{
662 return pci_register_driver(&ali_pci_driver);
663}
664
665
666static void __exit ali_exit(void)
667{
668 pci_unregister_driver(&ali_pci_driver);
669}
670
671
672MODULE_AUTHOR("Alan Cox");
673MODULE_DESCRIPTION("low-level driver for ALi PATA");
674MODULE_LICENSE("GPL");
675MODULE_DEVICE_TABLE(pci, ali);
676MODULE_VERSION(DRV_VERSION);
677
678module_init(ali_init);
679module_exit(ali_exit);
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
new file mode 100644
index 000000000000..3293cf9a7eb5
--- /dev/null
+++ b/drivers/ata/pata_amd.c
@@ -0,0 +1,718 @@
1/*
2 * pata_amd.c - AMD PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on pata-sil680. Errata information is taken from data sheets
7 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
8 * claimed by sata-nv.c.
9 *
10 * TODO:
11 * Variable system clock when/if it makes sense
12 * Power management on ports
13 *
14 *
15 * Documentation publicly available.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_amd"
28#define DRV_VERSION "0.2.3"
29
30/**
31 * timing_setup - shared timing computation and load
32 * @ap: ATA port being set up
33 * @adev: drive being configured
34 * @offset: port offset
35 * @speed: target speed
36 * @clock: clock multiplier (number of times 33MHz for this part)
37 *
38 * Perform the actual timing set up for Nvidia or AMD PATA devices.
39 * The actual devices vary so they all call into this helper function
40 * providing the clock multiplier and offset (because AMD and Nvidia put
41 * the ports at different locations).
42 */
43
44static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
45{
46 static const unsigned char amd_cyc2udma[] = {
47 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
48 };
49
50 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
51 struct ata_device *peer = ata_dev_pair(adev);
52 int dn = ap->port_no * 2 + adev->devno;
53 struct ata_timing at, apeer;
54 int T, UT;
55 const int amd_clock = 33333; /* KHz. */
56 u8 t;
57
58 T = 1000000000 / amd_clock;
59 UT = T / min_t(int, max_t(int, clock, 1), 2);
60
61 if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
62 dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
63 return;
64 }
65
66 if (peer) {
67 /* This may be over conservative */
68 if (peer->dma_mode) {
69 ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
70 ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
71 }
72 ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
73 ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
74 }
75
76 if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
77 if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
78
79 /*
80 * Now do the setup work
81 */
82
83 /* Configure the address set up timing */
84 pci_read_config_byte(pdev, offset + 0x0C, &t);
85 t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
86 pci_write_config_byte(pdev, offset + 0x0C , t);
87
88 /* Configure the 8bit I/O timing */
89 pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
90 ((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
91
92 /* Drive timing */
93 pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
94 ((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
95
96 switch (clock) {
97 case 1:
98 t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
99 break;
100
101 case 2:
102 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
103 break;
104
105 case 3:
106 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
107 break;
108
109 case 4:
110 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
111 break;
112
113 default:
114 return;
115 }
116
117 /* UDMA timing */
118 pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
119}
120
121/**
122 * amd_pre_reset - cable detection
123 * @ap: ATA port
124 *
125 * Perform cable detection. The BIOS stores this in PCI config
126 * space for us.
127 */
128
129static int amd_pre_reset(struct ata_port *ap)
130{
131 static const u32 bitmask[2] = {0x03, 0xC0};
132 static const struct pci_bits amd_enable_bits[] = {
133 { 0x40, 1, 0x02, 0x02 },
134 { 0x40, 1, 0x01, 0x01 }
135 };
136
137 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
138 u8 ata66;
139
140 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
141 ata_port_disable(ap);
142 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
143 return 0;
144 }
145
146 pci_read_config_byte(pdev, 0x42, &ata66);
147 if (ata66 & bitmask[ap->port_no])
148 ap->cbl = ATA_CBL_PATA80;
149 else
150 ap->cbl = ATA_CBL_PATA40;
151 return ata_std_prereset(ap);
152
153}
154
155static void amd_error_handler(struct ata_port *ap)
156{
157 return ata_bmdma_drive_eh(ap, amd_pre_reset,
158 ata_std_softreset, NULL,
159 ata_std_postreset);
160}
161
162static int amd_early_pre_reset(struct ata_port *ap)
163{
164 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
165 static struct pci_bits amd_enable_bits[] = {
166 { 0x40, 1, 0x02, 0x02 },
167 { 0x40, 1, 0x01, 0x01 }
168 };
169
170 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
171 ata_port_disable(ap);
172 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
173 return 0;
174 }
175 /* No host side cable detection */
176 ap->cbl = ATA_CBL_PATA80;
177 return ata_std_prereset(ap);
178
179}
180
181static void amd_early_error_handler(struct ata_port *ap)
182{
183 ata_bmdma_drive_eh(ap, amd_early_pre_reset,
184 ata_std_softreset, NULL,
185 ata_std_postreset);
186}
187
188/**
189 * amd33_set_piomode - set initial PIO mode data
190 * @ap: ATA interface
191 * @adev: ATA device
192 *
193 * Program the AMD registers for PIO mode.
194 */
195
196static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
197{
198 timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
199}
200
201static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
202{
203 timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
204}
205
206static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
207{
208 timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
209}
210
211static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
212{
213 timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
214}
215
216/**
217 * amd33_set_dmamode - set initial DMA mode data
218 * @ap: ATA interface
219 * @adev: ATA device
220 *
221 * Program the MWDMA/UDMA modes for the AMD and Nvidia
222 * chipset.
223 */
224
225static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
226{
227 timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
228}
229
230static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
231{
232 timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
233}
234
235static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
236{
237 timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
238}
239
240static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
241{
242 timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
243}
244
245
246/**
247 * nv_pre_reset - cable detection
248 * @ap: ATA port
249 *
250 * Perform cable detection. The BIOS stores this in PCI config
251 * space for us.
252 */
253
254static int nv_pre_reset(struct ata_port *ap)
{
255 static const u8 bitmask[2] = {0x03, 0xC0};
256 static const struct pci_bits nv_enable_bits[] = {
257 { 0x50, 1, 0x02, 0x02 },
258 { 0x50, 1, 0x01, 0x01 }
259 };
260
261 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
262 u8 ata66;
263 u16 udma;
264
265 if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) {
266 ata_port_disable(ap);
267 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
268 return 0;
269 }
270
271
272 pci_read_config_byte(pdev, 0x52, &ata66);
273 if (ata66 & bitmask[ap->port_no])
274 ap->cbl = ATA_CBL_PATA80;
275 else
276 ap->cbl = ATA_CBL_PATA40;
277
278 /* We now have to double-check because the Nvidia boxes' BIOS
279 doesn't always set the cable bits but does set the mode bits */
280
281 pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
282 if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
283 ap->cbl = ATA_CBL_PATA80;
284 return ata_std_prereset(ap);
285}
286
287static void nv_error_handler(struct ata_port *ap)
288{
289 ata_bmdma_drive_eh(ap, nv_pre_reset,
290 ata_std_softreset, NULL,
291 ata_std_postreset);
292}
293/**
294 * nv100_set_piomode - set initial PIO mode data
295 * @ap: ATA interface
296 * @adev: ATA device
297 *
298 * Program the AMD registers for PIO mode.
299 */
300
301static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
302{
303 timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
304}
305
306static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
307{
308 timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
309}
310
311/**
312 * nv100_set_dmamode - set initial DMA mode data
313 * @ap: ATA interface
314 * @adev: ATA device
315 *
316 * Program the MWDMA/UDMA modes for the AMD and Nvidia
317 * chipset.
318 */
319
320static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
321{
322 timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
323}
324
325static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
326{
327 timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
328}
329
330static struct scsi_host_template amd_sht = {
331 .module = THIS_MODULE,
332 .name = DRV_NAME,
333 .ioctl = ata_scsi_ioctl,
334 .queuecommand = ata_scsi_queuecmd,
335 .can_queue = ATA_DEF_QUEUE,
336 .this_id = ATA_SHT_THIS_ID,
337 .sg_tablesize = LIBATA_MAX_PRD,
338 .max_sectors = ATA_MAX_SECTORS,
339 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
340 .emulated = ATA_SHT_EMULATED,
341 .use_clustering = ATA_SHT_USE_CLUSTERING,
342 .proc_name = DRV_NAME,
343 .dma_boundary = ATA_DMA_BOUNDARY,
344 .slave_configure = ata_scsi_slave_config,
345 .bios_param = ata_std_bios_param,
346};
347
348static struct ata_port_operations amd33_port_ops = {
349 .port_disable = ata_port_disable,
350 .set_piomode = amd33_set_piomode,
351 .set_dmamode = amd33_set_dmamode,
352 .mode_filter = ata_pci_default_filter,
353 .tf_load = ata_tf_load,
354 .tf_read = ata_tf_read,
355 .check_status = ata_check_status,
356 .exec_command = ata_exec_command,
357 .dev_select = ata_std_dev_select,
358
359 .freeze = ata_bmdma_freeze,
360 .thaw = ata_bmdma_thaw,
361 .error_handler = amd_early_error_handler,
362 .post_internal_cmd = ata_bmdma_post_internal_cmd,
363
364 .bmdma_setup = ata_bmdma_setup,
365 .bmdma_start = ata_bmdma_start,
366 .bmdma_stop = ata_bmdma_stop,
367 .bmdma_status = ata_bmdma_status,
368
369 .qc_prep = ata_qc_prep,
370 .qc_issue = ata_qc_issue_prot,
371 .eng_timeout = ata_eng_timeout,
372 .data_xfer = ata_pio_data_xfer,
373
374 .irq_handler = ata_interrupt,
375 .irq_clear = ata_bmdma_irq_clear,
376
377 .port_start = ata_port_start,
378 .port_stop = ata_port_stop,
379 .host_stop = ata_host_stop
380};
381
382static struct ata_port_operations amd66_port_ops = {
383 .port_disable = ata_port_disable,
384 .set_piomode = amd66_set_piomode,
385 .set_dmamode = amd66_set_dmamode,
386 .mode_filter = ata_pci_default_filter,
387 .tf_load = ata_tf_load,
388 .tf_read = ata_tf_read,
389 .check_status = ata_check_status,
390 .exec_command = ata_exec_command,
391 .dev_select = ata_std_dev_select,
392
393 .freeze = ata_bmdma_freeze,
394 .thaw = ata_bmdma_thaw,
395 .error_handler = amd_early_error_handler,
396 .post_internal_cmd = ata_bmdma_post_internal_cmd,
397
398 .bmdma_setup = ata_bmdma_setup,
399 .bmdma_start = ata_bmdma_start,
400 .bmdma_stop = ata_bmdma_stop,
401 .bmdma_status = ata_bmdma_status,
402
403 .qc_prep = ata_qc_prep,
404 .qc_issue = ata_qc_issue_prot,
405 .eng_timeout = ata_eng_timeout,
406 .data_xfer = ata_pio_data_xfer,
407
408 .irq_handler = ata_interrupt,
409 .irq_clear = ata_bmdma_irq_clear,
410
411 .port_start = ata_port_start,
412 .port_stop = ata_port_stop,
413 .host_stop = ata_host_stop
414};
415
416static struct ata_port_operations amd100_port_ops = {
417 .port_disable = ata_port_disable,
418 .set_piomode = amd100_set_piomode,
419 .set_dmamode = amd100_set_dmamode,
420 .mode_filter = ata_pci_default_filter,
421 .tf_load = ata_tf_load,
422 .tf_read = ata_tf_read,
423 .check_status = ata_check_status,
424 .exec_command = ata_exec_command,
425 .dev_select = ata_std_dev_select,
426
427 .freeze = ata_bmdma_freeze,
428 .thaw = ata_bmdma_thaw,
429 .error_handler = amd_error_handler,
430 .post_internal_cmd = ata_bmdma_post_internal_cmd,
431
432 .bmdma_setup = ata_bmdma_setup,
433 .bmdma_start = ata_bmdma_start,
434 .bmdma_stop = ata_bmdma_stop,
435 .bmdma_status = ata_bmdma_status,
436
437 .qc_prep = ata_qc_prep,
438 .qc_issue = ata_qc_issue_prot,
439 .eng_timeout = ata_eng_timeout,
440 .data_xfer = ata_pio_data_xfer,
441
442 .irq_handler = ata_interrupt,
443 .irq_clear = ata_bmdma_irq_clear,
444
445 .port_start = ata_port_start,
446 .port_stop = ata_port_stop,
447 .host_stop = ata_host_stop
448};
449
450static struct ata_port_operations amd133_port_ops = {
451 .port_disable = ata_port_disable,
452 .set_piomode = amd133_set_piomode,
453 .set_dmamode = amd133_set_dmamode,
454 .mode_filter = ata_pci_default_filter,
455 .tf_load = ata_tf_load,
456 .tf_read = ata_tf_read,
457 .check_status = ata_check_status,
458 .exec_command = ata_exec_command,
459 .dev_select = ata_std_dev_select,
460
461 .freeze = ata_bmdma_freeze,
462 .thaw = ata_bmdma_thaw,
463 .error_handler = amd_error_handler,
464 .post_internal_cmd = ata_bmdma_post_internal_cmd,
465
466 .bmdma_setup = ata_bmdma_setup,
467 .bmdma_start = ata_bmdma_start,
468 .bmdma_stop = ata_bmdma_stop,
469 .bmdma_status = ata_bmdma_status,
470
471 .qc_prep = ata_qc_prep,
472 .qc_issue = ata_qc_issue_prot,
473 .eng_timeout = ata_eng_timeout,
474 .data_xfer = ata_pio_data_xfer,
475
476 .irq_handler = ata_interrupt,
477 .irq_clear = ata_bmdma_irq_clear,
478
479 .port_start = ata_port_start,
480 .port_stop = ata_port_stop,
481 .host_stop = ata_host_stop
482};
483
484static struct ata_port_operations nv100_port_ops = {
485 .port_disable = ata_port_disable,
486 .set_piomode = nv100_set_piomode,
487 .set_dmamode = nv100_set_dmamode,
488 .mode_filter = ata_pci_default_filter,
489 .tf_load = ata_tf_load,
490 .tf_read = ata_tf_read,
491 .check_status = ata_check_status,
492 .exec_command = ata_exec_command,
493 .dev_select = ata_std_dev_select,
494
495 .freeze = ata_bmdma_freeze,
496 .thaw = ata_bmdma_thaw,
497 .error_handler = nv_error_handler,
498 .post_internal_cmd = ata_bmdma_post_internal_cmd,
499
500 .bmdma_setup = ata_bmdma_setup,
501 .bmdma_start = ata_bmdma_start,
502 .bmdma_stop = ata_bmdma_stop,
503 .bmdma_status = ata_bmdma_status,
504
505 .qc_prep = ata_qc_prep,
506 .qc_issue = ata_qc_issue_prot,
507 .eng_timeout = ata_eng_timeout,
508 .data_xfer = ata_pio_data_xfer,
509
510 .irq_handler = ata_interrupt,
511 .irq_clear = ata_bmdma_irq_clear,
512
513 .port_start = ata_port_start,
514 .port_stop = ata_port_stop,
515 .host_stop = ata_host_stop
516};
517
518static struct ata_port_operations nv133_port_ops = {
519 .port_disable = ata_port_disable,
520 .set_piomode = nv133_set_piomode,
521 .set_dmamode = nv133_set_dmamode,
522 .mode_filter = ata_pci_default_filter,
523 .tf_load = ata_tf_load,
524 .tf_read = ata_tf_read,
525 .check_status = ata_check_status,
526 .exec_command = ata_exec_command,
527 .dev_select = ata_std_dev_select,
528
529 .freeze = ata_bmdma_freeze,
530 .thaw = ata_bmdma_thaw,
531 .error_handler = nv_error_handler,
532 .post_internal_cmd = ata_bmdma_post_internal_cmd,
533
534 .bmdma_setup = ata_bmdma_setup,
535 .bmdma_start = ata_bmdma_start,
536 .bmdma_stop = ata_bmdma_stop,
537 .bmdma_status = ata_bmdma_status,
538
539 .qc_prep = ata_qc_prep,
540 .qc_issue = ata_qc_issue_prot,
541 .eng_timeout = ata_eng_timeout,
542 .data_xfer = ata_pio_data_xfer,
543
544 .irq_handler = ata_interrupt,
545 .irq_clear = ata_bmdma_irq_clear,
546
547 .port_start = ata_port_start,
548 .port_stop = ata_port_stop,
549 .host_stop = ata_host_stop
550};
551
552static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
553{
554 static struct ata_port_info info[10] = {
555 { /* 0: AMD 7401 */
556 .sht = &amd_sht,
557 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
558 .pio_mask = 0x1f,
559 .mwdma_mask = 0x07, /* No SWDMA */
560 .udma_mask = 0x07, /* UDMA 33 */
561 .port_ops = &amd33_port_ops
562 },
563 { /* 1: Early AMD7409 - no swdma */
564 .sht = &amd_sht,
565 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
566 .pio_mask = 0x1f,
567 .mwdma_mask = 0x07,
568 .udma_mask = 0x1f, /* UDMA 66 */
569 .port_ops = &amd66_port_ops
570 },
571 { /* 2: AMD 7409, no swdma errata */
572 .sht = &amd_sht,
573 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
574 .pio_mask = 0x1f,
575 .mwdma_mask = 0x07,
576 .udma_mask = 0x1f, /* UDMA 66 */
577 .port_ops = &amd66_port_ops
578 },
579 { /* 3: AMD 7411 */
580 .sht = &amd_sht,
581 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
582 .pio_mask = 0x1f,
583 .mwdma_mask = 0x07,
584 .udma_mask = 0x3f, /* UDMA 100 */
585 .port_ops = &amd100_port_ops
586 },
587 { /* 4: AMD 7441 */
588 .sht = &amd_sht,
589 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
590 .pio_mask = 0x1f,
591 .mwdma_mask = 0x07,
592 .udma_mask = 0x3f, /* UDMA 100 */
593 .port_ops = &amd100_port_ops
594 },
595		{ /* 5: AMD 8111 */
596 .sht = &amd_sht,
597 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
598 .pio_mask = 0x1f,
599 .mwdma_mask = 0x07,
600 .udma_mask = 0x7f, /* UDMA 133, no swdma */
601 .port_ops = &amd133_port_ops
602 },
603 { /* 6: AMD 8111 UDMA 100 (Serenade) */
604 .sht = &amd_sht,
605 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
606 .pio_mask = 0x1f,
607 .mwdma_mask = 0x07,
608 .udma_mask = 0x3f, /* UDMA 100, no swdma */
609 .port_ops = &amd133_port_ops
610 },
611 { /* 7: Nvidia Nforce */
612 .sht = &amd_sht,
613 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
614 .pio_mask = 0x1f,
615 .mwdma_mask = 0x07,
616 .udma_mask = 0x3f, /* UDMA 100 */
617 .port_ops = &nv100_port_ops
618 },
619 { /* 8: Nvidia Nforce2 and later */
620 .sht = &amd_sht,
621 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
622 .pio_mask = 0x1f,
623 .mwdma_mask = 0x07,
624 .udma_mask = 0x7f, /* UDMA 133, no swdma */
625 .port_ops = &nv133_port_ops
626 },
627 { /* 9: AMD CS5536 (Geode companion) */
628 .sht = &amd_sht,
629 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
630 .pio_mask = 0x1f,
631 .mwdma_mask = 0x07,
632 .udma_mask = 0x3f, /* UDMA 100 */
633 .port_ops = &amd100_port_ops
634 }
635 };
636 static struct ata_port_info *port_info[2];
637 static int printed_version;
638 int type = id->driver_data;
639 u8 rev;
640 u8 fifo;
641
642 if (!printed_version++)
643 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
644
645 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
646 pci_read_config_byte(pdev, 0x41, &fifo);
647
648 /* Check for AMD7409 without swdma errata and if found adjust type */
649 if (type == 1 && rev > 0x7)
650 type = 2;
651
652 /* Check for AMD7411 */
653 if (type == 3)
654 /* FIFO is broken */
655 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
656 else
657 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
658
659 /* Serenade ? */
660 if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
661 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
662 type = 6; /* UDMA 100 only */
663
664 if (type < 3)
665 ata_pci_clear_simplex(pdev);
666
667 /* And fire it up */
668
669 port_info[0] = port_info[1] = &info[type];
670 return ata_pci_init_one(pdev, port_info, 2);
671}
672
673static const struct pci_device_id amd[] = {
674 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_COBRA_7401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
675 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
676 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
677 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
678 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
679 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
680 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
681 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
682 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
683 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
684 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
685 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
686 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
687 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
688 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
689 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
690 { 0, },
691};
692
693static struct pci_driver amd_pci_driver = {
694 .name = DRV_NAME,
695 .id_table = amd,
696 .probe = amd_init_one,
697 .remove = ata_pci_remove_one
698};
699
700static int __init amd_init(void)
701{
702 return pci_register_driver(&amd_pci_driver);
703}
704
705static void __exit amd_exit(void)
706{
707 pci_unregister_driver(&amd_pci_driver);
708}
709
710
711MODULE_AUTHOR("Alan Cox");
712MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
713MODULE_LICENSE("GPL");
714MODULE_DEVICE_TABLE(pci, amd);
715MODULE_VERSION(DRV_VERSION);
716
717module_init(amd_init);
718module_exit(amd_exit);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
new file mode 100644
index 000000000000..d6ef3bf1bac7
--- /dev/null
+++ b/drivers/ata/pata_artop.c
@@ -0,0 +1,518 @@
1/*
2 * pata_artop.c - ARTOP ATA controller driver
3 *
4 * (C) 2006 Red Hat <alan@redhat.com>
5 *
6 * Based in part on drivers/ide/pci/aec62xx.c
7 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
8 * 865/865R fixes for Macintosh card version from a patch to the old
9 * driver by Thibaut VARENE <varenet@parisc-linux.org>
10 * When setting the PCI latency we must set 0x80 or higher for burst
11 * performance, a fix from Alessandro Zummo <alessandro.zummo@towertech.it>
12 *
13 * TODO
14 * 850 serialization once the core supports it
15 * Investigate no_dsc on 850R
16 * Clock detect
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/pci.h>
22#include <linux/init.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/device.h>
26#include <scsi/scsi_host.h>
27#include <linux/libata.h>
28#include <linux/ata.h>
29
30#define DRV_NAME "pata_artop"
31#define DRV_VERSION "0.4.1"
32
33/*
34 * The ARTOP has 33 MHz and "over clocked" timing tables. Until we
35 * get PCI bus speed functionality we leave this as 0. It's a variable
36 * for when we get the functionality and also for folks wanting to
37 * test stuff.
38 */
39
40static int clock = 0;
41
42static int artop6210_pre_reset(struct ata_port *ap)
43{
44 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
45	static const struct pci_bits artop_enable_bits[] = {
46 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
47 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
48 };
49
50 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
51 ata_port_disable(ap);
52 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
53 return 0;
54 }
55 ap->cbl = ATA_CBL_PATA40;
56 return ata_std_prereset(ap);
57}
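
/*
 * Reading the pci_bits entries above: { reg, width, mask, value } asks
 * pci_test_config_bits() to check that the width-byte config field at
 * reg, masked with mask, equals value. So port 0 is enabled when bit 1
 * of config byte 0x4A is set, and port 1 when bit 2 is set.
 */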
58
59/**
60 * artop6210_error_handler - Perform port error recovery
61 * @ap: Port to probe
62 *
63 * LOCKING:
64 * None (inherited from caller).
65 */
66
67static void artop6210_error_handler(struct ata_port *ap)
68{
69 ata_bmdma_drive_eh(ap, artop6210_pre_reset,
70 ata_std_softreset, NULL,
71 ata_std_postreset);
72}
73
74/**
75 * artop6260_pre_reset - check for 40/80 pin
76 * @ap: Port
77 *
78 * The ARTOP hardware reports the cable detect bits in register 0x49.
79 * Nothing complicated needed here.
80 */
81
82static int artop6260_pre_reset(struct ata_port *ap)
83{
84 static const struct pci_bits artop_enable_bits[] = {
85 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
86 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
87 };
88
89 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
90 u8 tmp;
91
92	/* Odd-numbered device ids are the units with enable bits (the -R cards) */
93	if ((pdev->device & 1) && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
94 ata_port_disable(ap);
95 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
96 return 0;
97 }
98 pci_read_config_byte(pdev, 0x49, &tmp);
99	if (tmp & (1 << ap->port_no))
100 ap->cbl = ATA_CBL_PATA40;
101 else
102 ap->cbl = ATA_CBL_PATA80;
103 return ata_std_prereset(ap);
104}
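
/*
 * For example: port 0 tests bit 0 and port 1 tests bit 1 of config
 * byte 0x49; a set bit indicates the 40-wire cable, a clear bit the
 * 80-wire UDMA-capable cable.
 */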
105
106/**
107 * artop6260_error_handler - Perform port error recovery
108 * @ap: Port to probe
109 *
110 * LOCKING:
111 * None (inherited from caller).
112 */
113
114static void artop6260_error_handler(struct ata_port *ap)
115{
116 ata_bmdma_drive_eh(ap, artop6260_pre_reset,
117 ata_std_softreset, NULL,
118 ata_std_postreset);
119}
120
121/**
122 * artop6210_load_piomode - Load a set of PATA PIO timings
123 * @ap: Port whose timings we are configuring
124 * @adev: Device
125 * @pio: PIO mode
126 *
127 * Set PIO mode for device, in host controller PCI config space. This
128 * is used both to set PIO timings in PIO mode and also to set the
129 * matching PIO clocking for UDMA, as well as the MWDMA timings.
130 *
131 * LOCKING:
132 * None (inherited from caller).
133 */
134
135static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
136{
137 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
138 int dn = adev->devno + 2 * ap->port_no;
139 const u16 timing[2][5] = {
140 { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
141 { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
142
143 };
144 /* Load the PIO timing active/recovery bits */
145 pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
146}
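
/*
 * For example: the slave device on the secondary channel has
 * dn = 1 + 2 * 1 = 3, so its timing word lives at config offset
 * 0x40 + 2 * 3 = 0x46; the clock variable selects which row of the
 * timing table is loaded.
 */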
147
148/**
149 * artop6210_set_piomode - Initialize host controller PATA PIO timings
150 * @ap: Port whose timings we are configuring
151 * @adev: Device we are configuring
152 *
153 * Set PIO mode for device, in host controller PCI config space. For
154 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
155 * the event UDMA is used the later call to set_dmamode will set the
156 * bits as required.
157 *
158 * LOCKING:
159 * None (inherited from caller).
160 */
161
162static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
163{
164 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
165 int dn = adev->devno + 2 * ap->port_no;
166 u8 ultra;
167
168 artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
169
170 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
171 pci_read_config_byte(pdev, 0x54, &ultra);
172 ultra &= ~(3 << (2 * dn));
173 pci_write_config_byte(pdev, 0x54, ultra);
174}
175
176/**
177 * artop6260_load_piomode - Initialize host controller PATA PIO timings
178 * @ap: Port whose timings we are configuring
179 * @adev: Device we are configuring
180 * @pio: PIO mode
181 *
182 * Set PIO mode for device, in host controller PCI config space. The
183 * ARTOP6260 and relatives store the timing data differently.
184 *
185 * LOCKING:
186 * None (inherited from caller).
187 */
188
189static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
190{
191 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
192 int dn = adev->devno + 2 * ap->port_no;
193 const u8 timing[2][5] = {
194 { 0x00, 0x0A, 0x08, 0x33, 0x31 },
195 { 0x70, 0x7A, 0x78, 0x43, 0x41 }
196
197 };
198 /* Load the PIO timing active/recovery bits */
199 pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
200}
201
202/**
203 * artop6260_set_piomode - Initialize host controller PATA PIO timings
204 * @ap: Port whose timings we are configuring
205 * @adev: Device we are configuring
206 *
207 * Set PIO mode for device, in host controller PCI config space. For
208 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
209 * the event UDMA is used the later call to set_dmamode will set the
210 * bits as required.
211 *
212 * LOCKING:
213 * None (inherited from caller).
214 */
215
216static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
217{
218 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
219 u8 ultra;
220
221 artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
222
223 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
224 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
225 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
226 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
227}
228
229/**
230 * artop6210_set_dmamode - Initialize host controller PATA DMA timings
231 * @ap: Port whose timings we are configuring
232 * @adev: Device we are configuring
233 *
234 * Set DMA mode for device, in host controller PCI config space.
235 *
236 * LOCKING:
237 * None (inherited from caller).
238 */
239
240static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
241{
242 unsigned int pio;
243 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
244 int dn = adev->devno + 2 * ap->port_no;
245 u8 ultra;
246
247 if (adev->dma_mode == XFER_MW_DMA_0)
248 pio = 1;
249 else
250 pio = 4;
251
252 /* Load the PIO timing active/recovery bits */
253 artop6210_load_piomode(ap, adev, pio);
254
255 pci_read_config_byte(pdev, 0x54, &ultra);
256 ultra &= ~(3 << (2 * dn));
257
258 /* Add ultra DMA bits if in UDMA mode */
259 if (adev->dma_mode >= XFER_UDMA_0) {
260 u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
261 if (mode == 0)
262 mode = 1;
263 ultra |= (mode << (2 * dn));
264 }
265 pci_write_config_byte(pdev, 0x54, ultra);
266}
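
/*
 * A sketch of the packing assumed above: config byte 0x54 holds two
 * bits per drive, indexed by dn = devno + 2 * port_no. The value
 * written is (UDMA mode + 1 - clock), clamped to a minimum of 1, so a
 * field of 0 always means "UDMA disabled" for that drive:
 *
 *	ultra &= ~(3 << (2 * dn));	- clear the drive's 2-bit field
 *	ultra |= mode << (2 * dn);	- install the new mode
 */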
267
268/**
269 * artop6260_set_dmamode - Initialize host controller PATA DMA timings
270 * @ap: Port whose timings we are configuring
271 * @adev: Device we are configuring
272 *
273 * Set DMA mode for device, in host controller PCI config space. The
274 * ARTOP6260 and relatives store the timing data differently.
275 *
276 * LOCKING:
277 * None (inherited from caller).
278 */
279
280static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
281{
282	unsigned int pio;
283 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
284 u8 ultra;
285
286 if (adev->dma_mode == XFER_MW_DMA_0)
287 pio = 1;
288 else
289 pio = 4;
290
291 /* Load the PIO timing active/recovery bits */
292 artop6260_load_piomode(ap, adev, pio);
293
294 /* Add ultra DMA bits if in UDMA mode */
295 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
296 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
297 if (adev->dma_mode >= XFER_UDMA_0) {
298 u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
299 if (mode == 0)
300 mode = 1;
301 ultra |= (mode << (4 * adev->devno));
302 }
303 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
304}
305
306static struct scsi_host_template artop_sht = {
307 .module = THIS_MODULE,
308 .name = DRV_NAME,
309 .ioctl = ata_scsi_ioctl,
310 .queuecommand = ata_scsi_queuecmd,
311 .can_queue = ATA_DEF_QUEUE,
312 .this_id = ATA_SHT_THIS_ID,
313 .sg_tablesize = LIBATA_MAX_PRD,
314 .max_sectors = ATA_MAX_SECTORS,
315 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
316 .emulated = ATA_SHT_EMULATED,
317 .use_clustering = ATA_SHT_USE_CLUSTERING,
318 .proc_name = DRV_NAME,
319 .dma_boundary = ATA_DMA_BOUNDARY,
320 .slave_configure = ata_scsi_slave_config,
321 .bios_param = ata_std_bios_param,
322};
323
324static const struct ata_port_operations artop6210_ops = {
325 .port_disable = ata_port_disable,
326 .set_piomode = artop6210_set_piomode,
327 .set_dmamode = artop6210_set_dmamode,
328 .mode_filter = ata_pci_default_filter,
329
330 .tf_load = ata_tf_load,
331 .tf_read = ata_tf_read,
332 .check_status = ata_check_status,
333 .exec_command = ata_exec_command,
334 .dev_select = ata_std_dev_select,
335
336 .freeze = ata_bmdma_freeze,
337 .thaw = ata_bmdma_thaw,
338 .error_handler = artop6210_error_handler,
339 .post_internal_cmd = ata_bmdma_post_internal_cmd,
340
341 .bmdma_setup = ata_bmdma_setup,
342 .bmdma_start = ata_bmdma_start,
343 .bmdma_stop = ata_bmdma_stop,
344 .bmdma_status = ata_bmdma_status,
345 .qc_prep = ata_qc_prep,
346 .qc_issue = ata_qc_issue_prot,
347 .eng_timeout = ata_eng_timeout,
348 .data_xfer = ata_pio_data_xfer,
349
350 .irq_handler = ata_interrupt,
351 .irq_clear = ata_bmdma_irq_clear,
352
353 .port_start = ata_port_start,
354 .port_stop = ata_port_stop,
355 .host_stop = ata_host_stop,
356};
357
358static const struct ata_port_operations artop6260_ops = {
359 .port_disable = ata_port_disable,
360 .set_piomode = artop6260_set_piomode,
361 .set_dmamode = artop6260_set_dmamode,
362
363 .tf_load = ata_tf_load,
364 .tf_read = ata_tf_read,
365 .check_status = ata_check_status,
366 .exec_command = ata_exec_command,
367 .dev_select = ata_std_dev_select,
368
369 .freeze = ata_bmdma_freeze,
370 .thaw = ata_bmdma_thaw,
371 .error_handler = artop6260_error_handler,
372 .post_internal_cmd = ata_bmdma_post_internal_cmd,
373
374 .bmdma_setup = ata_bmdma_setup,
375 .bmdma_start = ata_bmdma_start,
376 .bmdma_stop = ata_bmdma_stop,
377 .bmdma_status = ata_bmdma_status,
378 .qc_prep = ata_qc_prep,
379 .qc_issue = ata_qc_issue_prot,
380 .data_xfer = ata_pio_data_xfer,
381
382 .eng_timeout = ata_eng_timeout,
383
384 .irq_handler = ata_interrupt,
385 .irq_clear = ata_bmdma_irq_clear,
386
387 .port_start = ata_port_start,
388 .port_stop = ata_port_stop,
389 .host_stop = ata_host_stop,
390};
391
392
393/**
394 * artop_init_one - Register ARTOP ATA PCI device with kernel services
395 * @pdev: PCI device to register
396 * @ent: Entry in artop_pci_tbl matching with @pdev
397 *
398 * Called from kernel PCI layer.
399 *
400 * LOCKING:
401 * Inherited from PCI layer (may sleep).
402 *
403 * RETURNS:
404 * Zero on success, or -ERRNO value.
405 */
406
407static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
408{
409 static int printed_version;
410 static struct ata_port_info info_6210 = {
411 .sht = &artop_sht,
412 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
413 .pio_mask = 0x1f, /* pio0-4 */
414 .mwdma_mask = 0x07, /* mwdma0-2 */
415 .udma_mask = ATA_UDMA2,
416 .port_ops = &artop6210_ops,
417 };
418 static struct ata_port_info info_626x = {
419 .sht = &artop_sht,
420 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
421 .pio_mask = 0x1f, /* pio0-4 */
422 .mwdma_mask = 0x07, /* mwdma0-2 */
423 .udma_mask = ATA_UDMA4,
424 .port_ops = &artop6260_ops,
425 };
426 static struct ata_port_info info_626x_fast = {
427 .sht = &artop_sht,
428 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
429 .pio_mask = 0x1f, /* pio0-4 */
430 .mwdma_mask = 0x07, /* mwdma0-2 */
431 .udma_mask = ATA_UDMA5,
432 .port_ops = &artop6260_ops,
433 };
434 struct ata_port_info *port_info[2];
435 struct ata_port_info *info;
436 int ports = 2;
437
438 if (!printed_version++)
439 dev_printk(KERN_DEBUG, &pdev->dev,
440 "version " DRV_VERSION "\n");
441
442 if (id->driver_data == 0) { /* 6210 variant */
443 info = &info_6210;
444 /* BIOS may have left us in UDMA, clear it before libata probe */
445 pci_write_config_byte(pdev, 0x54, 0);
446 /* For the moment (also lacks dsc) */
447 printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
448 printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
449 ports = 1;
450 }
451 else if (id->driver_data == 1) /* 6260 */
452 info = &info_626x;
453 else if (id->driver_data == 2) { /* 6260 or 6260 + fast */
454 unsigned long io = pci_resource_start(pdev, 4);
455 u8 reg;
456
457 info = &info_626x;
458 if (inb(io) & 0x10)
459 info = &info_626x_fast;
460		/* Mac systems come up with some registers not set the way
461		   we will need them, so fix them up here */
462
463 /* Clear reset & test bits */
464 pci_read_config_byte(pdev, 0x49, &reg);
465 pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
466
467 /* PCI latency must be > 0x80 for burst mode, tweak it
468 * if required.
469 */
470 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
471 if (reg <= 0x80)
472 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
473
474 /* Enable IRQ output and burst mode */
475 pci_read_config_byte(pdev, 0x4a, &reg);
476 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
477
478 }
479 port_info[0] = port_info[1] = info;
480 return ata_pci_init_one(pdev, port_info, ports);
481}
482
483static const struct pci_device_id artop_pci_tbl[] = {
484 { 0x1191, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
485 { 0x1191, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
486 { 0x1191, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
487 { 0x1191, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
488 { 0x1191, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
489 { } /* terminate list */
490};
491
492static struct pci_driver artop_pci_driver = {
493 .name = DRV_NAME,
494 .id_table = artop_pci_tbl,
495 .probe = artop_init_one,
496 .remove = ata_pci_remove_one,
497};
498
499static int __init artop_init(void)
500{
501 return pci_register_driver(&artop_pci_driver);
502}
503
504static void __exit artop_exit(void)
505{
506 pci_unregister_driver(&artop_pci_driver);
507}
508
509
510module_init(artop_init);
511module_exit(artop_exit);
512
513MODULE_AUTHOR("Alan Cox");
514MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
515MODULE_LICENSE("GPL");
516MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
517MODULE_VERSION(DRV_VERSION);
518
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
new file mode 100644
index 000000000000..3f78a1e54a75
--- /dev/null
+++ b/drivers/ata/pata_atiixp.c
@@ -0,0 +1,306 @@
1/*
2 * pata_atiixp.c - ATI PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on
7 *
8 * linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
9 *
10 * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
11 * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <linux/delay.h>
21#include <scsi/scsi_host.h>
22#include <linux/libata.h>
23
24#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.2"
26
27enum {
28 ATIIXP_IDE_PIO_TIMING = 0x40,
29 ATIIXP_IDE_MWDMA_TIMING = 0x44,
30 ATIIXP_IDE_PIO_CONTROL = 0x48,
31 ATIIXP_IDE_PIO_MODE = 0x4a,
32 ATIIXP_IDE_UDMA_CONTROL = 0x54,
33 ATIIXP_IDE_UDMA_MODE = 0x56
34};
35
36static int atiixp_pre_reset(struct ata_port *ap)
37{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 static struct pci_bits atiixp_enable_bits[] = {
40 { 0x48, 1, 0x01, 0x00 },
41 { 0x48, 1, 0x08, 0x00 }
42 };
43
44 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) {
45 ata_port_disable(ap);
46 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
47 return 0;
48 }
49 ap->cbl = ATA_CBL_PATA80;
50 return ata_std_prereset(ap);
51}
52
53static void atiixp_error_handler(struct ata_port *ap)
54{
55 ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
56}
57
58/**
59 * atiixp_set_pio_timing - set initial PIO mode data
60 * @ap: ATA interface
61 * @adev: ATA device
62 *
63 * Called by both the pio and dma setup functions to set the controller
64 * timings for PIO transfers. We must load both the mode number and
65 * timing values into the controller.
66 */
67
68static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
69{
70 static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
71
72 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
73 int dn = 2 * ap->port_no + adev->devno;
74
75 /* Check this is correct - the order is odd in both drivers */
76 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
77 u16 pio_mode_data, pio_timing_data;
78
79 pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
80 pio_mode_data &= ~(0x7 << (4 * dn));
81 pio_mode_data |= pio << (4 * dn);
82 pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
83
84 pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
85	pio_timing_data &= ~(0xFF << timing_shift);
86	pio_timing_data |= (pio_timings[pio] << timing_shift);
87 pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
88}
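
/*
 * For example: the primary master (port 0, devno 0) has dn = 0 and
 * timing_shift = 8, so its mode nibble lands in bits 0-2 of 0x4a and
 * its timing byte in bits 8-15 of 0x40. Note that on the secondary
 * channel timing_shift reaches 16 or 24, which falls outside the
 * 16-bit word read above; the timing block at 0x40 presumably spans a
 * dword, so a 32-bit access may be needed there (see the "check this
 * is correct" remark in the function body).
 */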
89
90/**
91 * atiixp_set_piomode - set initial PIO mode data
92 * @ap: ATA interface
93 * @adev: ATA device
94 *
95 * Called to do the PIO mode setup. We use a shared helper for this
96 * as the DMA setup must also adjust the PIO timing information.
97 */
98
99static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
100{
101 atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
102}
103
104/**
105 * atiixp_set_dmamode - set initial DMA mode data
106 * @ap: ATA interface
107 * @adev: ATA device
108 *
109 * Called to do the DMA mode setup. We use timing tables for most
110 * modes but must tune an appropriate PIO mode to match.
111 */
112
113static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
114{
115 static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
116
117 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
118 int dma = adev->dma_mode;
119 int dn = 2 * ap->port_no + adev->devno;
120 int wanted_pio;
121
122 if (adev->dma_mode >= XFER_UDMA_0) {
123 u16 udma_mode_data;
124
125 dma -= XFER_UDMA_0;
126
127 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
128 udma_mode_data &= ~(0x7 << (4 * dn));
129 udma_mode_data |= dma << (4 * dn);
130 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
131 } else {
132 u16 mwdma_timing_data;
133 /* Check this is correct - the order is odd in both drivers */
134 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
135
136 dma -= XFER_MW_DMA_0;
137
138 pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
139 mwdma_timing_data &= ~(0xFF << timing_shift);
140 mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
141 pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
142 }
143 /*
144 * We must now look at the PIO mode situation. We may need to
145 * adjust the PIO mode to keep the timings acceptable
146 */
147 if (adev->dma_mode >= XFER_MW_DMA_2)
148 wanted_pio = 4;
149 else if (adev->dma_mode == XFER_MW_DMA_1)
150 wanted_pio = 3;
151 else if (adev->dma_mode == XFER_MW_DMA_0)
152 wanted_pio = 0;
153 else BUG();
154
155 if (adev->pio_mode != wanted_pio)
156 atiixp_set_pio_timing(ap, adev, wanted_pio);
157}
158
159/**
160 * atiixp_bmdma_start - DMA start callback
161 * @qc: Command in progress
162 *
163 * When DMA begins we need to ensure that the UDMA control
164 * register for the channel is correctly set.
165 */
166
167static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
168{
169 struct ata_port *ap = qc->ap;
170 struct ata_device *adev = qc->dev;
171
172 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
173 int dn = (2 * ap->port_no) + adev->devno;
174 u16 tmp16;
175
176 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
177 if (adev->dma_mode >= XFER_UDMA_0)
178 tmp16 |= (1 << dn);
179 else
180 tmp16 &= ~(1 << dn);
181 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
182 ata_bmdma_start(qc);
183}
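
/*
 * For example: for the secondary slave dn = 3, so bit 3 of the UDMA
 * control word at 0x54 is set just before a UDMA transfer starts and
 * cleared again in atiixp_bmdma_stop() below, so that subsequent PIO
 * traffic does not run with UDMA framing.
 */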
184
185/**
186 * atiixp_bmdma_stop - DMA stop callback
187 * @qc: Command in progress
188 *
189 * DMA has completed. Clear the UDMA flag as the next operations will
190 * be PIO ones not UDMA data transfer.
191 */
192
193static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
194{
195 struct ata_port *ap = qc->ap;
196 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
197 int dn = (2 * ap->port_no) + qc->dev->devno;
198 u16 tmp16;
199
200 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
201 tmp16 &= ~(1 << dn);
202 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
203 ata_bmdma_stop(qc);
204}
205
206static struct scsi_host_template atiixp_sht = {
207 .module = THIS_MODULE,
208 .name = DRV_NAME,
209 .ioctl = ata_scsi_ioctl,
210 .queuecommand = ata_scsi_queuecmd,
211 .can_queue = ATA_DEF_QUEUE,
212 .this_id = ATA_SHT_THIS_ID,
213 .sg_tablesize = LIBATA_MAX_PRD,
214 .max_sectors = ATA_MAX_SECTORS,
215 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
216 .emulated = ATA_SHT_EMULATED,
217 .use_clustering = ATA_SHT_USE_CLUSTERING,
218 .proc_name = DRV_NAME,
219 .dma_boundary = ATA_DMA_BOUNDARY,
220 .slave_configure = ata_scsi_slave_config,
221 .bios_param = ata_std_bios_param,
222};
223
224static struct ata_port_operations atiixp_port_ops = {
225 .port_disable = ata_port_disable,
226 .set_piomode = atiixp_set_piomode,
227 .set_dmamode = atiixp_set_dmamode,
228 .mode_filter = ata_pci_default_filter,
229 .tf_load = ata_tf_load,
230 .tf_read = ata_tf_read,
231 .check_status = ata_check_status,
232 .exec_command = ata_exec_command,
233 .dev_select = ata_std_dev_select,
234
235 .freeze = ata_bmdma_freeze,
236 .thaw = ata_bmdma_thaw,
237 .error_handler = atiixp_error_handler,
238 .post_internal_cmd = ata_bmdma_post_internal_cmd,
239
240 .bmdma_setup = ata_bmdma_setup,
241 .bmdma_start = atiixp_bmdma_start,
242 .bmdma_stop = atiixp_bmdma_stop,
243 .bmdma_status = ata_bmdma_status,
244
245 .qc_prep = ata_qc_prep,
246 .qc_issue = ata_qc_issue_prot,
247 .eng_timeout = ata_eng_timeout,
248 .data_xfer = ata_pio_data_xfer,
249
250 .irq_handler = ata_interrupt,
251 .irq_clear = ata_bmdma_irq_clear,
252
253 .port_start = ata_port_start,
254 .port_stop = ata_port_stop,
255 .host_stop = ata_host_stop
256};
257
258static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
259{
260 static struct ata_port_info info = {
261 .sht = &atiixp_sht,
262 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
263 .pio_mask = 0x1f,
264 .mwdma_mask = 0x06, /* No MWDMA0 support */
265 .udma_mask = 0x3F,
266 .port_ops = &atiixp_port_ops
267 };
268 static struct ata_port_info *port_info[2] = { &info, &info };
269 return ata_pci_init_one(dev, port_info, 2);
270}
271
272static struct pci_device_id atiixp[] = {
273 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
274 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
275 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
276 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
277 { 0, },
278};
279
280static struct pci_driver atiixp_pci_driver = {
281 .name = DRV_NAME,
282 .id_table = atiixp,
283 .probe = atiixp_init_one,
284 .remove = ata_pci_remove_one
285};
286
287static int __init atiixp_init(void)
288{
289 return pci_register_driver(&atiixp_pci_driver);
290}
291
292
293static void __exit atiixp_exit(void)
294{
295 pci_unregister_driver(&atiixp_pci_driver);
296}
297
298
299MODULE_AUTHOR("Alan Cox");
300MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
301MODULE_LICENSE("GPL");
302MODULE_DEVICE_TABLE(pci, atiixp);
303MODULE_VERSION(DRV_VERSION);
304
305module_init(atiixp_init);
306module_exit(atiixp_exit);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
new file mode 100644
index 000000000000..abf1bb7bd322
--- /dev/null
+++ b/drivers/ata/pata_cmd64x.c
@@ -0,0 +1,505 @@
1/*
2 * pata_cmd64x.c - CMD64x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based upon
7 * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
8 *
9 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
10 * Note, this driver is not used at all on other systems because
11 * there the "BIOS" has done all of the following already.
12 * Due to massive hardware bugs, UltraDMA is only supported
13 * on the 646U2 and not on the 646U.
14 *
15 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
16 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
17 *
18 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
19 *
20 * TODO
21 * Testing work
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <scsi/scsi_host.h>
31#include <linux/libata.h>
32
33#define DRV_NAME "pata_cmd64x"
34#define DRV_VERSION "0.2.1"
35
36/*
37 * CMD64x specific registers definition.
38 */
39
40enum {
41 CFR = 0x50,
42 CFR_INTR_CH0 = 0x02,
43 CNTRL = 0x51,
44 CNTRL_DIS_RA0 = 0x40,
45 CNTRL_DIS_RA1 = 0x80,
46 CNTRL_ENA_2ND = 0x08,
47 CMDTIM = 0x52,
48 ARTTIM0 = 0x53,
49 DRWTIM0 = 0x54,
50 ARTTIM1 = 0x55,
51 DRWTIM1 = 0x56,
52 ARTTIM23 = 0x57,
53 ARTTIM23_DIS_RA2 = 0x04,
54 ARTTIM23_DIS_RA3 = 0x08,
55 ARTTIM23_INTR_CH1 = 0x10,
56 ARTTIM2 = 0x57,
57 ARTTIM3 = 0x57,
58 DRWTIM23 = 0x58,
59 DRWTIM2 = 0x58,
60 BRST = 0x59,
61 DRWTIM3 = 0x5b,
62 BMIDECR0 = 0x70,
63 MRDMODE = 0x71,
64 MRDMODE_INTR_CH0 = 0x04,
65 MRDMODE_INTR_CH1 = 0x08,
66 MRDMODE_BLK_CH0 = 0x10,
67 MRDMODE_BLK_CH1 = 0x20,
68 BMIDESR0 = 0x72,
69 UDIDETCR0 = 0x73,
70 DTPR0 = 0x74,
71 BMIDECR1 = 0x78,
72 BMIDECSR = 0x79,
73 BMIDESR1 = 0x7A,
74 UDIDETCR1 = 0x7B,
75 DTPR1 = 0x7C
76};
77
78static int cmd64x_pre_reset(struct ata_port *ap)
79{
80 ap->cbl = ATA_CBL_PATA40;
81 return ata_std_prereset(ap);
82}
83
84static int cmd648_pre_reset(struct ata_port *ap)
85{
86 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
87 u8 r;
88
89 /* Check cable detect bits */
90 pci_read_config_byte(pdev, BMIDECSR, &r);
91 if (r & (1 << ap->port_no))
92 ap->cbl = ATA_CBL_PATA80;
93 else
94 ap->cbl = ATA_CBL_PATA40;
95
96 return ata_std_prereset(ap);
97}
98
99static void cmd64x_error_handler(struct ata_port *ap)
100{
101	ata_bmdma_drive_eh(ap, cmd64x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
102}
103
104static void cmd648_error_handler(struct ata_port *ap)
105{
106 ata_bmdma_drive_eh(ap, cmd648_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
107}
108
109/**
110 * cmd64x_set_piomode - set initial PIO mode data
111 * @ap: ATA interface
112 * @adev: ATA device
113 *
114 * Called to do the PIO mode setup.
115 */
116
117static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
118{
119 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
120 struct ata_timing t;
121 const unsigned long T = 1000000 / 33;
122 const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
123
124 u8 reg;
125
126 /* Port layout is not logical so use a table */
127 const u8 arttim_port[2][2] = {
128 { ARTTIM0, ARTTIM1 },
129 { ARTTIM23, ARTTIM23 }
130 };
131 const u8 drwtim_port[2][2] = {
132 { DRWTIM0, DRWTIM1 },
133 { DRWTIM2, DRWTIM3 }
134 };
135
136 int arttim = arttim_port[ap->port_no][adev->devno];
137 int drwtim = drwtim_port[ap->port_no][adev->devno];
138
139
140 if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
141 printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
142 return;
143 }
144 if (ap->port_no) {
145 /* Slave has shared address setup */
146 struct ata_device *pair = ata_dev_pair(adev);
147
148 if (pair) {
149 struct ata_timing tp;
150 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
151 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
152 }
153 }
154
155 printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
156 t.active, t.recover, t.setup);
157 if (t.recover > 16) {
158 t.active += t.recover - 16;
159 t.recover = 16;
160 }
161 if (t.active > 16)
162 t.active = 16;
163
164 /* Now convert the clocks into values we can actually stuff into
165 the chip */
166
167 if (t.recover > 1)
168 t.recover--;
169 else
170 t.recover = 15;
171
172 if (t.setup > 4)
173 t.setup = 0xC0;
174 else
175 t.setup = setup_data[t.setup];
176
177 t.active &= 0x0F; /* 0 = 16 */
178
179 /* Load setup timing */
180 pci_read_config_byte(pdev, arttim, &reg);
181 reg &= 0x3F;
182 reg |= t.setup;
183 pci_write_config_byte(pdev, arttim, reg);
184
185 /* Load active/recovery */
186 pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
187}
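
/*
 * A worked example of the encoding above: 6 active / 4 recovery clocks
 * stores recovery - 1 = 3 in the low nibble and active = 6 in the high
 * nibble, giving a DRWTIM byte of 0x63; an active count of 16 becomes
 * 0 after the "& 0x0F" step, matching the "0 = 16" convention noted
 * in the code.
 */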
188
189/**
190 * cmd64x_set_dmamode - set initial DMA mode data
191 * @ap: ATA interface
192 * @adev: ATA device
193 *
194 * Called to do the DMA mode setup.
195 */
196
197static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
198{
199 static const u8 udma_data[] = {
200 0x31, 0x21, 0x11, 0x25, 0x15, 0x05
201 };
202 static const u8 mwdma_data[] = {
203 0x30, 0x20, 0x10
204 };
205
206 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
207 u8 regU, regD;
208
209 int pciU = UDIDETCR0 + 8 * ap->port_no;
210 int pciD = BMIDESR0 + 8 * ap->port_no;
211 int shift = 2 * adev->devno;
212
213 pci_read_config_byte(pdev, pciD, &regD);
214 pci_read_config_byte(pdev, pciU, &regU);
215
216	regD &= ~(0x20 << adev->devno);	/* match the enable bit set below */
217 regU &= ~(0x35 << shift);
218
219 if (adev->dma_mode >= XFER_UDMA_0)
220 regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
221 else
222 regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift;
223
224 regD |= 0x20 << adev->devno;
225
226 pci_write_config_byte(pdev, pciU, regU);
227 pci_write_config_byte(pdev, pciD, regD);
228}
229
230/**
231 * cmd648_bmdma_stop - DMA stop callback
232 * @qc: Command in progress
233 *
234 * DMA has completed.
235 */
236
237static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
238{
239 struct ata_port *ap = qc->ap;
240 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
241 u8 dma_intr;
242	int dma_reg = ap->port_no ? ARTTIM2 : CFR;
243	int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
244
245 ata_bmdma_stop(qc);
246
247 pci_read_config_byte(pdev, dma_reg, &dma_intr);
248 pci_write_config_byte(pdev, dma_reg, dma_intr | dma_mask);
249}
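
/*
 * For example: port 0 re-reads CFR (0x50) and ORs in CFR_INTR_CH0
 * (0x02), while port 1 uses ARTTIM2 (0x57) and ARTTIM23_INTR_CH1
 * (0x10); writing the bit back appears to acknowledge the latched
 * channel interrupt, as the old IDE cmd64x driver did.
 */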
250
251/**
252 * cmd646r1_bmdma_stop - DMA stop callback
253 * @qc: Command in progress
254 *
255 * Stub for now while investigating the r1 quirk in the old driver.
256 */
257
258static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
259{
260 ata_bmdma_stop(qc);
261}
262
263static struct scsi_host_template cmd64x_sht = {
264 .module = THIS_MODULE,
265 .name = DRV_NAME,
266 .ioctl = ata_scsi_ioctl,
267 .queuecommand = ata_scsi_queuecmd,
268 .can_queue = ATA_DEF_QUEUE,
269 .this_id = ATA_SHT_THIS_ID,
270 .sg_tablesize = LIBATA_MAX_PRD,
271 .max_sectors = ATA_MAX_SECTORS,
272 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
273 .emulated = ATA_SHT_EMULATED,
274 .use_clustering = ATA_SHT_USE_CLUSTERING,
275 .proc_name = DRV_NAME,
276 .dma_boundary = ATA_DMA_BOUNDARY,
277 .slave_configure = ata_scsi_slave_config,
278 .bios_param = ata_std_bios_param,
279};
280
281static struct ata_port_operations cmd64x_port_ops = {
282 .port_disable = ata_port_disable,
283 .set_piomode = cmd64x_set_piomode,
284 .set_dmamode = cmd64x_set_dmamode,
285 .mode_filter = ata_pci_default_filter,
286 .tf_load = ata_tf_load,
287 .tf_read = ata_tf_read,
288 .check_status = ata_check_status,
289 .exec_command = ata_exec_command,
290 .dev_select = ata_std_dev_select,
291
292 .freeze = ata_bmdma_freeze,
293 .thaw = ata_bmdma_thaw,
294 .error_handler = cmd64x_error_handler,
295 .post_internal_cmd = ata_bmdma_post_internal_cmd,
296
297 .bmdma_setup = ata_bmdma_setup,
298 .bmdma_start = ata_bmdma_start,
299 .bmdma_stop = ata_bmdma_stop,
300 .bmdma_status = ata_bmdma_status,
301
302 .qc_prep = ata_qc_prep,
303 .qc_issue = ata_qc_issue_prot,
304 .eng_timeout = ata_eng_timeout,
305 .data_xfer = ata_pio_data_xfer,
306
307 .irq_handler = ata_interrupt,
308 .irq_clear = ata_bmdma_irq_clear,
309
310 .port_start = ata_port_start,
311 .port_stop = ata_port_stop,
312 .host_stop = ata_host_stop
313};
314
315static struct ata_port_operations cmd646r1_port_ops = {
316 .port_disable = ata_port_disable,
317 .set_piomode = cmd64x_set_piomode,
318 .set_dmamode = cmd64x_set_dmamode,
319 .mode_filter = ata_pci_default_filter,
320 .tf_load = ata_tf_load,
321 .tf_read = ata_tf_read,
322 .check_status = ata_check_status,
323 .exec_command = ata_exec_command,
324 .dev_select = ata_std_dev_select,
325
326 .freeze = ata_bmdma_freeze,
327 .thaw = ata_bmdma_thaw,
328 .error_handler = cmd64x_error_handler,
329 .post_internal_cmd = ata_bmdma_post_internal_cmd,
330
331 .bmdma_setup = ata_bmdma_setup,
332 .bmdma_start = ata_bmdma_start,
333 .bmdma_stop = cmd646r1_bmdma_stop,
334 .bmdma_status = ata_bmdma_status,
335
336 .qc_prep = ata_qc_prep,
337 .qc_issue = ata_qc_issue_prot,
338 .eng_timeout = ata_eng_timeout,
339 .data_xfer = ata_pio_data_xfer,
340
341 .irq_handler = ata_interrupt,
342 .irq_clear = ata_bmdma_irq_clear,
343
344 .port_start = ata_port_start,
345 .port_stop = ata_port_stop,
346 .host_stop = ata_host_stop
347};
348
349static struct ata_port_operations cmd648_port_ops = {
350 .port_disable = ata_port_disable,
351 .set_piomode = cmd64x_set_piomode,
352 .set_dmamode = cmd64x_set_dmamode,
353 .mode_filter = ata_pci_default_filter,
354 .tf_load = ata_tf_load,
355 .tf_read = ata_tf_read,
356 .check_status = ata_check_status,
357 .exec_command = ata_exec_command,
358 .dev_select = ata_std_dev_select,
359
360 .freeze = ata_bmdma_freeze,
361 .thaw = ata_bmdma_thaw,
362 .error_handler = cmd648_error_handler,
363 .post_internal_cmd = ata_bmdma_post_internal_cmd,
364
365 .bmdma_setup = ata_bmdma_setup,
366 .bmdma_start = ata_bmdma_start,
367 .bmdma_stop = cmd648_bmdma_stop,
368 .bmdma_status = ata_bmdma_status,
369
370 .qc_prep = ata_qc_prep,
371 .qc_issue = ata_qc_issue_prot,
372 .eng_timeout = ata_eng_timeout,
373 .data_xfer = ata_pio_data_xfer,
374
375 .irq_handler = ata_interrupt,
376 .irq_clear = ata_bmdma_irq_clear,
377
378 .port_start = ata_port_start,
379 .port_stop = ata_port_stop,
380 .host_stop = ata_host_stop
381};
382
383static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
384{
385 u32 class_rev;
386
387 static struct ata_port_info cmd_info[6] = {
388 { /* CMD 643 - no UDMA */
389 .sht = &cmd64x_sht,
390 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
391 .pio_mask = 0x1f,
392 .mwdma_mask = 0x07,
393 .port_ops = &cmd64x_port_ops
394 },
395 { /* CMD 646 with broken UDMA */
396 .sht = &cmd64x_sht,
397 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
398 .pio_mask = 0x1f,
399 .mwdma_mask = 0x07,
400 .port_ops = &cmd64x_port_ops
401 },
402 { /* CMD 646 with working UDMA */
403 .sht = &cmd64x_sht,
404 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
405 .pio_mask = 0x1f,
406 .mwdma_mask = 0x07,
407 .udma_mask = ATA_UDMA1,
408 .port_ops = &cmd64x_port_ops
409 },
410 { /* CMD 646 rev 1 */
411 .sht = &cmd64x_sht,
412 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
413 .pio_mask = 0x1f,
414 .mwdma_mask = 0x07,
415 .port_ops = &cmd646r1_port_ops
416 },
417 { /* CMD 648 */
418 .sht = &cmd64x_sht,
419 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
420 .pio_mask = 0x1f,
421 .mwdma_mask = 0x07,
422 .udma_mask = ATA_UDMA2,
423 .port_ops = &cmd648_port_ops
424 },
425 { /* CMD 649 */
426 .sht = &cmd64x_sht,
427 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
428 .pio_mask = 0x1f,
429 .mwdma_mask = 0x07,
430 .udma_mask = ATA_UDMA3,
431 .port_ops = &cmd648_port_ops
432 }
433 };
434 static struct ata_port_info *port_info[2], *info;
435 u8 mrdmode;
436
437 info = &cmd_info[id->driver_data];
438
439 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
440 class_rev &= 0xFF;
441
442 if (id->driver_data == 0) /* 643 */
443 ata_pci_clear_simplex(pdev);
444
445 if (pdev->device == PCI_DEVICE_ID_CMD_646) {
446 /* Does UDMA work ? */
447 if (class_rev > 4)
448 info = &cmd_info[2];
449 /* Early rev with other problems ? */
450 else if (class_rev == 1)
451 info = &cmd_info[3];
452 }
453
454 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
455 pci_read_config_byte(pdev, MRDMODE, &mrdmode);
456 mrdmode &= ~ 0x30; /* IRQ set up */
457 mrdmode |= 0x02; /* Memory read line enable */
458 pci_write_config_byte(pdev, MRDMODE, mrdmode);
459
460 /* Force PIO 0 here.. */
461
462 /* PPC specific fixup copied from old driver */
463#ifdef CONFIG_PPC
464 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
465#endif
466
467 port_info[0] = port_info[1] = info;
468 return ata_pci_init_one(pdev, port_info, 2);
469}
470
471static struct pci_device_id cmd64x[] = {
472 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
473 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
474 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
475 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
476 { 0, },
477};
478
479static struct pci_driver cmd64x_pci_driver = {
480 .name = DRV_NAME,
481 .id_table = cmd64x,
482 .probe = cmd64x_init_one,
483 .remove = ata_pci_remove_one
484};
485
486static int __init cmd64x_init(void)
487{
488 return pci_register_driver(&cmd64x_pci_driver);
489}
490
491
492static void __exit cmd64x_exit(void)
493{
494 pci_unregister_driver(&cmd64x_pci_driver);
495}
496
497
498MODULE_AUTHOR("Alan Cox");
499MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
500MODULE_LICENSE("GPL");
501MODULE_DEVICE_TABLE(pci, cmd64x);
502MODULE_VERSION(DRV_VERSION);
503
504module_init(cmd64x_init);
505module_exit(cmd64x_exit);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
new file mode 100644
index 000000000000..792ce4828510
--- /dev/null
+++ b/drivers/ata/pata_cs5520.c
@@ -0,0 +1,336 @@
1/*
2 * IDE tuning and bus mastering support for the CS5510/CS5520
3 * chipsets
4 *
5 * The CS5510/CS5520 are slightly unusual devices. Unlike the
6 * typical IDE controllers they do bus mastering with the drive in
7 * PIO mode and smarter silicon.
8 *
9 * The practical upshot of this is that we must always tune the
10 * drive for the right PIO mode. We must also ignore all the blacklists
11 * and the drive bus mastering DMA information. Also, to confuse matters
12 * further, we can do DMA on PIO-only drives.
13 *
14 * Early revisions of the 5510 additionally require that we
15 * disable_hlt() while DMA is in progress.
16 *
17 * *** This driver is strictly experimental ***
18 *
19 * (c) Copyright Red Hat Inc 2002
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2, or (at your option) any
24 * later version.
25 *
26 * This program is distributed in the hope that it will be useful, but
27 * WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
29 * General Public License for more details.
30 *
31 * Documentation:
32 * Not publicly available.
33 */
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <scsi/scsi_host.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "pata_cs5520"
44#define DRV_VERSION "0.6.2"
45
46struct pio_clocks
47{
48 int address;
49 int assert;
50 int recovery;
51};
52
53static const struct pio_clocks cs5520_pio_clocks[]={
54 {3, 6, 11},
55 {2, 5, 6},
56 {1, 4, 3},
57 {1, 3, 2},
58 {1, 2, 1}
59};
60
61/**
62 * cs5520_set_timings - program PIO timings
63 * @ap: ATA port
64 * @adev: ATA device
65 *
66 * Program the PIO mode timings for the controller according to the pio
67 * clocking table.
68 */
69
70static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
71{
72 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
73 int slave = adev->devno;
74
75 pio -= XFER_PIO_0;
76
77 /* Channel command timing */
78 pci_write_config_byte(pdev, 0x62 + ap->port_no,
79 (cs5520_pio_clocks[pio].recovery << 4) |
80 (cs5520_pio_clocks[pio].assert));
81 /* FIXME: should these use address ? */
82 /* Read command timing */
83 pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave,
84 (cs5520_pio_clocks[pio].recovery << 4) |
85 (cs5520_pio_clocks[pio].assert));
86 /* Write command timing */
87 pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave,
88 (cs5520_pio_clocks[pio].recovery << 4) |
89 (cs5520_pio_clocks[pio].assert));
90}
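
/*
 * For example: the secondary slave (port 1, devno 1) has its channel
 * command timing at 0x62 + 1 = 0x63, read command timing at
 * 0x64 + 4 + 1 = 0x69 and write command timing at 0x66 + 4 + 1 = 0x6B,
 * each byte packing recovery clocks in the high nibble and assert
 * clocks in the low nibble.
 */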
91
92/**
93 * cs5520_enable_dma - turn on DMA bits
94 *
95 * Turn on the DMA bits for this disk. Needed because the BIOS probably
96 * has not done the work for us. Belongs in the core libata code.
97 */
98
99static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
100{
101 /* Set the DMA enable/disable flag */
102 u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02);
103 reg |= 1<<(adev->devno + 5);
104 outb(reg, ap->ioaddr.bmdma_addr + 0x02);
105}
106
107/**
108 * cs5520_set_dmamode - program DMA timings
109 * @ap: ATA port
110 * @adev: ATA device
111 *
112 * Program the DMA mode timings for the controller according to the pio
113 * clocking table. Note that this device sets the DMA timings to PIO
114 * mode values. This may seem bizarre but the 5520 architecture talks
115 * PIO mode to the disk and DMA mode to the controller so the underlying
116 * transfers are PIO timed.
117 */
118
119static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev)
120{
121 static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
122	cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode - XFER_MW_DMA_0]);
123 cs5520_enable_dma(ap, adev);
124}
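
/*
 * For example: a drive negotiated to MWDMA2 indexes dma_xlate[2] and
 * is therefore programmed with XFER_PIO_4 timings - on this part the
 * controller bus masters while the wire is clocked at PIO rates, as
 * described at the top of this file.
 */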
125
126/**
127 * cs5520_set_piomode - program PIO timings
128 * @ap: ATA port
129 * @adev: ATA device
130 *
131 * Program the PIO mode timings for the controller according to the pio
132 * clocking table. We know pio_mode will equal dma_mode because of the
133 * CS5520 architecture, at least once we have turned DMA on and written
134 * a mode setter.
135 */
136
137static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
138{
139 cs5520_set_timings(ap, adev, adev->pio_mode);
140}
141
142
143static int cs5520_pre_reset(struct ata_port *ap)
144{
145 ap->cbl = ATA_CBL_PATA40;
146 return ata_std_prereset(ap);
147}
148
149static void cs5520_error_handler(struct ata_port *ap)
150{
151	ata_bmdma_drive_eh(ap, cs5520_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
152}
153
154static struct scsi_host_template cs5520_sht = {
155 .module = THIS_MODULE,
156 .name = DRV_NAME,
157 .ioctl = ata_scsi_ioctl,
158 .queuecommand = ata_scsi_queuecmd,
159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING,
166 .proc_name = DRV_NAME,
167 .dma_boundary = ATA_DMA_BOUNDARY,
168 .slave_configure = ata_scsi_slave_config,
169 .bios_param = ata_std_bios_param,
170};
171
172static struct ata_port_operations cs5520_port_ops = {
173 .port_disable = ata_port_disable,
174 .set_piomode = cs5520_set_piomode,
175 .set_dmamode = cs5520_set_dmamode,
176
177 .tf_load = ata_tf_load,
178 .tf_read = ata_tf_read,
179 .check_status = ata_check_status,
180 .exec_command = ata_exec_command,
181 .dev_select = ata_std_dev_select,
182
183 .freeze = ata_bmdma_freeze,
184 .thaw = ata_bmdma_thaw,
185 .error_handler = cs5520_error_handler,
186 .post_internal_cmd = ata_bmdma_post_internal_cmd,
187
188 .bmdma_setup = ata_bmdma_setup,
189 .bmdma_start = ata_bmdma_start,
190 .bmdma_stop = ata_bmdma_stop,
191 .bmdma_status = ata_bmdma_status,
192 .qc_prep = ata_qc_prep,
193 .qc_issue = ata_qc_issue_prot,
194 .data_xfer = ata_pio_data_xfer,
195
196 .eng_timeout = ata_eng_timeout,
197
198 .irq_handler = ata_interrupt,
199 .irq_clear = ata_bmdma_irq_clear,
200
201 .port_start = ata_port_start,
202 .port_stop = ata_port_stop,
203 .host_stop = ata_host_stop,
204};
205
206static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
207{
208 u8 pcicfg;
209 static struct ata_probe_ent probe[2];
210 int ports = 0;
211
212 /* IDE port enable bits */
213 pci_read_config_byte(dev, 0x60, &pcicfg);
214
215 /* Check if the ATA ports are enabled */
216 if ((pcicfg & 3) == 0)
217 return -ENODEV;
218
219 if ((pcicfg & 0x40) == 0) {
220 printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
221 pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
222 }
223
224 /* Perform set up for DMA */
225 if (pci_enable_device_bars(dev, 1<<2)) {
226 printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
227 return -ENODEV;
228 }
229 pci_set_master(dev);
230 if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
231 printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
232 return -ENODEV;
233 }
234 if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
235 printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
236 return -ENODEV;
237 }
238
239 /* We have to do our own plumbing as the PCI setup for this
240 chipset is non-standard so we can't punt to the libata code */
241
242 INIT_LIST_HEAD(&probe[0].node);
243 probe[0].dev = pci_dev_to_dev(dev);
244 probe[0].port_ops = &cs5520_port_ops;
245 probe[0].sht = &cs5520_sht;
246 probe[0].pio_mask = 0x1F;
247 probe[0].mwdma_mask = id->driver_data;
248 probe[0].irq = 14;
249 probe[0].irq_flags = 0;
250 probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
251 probe[0].n_ports = 1;
252 probe[0].port[0].cmd_addr = 0x1F0;
253 probe[0].port[0].ctl_addr = 0x3F6;
254 probe[0].port[0].altstatus_addr = 0x3F6;
255 probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2);
256
257 /* The secondary lurks at different addresses but is otherwise
258 the same beastie */
259
260 probe[1] = probe[0];
261 INIT_LIST_HEAD(&probe[1].node);
262 probe[1].irq = 15;
263 probe[1].port[0].cmd_addr = 0x170;
264 probe[1].port[0].ctl_addr = 0x376;
265 probe[1].port[0].altstatus_addr = 0x376;
266 probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8;
267
268 /* Let libata fill in the port details */
269 ata_std_ports(&probe[0].port[0]);
270 ata_std_ports(&probe[1].port[0]);
271
272 /* Now add the ports that are active */
273 if (pcicfg & 1)
274 ports += ata_device_add(&probe[0]);
275 if (pcicfg & 2)
276 ports += ata_device_add(&probe[1]);
277 if (ports)
278 return 0;
279 return -ENODEV;
280}
281
282/**
283 * cs5520_remove_one - device unload
284 * @pdev: PCI device being removed
285 *
286 * Handle an unplug/unload event for a PCI device. Unload the
287 * PCI driver but do not use the default handler as we manage
288 * resources ourself and *MUST NOT* disable the device as it has
289 * other functions.
290 */
291
292static void __devexit cs5520_remove_one(struct pci_dev *pdev)
293{
294 struct device *dev = pci_dev_to_dev(pdev);
295 struct ata_host *host = dev_get_drvdata(dev);
296
297 ata_host_remove(host);
298 dev_set_drvdata(dev, NULL);
299}
300
301/* For now keep DMA off. We can set it for all but A rev CS5510 once the
302 core ATA code can handle it */
303
304static struct pci_device_id pata_cs5520[] = {
305 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
306 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
307 { 0, },
308};
309
310static struct pci_driver cs5520_pci_driver = {
311 .name = DRV_NAME,
312 .id_table = pata_cs5520,
313 .probe = cs5520_init_one,
314 .remove = cs5520_remove_one
315};
316
317
318static int __init cs5520_init(void)
319{
320 return pci_register_driver(&cs5520_pci_driver);
321}
322
323static void __exit cs5520_exit(void)
324{
325 pci_unregister_driver(&cs5520_pci_driver);
326}
327
328MODULE_AUTHOR("Alan Cox");
329MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
330MODULE_LICENSE("GPL");
331MODULE_DEVICE_TABLE(pci, pata_cs5520);
332MODULE_VERSION(DRV_VERSION);
333
334module_init(cs5520_init);
335module_exit(cs5520_exit);
336
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
new file mode 100644
index 000000000000..f3d8a3bc1e78
--- /dev/null
+++ b/drivers/ata/pata_cs5530.c
@@ -0,0 +1,387 @@
1/*
2 * pata_cs5530.c - CS5530 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon cs5530.c by Mark Lord.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Loosely based on the piix & svwks drivers.
22 *
23 * Documentation:
24 * Available from AMD web site.
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35#include <linux/dmi.h>
36
37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.6"
39
40/**
41 * cs5530_set_piomode - PIO setup
42 * @ap: ATA interface
43 * @adev: device on the interface
44 *
45 * Set our PIO requirements. This is fairly simple on the CS5530
46 * chips.
47 */
48
49static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
50{
51 static const unsigned int cs5530_pio_timings[2][5] = {
52 {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
53 {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
54 };
55	unsigned long base = (ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
56 u32 tuning;
57 int format;
58
59 /* Find out which table to use */
60 tuning = inl(base + 0x04);
61 format = (tuning & 0x80000000UL) ? 1 : 0;
62
63 /* Now load the right timing register */
64 if (adev->devno)
65 base += 0x08;
66
67 outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
68}
69
70/**
71 * cs5530_set_dmamode - DMA timing setup
72 * @ap: ATA interface
73 * @adev: Device being configured
74 *
75 * We cannot mix MWDMA and UDMA without reloading the timings on each
76 * switch between master and slave. We track the last DMA setup in
77 * order to minimise reloads.
78 */
79
80static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
81{
82	unsigned long base = (ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
83 u32 tuning, timing = 0;
84 u8 reg;
85
86 /* Find out which table to use */
87 tuning = inl(base + 0x04);
88
89	switch (adev->dma_mode) {
90	case XFER_UDMA_0:
91		timing = 0x00921250; break;
92	case XFER_UDMA_1:
93		timing = 0x00911140; break;
94	case XFER_UDMA_2:
95		timing = 0x00911030; break;
96	case XFER_MW_DMA_0:
97		timing = 0x00077771; break;
98	case XFER_MW_DMA_1:
99		timing = 0x00012121; break;
100	case XFER_MW_DMA_2:
101		timing = 0x00002020; break;
102	default:
103		BUG();
104	}
105 /* Merge in the PIO format bit */
106 timing |= (tuning & 0x80000000UL);
107 if (adev->devno == 0) /* Master */
108 outl(timing, base + 0x04);
109 else {
110 if (timing & 0x00100000)
111 tuning |= 0x00100000; /* UDMA for both */
112 else
113 tuning &= ~0x00100000; /* MWDMA for both */
114 outl(tuning, base + 0x04);
115 outl(timing, base + 0x0C);
116 }
117
118 /* Set the DMA capable bit in the BMDMA area */
119 reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
120 reg |= (1 << (5 + adev->devno));
121 outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
122
123 /* Remember the last DMA setup we did */
124
125 ap->private_data = adev;
126}
127
128/**
129 * cs5530_qc_issue_prot - command issue
130 * @qc: command pending
131 *
132 * Called when the libata layer is about to issue a command. We wrap
133 * this interface so that we can load the correct ATA timings if
134 * necessary. Specifically we have a problem that there is only
135 * one MWDMA/UDMA bit.
136 */
137
138static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
139{
140 struct ata_port *ap = qc->ap;
141 struct ata_device *adev = qc->dev;
142 struct ata_device *prev = ap->private_data;
143
144 /* See if the DMA settings could be wrong */
145 if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
146 /* Maybe, but do the channels match MWDMA/UDMA ? */
147 if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
148 (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
149 /* Switch the mode bits */
150 cs5530_set_dmamode(ap, adev);
151 }
152
153 return ata_qc_issue_prot(qc);
154}
155
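The reload test in cs5530_qc_issue_prot() boils down to asking whether the outgoing and the previously configured device sit on opposite sides of the MWDMA/UDMA boundary. A minimal sketch of that predicate, with a hypothetical helper name (XFER_UDMA_0 is the same libata constant used above):

	/* Illustrative only: do two transfer modes disagree on the shared
	 * MWDMA-vs-UDMA channel bit? Non-zero means a timing reload is needed.
	 */
	static inline int cs5530_modes_clash(u8 mode_a, u8 mode_b)
	{
		return (mode_a >= XFER_UDMA_0) != (mode_b >= XFER_UDMA_0);
	}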
156static int cs5530_pre_reset(struct ata_port *ap)
157{
158 ap->cbl = ATA_CBL_PATA40;
159 return ata_std_prereset(ap);
160}
161
162static void cs5530_error_handler(struct ata_port *ap)
163{
164	ata_bmdma_drive_eh(ap, cs5530_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
165}
166
167
168static struct scsi_host_template cs5530_sht = {
169 .module = THIS_MODULE,
170 .name = DRV_NAME,
171 .ioctl = ata_scsi_ioctl,
172 .queuecommand = ata_scsi_queuecmd,
173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD,
176 .max_sectors = ATA_MAX_SECTORS,
177 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
178 .emulated = ATA_SHT_EMULATED,
179 .use_clustering = ATA_SHT_USE_CLUSTERING,
180 .proc_name = DRV_NAME,
181 .dma_boundary = ATA_DMA_BOUNDARY,
182 .slave_configure = ata_scsi_slave_config,
183 .bios_param = ata_std_bios_param,
184};
185
186static struct ata_port_operations cs5530_port_ops = {
187 .port_disable = ata_port_disable,
188 .set_piomode = cs5530_set_piomode,
189 .set_dmamode = cs5530_set_dmamode,
190 .mode_filter = ata_pci_default_filter,
191
192 .tf_load = ata_tf_load,
193 .tf_read = ata_tf_read,
194 .check_status = ata_check_status,
195 .exec_command = ata_exec_command,
196 .dev_select = ata_std_dev_select,
197
198 .bmdma_setup = ata_bmdma_setup,
199 .bmdma_start = ata_bmdma_start,
200 .bmdma_stop = ata_bmdma_stop,
201 .bmdma_status = ata_bmdma_status,
202
203 .freeze = ata_bmdma_freeze,
204 .thaw = ata_bmdma_thaw,
205 .error_handler = cs5530_error_handler,
206 .post_internal_cmd = ata_bmdma_post_internal_cmd,
207
208 .qc_prep = ata_qc_prep,
209 .qc_issue = cs5530_qc_issue_prot,
210 .eng_timeout = ata_eng_timeout,
211 .data_xfer = ata_pio_data_xfer,
212
213 .irq_handler = ata_interrupt,
214 .irq_clear = ata_bmdma_irq_clear,
215
216 .port_start = ata_port_start,
217 .port_stop = ata_port_stop,
218 .host_stop = ata_host_stop
219};
220
221static struct dmi_system_id palmax_dmi_table[] = {
222 {
223 .ident = "Palmax PD1100",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
226 DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
227 },
228 },
229 { }
230};
231
232static int cs5530_is_palmax(void)
233{
234 if (dmi_check_system(palmax_dmi_table)) {
235 printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
236 return 1;
237 }
238 return 0;
239}
240
241/**
242 * cs5530_init_one - Initialise a CS5530
243 * @dev: PCI device
244 * @id: Entry in match table
245 *
246 * Install a driver for the newly found CS5530 companion chip. Most of
247 * this is just housekeeping. We have to set the chip up correctly and
248 * turn off various bits of emulation magic.
249 */
250
251static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
252{
253 int compiler_warning_pointless_fix;
254 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
255 static struct ata_port_info info = {
256 .sht = &cs5530_sht,
257 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
258 .pio_mask = 0x1f,
259 .mwdma_mask = 0x07,
260 .udma_mask = 0x07,
261 .port_ops = &cs5530_port_ops
262 };
263 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
264 static struct ata_port_info info_palmax_secondary = {
265 .sht = &cs5530_sht,
266 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
267 .pio_mask = 0x1f,
268 .port_ops = &cs5530_port_ops
269 };
270 static struct ata_port_info *port_info[2] = { &info, &info };
271
272	struct pci_dev *sdev = NULL;	/* scan iterator: must not clobber our own dev */
273	while ((sdev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, sdev)) != NULL) {
274		switch (sdev->device) {
275		case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
276			master_0 = pci_dev_get(sdev);
277			break;
278		case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
279			cs5530_0 = pci_dev_get(sdev);
280			break;
281 }
282 }
283 if (!master_0) {
284 printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
285 goto fail_put;
286 }
287 if (!cs5530_0) {
288 printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
289 goto fail_put;
290 }
291
292 pci_set_master(cs5530_0);
293 compiler_warning_pointless_fix = pci_set_mwi(cs5530_0);
294
295 /*
296 * Set PCI CacheLineSize to 16-bytes:
297 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
298 *
299 * Note: This value is constant because the 5530 is only a Geode companion
300 */
301
302 pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
303
304 /*
305 * Disable trapping of UDMA register accesses (Win98 hack):
306 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
307 */
308
309 pci_write_config_word(cs5530_0, 0xd0, 0x5006);
310
311 /*
312 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
313 * The other settings are what is necessary to get the register
314 * into a sane state for IDE DMA operation.
315 */
316
317 pci_write_config_byte(master_0, 0x40, 0x1e);
318
319 /*
320 * Set max PCI burst size (16-bytes seems to work best):
321 * 16bytes: set bit-1 at 0x41 (reg value of 0x16)
322 * all others: clear bit-1 at 0x41, and do:
323 * 128bytes: OR 0x00 at 0x41
324 * 256bytes: OR 0x04 at 0x41
325 * 512bytes: OR 0x08 at 0x41
326 * 1024bytes: OR 0x0c at 0x41
327 */
328
329 pci_write_config_byte(master_0, 0x41, 0x14);
330
331 /*
332 * These settings are necessary to get the chip
333 * into a sane state for IDE DMA operation.
334 */
335
336 pci_write_config_byte(master_0, 0x42, 0x00);
337 pci_write_config_byte(master_0, 0x43, 0xc1);
338
339 pci_dev_put(master_0);
340 pci_dev_put(cs5530_0);
341
342 if (cs5530_is_palmax())
343 port_info[1] = &info_palmax_secondary;
344
345 /* Now kick off ATA set up */
346 return ata_pci_init_one(dev, port_info, 2);
347
348fail_put:
349 if (master_0)
350 pci_dev_put(master_0);
351 if (cs5530_0)
352 pci_dev_put(cs5530_0);
353 return -ENODEV;
354}
355
356static struct pci_device_id cs5530[] = {
357 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
358 { 0, },
359};
360
361static struct pci_driver cs5530_pci_driver = {
362 .name = DRV_NAME,
363 .id_table = cs5530,
364 .probe = cs5530_init_one,
365 .remove = ata_pci_remove_one
366};
367
368static int __init cs5530_init(void)
369{
370 return pci_register_driver(&cs5530_pci_driver);
371}
372
373
374static void __exit cs5530_exit(void)
375{
376 pci_unregister_driver(&cs5530_pci_driver);
377}
378
379
380MODULE_AUTHOR("Alan Cox");
381MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
382MODULE_LICENSE("GPL");
383MODULE_DEVICE_TABLE(pci, cs5530);
384MODULE_VERSION(DRV_VERSION);
385
386module_init(cs5530_init);
387module_exit(cs5530_exit);
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
new file mode 100644
index 000000000000..69d6b4258724
--- /dev/null
+++ b/drivers/ata/pata_cs5535.c
@@ -0,0 +1,291 @@
1/*
2 * pata_cs5535.c - CS5535 PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon cs5535.c from AMD <Jens.Altmann@amd.com>, as cleaned up and
7 * made readable and converted to Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
8 * and Alexander Kiausch <alex.kiausch@t-online.de>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Loosely based on the piix & svwks drivers.
24 *
25 * Documentation:
26 * Available from AMD web site.
27 * TODO
28 * Review errata to see if serializing is necessary
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <scsi/scsi_host.h>
38#include <linux/libata.h>
39#include <asm/msr.h>
40
41#define DRV_NAME "cs5535"
42#define DRV_VERSION "0.2.10"
43
44/*
45 * The Geode (now sold by AMD as the Geode GX) uses an internal MSR-based
46 * bus system for control. Demented, but there you go.
47 */
48
49#define MSR_ATAC_BASE 0x51300000
50#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
51#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
52#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
53#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
54#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
55#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
56#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
57#define ATAC_RESET (MSR_ATAC_BASE+0x10)
58#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
59#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
60#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
61#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
62#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
63
64#define ATAC_BM0_CMD_PRIM 0x00
65#define ATAC_BM0_STS_PRIM 0x02
66#define ATAC_BM0_PRD 0x04
67
68#define CS5535_CABLE_DETECT 0x48
69
70#define CS5535_BAD_PIO(timings) (((timings) & ~0x80000000UL) == 0x00009172)
71
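For reference, a minimal sketch of how the macro above pairs with the rdmsr()/wrmsr() interface the driver already uses; the helper name is hypothetical, and the 0xF7F4F7F4 PIO0 value mirrors cs5535_init_one() below:

	/* Illustrative only: re-initialise one drive's PIO timing MSR if the
	 * BIOS left it at the known-bad power-on value.
	 */
	static void cs5535_fixup_pio(u32 msr)
	{
		u32 timings, dummy;

		rdmsr(msr, timings, dummy);		/* 64-bit MSR read as lo/hi */
		if (CS5535_BAD_PIO(timings))
			wrmsr(msr, 0xF7F4F7F4UL, 0);	/* force PIO0 timings */
	}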
72/**
73 * cs5535_pre_reset - detect cable type
74 * @ap: Port to detect on
75 *
76 * Perform cable detection for ATA66 capable cable. Return a libata
77 * cable type.
78 */
79
80static int cs5535_pre_reset(struct ata_port *ap)
81{
82 u8 cable;
83 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
84
85 pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
86 if (cable & 1)
87 ap->cbl = ATA_CBL_PATA80;
88 else
89 ap->cbl = ATA_CBL_PATA40;
90 return ata_std_prereset(ap);
91}
92
93/**
94 * cs5535_error_handler - reset/probe
95 * @ap: Port to reset
96 *
97 * Reset and configure a port
98 */
99
100static void cs5535_error_handler(struct ata_port *ap)
101{
102 ata_bmdma_drive_eh(ap, cs5535_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
103}
104
105/**
106 * cs5535_set_piomode - PIO setup
107 * @ap: ATA interface
108 * @adev: device on the interface
109 *
110 * Set our PIO requirements. The CS5535 is pretty clean about all this
111 */
112
113static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
114{
115 static const u16 pio_timings[5] = {
116 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
117 };
118 static const u16 pio_cmd_timings[5] = {
119 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
120 };
121 u32 reg, dummy;
122 struct ata_device *pair = ata_dev_pair(adev);
123
124 int mode = adev->pio_mode - XFER_PIO_0;
125 int cmdmode = mode;
126
127 /* Command timing has to be for the lowest of the pair of devices */
128 if (pair) {
129 int pairmode = pair->pio_mode - XFER_PIO_0;
130 cmdmode = min(mode, pairmode);
131 /* Write the other drive timing register if it changed */
132 if (cmdmode < pairmode)
133 wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
134 pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
135 }
136 /* Write the drive timing register */
137 wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
138 pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
139
140 /* Set the PIO "format 1" bit in the DMA timing register */
141 rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
142 wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
143}
144
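The wrmsr() calls above pack two 16-bit timing values into the low word of the MSR: command timings in the upper half, data timings in the lower half. A hypothetical compose helper makes the layout explicit:

	/* Illustrative only: layout of the per-drive PIO timing MSR word */
	static inline u32 cs5535_pio_word(u16 cmd_timing, u16 data_timing)
	{
		return ((u32)cmd_timing << 16) | data_timing;
	}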
145/**
146 * cs5535_set_dmamode - DMA timing setup
147 * @ap: ATA interface
148 * @adev: Device being configured
149 *
150 */
151
152static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
153{
154 static const u32 udma_timings[5] = {
155 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
156 };
157 static const u32 mwdma_timings[3] = {
158 0x7F0FFFF3, 0x7F035352, 0x7F024241
159 };
160 u32 reg, dummy;
161 int mode = adev->dma_mode;
162
163 rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
164 reg &= 0x80000000UL;
165 if (mode >= XFER_UDMA_0)
166 reg |= udma_timings[mode - XFER_UDMA_0];
167 else
168 reg |= mwdma_timings[mode - XFER_MW_DMA_0];
169 wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
170}
171
172static struct scsi_host_template cs5535_sht = {
173 .module = THIS_MODULE,
174 .name = DRV_NAME,
175 .ioctl = ata_scsi_ioctl,
176 .queuecommand = ata_scsi_queuecmd,
177 .can_queue = ATA_DEF_QUEUE,
178 .this_id = ATA_SHT_THIS_ID,
179 .sg_tablesize = LIBATA_MAX_PRD,
180 .max_sectors = ATA_MAX_SECTORS,
181 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
182 .emulated = ATA_SHT_EMULATED,
183 .use_clustering = ATA_SHT_USE_CLUSTERING,
184 .proc_name = DRV_NAME,
185 .dma_boundary = ATA_DMA_BOUNDARY,
186 .slave_configure = ata_scsi_slave_config,
187 .bios_param = ata_std_bios_param,
188};
189
190static struct ata_port_operations cs5535_port_ops = {
191 .port_disable = ata_port_disable,
192 .set_piomode = cs5535_set_piomode,
193 .set_dmamode = cs5535_set_dmamode,
194 .mode_filter = ata_pci_default_filter,
195
196 .tf_load = ata_tf_load,
197 .tf_read = ata_tf_read,
198 .check_status = ata_check_status,
199 .exec_command = ata_exec_command,
200 .dev_select = ata_std_dev_select,
201
202 .freeze = ata_bmdma_freeze,
203 .thaw = ata_bmdma_thaw,
204 .error_handler = cs5535_error_handler,
205 .post_internal_cmd = ata_bmdma_post_internal_cmd,
206
207 .bmdma_setup = ata_bmdma_setup,
208 .bmdma_start = ata_bmdma_start,
209 .bmdma_stop = ata_bmdma_stop,
210 .bmdma_status = ata_bmdma_status,
211
212 .qc_prep = ata_qc_prep,
213 .qc_issue = ata_qc_issue_prot,
214 .eng_timeout = ata_eng_timeout,
215 .data_xfer = ata_pio_data_xfer,
216
217 .irq_handler = ata_interrupt,
218 .irq_clear = ata_bmdma_irq_clear,
219
220 .port_start = ata_port_start,
221 .port_stop = ata_port_stop,
222 .host_stop = ata_host_stop
223};
224
225/**
226 * cs5535_init_one - Initialise a CS5535
227 * @dev: PCI device
228 * @id: Entry in match table
229 *
230 * Install a driver for the newly found CS5535 companion chip. Most of
231 * this is just housekeeping. We have to set the chip up correctly and
232 * turn off various bits of emulation magic.
233 */
234
235static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
236{
237 static struct ata_port_info info = {
238 .sht = &cs5535_sht,
239 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
240 .pio_mask = 0x1f,
241 .mwdma_mask = 0x07,
242 .udma_mask = 0x1f,
243 .port_ops = &cs5535_port_ops
244 };
245 struct ata_port_info *ports[1] = { &info };
246
247 u32 timings, dummy;
248
249	/* Check whether the BIOS set the initial timing clocks. If not, set
250	   the timings for PIO0 */
251 rdmsr(ATAC_CH0D0_PIO, timings, dummy);
252 if (CS5535_BAD_PIO(timings))
253 wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
254 rdmsr(ATAC_CH0D1_PIO, timings, dummy);
255 if (CS5535_BAD_PIO(timings))
256 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
257 return ata_pci_init_one(dev, ports, 1);
258}
259
260static struct pci_device_id cs5535[] = {
261 { PCI_DEVICE(PCI_VENDOR_ID_NS, 0x002D), },
262 { 0, },
263};
264
265static struct pci_driver cs5535_pci_driver = {
266 .name = DRV_NAME,
267 .id_table = cs5535,
268 .probe = cs5535_init_one,
269 .remove = ata_pci_remove_one
270};
271
272static int __init cs5535_init(void)
273{
274 return pci_register_driver(&cs5535_pci_driver);
275}
276
277
278static void __exit cs5535_exit(void)
279{
280 pci_unregister_driver(&cs5535_pci_driver);
281}
282
283
284MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgang Zuleger, Alexander Kiausch");
285MODULE_DESCRIPTION("low-level driver for the NS/AMD CS5535");
286MODULE_LICENSE("GPL");
287MODULE_DEVICE_TABLE(pci, cs5535);
288MODULE_VERSION(DRV_VERSION);
289
290module_init(cs5535_init);
291module_exit(cs5535_exit);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
new file mode 100644
index 000000000000..fd55474e0d15
--- /dev/null
+++ b/drivers/ata/pata_cypress.c
@@ -0,0 +1,227 @@
1/*
2 * pata_cypress.c - Cypress PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based heavily on
7 * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/blkdev.h>
16#include <linux/delay.h>
17#include <scsi/scsi_host.h>
18#include <linux/libata.h>
19
20#define DRV_NAME "pata_cypress"
21#define DRV_VERSION "0.1.2"
22
23/* here are the offset definitions for the registers */
24
25enum {
26 CY82_IDE_CMDREG = 0x04,
27 CY82_IDE_ADDRSETUP = 0x48,
28 CY82_IDE_MASTER_IOR = 0x4C,
29 CY82_IDE_MASTER_IOW = 0x4D,
30 CY82_IDE_SLAVE_IOR = 0x4E,
31 CY82_IDE_SLAVE_IOW = 0x4F,
32 CY82_IDE_MASTER_8BIT = 0x50,
33 CY82_IDE_SLAVE_8BIT = 0x51,
34
35 CY82_INDEX_PORT = 0x22,
36 CY82_DATA_PORT = 0x23,
37
38 CY82_INDEX_CTRLREG1 = 0x01,
39 CY82_INDEX_CHANNEL0 = 0x30,
40 CY82_INDEX_CHANNEL1 = 0x31,
41 CY82_INDEX_TIMEOUT = 0x32
42};
43
44static int cy82c693_pre_reset(struct ata_port *ap)
45{
46 ap->cbl = ATA_CBL_PATA40;
47 return ata_std_prereset(ap);
48}
49
50static void cy82c693_error_handler(struct ata_port *ap)
51{
52 ata_bmdma_drive_eh(ap, cy82c693_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
53}
54
55/**
56 * cy82c693_set_piomode - set initial PIO mode data
57 * @ap: ATA interface
58 * @adev: ATA device
59 *
60 * Called to do the PIO mode setup.
61 */
62
63static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
64{
65 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
66 struct ata_timing t;
67 const unsigned long T = 1000000 / 33;
68 short time_16, time_8;
69 u32 addr;
70
71 if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
72		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
73 return;
74 }
75
76 time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
77 time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
78
79 if (adev->devno == 0) {
80 pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
81
82 addr &= ~0x0F; /* Mask bits */
83 addr |= FIT(t.setup, 0, 15);
84
85 pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
86 pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
87 pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
88 pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
89 } else {
90 pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
91
92 addr &= ~0xF0; /* Mask bits */
93 addr |= (FIT(t.setup, 0, 15) << 4);
94
95 pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
96 pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
97 pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
98 pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
99 }
100}
101
102/**
103 * cy82c693_set_dmamode - set initial DMA mode data
104 * @ap: ATA interface
105 * @adev: ATA device
106 *
107 * Called to do the DMA mode setup.
108 */
109
110static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
111{
112 int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
113
114 /* Be afraid, be very afraid. Magic registers in low I/O space */
115 outb(reg, 0x22);
116 outb(adev->dma_mode - XFER_MW_DMA_0, 0x23);
117
118	/* 0x50 gives the best behaviour on the Alphas using this chip */
119 outb(CY82_INDEX_TIMEOUT, 0x22);
120 outb(0x50, 0x23);
121}
122
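The outb() pairs above follow the classic index/data convention: write the register index to CY82_INDEX_PORT (0x22), then the value to CY82_DATA_PORT (0x23). A hedged helper capturing the pattern (not part of the driver):

	/* Illustrative only: one indexed write into the CY82C693 control space */
	static inline void cy82_indexed_write(u8 index, u8 value)
	{
		outb(index, CY82_INDEX_PORT);	/* select the register */
		outb(value, CY82_DATA_PORT);	/* program its contents */
	}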
123static struct scsi_host_template cy82c693_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING,
135 .proc_name = DRV_NAME,
136 .dma_boundary = ATA_DMA_BOUNDARY,
137 .slave_configure = ata_scsi_slave_config,
138 .bios_param = ata_std_bios_param,
139};
140
141static struct ata_port_operations cy82c693_port_ops = {
142 .port_disable = ata_port_disable,
143 .set_piomode = cy82c693_set_piomode,
144 .set_dmamode = cy82c693_set_dmamode,
145 .mode_filter = ata_pci_default_filter,
146
147 .tf_load = ata_tf_load,
148 .tf_read = ata_tf_read,
149 .check_status = ata_check_status,
150 .exec_command = ata_exec_command,
151 .dev_select = ata_std_dev_select,
152
153 .freeze = ata_bmdma_freeze,
154 .thaw = ata_bmdma_thaw,
155 .error_handler = cy82c693_error_handler,
156 .post_internal_cmd = ata_bmdma_post_internal_cmd,
157
158 .bmdma_setup = ata_bmdma_setup,
159 .bmdma_start = ata_bmdma_start,
160 .bmdma_stop = ata_bmdma_stop,
161 .bmdma_status = ata_bmdma_status,
162
163 .qc_prep = ata_qc_prep,
164 .qc_issue = ata_qc_issue_prot,
165 .eng_timeout = ata_eng_timeout,
166 .data_xfer = ata_pio_data_xfer,
167
168 .irq_handler = ata_interrupt,
169 .irq_clear = ata_bmdma_irq_clear,
170
171 .port_start = ata_port_start,
172 .port_stop = ata_port_stop,
173 .host_stop = ata_host_stop
174};
175
176static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
177{
178 static struct ata_port_info info = {
179 .sht = &cy82c693_sht,
180 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
181 .pio_mask = 0x1f,
182 .mwdma_mask = 0x07,
183 .port_ops = &cy82c693_port_ops
184 };
185 static struct ata_port_info *port_info[1] = { &info };
186
187	/* Devfn 1 is the ATA primary. The secondary is magic and lives on devfn 2.
188	   For the moment we don't handle the secondary. FIXME */
189
190 if (PCI_FUNC(pdev->devfn) != 1)
191 return -ENODEV;
192
193 return ata_pci_init_one(pdev, port_info, 1);
194}
195
196static struct pci_device_id cy82c693[] = {
197 { PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
198 { 0, },
199};
200
201static struct pci_driver cy82c693_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = cy82c693,
204 .probe = cy82c693_init_one,
205 .remove = ata_pci_remove_one
206};
207
208static int __init cy82c693_init(void)
209{
210 return pci_register_driver(&cy82c693_pci_driver);
211}
212
213
214static void __exit cy82c693_exit(void)
215{
216 pci_unregister_driver(&cy82c693_pci_driver);
217}
218
219
220MODULE_AUTHOR("Alan Cox");
221MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
222MODULE_LICENSE("GPL");
223MODULE_DEVICE_TABLE(pci, cy82c693);
224MODULE_VERSION(DRV_VERSION);
225
226module_init(cy82c693_init);
227module_exit(cy82c693_exit);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
new file mode 100644
index 000000000000..c30bc181304f
--- /dev/null
+++ b/drivers/ata/pata_efar.c
@@ -0,0 +1,342 @@
1/*
2 * pata_efar.c - EFAR PIIX clone controller driver
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
9 * Intel ICH controllers the EFAR widened the UDMA mode register bits
10 * and doesn't require the funky clock selection.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22#include <linux/ata.h>
23
24#define DRV_NAME "pata_efar"
25#define DRV_VERSION "0.4.1"
26
27/**
28 * efar_pre_reset - check for 40/80 pin
29 * @ap: Port
30 *
31 * Perform cable detection for the EFAR ATA interface. This is
32 * different to the PIIX arrangement
33 */
34
35static int efar_pre_reset(struct ata_port *ap)
36{
37 static const struct pci_bits efar_enable_bits[] = {
38 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
39 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
40 };
41
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43 u8 tmp;
44
45 if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) {
46 ata_port_disable(ap);
47 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
48 return 0;
49 }
50 pci_read_config_byte(pdev, 0x47, &tmp);
51 if (tmp & (2 >> ap->port_no))
52 ap->cbl = ATA_CBL_PATA40;
53 else
54 ap->cbl = ATA_CBL_PATA80;
55 return ata_std_prereset(ap);
56}
57
58/**
59 *	efar_error_handler - Reset and reconfigure a port
60 *	@ap: Port to reset
61 *
62 * LOCKING:
63 * None (inherited from caller).
64 */
65
66static void efar_error_handler(struct ata_port *ap)
67{
68 ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
69}
70
71/**
72 * efar_set_piomode - Initialize host controller PATA PIO timings
73 * @ap: Port whose timings we are configuring
74 *	@adev: Device whose timings we are configuring
75 *
76 * Set PIO mode for device, in host controller PCI config space.
77 *
78 * LOCKING:
79 * None (inherited from caller).
80 */
81
82static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
83{
84 unsigned int pio = adev->pio_mode - XFER_PIO_0;
85 struct pci_dev *dev = to_pci_dev(ap->host->dev);
86 unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
87 u16 idetm_data;
88 int control = 0;
89
90 /*
91	 * See Intel Document 298600-004 for the timing programming rules
92	 * for PIIX/ICH. The EFAR is a clone, so it is very similar.
93 */
94
95 static const /* ISP RTC */
96 u8 timings[][2] = { { 0, 0 },
97 { 0, 0 },
98 { 1, 0 },
99 { 2, 1 },
100 { 2, 3 }, };
101
102 if (pio > 2)
103 control |= 1; /* TIME1 enable */
104 if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
105 control |= 2; /* IE enable */
106 /* Intel specifies that the PPE functionality is for disk only */
107 if (adev->class == ATA_DEV_ATA)
108 control |= 4; /* PPE enable */
109
110 pci_read_config_word(dev, idetm_port, &idetm_data);
111
112 /* Enable PPE, IE and TIME as appropriate */
113
114 if (adev->devno == 0) {
115 idetm_data &= 0xCCF0;
116 idetm_data |= control;
117 idetm_data |= (timings[pio][0] << 12) |
118 (timings[pio][1] << 8);
119 } else {
120 int shift = 4 * ap->port_no;
121 u8 slave_data;
122
123 idetm_data &= 0xCC0F;
124 idetm_data |= (control << 4);
125
126		/* Slave timing in separate register */
127 pci_read_config_byte(dev, 0x44, &slave_data);
128		slave_data &= ap->port_no ? 0x0F : 0xF0;	/* keep the other channel's nibble */
129 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
130 pci_write_config_byte(dev, 0x44, slave_data);
131 }
132
133 idetm_data |= 0x4000; /* Ensure SITRE is enabled */
134 pci_write_config_word(dev, idetm_port, idetm_data);
135}
136
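To make the encoding concrete: for PIO 4 on the master of an ATA disk, control becomes PPE|IE|TIME1 = 7 and the table above gives ISP/RTC = 2/3, so, ignoring the preserved bits, the IDETIM word works out as in this sketch (the variable name is illustrative):

	/* Illustrative only: IDETIM for PIO4, master device, ATA disk:
	 * SITRE (0x4000) | ISP << 12 | RTC << 8 | control
	 */
	u16 idetm_example = 0x4000 | (2 << 12) | (3 << 8) | 7;	/* == 0x6307 */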
137/**
138 * efar_set_dmamode - Initialize host controller PATA DMA timings
139 * @ap: Port whose timings we are configuring
140 * @adev: Device to program
141 *
142 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
143 *
144 * LOCKING:
145 * None (inherited from caller).
146 */
147
148static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
149{
150 struct pci_dev *dev = to_pci_dev(ap->host->dev);
151 u8 master_port = ap->port_no ? 0x42 : 0x40;
152 u16 master_data;
153 u8 speed = adev->dma_mode;
154 int devid = adev->devno + 2 * ap->port_no;
155 u8 udma_enable;
156
157 static const /* ISP RTC */
158 u8 timings[][2] = { { 0, 0 },
159 { 0, 0 },
160 { 1, 0 },
161 { 2, 1 },
162 { 2, 3 }, };
163
164 pci_read_config_word(dev, master_port, &master_data);
165 pci_read_config_byte(dev, 0x48, &udma_enable);
166
167 if (speed >= XFER_UDMA_0) {
168 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
169 u16 udma_timing;
170
171 udma_enable |= (1 << devid);
172
173 /* Load the UDMA mode number */
174 pci_read_config_word(dev, 0x4A, &udma_timing);
175 udma_timing &= ~(7 << (4 * devid));
176 udma_timing |= udma << (4 * devid);
177 pci_write_config_word(dev, 0x4A, udma_timing);
178 } else {
179 /*
180 * MWDMA is driven by the PIO timings. We must also enable
181 * IORDY unconditionally along with TIME1. PPE has already
182 * been set when the PIO timing was set.
183 */
184 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
185 unsigned int control;
186 u8 slave_data;
187 const unsigned int needed_pio[3] = {
188 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
189 };
190 int pio = needed_pio[mwdma] - XFER_PIO_0;
191
192 control = 3; /* IORDY|TIME1 */
193
194 /* If the drive MWDMA is faster than it can do PIO then
195 we must force PIO into PIO0 */
196
197 if (adev->pio_mode < needed_pio[mwdma])
198 /* Enable DMA timing only */
199 control |= 8; /* PIO cycles in PIO0 */
200
201 if (adev->devno) { /* Slave */
202 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
203 master_data |= control << 4;
204 pci_read_config_byte(dev, 0x44, &slave_data);
205			slave_data &= (ap->port_no ? 0x0F : 0xF0);	/* keep the other channel's nibble */
206 /* Load the matching timing */
207 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
208 pci_write_config_byte(dev, 0x44, slave_data);
209 } else { /* Master */
210 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
211 and master timing bits */
212 master_data |= control;
213 master_data |=
214 (timings[pio][0] << 12) |
215 (timings[pio][1] << 8);
216 }
217 udma_enable &= ~(1 << devid);
218 pci_write_config_word(dev, master_port, master_data);
219 }
220 pci_write_config_byte(dev, 0x48, udma_enable);
221}
222
223static struct scsi_host_template efar_sht = {
224 .module = THIS_MODULE,
225 .name = DRV_NAME,
226 .ioctl = ata_scsi_ioctl,
227 .queuecommand = ata_scsi_queuecmd,
228 .can_queue = ATA_DEF_QUEUE,
229 .this_id = ATA_SHT_THIS_ID,
230 .sg_tablesize = LIBATA_MAX_PRD,
231 .max_sectors = ATA_MAX_SECTORS,
232 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
233 .emulated = ATA_SHT_EMULATED,
234 .use_clustering = ATA_SHT_USE_CLUSTERING,
235 .proc_name = DRV_NAME,
236 .dma_boundary = ATA_DMA_BOUNDARY,
237 .slave_configure = ata_scsi_slave_config,
238 .bios_param = ata_std_bios_param,
239};
240
241static const struct ata_port_operations efar_ops = {
242 .port_disable = ata_port_disable,
243 .set_piomode = efar_set_piomode,
244 .set_dmamode = efar_set_dmamode,
245 .mode_filter = ata_pci_default_filter,
246
247 .tf_load = ata_tf_load,
248 .tf_read = ata_tf_read,
249 .check_status = ata_check_status,
250 .exec_command = ata_exec_command,
251 .dev_select = ata_std_dev_select,
252
253 .freeze = ata_bmdma_freeze,
254 .thaw = ata_bmdma_thaw,
255 .error_handler = efar_error_handler,
256 .post_internal_cmd = ata_bmdma_post_internal_cmd,
257
258 .bmdma_setup = ata_bmdma_setup,
259 .bmdma_start = ata_bmdma_start,
260 .bmdma_stop = ata_bmdma_stop,
261 .bmdma_status = ata_bmdma_status,
262 .qc_prep = ata_qc_prep,
263 .qc_issue = ata_qc_issue_prot,
264 .data_xfer = ata_pio_data_xfer,
265
266 .eng_timeout = ata_eng_timeout,
267
268 .irq_handler = ata_interrupt,
269 .irq_clear = ata_bmdma_irq_clear,
270
271 .port_start = ata_port_start,
272 .port_stop = ata_port_stop,
273 .host_stop = ata_host_stop,
274};
275
276
277/**
278 * efar_init_one - Register EFAR ATA PCI device with kernel services
279 * @pdev: PCI device to register
280 * @ent: Entry in efar_pci_tbl matching with @pdev
281 *
282 * Called from kernel PCI layer.
283 *
284 * LOCKING:
285 * Inherited from PCI layer (may sleep).
286 *
287 * RETURNS:
288 * Zero on success, or -ERRNO value.
289 */
290
291static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
292{
293 static int printed_version;
294 static struct ata_port_info info = {
295 .sht = &efar_sht,
296 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
297 .pio_mask = 0x1f, /* pio0-4 */
298		.mwdma_mask	= 0x07, /* mwdma0-2 */
299		.udma_mask	= 0x1f, /* UDMA66 (UDMA0-4) */
300 .port_ops = &efar_ops,
301 };
302 static struct ata_port_info *port_info[2] = { &info, &info };
303
304 if (!printed_version++)
305 dev_printk(KERN_DEBUG, &pdev->dev,
306 "version " DRV_VERSION "\n");
307
308 return ata_pci_init_one(pdev, port_info, 2);
309}
310
311static const struct pci_device_id efar_pci_tbl[] = {
312 { 0x1055, 0x9130, PCI_ANY_ID, PCI_ANY_ID, },
313 { } /* terminate list */
314};
315
316static struct pci_driver efar_pci_driver = {
317 .name = DRV_NAME,
318 .id_table = efar_pci_tbl,
319 .probe = efar_init_one,
320 .remove = ata_pci_remove_one,
321};
322
323static int __init efar_init(void)
324{
325 return pci_register_driver(&efar_pci_driver);
326}
327
328static void __exit efar_exit(void)
329{
330 pci_unregister_driver(&efar_pci_driver);
331}
332
333
334module_init(efar_init);
335module_exit(efar_exit);
336
337MODULE_AUTHOR("Alan Cox");
338MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
339MODULE_LICENSE("GPL");
340MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
341MODULE_VERSION(DRV_VERSION);
342
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
new file mode 100644
index 000000000000..94bb1dfc3f19
--- /dev/null
+++ b/drivers/ata/pata_hpt366.c
@@ -0,0 +1,478 @@
1/*
2 * Libata driver for the highpoint 366 and 368 UDMA66 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 *
13 * TODO
14 * Maybe PLL mode
15 * Look into engine reset on timeout errors. Should not be
16 * required.
17 */
18
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/init.h>
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <scsi/scsi_host.h>
27#include <linux/libata.h>
28
29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.5"
31
32struct hpt_clock {
33 u8 xfer_speed;
34 u32 timing;
35};
36
37/* key for bus clock timings
38 * bit
39 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
40 * DMA. cycles = value + 1
41 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
42 * DMA. cycles = value + 1
43 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
44 * register access.
45 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
46 * register access.
47 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
48 * during task file register access.
49 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
50 * xfer.
51 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
52 * register access.
53 * 28 UDMA enable
54 * 29 DMA enable
55 * 30 PIO_MST enable. if set, the chip is in bus master mode during
56 * PIO.
57 * 31 FIFO enable.
58 */
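Because the packed clock words below are easy to misread, here is a hedged decoder sketch that prints the fields named in the key (a debugging aid, not part of the driver):

	/* Illustrative only: decode a clock word per the bit key above */
	static void hpt_decode_timing(u32 t)
	{
		printk(KERN_DEBUG "data hi/lo %u/%u cmd hi/lo %u/%u udma %u "
		       "udma_en %u dma_en %u pio_mst %u fifo %u\n",
		       (t & 0xf) + 1, ((t >> 4) & 0x1f) + 1,
		       (t >> 9) & 0xf, (t >> 13) & 0x1f, (t >> 18) & 0xf,
		       (t >> 28) & 1, (t >> 29) & 1, (t >> 30) & 1, (t >> 31) & 1);
	}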
59
60static const struct hpt_clock hpt366_40[] = {
61 { XFER_UDMA_4, 0x900fd943 },
62 { XFER_UDMA_3, 0x900ad943 },
63 { XFER_UDMA_2, 0x900bd943 },
64 { XFER_UDMA_1, 0x9008d943 },
65 { XFER_UDMA_0, 0x9008d943 },
66
67 { XFER_MW_DMA_2, 0xa008d943 },
68 { XFER_MW_DMA_1, 0xa010d955 },
69 { XFER_MW_DMA_0, 0xa010d9fc },
70
71 { XFER_PIO_4, 0xc008d963 },
72 { XFER_PIO_3, 0xc010d974 },
73 { XFER_PIO_2, 0xc010d997 },
74 { XFER_PIO_1, 0xc010d9c7 },
75 { XFER_PIO_0, 0xc018d9d9 },
76 { 0, 0x0120d9d9 }
77};
78
79static const struct hpt_clock hpt366_33[] = {
80 { XFER_UDMA_4, 0x90c9a731 },
81 { XFER_UDMA_3, 0x90cfa731 },
82 { XFER_UDMA_2, 0x90caa731 },
83 { XFER_UDMA_1, 0x90cba731 },
84 { XFER_UDMA_0, 0x90c8a731 },
85
86 { XFER_MW_DMA_2, 0xa0c8a731 },
87 { XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
88 { XFER_MW_DMA_0, 0xa0c8a797 },
89
90 { XFER_PIO_4, 0xc0c8a731 },
91 { XFER_PIO_3, 0xc0c8a742 },
92 { XFER_PIO_2, 0xc0d0a753 },
93 { XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
94 { XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
95 { 0, 0x0120a7a7 }
96};
97
98static const struct hpt_clock hpt366_25[] = {
99 { XFER_UDMA_4, 0x90c98521 },
100 { XFER_UDMA_3, 0x90cf8521 },
101 { XFER_UDMA_2, 0x90cf8521 },
102 { XFER_UDMA_1, 0x90cb8521 },
103 { XFER_UDMA_0, 0x90cb8521 },
104
105 { XFER_MW_DMA_2, 0xa0ca8521 },
106 { XFER_MW_DMA_1, 0xa0ca8532 },
107 { XFER_MW_DMA_0, 0xa0ca8575 },
108
109 { XFER_PIO_4, 0xc0ca8521 },
110 { XFER_PIO_3, 0xc0ca8532 },
111 { XFER_PIO_2, 0xc0ca8542 },
112 { XFER_PIO_1, 0xc0d08572 },
113 { XFER_PIO_0, 0xc0d08585 },
114 { 0, 0x01208585 }
115};
116
117static const char *bad_ata33[] = {
118 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
119 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
120 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
121 "Maxtor 90510D4",
122 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
123 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
124 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
125 NULL
126};
127
128static const char *bad_ata66_4[] = {
129 "IBM-DTLA-307075",
130 "IBM-DTLA-307060",
131 "IBM-DTLA-307045",
132 "IBM-DTLA-307030",
133 "IBM-DTLA-307020",
134 "IBM-DTLA-307015",
135 "IBM-DTLA-305040",
136 "IBM-DTLA-305030",
137 "IBM-DTLA-305020",
138 "IC35L010AVER07-0",
139 "IC35L020AVER07-0",
140 "IC35L030AVER07-0",
141 "IC35L040AVER07-0",
142 "IC35L060AVER07-0",
143 "WDC AC310200R",
144 NULL
145};
146
147static const char *bad_ata66_3[] = {
148 "WDC AC310200R",
149 NULL
150};
151
152static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
153{
154 unsigned char model_num[40];
155 char *s;
156 unsigned int len;
157 int i = 0;
158
159 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
160 s = &model_num[0];
161 len = strnlen(s, sizeof(model_num));
162
163 /* ATAPI specifies that empty space is blank-filled; remove blanks */
164 while ((len > 0) && (s[len - 1] == ' ')) {
165 len--;
166 s[len] = 0;
167 }
168
169 while(list[i] != NULL) {
170 if (!strncmp(list[i], s, len)) {
171 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
172 modestr, list[i]);
173 return 1;
174 }
175 i++;
176 }
177 return 0;
178}
179
180/**
181 * hpt366_filter - mode selection filter
182 * @ap: ATA interface
183 * @adev: ATA device
184 *
185 * Block UDMA on devices that cause trouble with this controller.
186 */
187
188static unsigned long hpt366_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
189{
190 if (adev->class == ATA_DEV_ATA) {
191 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
192 mask &= ~ATA_MASK_UDMA;
193 if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
194 mask &= ~(0x07 << ATA_SHIFT_UDMA);
195 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
196 mask &= ~(0x0F << ATA_SHIFT_UDMA);
197 }
198 return ata_pci_default_filter(ap, adev, mask);
199}
200
201/**
202 *	hpt36x_find_mode	-	find the timing data for a transfer mode
203 * @ap: ATA port
204 * @speed: transfer mode
205 *
206 * Return the 32bit register programming information for this channel
207 * that matches the speed provided.
208 */
209
210static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
211{
212 struct hpt_clock *clocks = ap->host->private_data;
213
214 while(clocks->xfer_speed) {
215 if (clocks->xfer_speed == speed)
216 return clocks->timing;
217 clocks++;
218 }
219 BUG();
220 return 0xffffffffU; /* silence compiler warning */
221}
222
223static int hpt36x_pre_reset(struct ata_port *ap)
224{
225 u8 ata66;
226 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
227
228 pci_read_config_byte(pdev, 0x5A, &ata66);
229 if (ata66 & (1 << ap->port_no))
230 ap->cbl = ATA_CBL_PATA40;
231 else
232 ap->cbl = ATA_CBL_PATA80;
233 return ata_std_prereset(ap);
234}
235
236/**
237 * hpt36x_error_handler - reset the hpt36x bus
238 * @ap: ATA port to reset
239 *
240 * Perform the reset handling for the 366/368
241 */
242
243static void hpt36x_error_handler(struct ata_port *ap)
244{
245 ata_bmdma_drive_eh(ap, hpt36x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
246}
247
248/**
249 * hpt366_set_piomode - PIO setup
250 * @ap: ATA interface
251 * @adev: device on the interface
252 *
253 * Perform PIO mode setup.
254 */
255
256static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
257{
258 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
259 u32 addr1, addr2;
260 u32 reg;
261 u32 mode;
262 u8 fast;
263
264 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
265 addr2 = 0x51 + 4 * ap->port_no;
266
267 /* Fast interrupt prediction disable, hold off interrupt disable */
268 pci_read_config_byte(pdev, addr2, &fast);
269 if (fast & 0x80) {
270 fast &= ~0x80;
271 pci_write_config_byte(pdev, addr2, fast);
272 }
273
274 pci_read_config_dword(pdev, addr1, &reg);
275 mode = hpt36x_find_mode(ap, adev->pio_mode);
276 mode &= ~0x8000000; /* No FIFO in PIO */
277 mode &= ~0x30070000; /* Leave config bits alone */
278 reg &= 0x30070000; /* Strip timing bits */
279 pci_write_config_dword(pdev, addr1, reg | mode);
280}
281
282/**
283 * hpt366_set_dmamode - DMA timing setup
284 * @ap: ATA interface
285 * @adev: Device being configured
286 *
287 * Set up the channel for MWDMA or UDMA modes. Much the same as with
288 * PIO, load the mode number and then set MWDMA or UDMA flag.
289 */
290
291static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
292{
293 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
294 u32 addr1, addr2;
295 u32 reg;
296 u32 mode;
297 u8 fast;
298
299 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
300 addr2 = 0x51 + 4 * ap->port_no;
301
302 /* Fast interrupt prediction disable, hold off interrupt disable */
303 pci_read_config_byte(pdev, addr2, &fast);
304 if (fast & 0x80) {
305 fast &= ~0x80;
306 pci_write_config_byte(pdev, addr2, fast);
307 }
308
309 pci_read_config_dword(pdev, addr1, &reg);
310 mode = hpt36x_find_mode(ap, adev->dma_mode);
311 mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
312 mode &= ~0xC0000000; /* Leave config bits alone */
313 reg &= 0xC0000000; /* Strip timing bits */
314 pci_write_config_dword(pdev, addr1, reg | mode);
315}
316
317static struct scsi_host_template hpt36x_sht = {
318 .module = THIS_MODULE,
319 .name = DRV_NAME,
320 .ioctl = ata_scsi_ioctl,
321 .queuecommand = ata_scsi_queuecmd,
322 .can_queue = ATA_DEF_QUEUE,
323 .this_id = ATA_SHT_THIS_ID,
324 .sg_tablesize = LIBATA_MAX_PRD,
325 .max_sectors = ATA_MAX_SECTORS,
326 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
327 .emulated = ATA_SHT_EMULATED,
328 .use_clustering = ATA_SHT_USE_CLUSTERING,
329 .proc_name = DRV_NAME,
330 .dma_boundary = ATA_DMA_BOUNDARY,
331 .slave_configure = ata_scsi_slave_config,
332 .bios_param = ata_std_bios_param,
333};
334
335/*
336 * Configuration for HPT366/68
337 */
338
339static struct ata_port_operations hpt366_port_ops = {
340 .port_disable = ata_port_disable,
341 .set_piomode = hpt366_set_piomode,
342 .set_dmamode = hpt366_set_dmamode,
343 .mode_filter = hpt366_filter,
344
345 .tf_load = ata_tf_load,
346 .tf_read = ata_tf_read,
347 .check_status = ata_check_status,
348 .exec_command = ata_exec_command,
349 .dev_select = ata_std_dev_select,
350
351 .freeze = ata_bmdma_freeze,
352 .thaw = ata_bmdma_thaw,
353 .error_handler = hpt36x_error_handler,
354 .post_internal_cmd = ata_bmdma_post_internal_cmd,
355
356 .bmdma_setup = ata_bmdma_setup,
357 .bmdma_start = ata_bmdma_start,
358 .bmdma_stop = ata_bmdma_stop,
359 .bmdma_status = ata_bmdma_status,
360
361 .qc_prep = ata_qc_prep,
362 .qc_issue = ata_qc_issue_prot,
363 .eng_timeout = ata_eng_timeout,
364 .data_xfer = ata_pio_data_xfer,
365
366 .irq_handler = ata_interrupt,
367 .irq_clear = ata_bmdma_irq_clear,
368
369 .port_start = ata_port_start,
370 .port_stop = ata_port_stop,
371 .host_stop = ata_host_stop
372};
373
374/**
375 * hpt36x_init_one - Initialise an HPT366/368
376 * @dev: PCI device
377 * @id: Entry in match table
378 *
379 * Initialise an HPT36x device. There are some interesting complications
380 * here. Firstly the chip may report 366 and be one of several variants.
381 * Secondly all the timings depend on the clock for the chip which we must
382 * detect and look up
383 *
384 *	These are the known chip mappings. The table may be missing a couple
385 *	of later releases.
386 *
387 * Chip version PCI Rev Notes
388 * HPT366 4 (HPT366) 0 UDMA66
389 * HPT366 4 (HPT366) 1 UDMA66
390 * HPT368 4 (HPT366) 2 UDMA66
391 * HPT37x/30x 4 (HPT366) 3+ Other driver
392 *
393 */
394
395static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
396{
397 static struct ata_port_info info_hpt366 = {
398 .sht = &hpt36x_sht,
399 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
400 .pio_mask = 0x1f,
401 .mwdma_mask = 0x07,
402 .udma_mask = 0x1f,
403 .port_ops = &hpt366_port_ops
404 };
405 struct ata_port_info *port_info[2] = {&info_hpt366, &info_hpt366};
406
407 u32 class_rev;
408 u32 reg1;
409 u8 drive_fast;
410
411 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
412 class_rev &= 0xFF;
413
414 /* May be a later chip in disguise. Check */
415 /* Newer chips are not in the HPT36x driver. Ignore them */
416 if (class_rev > 2)
417 return -ENODEV;
418
419 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
420 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
421 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
422 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
423
424 pci_read_config_byte(dev, 0x51, &drive_fast);
425 if (drive_fast & 0x80)
426 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
427
428 pci_read_config_dword(dev, 0x40, &reg1);
429
430 /* PCI clocking determines the ATA timing values to use */
431 /* info_hpt366 is safe against re-entry so we can scribble on it */
432	switch ((reg1 & 0x700) >> 8) {
433 case 5:
434 info_hpt366.private_data = &hpt366_40;
435 break;
436 case 9:
437 info_hpt366.private_data = &hpt366_25;
438 break;
439 default:
440 info_hpt366.private_data = &hpt366_33;
441 break;
442 }
443 /* Now kick off ATA set up */
444 return ata_pci_init_one(dev, port_info, 2);
445}
446
447static struct pci_device_id hpt36x[] = {
448 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
449 { 0, },
450};
451
452static struct pci_driver hpt36x_pci_driver = {
453 .name = DRV_NAME,
454 .id_table = hpt36x,
455 .probe = hpt36x_init_one,
456 .remove = ata_pci_remove_one
457};
458
459static int __init hpt36x_init(void)
460{
461 return pci_register_driver(&hpt36x_pci_driver);
462}
463
464
465static void __exit hpt36x_exit(void)
466{
467 pci_unregister_driver(&hpt36x_pci_driver);
468}
469
470
471MODULE_AUTHOR("Alan Cox");
472MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
473MODULE_LICENSE("GPL");
474MODULE_DEVICE_TABLE(pci, hpt36x);
475MODULE_VERSION(DRV_VERSION);
476
477module_init(hpt36x_init);
478module_exit(hpt36x_exit);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
new file mode 100644
index 000000000000..532a7928f803
--- /dev/null
+++ b/drivers/ata/pata_hpt37x.c
@@ -0,0 +1,1257 @@
1/*
2 * Libata driver for the highpoint 37x and 30x UDMA100/133 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 * TODO
13 * PLL mode
14 * Look into engine reset on timeout errors. Should not be
15 * required.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_hpt37x"
28#define DRV_VERSION "0.5"
29
30struct hpt_clock {
31 u8 xfer_speed;
32 u32 timing;
33};
34
35struct hpt_chip {
36 const char *name;
37 unsigned int base;
38 struct hpt_clock const *clocks[4];
39};
40
41/* key for bus clock timings
42 * bit
43 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
44 * DMA. cycles = value + 1
45 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
46 * DMA. cycles = value + 1
47 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
48 * register access.
49 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
50 * register access.
51 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
52 * during task file register access.
53 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
54 * xfer.
55 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
56 * register access.
57 * 28 UDMA enable
58 * 29 DMA enable
59 * 30 PIO_MST enable. if set, the chip is in bus master mode during
60 * PIO.
61 * 31 FIFO enable.
62 */
63
64/* from highpoint documentation. these are old values */
65static const struct hpt_clock hpt370_timings_33[] = {
66/* { XFER_UDMA_5, 0x1A85F442, 0x16454e31 }, */
67 { XFER_UDMA_5, 0x16454e31 },
68 { XFER_UDMA_4, 0x16454e31 },
69 { XFER_UDMA_3, 0x166d4e31 },
70 { XFER_UDMA_2, 0x16494e31 },
71 { XFER_UDMA_1, 0x164d4e31 },
72 { XFER_UDMA_0, 0x16514e31 },
73
74 { XFER_MW_DMA_2, 0x26514e21 },
75 { XFER_MW_DMA_1, 0x26514e33 },
76 { XFER_MW_DMA_0, 0x26514e97 },
77
78 { XFER_PIO_4, 0x06514e21 },
79 { XFER_PIO_3, 0x06514e22 },
80 { XFER_PIO_2, 0x06514e33 },
81 { XFER_PIO_1, 0x06914e43 },
82 { XFER_PIO_0, 0x06914e57 },
83 { 0, 0x06514e57 }
84};
85
86static const struct hpt_clock hpt370_timings_66[] = {
87 { XFER_UDMA_5, 0x14846231 },
88 { XFER_UDMA_4, 0x14886231 },
89 { XFER_UDMA_3, 0x148c6231 },
90 { XFER_UDMA_2, 0x148c6231 },
91 { XFER_UDMA_1, 0x14906231 },
92 { XFER_UDMA_0, 0x14986231 },
93
94 { XFER_MW_DMA_2, 0x26514e21 },
95 { XFER_MW_DMA_1, 0x26514e33 },
96 { XFER_MW_DMA_0, 0x26514e97 },
97
98 { XFER_PIO_4, 0x06514e21 },
99 { XFER_PIO_3, 0x06514e22 },
100 { XFER_PIO_2, 0x06514e33 },
101 { XFER_PIO_1, 0x06914e43 },
102 { XFER_PIO_0, 0x06914e57 },
103 { 0, 0x06514e57 }
104};
105
106/* these are the current (4 sep 2001) timings from highpoint */
107static const struct hpt_clock hpt370a_timings_33[] = {
108 { XFER_UDMA_5, 0x12446231 },
109 { XFER_UDMA_4, 0x12446231 },
110 { XFER_UDMA_3, 0x126c6231 },
111 { XFER_UDMA_2, 0x12486231 },
112 { XFER_UDMA_1, 0x124c6233 },
113 { XFER_UDMA_0, 0x12506297 },
114
115 { XFER_MW_DMA_2, 0x22406c31 },
116 { XFER_MW_DMA_1, 0x22406c33 },
117 { XFER_MW_DMA_0, 0x22406c97 },
118
119 { XFER_PIO_4, 0x06414e31 },
120 { XFER_PIO_3, 0x06414e42 },
121 { XFER_PIO_2, 0x06414e53 },
122 { XFER_PIO_1, 0x06814e93 },
123 { XFER_PIO_0, 0x06814ea7 },
124 { 0, 0x06814ea7 }
125};
126
127/* 2x 33MHz timings */
128static const struct hpt_clock hpt370a_timings_66[] = {
129 { XFER_UDMA_5, 0x1488e673 },
130 { XFER_UDMA_4, 0x1488e673 },
131 { XFER_UDMA_3, 0x1498e673 },
132 { XFER_UDMA_2, 0x1490e673 },
133 { XFER_UDMA_1, 0x1498e677 },
134 { XFER_UDMA_0, 0x14a0e73f },
135
136 { XFER_MW_DMA_2, 0x2480fa73 },
137 { XFER_MW_DMA_1, 0x2480fa77 },
138 { XFER_MW_DMA_0, 0x2480fb3f },
139
140 { XFER_PIO_4, 0x0c82be73 },
141 { XFER_PIO_3, 0x0c82be95 },
142 { XFER_PIO_2, 0x0c82beb7 },
143 { XFER_PIO_1, 0x0d02bf37 },
144 { XFER_PIO_0, 0x0d02bf5f },
145 { 0, 0x0d02bf5f }
146};
147
148static const struct hpt_clock hpt370a_timings_50[] = {
149 { XFER_UDMA_5, 0x12848242 },
150 { XFER_UDMA_4, 0x12ac8242 },
151 { XFER_UDMA_3, 0x128c8242 },
152 { XFER_UDMA_2, 0x120c8242 },
153 { XFER_UDMA_1, 0x12148254 },
154 { XFER_UDMA_0, 0x121882ea },
155
156 { XFER_MW_DMA_2, 0x22808242 },
157 { XFER_MW_DMA_1, 0x22808254 },
158 { XFER_MW_DMA_0, 0x228082ea },
159
160 { XFER_PIO_4, 0x0a81f442 },
161 { XFER_PIO_3, 0x0a81f443 },
162 { XFER_PIO_2, 0x0a81f454 },
163 { XFER_PIO_1, 0x0ac1f465 },
164 { XFER_PIO_0, 0x0ac1f48a },
165 { 0, 0x0ac1f48a }
166};
167
168static const struct hpt_clock hpt372_timings_33[] = {
169 { XFER_UDMA_6, 0x1c81dc62 },
170 { XFER_UDMA_5, 0x1c6ddc62 },
171 { XFER_UDMA_4, 0x1c8ddc62 },
172 { XFER_UDMA_3, 0x1c8edc62 }, /* checkme */
173 { XFER_UDMA_2, 0x1c91dc62 },
174 { XFER_UDMA_1, 0x1c9adc62 }, /* checkme */
175 { XFER_UDMA_0, 0x1c82dc62 }, /* checkme */
176
177 { XFER_MW_DMA_2, 0x2c829262 },
178 { XFER_MW_DMA_1, 0x2c829266 }, /* checkme */
179 { XFER_MW_DMA_0, 0x2c82922e }, /* checkme */
180
181 { XFER_PIO_4, 0x0c829c62 },
182 { XFER_PIO_3, 0x0c829c84 },
183 { XFER_PIO_2, 0x0c829ca6 },
184 { XFER_PIO_1, 0x0d029d26 },
185 { XFER_PIO_0, 0x0d029d5e },
186 { 0, 0x0d029d5e }
187};
188
189static const struct hpt_clock hpt372_timings_50[] = {
190 { XFER_UDMA_5, 0x12848242 },
191 { XFER_UDMA_4, 0x12ac8242 },
192 { XFER_UDMA_3, 0x128c8242 },
193 { XFER_UDMA_2, 0x120c8242 },
194 { XFER_UDMA_1, 0x12148254 },
195 { XFER_UDMA_0, 0x121882ea },
196
197 { XFER_MW_DMA_2, 0x22808242 },
198 { XFER_MW_DMA_1, 0x22808254 },
199 { XFER_MW_DMA_0, 0x228082ea },
200
201 { XFER_PIO_4, 0x0a81f442 },
202 { XFER_PIO_3, 0x0a81f443 },
203 { XFER_PIO_2, 0x0a81f454 },
204 { XFER_PIO_1, 0x0ac1f465 },
205 { XFER_PIO_0, 0x0ac1f48a },
206 { 0, 0x0a81f443 }
207};
208
209static const struct hpt_clock hpt372_timings_66[] = {
210 { XFER_UDMA_6, 0x1c869c62 },
211 { XFER_UDMA_5, 0x1cae9c62 },
212 { XFER_UDMA_4, 0x1c8a9c62 },
213 { XFER_UDMA_3, 0x1c8e9c62 },
214 { XFER_UDMA_2, 0x1c929c62 },
215 { XFER_UDMA_1, 0x1c9a9c62 },
216 { XFER_UDMA_0, 0x1c829c62 },
217
218 { XFER_MW_DMA_2, 0x2c829c62 },
219 { XFER_MW_DMA_1, 0x2c829c66 },
220 { XFER_MW_DMA_0, 0x2c829d2e },
221
222 { XFER_PIO_4, 0x0c829c62 },
223 { XFER_PIO_3, 0x0c829c84 },
224 { XFER_PIO_2, 0x0c829ca6 },
225 { XFER_PIO_1, 0x0d029d26 },
226 { XFER_PIO_0, 0x0d029d5e },
227 { 0, 0x0d029d26 }
228};
229
230static const struct hpt_clock hpt374_timings_33[] = {
231 { XFER_UDMA_6, 0x12808242 },
232 { XFER_UDMA_5, 0x12848242 },
233 { XFER_UDMA_4, 0x12ac8242 },
234 { XFER_UDMA_3, 0x128c8242 },
235 { XFER_UDMA_2, 0x120c8242 },
236 { XFER_UDMA_1, 0x12148254 },
237 { XFER_UDMA_0, 0x121882ea },
238
239 { XFER_MW_DMA_2, 0x22808242 },
240 { XFER_MW_DMA_1, 0x22808254 },
241 { XFER_MW_DMA_0, 0x228082ea },
242
243 { XFER_PIO_4, 0x0a81f442 },
244 { XFER_PIO_3, 0x0a81f443 },
245 { XFER_PIO_2, 0x0a81f454 },
246 { XFER_PIO_1, 0x0ac1f465 },
247 { XFER_PIO_0, 0x0ac1f48a },
248 { 0, 0x06814e93 }
249};
250
251static const struct hpt_chip hpt370 = {
252 "HPT370",
253 48,
254 {
255 hpt370_timings_33,
256 NULL,
257 NULL,
258 hpt370_timings_66
259 }
260};
261
262static const struct hpt_chip hpt370a = {
263 "HPT370A",
264 48,
265 {
266 hpt370a_timings_33,
267 NULL,
268 hpt370a_timings_50,
269 hpt370a_timings_66
270 }
271};
272
273static const struct hpt_chip hpt372 = {
274 "HPT372",
275 55,
276 {
277 hpt372_timings_33,
278 NULL,
279 hpt372_timings_50,
280 hpt372_timings_66
281 }
282};
283
284static const struct hpt_chip hpt302 = {
285 "HPT302",
286 66,
287 {
288 hpt372_timings_33,
289 NULL,
290 hpt372_timings_50,
291 hpt372_timings_66
292 }
293};
294
295static const struct hpt_chip hpt371 = {
296 "HPT371",
297 66,
298 {
299 hpt372_timings_33,
300 NULL,
301 hpt372_timings_50,
302 hpt372_timings_66
303 }
304};
305
306static const struct hpt_chip hpt372a = {
307 "HPT372A",
308 66,
309 {
310 hpt372_timings_33,
311 NULL,
312 hpt372_timings_50,
313 hpt372_timings_66
314 }
315};
316
317static const struct hpt_chip hpt374 = {
318 "HPT374",
319 48,
320 {
321 hpt374_timings_33,
322 NULL,
323 NULL,
324 NULL
325 }
326};
327
328/**
329 *	hpt37x_find_mode	-	look up the timing word for a mode
330 * @ap: ATA port
331 * @speed: transfer mode
332 *
333 * Return the 32bit register programming information for this channel
334 * that matches the speed provided.
335 */
336
337static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
338{
339 struct hpt_clock *clocks = ap->host->private_data;
340
341 while(clocks->xfer_speed) {
342 if (clocks->xfer_speed == speed)
343 return clocks->timing;
344 clocks++;
345 }
346 BUG();
347 return 0xffffffffU; /* silence compiler warning */
348}
349
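/*
 * For example, with hpt370_timings_33 installed as the active table,
 * hpt37x_find_mode(ap, XFER_UDMA_5) walks the entries and returns
 * 0x16454e31. The terminating { 0, ... } entry stops the scan for an
 * unknown speed, at which point we BUG() since the mode should have
 * been filtered out earlier.
 */
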
350static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
351{
352 unsigned char model_num[40];
353 char *s;
354 unsigned int len;
355 int i = 0;
356
357 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
358 sizeof(model_num));
359 s = &model_num[0];
360 len = strnlen(s, sizeof(model_num));
361
362 /* ATAPI specifies that empty space is blank-filled; remove blanks */
363 while ((len > 0) && (s[len - 1] == ' ')) {
364 len--;
365 s[len] = 0;
366 }
367
368 while(list[i] != NULL) {
369 if (!strncmp(list[i], s, len)) {
370 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
371 modestr, list[i]);
372 return 1;
373 }
374 i++;
375 }
376 return 0;
377}
378
379static const char *bad_ata33[] = {
380 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
381 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
382 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
383 "Maxtor 90510D4",
384 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
385 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
386 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
387 NULL
388};
389
390static const char *bad_ata100_5[] = {
391 "IBM-DTLA-307075",
392 "IBM-DTLA-307060",
393 "IBM-DTLA-307045",
394 "IBM-DTLA-307030",
395 "IBM-DTLA-307020",
396 "IBM-DTLA-307015",
397 "IBM-DTLA-305040",
398 "IBM-DTLA-305030",
399 "IBM-DTLA-305020",
400 "IC35L010AVER07-0",
401 "IC35L020AVER07-0",
402 "IC35L030AVER07-0",
403 "IC35L040AVER07-0",
404 "IC35L060AVER07-0",
405 "WDC AC310200R",
406 NULL
407};
408
409/**
410 * hpt370_filter - mode selection filter
411 * @ap: ATA interface
412 * @adev: ATA device
413 *
414 * Block UDMA on devices that cause trouble with this controller.
415 */
416
417static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
418{
419	if (adev->class == ATA_DEV_ATA) {
420 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
421 mask &= ~ATA_MASK_UDMA;
422 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
423			mask &= ~(0xE0 << ATA_SHIFT_UDMA);	/* Cap at UDMA4 */
424 }
425 return ata_pci_default_filter(ap, adev, mask);
426}
427
428/**
429 * hpt370a_filter - mode selection filter
430 * @ap: ATA interface
431 * @adev: ATA device
432 *
433 * Block UDMA on devices that cause trouble with this controller.
434 */
435
436static unsigned long hpt370a_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
437{
438	if (adev->class == ATA_DEV_ATA) {
439 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
440			mask &= ~(0xE0 << ATA_SHIFT_UDMA);	/* Cap at UDMA4 */
441 }
442 return ata_pci_default_filter(ap, adev, mask);
443}
444
445/**
446 * hpt37x_pre_reset - reset the hpt37x bus
447 * @ap: ATA port to reset
448 *
449 * Perform the initial reset handling for the 370/372 and 374 func 0
450 */
451
452static int hpt37x_pre_reset(struct ata_port *ap)
453{
454 u8 scr2, ata66;
455 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
456
457 pci_read_config_byte(pdev, 0x5B, &scr2);
458 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
459 /* Cable register now active */
460 pci_read_config_byte(pdev, 0x5A, &ata66);
461 /* Restore state */
462 pci_write_config_byte(pdev, 0x5B, scr2);
463
464 if (ata66 & (1 << ap->port_no))
465 ap->cbl = ATA_CBL_PATA40;
466 else
467 ap->cbl = ATA_CBL_PATA80;
468
469 /* Reset the state machine */
470 pci_write_config_byte(pdev, 0x50, 0x37);
471 pci_write_config_byte(pdev, 0x54, 0x37);
472 udelay(100);
473
474 return ata_std_prereset(ap);
475}
476
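/*
 * Note on the sequence above, as inferred from the code rather than
 * from HighPoint documentation: clearing bit 0 of config register
 * 0x5B temporarily exposes the cable sense bits at 0x5A; a set bit
 * for our port indicates that only a 40-wire cable was detected,
 * otherwise an 80-wire cable is assumed.
 */
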
477/**
478 *	hpt37x_error_handler	-	reset the hpt37x
479 * @ap: ATA port to reset
480 *
481 * Perform probe for HPT37x, except for HPT374 channel 2
482 */
483
484static void hpt37x_error_handler(struct ata_port *ap)
485{
486 ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
487}
488
489static int hpt374_pre_reset(struct ata_port *ap)
490{
491 u16 mcr3, mcr6;
492 u8 ata66;
493
494 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
495 /* Do the extra channel work */
496 pci_read_config_word(pdev, 0x52, &mcr3);
497 pci_read_config_word(pdev, 0x56, &mcr6);
498 /* Set bit 15 of 0x52 to enable TCBLID as input
499 Set bit 15 of 0x56 to enable FCBLID as input
500 */
501 pci_write_config_word(pdev, 0x52, mcr3 | 0x8000);
502 pci_write_config_word(pdev, 0x56, mcr6 | 0x8000);
503 pci_read_config_byte(pdev, 0x5A, &ata66);
504 /* Reset TCBLID/FCBLID to output */
505 pci_write_config_word(pdev, 0x52, mcr3);
506 pci_write_config_word(pdev, 0x56, mcr6);
507
508 if (ata66 & (1 << ap->port_no))
509 ap->cbl = ATA_CBL_PATA40;
510 else
511 ap->cbl = ATA_CBL_PATA80;
512
513 /* Reset the state machine */
514 pci_write_config_byte(pdev, 0x50, 0x37);
515 pci_write_config_byte(pdev, 0x54, 0x37);
516 udelay(100);
517
518 return ata_std_prereset(ap);
519}
520
521/**
522 * hpt374_error_handler - reset the hpt374
523 *	@ap: ATA port to reset
524 *
525 * The 374 cable detect is a little different due to the extra
526 * channels. The function 0 channels work like usual but function 1
527 * is special
528 */
529
530static void hpt374_error_handler(struct ata_port *ap)
531{
532 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
533
534 if (!(PCI_FUNC(pdev->devfn) & 1))
535 hpt37x_error_handler(ap);
536 else
537 ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
538}
539
540/**
541 * hpt370_set_piomode - PIO setup
542 * @ap: ATA interface
543 * @adev: device on the interface
544 *
545 * Perform PIO mode setup.
546 */
547
548static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
549{
550 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
551 u32 addr1, addr2;
552 u32 reg;
553 u32 mode;
554 u8 fast;
555
556 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
557 addr2 = 0x51 + 4 * ap->port_no;
558
559 /* Fast interrupt prediction disable, hold off interrupt disable */
560 pci_read_config_byte(pdev, addr2, &fast);
561 fast &= ~0x02;
562 fast |= 0x01;
563 pci_write_config_byte(pdev, addr2, fast);
564
565 pci_read_config_dword(pdev, addr1, &reg);
566 mode = hpt37x_find_mode(ap, adev->pio_mode);
567	mode &= ~0x80000000;	/* No FIFO in PIO */
568 mode &= ~0x30070000; /* Leave config bits alone */
569 reg &= 0x30070000; /* Strip timing bits */
570 pci_write_config_dword(pdev, addr1, reg | mode);
571}
572
573/**
574 * hpt370_set_dmamode - DMA timing setup
575 * @ap: ATA interface
576 * @adev: Device being configured
577 *
578 * Set up the channel for MWDMA or UDMA modes. Much the same as with
579 * PIO, load the mode number and then set MWDMA or UDMA flag.
580 */
581
582static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
583{
584 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
585 u32 addr1, addr2;
586 u32 reg;
587 u32 mode;
588 u8 fast;
589
590 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
591 addr2 = 0x51 + 4 * ap->port_no;
592
593 /* Fast interrupt prediction disable, hold off interrupt disable */
594 pci_read_config_byte(pdev, addr2, &fast);
595 fast &= ~0x02;
596 fast |= 0x01;
597 pci_write_config_byte(pdev, addr2, fast);
598
599 pci_read_config_dword(pdev, addr1, &reg);
600 mode = hpt37x_find_mode(ap, adev->dma_mode);
601	mode &= ~0xC0000000;	/* Leave config bits alone */
602	mode |= 0x80000000;	/* FIFO in MWDMA or UDMA */
603 reg &= 0xC0000000; /* Strip timing bits */
604 pci_write_config_dword(pdev, addr1, reg | mode);
605}
606
607/**
608 * hpt370_bmdma_start - DMA engine begin
609 * @qc: ATA command
610 *
611 * The 370 and 370A want us to reset the DMA engine each time we
612 * use it. The 372 and later are fine.
613 */
614
615static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
616{
617 struct ata_port *ap = qc->ap;
618 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
619 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
620 udelay(10);
621 ata_bmdma_start(qc);
622}
623
624/**
625 * hpt370_bmdma_end - DMA engine stop
626 * @qc: ATA command
627 *
628 * Work around the HPT370 DMA engine.
629 */
630
631static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
632{
633 struct ata_port *ap = qc->ap;
634 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
635 u8 dma_stat = inb(ap->ioaddr.bmdma_addr + 2);
636 u8 dma_cmd;
637 unsigned long bmdma = ap->ioaddr.bmdma_addr;
638
639 if (dma_stat & 0x01) {
640 udelay(20);
641 dma_stat = inb(bmdma + 2);
642 }
643 if (dma_stat & 0x01) {
644 /* Clear the engine */
645 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
646 udelay(10);
647 /* Stop DMA */
648		dma_cmd = inb(bmdma);
649 outb(dma_cmd & 0xFE, bmdma);
650 /* Clear Error */
651 dma_stat = inb(bmdma + 2);
652		outb(dma_stat | 0x06, bmdma + 2);
653 /* Clear the engine */
654 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
655 udelay(10);
656 }
657 ata_bmdma_stop(qc);
658}
659
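/*
 * Note: writing dma_stat | 0x06 back to the BMDMA status register
 * above clears the error (bit 1) and interrupt (bit 2) flags, which
 * are write-one-to-clear in the standard SFF-8038i bus master
 * programming model; it does not set them.
 */
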
660/**
661 * hpt372_set_piomode - PIO setup
662 * @ap: ATA interface
663 * @adev: device on the interface
664 *
665 * Perform PIO mode setup.
666 */
667
668static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
669{
670 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
671 u32 addr1, addr2;
672 u32 reg;
673 u32 mode;
674 u8 fast;
675
676 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
677 addr2 = 0x51 + 4 * ap->port_no;
678
679 /* Fast interrupt prediction disable, hold off interrupt disable */
680 pci_read_config_byte(pdev, addr2, &fast);
681 fast &= ~0x07;
682 pci_write_config_byte(pdev, addr2, fast);
683
684 pci_read_config_dword(pdev, addr1, &reg);
685 mode = hpt37x_find_mode(ap, adev->pio_mode);
686
687	printk(KERN_DEBUG "Find mode for %d reports %X\n", adev->pio_mode, mode);
688 mode &= ~0x80000000; /* No FIFO in PIO */
689 mode &= ~0x30070000; /* Leave config bits alone */
690 reg &= 0x30070000; /* Strip timing bits */
691 pci_write_config_dword(pdev, addr1, reg | mode);
692}
693
694/**
695 * hpt372_set_dmamode - DMA timing setup
696 * @ap: ATA interface
697 * @adev: Device being configured
698 *
699 * Set up the channel for MWDMA or UDMA modes. Much the same as with
700 * PIO, load the mode number and then set MWDMA or UDMA flag.
701 */
702
703static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
704{
705 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
706 u32 addr1, addr2;
707 u32 reg;
708 u32 mode;
709 u8 fast;
710
711 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
712 addr2 = 0x51 + 4 * ap->port_no;
713
714 /* Fast interrupt prediction disable, hold off interrupt disable */
715 pci_read_config_byte(pdev, addr2, &fast);
716 fast &= ~0x07;
717 pci_write_config_byte(pdev, addr2, fast);
718
719 pci_read_config_dword(pdev, addr1, &reg);
720 mode = hpt37x_find_mode(ap, adev->dma_mode);
721	printk(KERN_DEBUG "Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
722 mode &= ~0xC0000000; /* Leave config bits alone */
723 mode |= 0x80000000; /* FIFO in MWDMA or UDMA */
724 reg &= 0xC0000000; /* Strip timing bits */
725 pci_write_config_dword(pdev, addr1, reg | mode);
726}
727
728/**
729 *	hpt37x_bmdma_stop		-	DMA engine stop
730 * @qc: ATA command
731 *
732 * Clean up after the HPT372 and later DMA engine
733 */
734
735static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
736{
737 struct ata_port *ap = qc->ap;
738 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
739	int mscreg = 0x50 + 4 * ap->port_no;
740 u8 bwsr_stat, msc_stat;
741
742 pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
743 pci_read_config_byte(pdev, mscreg, &msc_stat);
744 if (bwsr_stat & (1 << ap->port_no))
745 pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
746 ata_bmdma_stop(qc);
747}
748
749
750static struct scsi_host_template hpt37x_sht = {
751 .module = THIS_MODULE,
752 .name = DRV_NAME,
753 .ioctl = ata_scsi_ioctl,
754 .queuecommand = ata_scsi_queuecmd,
755 .can_queue = ATA_DEF_QUEUE,
756 .this_id = ATA_SHT_THIS_ID,
757 .sg_tablesize = LIBATA_MAX_PRD,
758 .max_sectors = ATA_MAX_SECTORS,
759 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
760 .emulated = ATA_SHT_EMULATED,
761 .use_clustering = ATA_SHT_USE_CLUSTERING,
762 .proc_name = DRV_NAME,
763 .dma_boundary = ATA_DMA_BOUNDARY,
764 .slave_configure = ata_scsi_slave_config,
765 .bios_param = ata_std_bios_param,
766};
767
768/*
769 * Configuration for HPT370
770 */
771
772static struct ata_port_operations hpt370_port_ops = {
773 .port_disable = ata_port_disable,
774 .set_piomode = hpt370_set_piomode,
775 .set_dmamode = hpt370_set_dmamode,
776 .mode_filter = hpt370_filter,
777
778 .tf_load = ata_tf_load,
779 .tf_read = ata_tf_read,
780 .check_status = ata_check_status,
781 .exec_command = ata_exec_command,
782 .dev_select = ata_std_dev_select,
783
784 .freeze = ata_bmdma_freeze,
785 .thaw = ata_bmdma_thaw,
786 .error_handler = hpt37x_error_handler,
787 .post_internal_cmd = ata_bmdma_post_internal_cmd,
788
789 .bmdma_setup = ata_bmdma_setup,
790 .bmdma_start = hpt370_bmdma_start,
791 .bmdma_stop = hpt370_bmdma_stop,
792 .bmdma_status = ata_bmdma_status,
793
794 .qc_prep = ata_qc_prep,
795 .qc_issue = ata_qc_issue_prot,
796 .eng_timeout = ata_eng_timeout,
797 .data_xfer = ata_pio_data_xfer,
798
799 .irq_handler = ata_interrupt,
800 .irq_clear = ata_bmdma_irq_clear,
801
802 .port_start = ata_port_start,
803 .port_stop = ata_port_stop,
804 .host_stop = ata_host_stop
805};
806
807/*
808 * Configuration for HPT370A. Close to 370 but less filters
809 */
810
811static struct ata_port_operations hpt370a_port_ops = {
812 .port_disable = ata_port_disable,
813 .set_piomode = hpt370_set_piomode,
814 .set_dmamode = hpt370_set_dmamode,
815 .mode_filter = hpt370a_filter,
816
817 .tf_load = ata_tf_load,
818 .tf_read = ata_tf_read,
819 .check_status = ata_check_status,
820 .exec_command = ata_exec_command,
821 .dev_select = ata_std_dev_select,
822
823 .freeze = ata_bmdma_freeze,
824 .thaw = ata_bmdma_thaw,
825 .error_handler = hpt37x_error_handler,
826 .post_internal_cmd = ata_bmdma_post_internal_cmd,
827
828 .bmdma_setup = ata_bmdma_setup,
829 .bmdma_start = hpt370_bmdma_start,
830 .bmdma_stop = hpt370_bmdma_stop,
831 .bmdma_status = ata_bmdma_status,
832
833 .qc_prep = ata_qc_prep,
834 .qc_issue = ata_qc_issue_prot,
835 .eng_timeout = ata_eng_timeout,
836 .data_xfer = ata_pio_data_xfer,
837
838 .irq_handler = ata_interrupt,
839 .irq_clear = ata_bmdma_irq_clear,
840
841 .port_start = ata_port_start,
842 .port_stop = ata_port_stop,
843 .host_stop = ata_host_stop
844};
845
846/*
847 * Configuration for HPT372, HPT371, HPT302. Slightly different PIO
848 * and DMA mode setting functionality.
849 */
850
851static struct ata_port_operations hpt372_port_ops = {
852 .port_disable = ata_port_disable,
853 .set_piomode = hpt372_set_piomode,
854 .set_dmamode = hpt372_set_dmamode,
855 .mode_filter = ata_pci_default_filter,
856
857 .tf_load = ata_tf_load,
858 .tf_read = ata_tf_read,
859 .check_status = ata_check_status,
860 .exec_command = ata_exec_command,
861 .dev_select = ata_std_dev_select,
862
863 .freeze = ata_bmdma_freeze,
864 .thaw = ata_bmdma_thaw,
865 .error_handler = hpt37x_error_handler,
866 .post_internal_cmd = ata_bmdma_post_internal_cmd,
867
868 .bmdma_setup = ata_bmdma_setup,
869 .bmdma_start = ata_bmdma_start,
870 .bmdma_stop = hpt37x_bmdma_stop,
871 .bmdma_status = ata_bmdma_status,
872
873 .qc_prep = ata_qc_prep,
874 .qc_issue = ata_qc_issue_prot,
875 .eng_timeout = ata_eng_timeout,
876 .data_xfer = ata_pio_data_xfer,
877
878 .irq_handler = ata_interrupt,
879 .irq_clear = ata_bmdma_irq_clear,
880
881 .port_start = ata_port_start,
882 .port_stop = ata_port_stop,
883 .host_stop = ata_host_stop
884};
885
886/*
887 * Configuration for HPT374. Mode setting works like 372 and friends
888 * but we have a different cable detection procedure.
889 */
890
891static struct ata_port_operations hpt374_port_ops = {
892 .port_disable = ata_port_disable,
893 .set_piomode = hpt372_set_piomode,
894 .set_dmamode = hpt372_set_dmamode,
895 .mode_filter = ata_pci_default_filter,
896
897 .tf_load = ata_tf_load,
898 .tf_read = ata_tf_read,
899 .check_status = ata_check_status,
900 .exec_command = ata_exec_command,
901 .dev_select = ata_std_dev_select,
902
903 .freeze = ata_bmdma_freeze,
904 .thaw = ata_bmdma_thaw,
905 .error_handler = hpt374_error_handler,
906 .post_internal_cmd = ata_bmdma_post_internal_cmd,
907
908 .bmdma_setup = ata_bmdma_setup,
909 .bmdma_start = ata_bmdma_start,
910 .bmdma_stop = hpt37x_bmdma_stop,
911 .bmdma_status = ata_bmdma_status,
912
913 .qc_prep = ata_qc_prep,
914 .qc_issue = ata_qc_issue_prot,
915 .eng_timeout = ata_eng_timeout,
916 .data_xfer = ata_pio_data_xfer,
917
918 .irq_handler = ata_interrupt,
919 .irq_clear = ata_bmdma_irq_clear,
920
921 .port_start = ata_port_start,
922 .port_stop = ata_port_stop,
923 .host_stop = ata_host_stop
924};
925
926/**
927 *	hpt37x_clock_slot	-	Turn timing to PC clock entry
928 * @freq: Reported frequency timing
929 * @base: Base timing
930 *
931 *	Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50
932 *	and 3 for 66MHz)
933 */
934
935static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
936{
937	unsigned int f = (base * freq) / 192;	/* MHz */
938	if (f < 40)
939		return 0;	/* 33MHz slot */
940	if (f < 45)
941		return 1;	/* 40MHz slot */
942	if (f < 55)
943		return 2;	/* 50MHz slot */
944	return 3;	/* 66MHz slot */
945}
946
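/*
 * Worked example, derived from the formula above: the HPT370 uses
 * base 48, so a counted freq of 132 gives f = (48 * 132) / 192 = 33,
 * which lands in the f < 40 band and selects the 33MHz timing table.
 * The DPLL setup below applies the inverse of this scaling when it
 * computes f_low = (MHz[clock_slot] * base) / 192.
 */
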
947/**
948 * hpt37x_calibrate_dpll - Calibrate the DPLL loop
949 * @dev: PCI device
950 *
951 * Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
952 * succeeds
953 */
954
955static int hpt37x_calibrate_dpll(struct pci_dev *dev)
956{
957 u8 reg5b;
958 u32 reg5c;
959 int tries;
960
961 for(tries = 0; tries < 0x5000; tries++) {
962 udelay(50);
963 pci_read_config_byte(dev, 0x5b, &reg5b);
964 if (reg5b & 0x80) {
965 /* See if it stays set */
966 for(tries = 0; tries < 0x1000; tries ++) {
967 pci_read_config_byte(dev, 0x5b, &reg5b);
968 /* Failed ? */
969 if ((reg5b & 0x80) == 0)
970 return 0;
971 }
972 /* Turn off tuning, we have the DPLL set */
973 pci_read_config_dword(dev, 0x5c, &reg5c);
974 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
975 return 1;
976 }
977 }
978 /* Never went stable */
979 return 0;
980}
981/**
982 * hpt37x_init_one - Initialise an HPT37X/302
983 * @dev: PCI device
984 * @id: Entry in match table
985 *
986 * Initialise an HPT37x device. There are some interesting complications
987 * here. Firstly the chip may report 366 and be one of several variants.
988 * Secondly all the timings depend on the clock for the chip which we must
989 *	detect and look up.
990 *
991 *	These are the known chip mappings. It may be missing a couple of later
992 * releases.
993 *
994 * Chip version PCI Rev Notes
995 * HPT366 4 (HPT366) 0 Other driver
996 * HPT366 4 (HPT366) 1 Other driver
997 * HPT368 4 (HPT366) 2 Other driver
998 * HPT370 4 (HPT366) 3 UDMA100
999 * HPT370A 4 (HPT366) 4 UDMA100
1000 * HPT372 4 (HPT366) 5 UDMA133 (1)
1001 * HPT372N 4 (HPT366) 6 Other driver
1002 * HPT372A 5 (HPT372) 1 UDMA133 (1)
1003 * HPT372N 5 (HPT372) 2 Other driver
1004 * HPT302 6 (HPT302) 1 UDMA133
1005 * HPT302N 6 (HPT302) 2 Other driver
1006 * HPT371 7 (HPT371) * UDMA133
1007 * HPT374 8 (HPT374) * UDMA133 4 channel
1008 * HPT372N 9 (HPT372N) * Other driver
1009 *
1010 * (1) UDMA133 support depends on the bus clock
1011 */
1012
1013static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1014{
1015 /* HPT370 - UDMA100 */
1016 static struct ata_port_info info_hpt370 = {
1017 .sht = &hpt37x_sht,
1018 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1019 .pio_mask = 0x1f,
1020 .mwdma_mask = 0x07,
1021 .udma_mask = 0x3f,
1022 .port_ops = &hpt370_port_ops
1023 };
1024 /* HPT370A - UDMA100 */
1025 static struct ata_port_info info_hpt370a = {
1026 .sht = &hpt37x_sht,
1027 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1028 .pio_mask = 0x1f,
1029 .mwdma_mask = 0x07,
1030 .udma_mask = 0x3f,
1031 .port_ops = &hpt370a_port_ops
1032 };
1033 /* HPT371, 372 and friends - UDMA133 */
1034 static struct ata_port_info info_hpt372 = {
1035 .sht = &hpt37x_sht,
1036 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1037 .pio_mask = 0x1f,
1038 .mwdma_mask = 0x07,
1039 .udma_mask = 0x7f,
1040 .port_ops = &hpt372_port_ops
1041 };
1042 /* HPT371, 372 and friends - UDMA100 at 50MHz clock */
1043 static struct ata_port_info info_hpt372_50 = {
1044 .sht = &hpt37x_sht,
1045 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1046 .pio_mask = 0x1f,
1047 .mwdma_mask = 0x07,
1048 .udma_mask = 0x3f,
1049 .port_ops = &hpt372_port_ops
1050 };
1051 /* HPT374 - UDMA133 */
1052 static struct ata_port_info info_hpt374 = {
1053 .sht = &hpt37x_sht,
1054 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1055 .pio_mask = 0x1f,
1056 .mwdma_mask = 0x07,
1057 .udma_mask = 0x7f,
1058 .port_ops = &hpt374_port_ops
1059 };
1060
1061 static const int MHz[4] = { 33, 40, 50, 66 };
1062
1063 struct ata_port_info *port_info[2];
1064 struct ata_port_info *port;
1065
1066 u8 irqmask;
1067 u32 class_rev;
1068 u32 freq;
1069
1070 const struct hpt_chip *chip_table;
1071 int clock_slot;
1072
1073 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
1074 class_rev &= 0xFF;
1075
1076 if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
1077 /* May be a later chip in disguise. Check */
1078 /* Older chips are in the HPT366 driver. Ignore them */
1079 if (class_rev < 3)
1080 return -ENODEV;
1081 /* N series chips have their own driver. Ignore */
1082 if (class_rev == 6)
1083 return -ENODEV;
1084
1085 switch(class_rev) {
1086 case 3:
1087 port = &info_hpt370;
1088 chip_table = &hpt370;
1089 break;
1090 case 4:
1091 port = &info_hpt370a;
1092 chip_table = &hpt370a;
1093 break;
1094 case 5:
1095 port = &info_hpt372;
1096 chip_table = &hpt372;
1097 break;
1098 default:
1099			printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, please report (%d).\n", class_rev);
1100 return -ENODEV;
1101 }
1102 } else {
1103 switch(dev->device) {
1104 case PCI_DEVICE_ID_TTI_HPT372:
1105 /* 372N if rev >= 2*/
1106 if (class_rev >= 2)
1107 return -ENODEV;
1108 port = &info_hpt372;
1109 chip_table = &hpt372a;
1110 break;
1111 case PCI_DEVICE_ID_TTI_HPT302:
1112 /* 302N if rev > 1 */
1113 if (class_rev > 1)
1114 return -ENODEV;
1115 port = &info_hpt372;
1116 /* Check this */
1117 chip_table = &hpt302;
1118 break;
1119 case PCI_DEVICE_ID_TTI_HPT371:
1120 port = &info_hpt372;
1121 chip_table = &hpt371;
1122 break;
1123 case PCI_DEVICE_ID_TTI_HPT374:
1124 chip_table = &hpt374;
1125 port = &info_hpt374;
1126 break;
1127 default:
1128			printk(KERN_ERR "pata_hpt37x: PCI table is bogus, please report (%d).\n", dev->device);
1129 return -ENODEV;
1130 }
1131 }
1132 /* Ok so this is a chip we support */
1133
1134 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
1135 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
1136 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
1137 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1138
1139 pci_read_config_byte(dev, 0x5A, &irqmask);
1140 irqmask &= ~0x10;
1141 pci_write_config_byte(dev, 0x5a, irqmask);
1142
1143 /*
1144	 * Default to PCI clock. Make sure MA15/16 are set to output
1145	 * to prevent drives having problems with 40-pin cables. Needed
1146	 * for some drives such as IBM-DTLA which will not enter ready
1147	 * state on reset when PDIAG is an input.
1148 */
1149
1150 pci_write_config_byte(dev, 0x5b, 0x23);
1151
1152 pci_read_config_dword(dev, 0x70, &freq);
1153 if ((freq >> 12) != 0xABCDE) {
1154 int i;
1155 u8 sr;
1156 u32 total = 0;
1157
1158 printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
1159
1160 /* This is the process the HPT371 BIOS is reported to use */
1161 for(i = 0; i < 128; i++) {
1162 pci_read_config_byte(dev, 0x78, &sr);
1163 total += sr;
1164 udelay(15);
1165 }
1166 freq = total / 128;
1167 }
1168 freq &= 0x1FF;
1169
1170 /*
1171 * Turn the frequency check into a band and then find a timing
1172 * table to match it.
1173 */
1174
1175 clock_slot = hpt37x_clock_slot(freq, chip_table->base);
1176 if (chip_table->clocks[clock_slot] == NULL) {
1177 /*
1178 * We need to try PLL mode instead
1179 */
1180 unsigned int f_low = (MHz[clock_slot] * chip_table->base) / 192;
1181 unsigned int f_high = f_low + 2;
1182 int adjust;
1183
1184 for(adjust = 0; adjust < 8; adjust++) {
1185 if (hpt37x_calibrate_dpll(dev))
1186 break;
1187 /* See if it'll settle at a fractionally different clock */
1188 if ((adjust & 3) == 3) {
1189 f_low --;
1190 f_high ++;
1191 }
1192 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
1193 }
1194 if (adjust == 8) {
1195 printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n");
1196 return -ENODEV;
1197 }
1198 /* Check if this works for all cases */
1199 port->private_data = (void *)hpt370_timings_66;
1200
1201 printk(KERN_INFO "hpt37x: Bus clock %dMHz, using DPLL.\n", MHz[clock_slot]);
1202 } else {
1203 port->private_data = (void *)chip_table->clocks[clock_slot];
1204 /*
1205 * Perform a final fixup. The 371 and 372 clock determines
1206 * if UDMA133 is available.
1207 */
1208
1209		if (clock_slot == 2 && chip_table == &hpt372) {	/* 50MHz */
1210 printk(KERN_WARNING "pata_hpt37x: No UDMA133 support available with 50MHz bus clock.\n");
1211 if (port == &info_hpt372)
1212 port = &info_hpt372_50;
1213 else BUG();
1214 }
1215 printk(KERN_INFO "hpt37x: %s: Bus clock %dMHz.\n", chip_table->name, MHz[clock_slot]);
1216 }
1217 port_info[0] = port_info[1] = port;
1218 /* Now kick off ATA set up */
1219 return ata_pci_init_one(dev, port_info, 2);
1220}
1221
1222static struct pci_device_id hpt37x[] = {
1223 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
1224 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371), },
1225 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), },
1226 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374), },
1227 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), },
1228 { 0, },
1229};
1230
1231static struct pci_driver hpt37x_pci_driver = {
1232 .name = DRV_NAME,
1233 .id_table = hpt37x,
1234 .probe = hpt37x_init_one,
1235 .remove = ata_pci_remove_one
1236};
1237
1238static int __init hpt37x_init(void)
1239{
1240 return pci_register_driver(&hpt37x_pci_driver);
1241}
1242
1243
1244static void __exit hpt37x_exit(void)
1245{
1246 pci_unregister_driver(&hpt37x_pci_driver);
1247}
1248
1249
1250MODULE_AUTHOR("Alan Cox");
1251MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
1252MODULE_LICENSE("GPL");
1253MODULE_DEVICE_TABLE(pci, hpt37x);
1254MODULE_VERSION(DRV_VERSION);
1255
1256module_init(hpt37x_init);
1257module_exit(hpt37x_exit);
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
new file mode 100644
index 000000000000..06c8db079b91
--- /dev/null
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -0,0 +1,597 @@
1/*
2 * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 *
13 * TODO
14 * 371N
15 * Work out best PLL policy
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_hpt3x2n"
28#define DRV_VERSION "0.3"
29
30enum {
31 HPT_PCI_FAST = (1 << 31),
32 PCI66 = (1 << 1),
33 USE_DPLL = (1 << 0)
34};
35
36struct hpt_clock {
37 u8 xfer_speed;
38 u32 timing;
39};
40
41struct hpt_chip {
42 const char *name;
43 struct hpt_clock *clocks[3];
44};
45
46/* key for bus clock timings
47 * bit
48 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
49 * DMA. cycles = value + 1
50 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
51 * DMA. cycles = value + 1
52 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
53 * register access.
54 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
55 * register access.
56 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
57 * during task file register access.
58 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
59 * xfer.
60 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
61 * register access.
62 * 28 UDMA enable
63 * 29 DMA enable
64 * 30 PIO_MST enable. if set, the chip is in bus master mode during
65 * PIO.
66 * 31 FIFO enable.
67 */
68
69/* 66MHz DPLL clocks */
70
71static struct hpt_clock hpt3x2n_clocks[] = {
72 { XFER_UDMA_7, 0x1c869c62 },
73 { XFER_UDMA_6, 0x1c869c62 },
74 { XFER_UDMA_5, 0x1c8a9c62 },
75 { XFER_UDMA_4, 0x1c8a9c62 },
76 { XFER_UDMA_3, 0x1c8e9c62 },
77 { XFER_UDMA_2, 0x1c929c62 },
78 { XFER_UDMA_1, 0x1c9a9c62 },
79 { XFER_UDMA_0, 0x1c829c62 },
80
81 { XFER_MW_DMA_2, 0x2c829c62 },
82 { XFER_MW_DMA_1, 0x2c829c66 },
83 { XFER_MW_DMA_0, 0x2c829d2c },
84
85 { XFER_PIO_4, 0x0c829c62 },
86 { XFER_PIO_3, 0x0c829c84 },
87 { XFER_PIO_2, 0x0c829ca6 },
88 { XFER_PIO_1, 0x0d029d26 },
89 { XFER_PIO_0, 0x0d029d5e },
90 { 0, 0x0d029d5e }
91};
92
93/**
94 *	hpt3x2n_find_mode	-	look up the timing word for a mode
95 * @ap: ATA port
96 * @speed: transfer mode
97 *
98 * Return the 32bit register programming information for this channel
99 * that matches the speed provided. For the moment the clocks table
100 * is hard coded but easy to change. This will be needed if we use
101 * different DPLLs
102 */
103
104static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
105{
106 struct hpt_clock *clocks = hpt3x2n_clocks;
107
108 while(clocks->xfer_speed) {
109 if (clocks->xfer_speed == speed)
110 return clocks->timing;
111 clocks++;
112 }
113 BUG();
114 return 0xffffffffU; /* silence compiler warning */
115}
116
117/**
118 * hpt3x2n_pre_reset - reset the hpt3x2n bus
119 * @ap: ATA port to reset
120 *
121 * Perform the initial reset handling for the 3x2n series controllers.
122 * Reset the hardware and state machine, obtain the cable type.
123 */
124
125static int hpt3x2n_pre_reset(struct ata_port *ap)
126{
127 u8 scr2, ata66;
128 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
129
130 pci_read_config_byte(pdev, 0x5B, &scr2);
131 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
132 /* Cable register now active */
133 pci_read_config_byte(pdev, 0x5A, &ata66);
134 /* Restore state */
135 pci_write_config_byte(pdev, 0x5B, scr2);
136
137 if (ata66 & (1 << ap->port_no))
138 ap->cbl = ATA_CBL_PATA40;
139 else
140 ap->cbl = ATA_CBL_PATA80;
141
142 /* Reset the state machine */
143 pci_write_config_byte(pdev, 0x50, 0x37);
144 pci_write_config_byte(pdev, 0x54, 0x37);
145 udelay(100);
146
147 return ata_std_prereset(ap);
148}
149
150/**
151 * hpt3x2n_error_handler - probe the hpt3x2n bus
152 * @ap: ATA port to reset
153 *
154 * Perform the probe reset handling for the 3x2N
155 */
156
157static void hpt3x2n_error_handler(struct ata_port *ap)
158{
159	ata_bmdma_drive_eh(ap, hpt3x2n_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
160}
161
162/**
163 * hpt3x2n_set_piomode - PIO setup
164 * @ap: ATA interface
165 * @adev: device on the interface
166 *
167 * Perform PIO mode setup.
168 */
169
170static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
171{
172 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
173 u32 addr1, addr2;
174 u32 reg;
175 u32 mode;
176 u8 fast;
177
178 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
179 addr2 = 0x51 + 4 * ap->port_no;
180
181 /* Fast interrupt prediction disable, hold off interrupt disable */
182 pci_read_config_byte(pdev, addr2, &fast);
183 fast &= ~0x07;
184 pci_write_config_byte(pdev, addr2, fast);
185
186 pci_read_config_dword(pdev, addr1, &reg);
187 mode = hpt3x2n_find_mode(ap, adev->pio_mode);
188	mode &= ~0x80000000;	/* No FIFO in PIO */
189 mode &= ~0x30070000; /* Leave config bits alone */
190 reg &= 0x30070000; /* Strip timing bits */
191 pci_write_config_dword(pdev, addr1, reg | mode);
192}
193
194/**
195 * hpt3x2n_set_dmamode - DMA timing setup
196 * @ap: ATA interface
197 * @adev: Device being configured
198 *
199 * Set up the channel for MWDMA or UDMA modes. Much the same as with
200 * PIO, load the mode number and then set MWDMA or UDMA flag.
201 */
202
203static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
204{
205 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
206 u32 addr1, addr2;
207 u32 reg;
208 u32 mode;
209 u8 fast;
210
211 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
212 addr2 = 0x51 + 4 * ap->port_no;
213
214 /* Fast interrupt prediction disable, hold off interrupt disable */
215 pci_read_config_byte(pdev, addr2, &fast);
216 fast &= ~0x07;
217 pci_write_config_byte(pdev, addr2, fast);
218
219 pci_read_config_dword(pdev, addr1, &reg);
220 mode = hpt3x2n_find_mode(ap, adev->dma_mode);
221	mode &= ~0xC0000000;	/* Leave config bits alone */
222	mode |= 0x80000000;	/* FIFO in MWDMA or UDMA */
223 reg &= 0xC0000000; /* Strip timing bits */
224 pci_write_config_dword(pdev, addr1, reg | mode);
225}
226
227/**
228 *	hpt3x2n_bmdma_stop		-	DMA engine stop
229 * @qc: ATA command
230 *
231 * Clean up after the HPT3x2n and later DMA engine
232 */
233
234static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
235{
236 struct ata_port *ap = qc->ap;
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238	int mscreg = 0x50 + 4 * ap->port_no;
239 u8 bwsr_stat, msc_stat;
240
241 pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
242 pci_read_config_byte(pdev, mscreg, &msc_stat);
243 if (bwsr_stat & (1 << ap->port_no))
244 pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
245 ata_bmdma_stop(qc);
246}
247
248/**
249 * hpt3x2n_set_clock - clock control
250 * @ap: ATA port
251 * @source: 0x21 or 0x23 for PLL or PCI sourced clock
252 *
253 * Switch the ATA bus clock between the PLL and PCI clock sources
254 * while correctly isolating the bus and resetting internal logic
255 *
256 * We must use the DPLL for
257 * - writing
258 * - second channel UDMA7 (SATA ports) or higher
259 * - 66MHz PCI
260 *
261 * or we will underclock the device and get reduced performance.
262 */
263
264static void hpt3x2n_set_clock(struct ata_port *ap, int source)
265{
266 unsigned long bmdma = ap->ioaddr.bmdma_addr;
267
268 /* Tristate the bus */
269 outb(0x80, bmdma+0x73);
270 outb(0x80, bmdma+0x77);
271
272 /* Switch clock and reset channels */
273 outb(source, bmdma+0x7B);
274 outb(0xC0, bmdma+0x79);
275
276 /* Reset state machines */
277 outb(0x37, bmdma+0x70);
278 outb(0x37, bmdma+0x74);
279
280 /* Complete reset */
281 outb(0x00, bmdma+0x79);
282
283 /* Reconnect channels to bus */
284 outb(0x00, bmdma+0x73);
285 outb(0x00, bmdma+0x77);
286}
287
288/* Check if our partner interface is busy */
289
290static int hpt3x2n_pair_idle(struct ata_port *ap)
291{
292 struct ata_host *host = ap->host;
293 struct ata_port *pair = host->ports[ap->port_no ^ 1];
294
295 if (pair->hsm_task_state == HSM_ST_IDLE)
296 return 1;
297 return 0;
298}
299
300static int hpt3x2n_use_dpll(struct ata_port *ap, int reading)
301{
302 long flags = (long)ap->host->private_data;
303 /* See if we should use the DPLL */
304 if (reading == 0)
305 return USE_DPLL; /* Needed for write */
306 if (flags & PCI66)
307		return USE_DPLL;	/* Needed at 66MHz */
308 return 0;
309}
310
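/*
 * In effect: any write forces the DPLL, as does a 66MHz PCI bus; only
 * reads on a slower bus may stay on the PCI clock. For example, a
 * read with flags == 0 returns 0, while the same read with PCI66 set
 * in the host private data returns USE_DPLL.
 */
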
311static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc)
312{
313 struct ata_taskfile *tf = &qc->tf;
314 struct ata_port *ap = qc->ap;
315 int flags = (long)ap->host->private_data;
316
317 if (hpt3x2n_pair_idle(ap)) {
318 int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
319 if ((flags & USE_DPLL) != dpll) {
320 if (dpll == 1)
321 hpt3x2n_set_clock(ap, 0x21);
322 else
323 hpt3x2n_set_clock(ap, 0x23);
324 }
325 }
326 return ata_qc_issue_prot(qc);
327}
328
329static struct scsi_host_template hpt3x2n_sht = {
330 .module = THIS_MODULE,
331 .name = DRV_NAME,
332 .ioctl = ata_scsi_ioctl,
333 .queuecommand = ata_scsi_queuecmd,
334 .can_queue = ATA_DEF_QUEUE,
335 .this_id = ATA_SHT_THIS_ID,
336 .sg_tablesize = LIBATA_MAX_PRD,
337 .max_sectors = ATA_MAX_SECTORS,
338 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
339 .emulated = ATA_SHT_EMULATED,
340 .use_clustering = ATA_SHT_USE_CLUSTERING,
341 .proc_name = DRV_NAME,
342 .dma_boundary = ATA_DMA_BOUNDARY,
343 .slave_configure = ata_scsi_slave_config,
344 .bios_param = ata_std_bios_param,
345};
346
347/*
348 * Configuration for HPT3x2n.
349 */
350
351static struct ata_port_operations hpt3x2n_port_ops = {
352 .port_disable = ata_port_disable,
353 .set_piomode = hpt3x2n_set_piomode,
354 .set_dmamode = hpt3x2n_set_dmamode,
355 .mode_filter = ata_pci_default_filter,
356
357 .tf_load = ata_tf_load,
358 .tf_read = ata_tf_read,
359 .check_status = ata_check_status,
360 .exec_command = ata_exec_command,
361 .dev_select = ata_std_dev_select,
362
363 .freeze = ata_bmdma_freeze,
364 .thaw = ata_bmdma_thaw,
365 .error_handler = hpt3x2n_error_handler,
366 .post_internal_cmd = ata_bmdma_post_internal_cmd,
367
368 .bmdma_setup = ata_bmdma_setup,
369 .bmdma_start = ata_bmdma_start,
370 .bmdma_stop = hpt3x2n_bmdma_stop,
371 .bmdma_status = ata_bmdma_status,
372
373 .qc_prep = ata_qc_prep,
374 .qc_issue = hpt3x2n_qc_issue_prot,
375 .eng_timeout = ata_eng_timeout,
376 .data_xfer = ata_pio_data_xfer,
377
378 .irq_handler = ata_interrupt,
379 .irq_clear = ata_bmdma_irq_clear,
380
381 .port_start = ata_port_start,
382 .port_stop = ata_port_stop,
383 .host_stop = ata_host_stop
384};
385
386/**
387 * hpt3xn_calibrate_dpll - Calibrate the DPLL loop
388 * @dev: PCI device
389 *
390 * Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
391 * succeeds
392 */
393
394static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
395{
396 u8 reg5b;
397 u32 reg5c;
398 int tries;
399
400 for(tries = 0; tries < 0x5000; tries++) {
401 udelay(50);
402 pci_read_config_byte(dev, 0x5b, &reg5b);
403 if (reg5b & 0x80) {
404 /* See if it stays set */
405 for(tries = 0; tries < 0x1000; tries ++) {
406 pci_read_config_byte(dev, 0x5b, &reg5b);
407 /* Failed ? */
408 if ((reg5b & 0x80) == 0)
409 return 0;
410 }
411 /* Turn off tuning, we have the DPLL set */
412 pci_read_config_dword(dev, 0x5c, &reg5c);
413 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
414 return 1;
415 }
416 }
417 /* Never went stable */
418 return 0;
419}
420
421static int hpt3x2n_pci_clock(struct pci_dev *pdev)
422{
423 unsigned long freq;
424 u32 fcnt;
425
426 pci_read_config_dword(pdev, 0x70/*CHECKME*/, &fcnt);
427 if ((fcnt >> 12) != 0xABCDE) {
428 printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
429 return 33; /* Not BIOS set */
430 }
431 fcnt &= 0x1FF;
432
433 freq = (fcnt * 77) / 192;
434
435 /* Clamp to bands */
436 if (freq < 40)
437 return 33;
438 if (freq < 45)
439 return 40;
440 if (freq < 55)
441 return 50;
442 return 66;
443}
444
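/*
 * Worked example: a BIOS-set counter of 0xABCDE084 passes the
 * signature check (0xABCDE084 >> 12 == 0xABCDE), leaving
 * fcnt & 0x1FF = 0x84 = 132; freq = (132 * 77) / 192 = 52, which
 * clamps into the 50MHz band. An unset counter falls back to 33.
 */
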
445/**
446 *	hpt3x2n_init_one		-	Initialise an HPT3x2n device
447 * @dev: PCI device
448 * @id: Entry in match table
449 *
450 * Initialise an HPT3x2n device. There are some interesting complications
451 * here. Firstly the chip may report 366 and be one of several variants.
452 * Secondly all the timings depend on the clock for the chip which we must
453 *	detect and look up.
454 *
455 *	These are the known chip mappings. It may be missing a couple of later
456 * releases.
457 *
458 * Chip version PCI Rev Notes
459 * HPT372 4 (HPT366) 5 Other driver
460 * HPT372N 4 (HPT366) 6 UDMA133
461 * HPT372 5 (HPT372) 1 Other driver
462 * HPT372N 5 (HPT372) 2 UDMA133
463 * HPT302 6 (HPT302) * Other driver
464 * HPT302N 6 (HPT302) > 1 UDMA133
465 * HPT371 7 (HPT371) * Other driver
466 * HPT371N 7 (HPT371) > 1 UDMA133
467 * HPT374 8 (HPT374) * Other driver
468 * HPT372N 9 (HPT372N) * UDMA133
469 *
470 * (1) UDMA133 support depends on the bus clock
471 *
472 *	TODO: pin down the HPT371N
473 */
474
475static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
476{
477 /* HPT372N and friends - UDMA133 */
478 static struct ata_port_info info = {
479 .sht = &hpt3x2n_sht,
480 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
481 .pio_mask = 0x1f,
482 .mwdma_mask = 0x07,
483 .udma_mask = 0x7f,
484 .port_ops = &hpt3x2n_port_ops
485 };
486 struct ata_port_info *port_info[2];
487 struct ata_port_info *port = &info;
488
489 u8 irqmask;
490 u32 class_rev;
491
492 unsigned int pci_mhz;
493 unsigned int f_low, f_high;
494 int adjust;
495
496 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
497 class_rev &= 0xFF;
498
499 switch(dev->device) {
500 case PCI_DEVICE_ID_TTI_HPT366:
501 if (class_rev < 6)
502 return -ENODEV;
503 break;
504 case PCI_DEVICE_ID_TTI_HPT372:
505 /* 372N if rev >= 1*/
506 if (class_rev == 0)
507 return -ENODEV;
508 break;
509 case PCI_DEVICE_ID_TTI_HPT302:
510 if (class_rev < 2)
511 return -ENODEV;
512 break;
513 case PCI_DEVICE_ID_TTI_HPT372N:
514 break;
515 default:
516		printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus, please report (%d).\n", dev->device);
517 return -ENODEV;
518 }
519
520 /* Ok so this is a chip we support */
521
522 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
523 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
524 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
525 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
526
527 pci_read_config_byte(dev, 0x5A, &irqmask);
528 irqmask &= ~0x10;
529 pci_write_config_byte(dev, 0x5a, irqmask);
530
531	/* Tune the PLL. HPT recommends 75MHz for SATA, 66MHz for UDMA133
532	   and 50MHz for UDMA100. Right now we always use 66MHz. */
533
534 pci_mhz = hpt3x2n_pci_clock(dev);
535
536	f_low = (pci_mhz * 48) / 66;	/* PCI MHz for 66MHz DPLL */
537 f_high = f_low + 2; /* Tolerance */
538
539 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
540 /* PLL clock */
541 pci_write_config_byte(dev, 0x5B, 0x21);
542
543 /* Unlike the 37x we don't try jiggling the frequency */
544 for(adjust = 0; adjust < 8; adjust++) {
545 if (hpt3xn_calibrate_dpll(dev))
546 break;
547 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
548 }
549 if (adjust == 8)
550 printk(KERN_WARNING "hpt3xn: DPLL did not stabilize.\n");
551
552 /* Set our private data up. We only need a few flags so we use
553 it directly */
554 port->private_data = NULL;
555 if (pci_mhz > 60)
556 port->private_data = (void *)PCI66;
557
558 /* Now kick off ATA set up */
559 port_info[0] = port_info[1] = port;
560 return ata_pci_init_one(dev, port_info, 2);
561}
562
563static struct pci_device_id hpt3x2n[] = {
564 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
565 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), },
566 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), },
567 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N), },
568 { 0, },
569};
570
571static struct pci_driver hpt3x2n_pci_driver = {
572 .name = DRV_NAME,
573 .id_table = hpt3x2n,
574 .probe = hpt3x2n_init_one,
575 .remove = ata_pci_remove_one
576};
577
578static int __init hpt3x2n_init(void)
579{
580 return pci_register_driver(&hpt3x2n_pci_driver);
581}
582
583
584static void __exit hpt3x2n_exit(void)
585{
586 pci_unregister_driver(&hpt3x2n_pci_driver);
587}
588
589
590MODULE_AUTHOR("Alan Cox");
591MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
592MODULE_LICENSE("GPL");
593MODULE_DEVICE_TABLE(pci, hpt3x2n);
594MODULE_VERSION(DRV_VERSION);
595
596module_init(hpt3x2n_init);
597module_exit(hpt3x2n_exit);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
new file mode 100644
index 000000000000..152770133ab1
--- /dev/null
+++ b/drivers/ata/pata_hpt3x3.c
@@ -0,0 +1,226 @@
1/*
2 * pata_hpt3x3 - HPT3x3 driver
3 * (c) Copyright 2005-2006 Red Hat
4 *
5 * Was pata_hpt34x but the naming was confusing as it supported the
6 * 343 and 363 so it has been renamed.
7 *
8 * Based on:
9 * linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002
10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
11 *
12 * May be copied or modified under the terms of the GNU General Public
13 * License
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/blkdev.h>
21#include <linux/delay.h>
22#include <scsi/scsi_host.h>
23#include <linux/libata.h>
24
25#define DRV_NAME "pata_hpt3x3"
26#define DRV_VERSION "0.4.1"
27
28static int hpt3x3_probe_init(struct ata_port *ap)
29{
30 ap->cbl = ATA_CBL_PATA40;
31 return ata_std_prereset(ap);
32}
33
34/**
35 *	hpt3x3_error_handler	-	reset the hpt3x3 bus
36 * @ap: ATA port to reset
37 *
38 *	Perform the housekeeping when doing an ATA bus reset. We just
39 * need to force the cable type.
40 */
41
42static void hpt3x3_error_handler(struct ata_port *ap)
43{
44	ata_bmdma_drive_eh(ap, hpt3x3_probe_init, ata_std_softreset, NULL, ata_std_postreset);
45}
46
47/**
48 * hpt3x3_set_piomode - PIO setup
49 * @ap: ATA interface
50 * @adev: device on the interface
51 *
52 * Set our PIO requirements. This is fairly simple on the HPT3x3 as
53 * all we have to do is clear the MWDMA and UDMA bits then load the
54 * mode number.
55 */
56
57static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
58{
59 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
60 u32 r1, r2;
61 int dn = 2 * ap->port_no + adev->devno;
62
63 pci_read_config_dword(pdev, 0x44, &r1);
64 pci_read_config_dword(pdev, 0x48, &r2);
65 /* Load the PIO timing number */
66 r1 &= ~(7 << (3 * dn));
67 r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
68 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
69
70 pci_write_config_dword(pdev, 0x44, r1);
71 pci_write_config_dword(pdev, 0x48, r2);
72}
73
74/**
75 * hpt3x3_set_dmamode - DMA timing setup
76 * @ap: ATA interface
77 * @adev: Device being configured
78 *
79 * Set up the channel for MWDMA or UDMA modes. Much the same as with
80 * PIO, load the mode number and then set MWDMA or UDMA flag.
81 */
82
83static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
84{
85 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
86 u32 r1, r2;
87 int dn = 2 * ap->port_no + adev->devno;
88 int mode_num = adev->dma_mode & 0x0F;
89
90 pci_read_config_dword(pdev, 0x44, &r1);
91 pci_read_config_dword(pdev, 0x48, &r2);
92 /* Load the timing number */
93 r1 &= ~(7 << (3 * dn));
94 r1 |= (mode_num << (3 * dn));
95 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
96
97 if (adev->dma_mode >= XFER_UDMA_0)
98 r2 |= 0x01 << dn; /* Ultra mode */
99 else
100 r2 |= 0x10 << dn; /* MWDMA */
101
102 pci_write_config_dword(pdev, 0x44, r1);
103 pci_write_config_dword(pdev, 0x48, r2);
104}
105
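/*
 * Worked example: for device 0 on the second channel, dn = 2, so the
 * timing field lives at bits 6:8 of register 0x44. Programming
 * XFER_MW_DMA_2 (0x22) loads mode_num = 2 into that field, clears
 * bits 0x44 (0x11 << 2) in register 0x48, and then sets 0x40
 * (0x10 << 2) to flag MWDMA rather than UDMA.
 */
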
106static struct scsi_host_template hpt3x3_sht = {
107 .module = THIS_MODULE,
108 .name = DRV_NAME,
109 .ioctl = ata_scsi_ioctl,
110 .queuecommand = ata_scsi_queuecmd,
111 .can_queue = ATA_DEF_QUEUE,
112 .this_id = ATA_SHT_THIS_ID,
113 .sg_tablesize = LIBATA_MAX_PRD,
114 .max_sectors = ATA_MAX_SECTORS,
115 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
116 .emulated = ATA_SHT_EMULATED,
117 .use_clustering = ATA_SHT_USE_CLUSTERING,
118 .proc_name = DRV_NAME,
119 .dma_boundary = ATA_DMA_BOUNDARY,
120 .slave_configure = ata_scsi_slave_config,
121 .bios_param = ata_std_bios_param,
122};
123
124static struct ata_port_operations hpt3x3_port_ops = {
125 .port_disable = ata_port_disable,
126 .set_piomode = hpt3x3_set_piomode,
127 .set_dmamode = hpt3x3_set_dmamode,
128 .mode_filter = ata_pci_default_filter,
129
130 .tf_load = ata_tf_load,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = ata_exec_command,
134 .dev_select = ata_std_dev_select,
135
136 .freeze = ata_bmdma_freeze,
137 .thaw = ata_bmdma_thaw,
138 .error_handler = hpt3x3_error_handler,
139 .post_internal_cmd = ata_bmdma_post_internal_cmd,
140
141 .bmdma_setup = ata_bmdma_setup,
142 .bmdma_start = ata_bmdma_start,
143 .bmdma_stop = ata_bmdma_stop,
144 .bmdma_status = ata_bmdma_status,
145
146 .qc_prep = ata_qc_prep,
147 .qc_issue = ata_qc_issue_prot,
148 .eng_timeout = ata_eng_timeout,
149 .data_xfer = ata_pio_data_xfer,
150
151 .irq_handler = ata_interrupt,
152 .irq_clear = ata_bmdma_irq_clear,
153
154 .port_start = ata_port_start,
155 .port_stop = ata_port_stop,
156 .host_stop = ata_host_stop
157};
158
159/**
160 * hpt3x3_init_one - Initialise an HPT343/363
161 * @dev: PCI device
162 * @id: Entry in match table
163 *
164 * Perform basic initialisation. The chip has a quirk that it won't
165 * function unless it is at XX00. The old ATA driver touched this up
166 * but we leave it for pci quirks to do properly.
167 */
168
169static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
170{
171 static struct ata_port_info info = {
172 .sht = &hpt3x3_sht,
173 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
174 .pio_mask = 0x1f,
175 .mwdma_mask = 0x07,
176 .udma_mask = 0x07,
177 .port_ops = &hpt3x3_port_ops
178 };
179 static struct ata_port_info *port_info[2] = { &info, &info };
180 u16 cmd;
181
182 /* Initialize the board */
183 pci_write_config_word(dev, 0x80, 0x00);
184 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
185 pci_read_config_word(dev, PCI_COMMAND, &cmd);
186 if (cmd & PCI_COMMAND_MEMORY)
187 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
188 else
189 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
190
191 /* Now kick off ATA set up */
192 return ata_pci_init_one(dev, port_info, 2);
193}
194
195static struct pci_device_id hpt3x3[] = {
196 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT343), },
197 { 0, },
198};
199
200static struct pci_driver hpt3x3_pci_driver = {
201 .name = DRV_NAME,
202 .id_table = hpt3x3,
203 .probe = hpt3x3_init_one,
204 .remove = ata_pci_remove_one
205};
206
207static int __init hpt3x3_init(void)
208{
209 return pci_register_driver(&hpt3x3_pci_driver);
210}
211
212
213static void __exit hpt3x3_exit(void)
214{
215 pci_unregister_driver(&hpt3x3_pci_driver);
216}
217
218
219MODULE_AUTHOR("Alan Cox");
220MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
221MODULE_LICENSE("GPL");
222MODULE_DEVICE_TABLE(pci, hpt3x3);
223MODULE_VERSION(DRV_VERSION);
224
225module_init(hpt3x3_init);
226module_exit(hpt3x3_exit);
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
new file mode 100644
index 000000000000..73948c8b7270
--- /dev/null
+++ b/drivers/ata/pata_isapnp.c
@@ -0,0 +1,156 @@
1
2/*
3 * pata-isapnp.c - ISA PnP PATA controller driver.
4 * Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
5 *
6 * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/isapnp.h>
12#include <linux/init.h>
13#include <linux/blkdev.h>
14#include <linux/delay.h>
15#include <scsi/scsi_host.h>
16#include <linux/ata.h>
17#include <linux/libata.h>
18
19#define DRV_NAME "pata_isapnp"
20#define DRV_VERSION "0.1.5"
21
22static struct scsi_host_template isapnp_sht = {
23 .module = THIS_MODULE,
24 .name = DRV_NAME,
25 .ioctl = ata_scsi_ioctl,
26 .queuecommand = ata_scsi_queuecmd,
27 .can_queue = ATA_DEF_QUEUE,
28 .this_id = ATA_SHT_THIS_ID,
29 .sg_tablesize = LIBATA_MAX_PRD,
30 .max_sectors = ATA_MAX_SECTORS,
31 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
32 .emulated = ATA_SHT_EMULATED,
33 .use_clustering = ATA_SHT_USE_CLUSTERING,
34 .proc_name = DRV_NAME,
35 .dma_boundary = ATA_DMA_BOUNDARY,
36 .slave_configure = ata_scsi_slave_config,
37 .bios_param = ata_std_bios_param,
38};
39
40static struct ata_port_operations isapnp_port_ops = {
41 .port_disable = ata_port_disable,
42 .tf_load = ata_tf_load,
43 .tf_read = ata_tf_read,
44 .check_status = ata_check_status,
45 .exec_command = ata_exec_command,
46 .dev_select = ata_std_dev_select,
47
48 .freeze = ata_bmdma_freeze,
49 .thaw = ata_bmdma_thaw,
50 .error_handler = ata_bmdma_error_handler,
51 .post_internal_cmd = ata_bmdma_post_internal_cmd,
52
53 .qc_prep = ata_qc_prep,
54 .qc_issue = ata_qc_issue_prot,
55 .eng_timeout = ata_eng_timeout,
56 .data_xfer = ata_pio_data_xfer,
57
58 .irq_handler = ata_interrupt,
59 .irq_clear = ata_bmdma_irq_clear,
60
61 .port_start = ata_port_start,
62 .port_stop = ata_port_stop,
63 .host_stop = ata_host_stop
64};
65
66/**
67 * isapnp_init_one - attach an isapnp interface
68 * @idev: PnP device
69 * @dev_id: matching detect line
70 *
71 *	Register an ISA bus IDE interface. Such interfaces run at PIO 0
72 *	and use a non-shared IRQ.
73 */
74
75static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
76{
77 struct ata_probe_ent ae;
78
79 if (pnp_port_valid(idev, 0) == 0)
80 return -ENODEV;
81
82	/* FIXME: Should select polled PIO here, not fail */
83 if (pnp_irq_valid(idev, 0) == 0)
84 return -ENODEV;
85
86 memset(&ae, 0, sizeof(struct ata_probe_ent));
87 INIT_LIST_HEAD(&ae.node);
88 ae.dev = &idev->dev;
89 ae.port_ops = &isapnp_port_ops;
90 ae.sht = &isapnp_sht;
91 ae.n_ports = 1;
92 ae.pio_mask = 1; /* ISA so PIO 0 cycles */
93 ae.irq = pnp_irq(idev, 0);
94 ae.irq_flags = 0;
95 ae.port_flags = ATA_FLAG_SLAVE_POSS;
96 ae.port[0].cmd_addr = pnp_port_start(idev, 0);
97
98	if (pnp_port_valid(idev, 1)) {
99 ae.port[0].altstatus_addr = pnp_port_start(idev, 1);
100 ae.port[0].ctl_addr = pnp_port_start(idev, 1);
101 ae.port_flags |= ATA_FLAG_SRST;
102 }
103 ata_std_ports(&ae.port[0]);
104
105 if (ata_device_add(&ae) == 0)
106 return -ENODEV;
107 return 0;
108}
109
110/**
111 * isapnp_remove_one - unplug an isapnp interface
112 * @idev: PnP device
113 *
114 * Remove a previously configured PnP ATA port. Called only on module
115 * unload events as the core does not currently deal with ISAPnP docking.
116 */
117
118static void isapnp_remove_one(struct pnp_dev *idev)
119{
120 struct device *dev = &idev->dev;
121 struct ata_host *host = dev_get_drvdata(dev);
122
123 ata_host_remove(host);
124 dev_set_drvdata(dev, NULL);
125}
126
127static struct pnp_device_id isapnp_devices[] = {
128 /* Generic ESDI/IDE/ATA compatible hard disk controller */
129 {.id = "PNP0600", .driver_data = 0},
130 {.id = ""}
131};
132
133static struct pnp_driver isapnp_driver = {
134 .name = DRV_NAME,
135 .id_table = isapnp_devices,
136 .probe = isapnp_init_one,
137 .remove = isapnp_remove_one,
138};
139
140static int __init isapnp_init(void)
141{
142 return pnp_register_driver(&isapnp_driver);
143}
144
145static void __exit isapnp_exit(void)
146{
147 pnp_unregister_driver(&isapnp_driver);
148}
149
150MODULE_AUTHOR("Alan Cox");
151MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
152MODULE_LICENSE("GPL");
153MODULE_VERSION(DRV_VERSION);
154
155module_init(isapnp_init);
156module_exit(isapnp_exit);
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
new file mode 100644
index 000000000000..af39097d8081
--- /dev/null
+++ b/drivers/ata/pata_it821x.c
@@ -0,0 +1,847 @@
1/*
2 * ata-it821x.c - IT821x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * it821x.c
9 *
10 * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
11 *
12 * Copyright (C) 2004 Red Hat <alan@redhat.com>
13 *
14 * May be copied or modified under the terms of the GNU General Public License
15 * Based in part on the ITE vendor provided SCSI driver.
16 *
17 * Documentation available from
18 * http://www.ite.com.tw/pc/IT8212F_V04.pdf
19 * Some other documents are NDA.
20 *
21 * The ITE8212 isn't exactly a standard IDE controller. It has two
22 *  modes. In pass through mode it is an IDE controller. In its smart
23 *  mode it's actually quite a capable hardware raid controller disguised
24 *  as an IDE controller. Smart mode only understands DMA read/write and
25 * identify, none of the fancier commands apply. The IT8211 is identical
26 * in other respects but lacks the raid mode.
27 *
28 * Errata:
29 * o Rev 0x10 also requires master/slave hold the same DMA timings and
30 * cannot do ATAPI MWDMA.
31 * o The identify data for raid volumes lacks CHS info (technically ok)
32 * but also fails to set the LBA28 and other bits. We fix these in
33 * the IDE probe quirk code.
34 *  o	If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
35 * raid then the controller firmware dies
36 * o Smart mode without RAID doesn't clear all the necessary identify
37 * bits to reduce the command set to the one used
38 *
39 * This has a few impacts on the driver
40 * - In pass through mode we do all the work you would expect
41 * - In smart mode the clocking set up is done by the controller generally
42 * but we must watch the other limits and filter.
43 * - There are a few extra vendor commands that actually talk to the
44 * controller but only work PIO with no IRQ.
45 *
46 * Vendor areas of the identify block in smart mode are used for the
47 * timing and policy set up. Each HDD in raid mode also has a serial
48 * block on the disk. The hardware extra commands are get/set chip status,
49 * rebuild, get rebuild status.
50 *
51 * In Linux the driver supports pass through mode as if the device was
52 * just another IDE controller. If the smart mode is running then
53 * volumes are managed by the controller firmware and each IDE "disk"
54 * is a raid volume. Even more cute - the controller can do automated
55 * hotplug and rebuild.
56 *
57 * The pass through controller itself is a little demented. It has a
58 * flaw that it has a single set of PIO/MWDMA timings per channel so
59 *  non UDMA devices restrict each other's performance. It also has a
60 * single clock source per channel so mixed UDMA100/133 performance
61 * isn't perfect and we have to pick a clock. Thankfully none of this
62 * matters in smart mode. ATAPI DMA is not currently supported.
63 *
64 * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
65 *
66 * TODO
67 * - ATAPI and other speed filtering
68 * - Command filter in smart mode
69 * - RAID configuration ioctls
70 */
71
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/pci.h>
75#include <linux/init.h>
76#include <linux/blkdev.h>
77#include <linux/delay.h>
78#include <scsi/scsi_host.h>
79#include <linux/libata.h>
80
81
82#define DRV_NAME "pata_it821x"
83#define DRV_VERSION "0.3.2"
84
85struct it821x_dev
86{
87 unsigned int smart:1, /* Are we in smart raid mode */
88 timing10:1; /* Rev 0x10 */
89 u8 clock_mode; /* 0, ATA_50 or ATA_66 */
90 u8 want[2][2]; /* Mode/Pri log for master slave */
91 /* We need these for switching the clock when DMA goes on/off
92	   The high byte is the 66MHz timing */
93 u16 pio[2]; /* Cached PIO values */
94 u16 mwdma[2]; /* Cached MWDMA values */
95 u16 udma[2]; /* Cached UDMA values (per drive) */
96 u16 last_device; /* Master or slave loaded ? */
97};
98
99#define ATA_66 0
100#define ATA_50 1
101#define ATA_ANY 2
102
103#define UDMA_OFF 0
104#define MWDMA_OFF 0
105
106/*
107 * We allow users to force the card into non raid mode without
108 *	flashing the alternative BIOS. This is also necessary right now
109 * for embedded platforms that cannot run a PC BIOS but are using this
110 * device.
111 */
112
113static int it8212_noraid;
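/* Exposed below as the "noraid" module parameter (see module_param_named
   at the end of this file), e.g. "modprobe pata_it821x noraid=1" */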
114
115/**
116 * it821x_pre_reset - probe
117 * @ap: ATA port
118 *
119 * Set the cable type
120 */
121
122static int it821x_pre_reset(struct ata_port *ap)
123{
124 ap->cbl = ATA_CBL_PATA80;
125 return ata_std_prereset(ap);
126}
127
128/**
129 * it821x_error_handler - probe/reset
130 * @ap: ATA port
131 *
132 * Set the cable type and trigger a probe
133 */
134
135static void it821x_error_handler(struct ata_port *ap)
136{
137 return ata_bmdma_drive_eh(ap, it821x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
138}
139
140/**
141 * it821x_program - program the PIO/MWDMA registers
142 * @ap: ATA port
143 * @adev: Device to program
144 *	@timing: Timing value (66MHz in the top 8 bits, 50MHz in the low 8)
145 *
146 * Program the PIO/MWDMA timing for this channel according to the
147 * current clock. These share the same register so are managed by
148 * the DMA start/stop sequence as with the old driver.
149 */
150
151static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
152{
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 struct it821x_dev *itdev = ap->private_data;
155 int channel = ap->port_no;
156 u8 conf;
157
158 /* Program PIO/MWDMA timing bits */
159 if (itdev->clock_mode == ATA_66)
160 conf = timing >> 8;
161 else
162 conf = timing & 0xFF;
163 pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
164}
165
166
167/**
168 * it821x_program_udma - program the UDMA registers
169 * @ap: ATA port
170 * @adev: ATA device to update
171 *	@timing: Timing bits. Top 8 are for 66MHz, bottom 8 for 50MHz
172 *
173 * Program the UDMA timing for this drive according to the
174 * current clock. Handles the dual clocks and also knows about
175 * the errata on the 0x10 revision. The UDMA errata is partly handled
176 * here and partly in start_dma.
177 */
178
179static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
180{
181 struct it821x_dev *itdev = ap->private_data;
182 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
183 int channel = ap->port_no;
184 int unit = adev->devno;
185 u8 conf;
186
187 /* Program UDMA timing bits */
188 if (itdev->clock_mode == ATA_66)
189 conf = timing >> 8;
190 else
191 conf = timing & 0xFF;
192 if (itdev->timing10 == 0)
193 pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
194 else {
195 /* Early revision must be programmed for both together */
196 pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
197 pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
198 }
199}
200
201/**
202 * it821x_clock_strategy
203 * @ap: ATA interface
204 * @adev: ATA device being updated
205 *
206 *	Select between the 50 and 66MHz base clocks to get the best
207 * results for this interface.
208 */
209
210static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
211{
212 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
213 struct it821x_dev *itdev = ap->private_data;
214 u8 unit = adev->devno;
215 struct ata_device *pair = ata_dev_pair(adev);
216
217 int clock, altclock;
218 u8 v;
219 int sel = 0;
220
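	/*
	 * Note: want[unit][0] holds the priority of the loaded mode
	 * (1 = PIO, 2 = MWDMA, 3 = UDMA, as set by the set_*mode
	 * handlers below) and want[unit][1] the clock that mode prefers
	 * (ATA_50, ATA_66 or ATA_ANY). The drive running the higher
	 * priority mode gets its clock choice.
	 */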
221 /* Look for the most wanted clocking */
222 if (itdev->want[0][0] > itdev->want[1][0]) {
223 clock = itdev->want[0][1];
224 altclock = itdev->want[1][1];
225 } else {
226 clock = itdev->want[1][1];
227 altclock = itdev->want[0][1];
228 }
229
230	/* Master doesn't care; does the slave? */
231 if (clock == ATA_ANY)
232 clock = altclock;
233
234 /* Nobody cares - keep the same clock */
235 if (clock == ATA_ANY)
236 return;
237 /* No change */
238 if (clock == itdev->clock_mode)
239 return;
240
241 /* Load this into the controller */
242 if (clock == ATA_66)
243 itdev->clock_mode = ATA_66;
244 else {
245 itdev->clock_mode = ATA_50;
246 sel = 1;
247 }
248 pci_read_config_byte(pdev, 0x50, &v);
249 v &= ~(1 << (1 + ap->port_no));
250 v |= sel << (1 + ap->port_no);
251 pci_write_config_byte(pdev, 0x50, v);
252
253 /*
254 * Reprogram the UDMA/PIO of the pair drive for the switch
255 * MWDMA will be dealt with by the dma switcher
256 */
257 if (pair && itdev->udma[1-unit] != UDMA_OFF) {
258 it821x_program_udma(ap, pair, itdev->udma[1-unit]);
259 it821x_program(ap, pair, itdev->pio[1-unit]);
260 }
261 /*
262 * Reprogram the UDMA/PIO of our drive for the switch.
263 * MWDMA will be dealt with by the dma switcher
264 */
265 if (itdev->udma[unit] != UDMA_OFF) {
266 it821x_program_udma(ap, adev, itdev->udma[unit]);
267 it821x_program(ap, adev, itdev->pio[unit]);
268 }
269}
270
271/**
272 * it821x_passthru_set_piomode - set PIO mode data
273 * @ap: ATA interface
274 * @adev: ATA device
275 *
276 * Configure for PIO mode. This is complicated as the register is
277 * shared by PIO and MWDMA and for both channels.
278 */
279
280static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
281{
282	/* Spec says 89; the reference driver uses 88 */
283 static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
284 static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
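	/* Both tables are indexed by PIO mode 0-4; each timing entry packs
	   the 66MHz value in the high byte and the 50MHz value in the low
	   byte, as consumed by it821x_program() above */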
285
286 struct it821x_dev *itdev = ap->private_data;
287 int unit = adev->devno;
288 int mode_wanted = adev->pio_mode - XFER_PIO_0;
289
290	/* We prefer the 66MHz clock for PIO 0-3; don't care for PIO 4 */
291 itdev->want[unit][1] = pio_want[mode_wanted];
292 itdev->want[unit][0] = 1; /* PIO is lowest priority */
293 itdev->pio[unit] = pio[mode_wanted];
294 it821x_clock_strategy(ap, adev);
295 it821x_program(ap, adev, itdev->pio[unit]);
296}
297
298/**
299 * it821x_passthru_set_dmamode - set initial DMA mode data
300 * @ap: ATA interface
301 * @adev: ATA device
302 *
303 * Set up the DMA modes. The actions taken depend heavily on the mode
304 *	to use. If UDMA is used, as is hopefully the usual case, then the
305 * timing register is private and we need only consider the clock. If
306 * we are using MWDMA then we have to manage the setting ourself as
307 * we switch devices and mode.
308 */
309
310static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
311{
312 static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
313 static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
314 static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
315 static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
316
317 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
318 struct it821x_dev *itdev = ap->private_data;
319 int channel = ap->port_no;
320 int unit = adev->devno;
321 u8 conf;
322
323 if (adev->dma_mode >= XFER_UDMA_0) {
324 int mode_wanted = adev->dma_mode - XFER_UDMA_0;
325
326 itdev->want[unit][1] = udma_want[mode_wanted];
327 itdev->want[unit][0] = 3; /* UDMA is high priority */
328 itdev->mwdma[unit] = MWDMA_OFF;
329 itdev->udma[unit] = udma[mode_wanted];
330 if (mode_wanted >= 5)
331 itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
332
333 /* UDMA on. Again revision 0x10 must do the pair */
334 pci_read_config_byte(pdev, 0x50, &conf);
335 if (itdev->timing10)
336 conf &= channel ? 0x9F: 0xE7;
337 else
338 conf &= ~ (1 << (3 + 2 * channel + unit));
339 pci_write_config_byte(pdev, 0x50, conf);
340 it821x_clock_strategy(ap, adev);
341 it821x_program_udma(ap, adev, itdev->udma[unit]);
342 } else {
343 int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
344
345 itdev->want[unit][1] = mwdma_want[mode_wanted];
346 itdev->want[unit][0] = 2; /* MWDMA is low priority */
347 itdev->mwdma[unit] = dma[mode_wanted];
348 itdev->udma[unit] = UDMA_OFF;
349
350 /* UDMA bits off - Revision 0x10 do them in pairs */
351 pci_read_config_byte(pdev, 0x50, &conf);
352 if (itdev->timing10)
353 conf |= channel ? 0x60: 0x18;
354 else
355 conf |= 1 << (3 + 2 * channel + unit);
356 pci_write_config_byte(pdev, 0x50, conf);
357 it821x_clock_strategy(ap, adev);
358 }
359}
360
361/**
362 * it821x_passthru_dma_start - DMA start callback
363 * @qc: Command in progress
364 *
365 * Usually drivers set the DMA timing at the point the set_dmamode call
366 * is made. IT821x however requires we load new timings on the
367 * transitions in some cases.
368 */
369
370static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
371{
372 struct ata_port *ap = qc->ap;
373 struct ata_device *adev = qc->dev;
374 struct it821x_dev *itdev = ap->private_data;
375 int unit = adev->devno;
376
377 if (itdev->mwdma[unit] != MWDMA_OFF)
378 it821x_program(ap, adev, itdev->mwdma[unit]);
379 else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
380 it821x_program_udma(ap, adev, itdev->udma[unit]);
381 ata_bmdma_start(qc);
382}
383
384/**
385 * it821x_passthru_dma_stop - DMA stop callback
386 * @qc: ATA command
387 *
388 * We loaded new timings in dma_start, as a result we need to restore
389 * the PIO timings in dma_stop so that the next command issue gets the
390 * right clock values.
391 */
392
393static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
394{
395 struct ata_port *ap = qc->ap;
396 struct ata_device *adev = qc->dev;
397 struct it821x_dev *itdev = ap->private_data;
398 int unit = adev->devno;
399
400 ata_bmdma_stop(qc);
401 if (itdev->mwdma[unit] != MWDMA_OFF)
402 it821x_program(ap, adev, itdev->pio[unit]);
403}
404
405
406/**
407 * it821x_passthru_dev_select - Select master/slave
408 * @ap: ATA port
409 * @device: Device number (not pointer)
410 *
411 *	Device selection hook. If necessary, perform clock switching
412 */
413
414static void it821x_passthru_dev_select(struct ata_port *ap,
415 unsigned int device)
416{
417 struct it821x_dev *itdev = ap->private_data;
418 if (itdev && device != itdev->last_device) {
419 struct ata_device *adev = &ap->device[device];
420 it821x_program(ap, adev, itdev->pio[adev->devno]);
421 itdev->last_device = device;
422 }
423 ata_std_dev_select(ap, device);
424}
425
426/**
427 * it821x_smart_qc_issue_prot - wrap qc issue prot
428 * @qc: command
429 *
430 *	Wrap the command issue sequence for the IT821x. In smart mode the
431 *	firmware only understands a limited command subset, so anything
432 *	else is rejected here before it can upset the controller
433 */
434
435static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
436{
437	switch (qc->tf.command) {
439 /* Commands the firmware supports */
440 case ATA_CMD_READ:
441 case ATA_CMD_READ_EXT:
442 case ATA_CMD_WRITE:
443 case ATA_CMD_WRITE_EXT:
444 case ATA_CMD_PIO_READ:
445 case ATA_CMD_PIO_READ_EXT:
446 case ATA_CMD_PIO_WRITE:
447 case ATA_CMD_PIO_WRITE_EXT:
448 case ATA_CMD_READ_MULTI:
449 case ATA_CMD_READ_MULTI_EXT:
450 case ATA_CMD_WRITE_MULTI:
451 case ATA_CMD_WRITE_MULTI_EXT:
452 case ATA_CMD_ID_ATA:
453 /* Arguably should just no-op this one */
454 case ATA_CMD_SET_FEATURES:
455 return ata_qc_issue_prot(qc);
456 }
457 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
458 return AC_ERR_INVALID;
459}
460
461/**
462 * it821x_passthru_qc_issue_prot - wrap qc issue prot
463 * @qc: command
464 *
465 * Wrap the command issue sequence for the IT821x. We need to
466 *	perform our own device selection timing loads before the
467 * usual happenings kick off
468 */
469
470static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
471{
472 it821x_passthru_dev_select(qc->ap, qc->dev->devno);
473 return ata_qc_issue_prot(qc);
474}
475
476/**
477 * it821x_smart_set_mode - mode setting
478 * @ap: interface to set up
479 *
480 * Use a non standard set_mode function. We don't want to be tuned.
481 * The BIOS configured everything. Our job is not to fiddle. We
482 * read the dma enabled bits from the PCI configuration of the device
483 * and respect them.
484 */
485
486static void it821x_smart_set_mode(struct ata_port *ap)
487{
488 int dma_enabled = 0;
489 int i;
490
491 /* Bits 5 and 6 indicate if DMA is active on master/slave */
492 /* It is possible that BMDMA isn't allocated */
493 if (ap->ioaddr.bmdma_addr)
494 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
495
496 for (i = 0; i < ATA_MAX_DEVICES; i++) {
497 struct ata_device *dev = &ap->device[i];
498 if (ata_dev_enabled(dev)) {
499 /* We don't really care */
500 dev->pio_mode = XFER_PIO_0;
501 dev->dma_mode = XFER_MW_DMA_0;
502 /* We do need the right mode information for DMA or PIO
503 and this comes from the current configuration flags */
504 if (dma_enabled & (1 << (5 + i))) {
505 dev->xfer_mode = XFER_MW_DMA_0;
506 dev->xfer_shift = ATA_SHIFT_MWDMA;
507 dev->flags &= ~ATA_DFLAG_PIO;
508 } else {
509 dev->xfer_mode = XFER_PIO_0;
510 dev->xfer_shift = ATA_SHIFT_PIO;
511 dev->flags |= ATA_DFLAG_PIO;
512 }
513 }
514 }
515}
516
517/**
518 * it821x_dev_config - Called each device identify
519 * @ap: ATA port
520 * @adev: Device that has just been identified
521 *
522 * Perform the initial setup needed for each device that is chip
523 * special. In our case we need to lock the sector count to avoid
524 * blowing the brains out of the firmware with large LBA48 requests
525 *
526 * FIXME: When FUA appears we need to block FUA too. And SMART and
527 * basically we need to filter commands for this chip.
528 */
529
530static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev)
531{
532 unsigned char model_num[40];
533 char *s;
534 unsigned int len;
535
536 /* This block ought to be a library routine as it is in several
537 drivers now */
538
539 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS,
540 sizeof(model_num));
541 s = &model_num[0];
542 len = strnlen(s, sizeof(model_num));
543
544 /* ATAPI specifies that empty space is blank-filled; remove blanks */
545 while ((len > 0) && (s[len - 1] == ' ')) {
546 len--;
547 s[len] = 0;
548 }
549
550 if (adev->max_sectors > 255)
551 adev->max_sectors = 255;
552
553 if (strstr(model_num, "Integrated Technology Express")) {
554 /* RAID mode */
555 printk(KERN_INFO "IT821x %sRAID%d volume",
556 adev->id[147]?"Bootable ":"",
557 adev->id[129]);
558 if (adev->id[129] != 1)
559 printk("(%dK stripe)", adev->id[146]);
560 printk(".\n");
561 }
562}
563
564
565/**
566 * it821x_check_atapi_dma - ATAPI DMA handler
567 * @qc: Command we are about to issue
568 *
569 * Decide if this ATAPI command can be issued by DMA on this
570 * controller. Return 0 if it can be.
571 */
572
573static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
574{
575 struct ata_port *ap = qc->ap;
576 struct it821x_dev *itdev = ap->private_data;
577
578 /* No ATAPI DMA in smart mode */
579 if (itdev->smart)
580 return -EOPNOTSUPP;
581 /* No ATAPI DMA on rev 10 */
582 if (itdev->timing10)
583 return -EOPNOTSUPP;
584 /* Cool */
585 return 0;
586}
587
588
589/**
590 * it821x_port_start - port setup
591 * @ap: ATA port being set up
592 *
593 *	The it821x needs to maintain private data structures while using
594 *	the standard PCI interface helpers, which have no support for
595 *	this. We instead set up the private data on the port
596 * start hook, and tear it down on port stop
597 */
598
599static int it821x_port_start(struct ata_port *ap)
600{
601 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
602 struct it821x_dev *itdev;
603 u8 conf;
604
605 int ret = ata_port_start(ap);
606 if (ret < 0)
607 return ret;
608
609 ap->private_data = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL);
610 if (ap->private_data == NULL) {
611 ata_port_stop(ap);
612 return -ENOMEM;
613 }
614
615 itdev = ap->private_data;
616 memset(itdev, 0, sizeof(struct it821x_dev));
617
618 pci_read_config_byte(pdev, 0x50, &conf);
619
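	/*
	 * Layout of config byte 0x50 as this driver uses it: bit 0
	 * selects smart mode, bits 1-2 select the 50MHz clock per
	 * channel, and bits 3-6 disable UDMA per drive (see
	 * it821x_passthru_set_dmamode above).
	 */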
620 if (conf & 1) {
621 itdev->smart = 1;
622		/* Long I/Os, although allowed in LBA48 space, cause the
623		   onboard firmware to enter the twilight zone */
624 /* No ATAPI DMA in this mode either */
625 }
626 /* Pull the current clocks from 0x50 */
627 if (conf & (1 << (1 + ap->port_no)))
628 itdev->clock_mode = ATA_50;
629 else
630 itdev->clock_mode = ATA_66;
631
632 itdev->want[0][1] = ATA_ANY;
633 itdev->want[1][1] = ATA_ANY;
634 itdev->last_device = -1;
635
636 pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
637 if (conf == 0x10) {
638 itdev->timing10 = 1;
639 /* Need to disable ATAPI DMA for this case */
640 if (!itdev->smart)
641 printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
642 }
643
644 return 0;
645}
646
647/**
648 * it821x_port_stop - port shutdown
649 * @ap: ATA port being removed
650 *
651 * Release the private objects we added in it821x_port_start
652 */
653
654 static void it821x_port_stop(struct ata_port *ap)
{
655 kfree(ap->private_data);
656 ap->private_data = NULL; /* We want an OOPS if we reuse this
657 too late! */
658 ata_port_stop(ap);
659}
660
661static struct scsi_host_template it821x_sht = {
662 .module = THIS_MODULE,
663 .name = DRV_NAME,
664 .ioctl = ata_scsi_ioctl,
665 .queuecommand = ata_scsi_queuecmd,
666 .can_queue = ATA_DEF_QUEUE,
667 .this_id = ATA_SHT_THIS_ID,
668 .sg_tablesize = LIBATA_MAX_PRD,
669 /* 255 sectors to begin with. This is locked in smart mode but not
670 in pass through */
671 .max_sectors = 255,
672 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
673 .emulated = ATA_SHT_EMULATED,
674 .use_clustering = ATA_SHT_USE_CLUSTERING,
675 .proc_name = DRV_NAME,
676 .dma_boundary = ATA_DMA_BOUNDARY,
677 .slave_configure = ata_scsi_slave_config,
678 .bios_param = ata_std_bios_param,
679};
680
681static struct ata_port_operations it821x_smart_port_ops = {
682 .set_mode = it821x_smart_set_mode,
683 .port_disable = ata_port_disable,
684 .tf_load = ata_tf_load,
685 .tf_read = ata_tf_read,
686 .mode_filter = ata_pci_default_filter,
687
688 .check_status = ata_check_status,
689 .check_atapi_dma= it821x_check_atapi_dma,
690 .exec_command = ata_exec_command,
691 .dev_select = ata_std_dev_select,
692 .dev_config = it821x_dev_config,
693
694 .freeze = ata_bmdma_freeze,
695 .thaw = ata_bmdma_thaw,
696 .error_handler = it821x_error_handler,
697 .post_internal_cmd = ata_bmdma_post_internal_cmd,
698
699 .bmdma_setup = ata_bmdma_setup,
700 .bmdma_start = ata_bmdma_start,
701 .bmdma_stop = ata_bmdma_stop,
702 .bmdma_status = ata_bmdma_status,
703
704 .qc_prep = ata_qc_prep,
705 .qc_issue = it821x_smart_qc_issue_prot,
706 .eng_timeout = ata_eng_timeout,
707 .data_xfer = ata_pio_data_xfer,
708
709 .irq_handler = ata_interrupt,
710 .irq_clear = ata_bmdma_irq_clear,
711
712 .port_start = it821x_port_start,
713 .port_stop = it821x_port_stop,
714 .host_stop = ata_host_stop
715};
716
717static struct ata_port_operations it821x_passthru_port_ops = {
718 .port_disable = ata_port_disable,
719 .set_piomode = it821x_passthru_set_piomode,
720 .set_dmamode = it821x_passthru_set_dmamode,
721 .mode_filter = ata_pci_default_filter,
722
723 .tf_load = ata_tf_load,
724 .tf_read = ata_tf_read,
725 .check_status = ata_check_status,
726 .exec_command = ata_exec_command,
727 .check_atapi_dma= it821x_check_atapi_dma,
728 .dev_select = it821x_passthru_dev_select,
729
730 .freeze = ata_bmdma_freeze,
731 .thaw = ata_bmdma_thaw,
732 .error_handler = it821x_error_handler,
733 .post_internal_cmd = ata_bmdma_post_internal_cmd,
734
735 .bmdma_setup = ata_bmdma_setup,
736 .bmdma_start = it821x_passthru_bmdma_start,
737 .bmdma_stop = it821x_passthru_bmdma_stop,
738 .bmdma_status = ata_bmdma_status,
739
740 .qc_prep = ata_qc_prep,
741 .qc_issue = it821x_passthru_qc_issue_prot,
742 .eng_timeout = ata_eng_timeout,
743 .data_xfer = ata_pio_data_xfer,
744
745 .irq_clear = ata_bmdma_irq_clear,
746 .irq_handler = ata_interrupt,
747
748 .port_start = it821x_port_start,
749 .port_stop = it821x_port_stop,
750 .host_stop = ata_host_stop
751};
752
753static void __devinit it821x_disable_raid(struct pci_dev *pdev)
754{
755 /* Reset local CPU, and set BIOS not ready */
756 pci_write_config_byte(pdev, 0x5E, 0x01);
757
758 /* Set to bypass mode, and reset PCI bus */
759 pci_write_config_byte(pdev, 0x50, 0x00);
760 pci_write_config_word(pdev, PCI_COMMAND,
761 PCI_COMMAND_PARITY | PCI_COMMAND_IO |
762 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
763 pci_write_config_word(pdev, 0x40, 0xA0F3);
764
765	pci_write_config_dword(pdev, 0x4C, 0x02040204);
766 pci_write_config_byte(pdev, 0x42, 0x36);
767 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
768}
769
770
771static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
772{
773 u8 conf;
774
775 static struct ata_port_info info_smart = {
776 .sht = &it821x_sht,
777 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
778 .pio_mask = 0x1f,
779 .mwdma_mask = 0x07,
780 .port_ops = &it821x_smart_port_ops
781 };
782 static struct ata_port_info info_passthru = {
783 .sht = &it821x_sht,
784 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
785 .pio_mask = 0x1f,
786 .mwdma_mask = 0x07,
787 .udma_mask = 0x7f,
788 .port_ops = &it821x_passthru_port_ops
789 };
790 static struct ata_port_info *port_info[2];
791
792 static char *mode[2] = { "pass through", "smart" };
793
794 /* Force the card into bypass mode if so requested */
795 if (it8212_noraid) {
796 printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
797 it821x_disable_raid(pdev);
798 }
799 pci_read_config_byte(pdev, 0x50, &conf);
800 conf &= 1;
801
802 printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]);
803 if (conf == 0)
804 port_info[0] = port_info[1] = &info_passthru;
805 else
806 port_info[0] = port_info[1] = &info_smart;
807
808 return ata_pci_init_one(pdev, port_info, 2);
809}
810
811static struct pci_device_id it821x[] = {
812 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211), },
813 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212), },
814 { 0, },
815};
816
817static struct pci_driver it821x_pci_driver = {
818 .name = DRV_NAME,
819 .id_table = it821x,
820 .probe = it821x_init_one,
821 .remove = ata_pci_remove_one
822};
823
824static int __init it821x_init(void)
825{
826 return pci_register_driver(&it821x_pci_driver);
827}
828
829
830static void __exit it821x_exit(void)
831{
832 pci_unregister_driver(&it821x_pci_driver);
833}
834
835
836MODULE_AUTHOR("Alan Cox");
837MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
838MODULE_LICENSE("GPL");
839MODULE_DEVICE_TABLE(pci, it821x);
840MODULE_VERSION(DRV_VERSION);
841
842
843module_param_named(noraid, it8212_noraid, int, S_IRUGO);
844 MODULE_PARM_DESC(noraid, "Force card into bypass mode");
845
846module_init(it821x_init);
847module_exit(it821x_exit);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
new file mode 100644
index 000000000000..6832a643a9eb
--- /dev/null
+++ b/drivers/ata/pata_jmicron.c
@@ -0,0 +1,266 @@
1/*
2 * pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
3 * PATA port of the controller. The SATA ports are
4 * driven by AHCI in the usual configuration although
5 * this driver can handle other setups if we need it.
6 *
7 * (c) 2006 Red Hat <alan@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/init.h>
14#include <linux/blkdev.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <scsi/scsi_host.h>
18#include <linux/libata.h>
19#include <linux/ata.h>
20
21#define DRV_NAME "pata_jmicron"
22#define DRV_VERSION "0.1.2"
23
24typedef enum {
25 PORT_PATA0 = 0,
26 PORT_PATA1 = 1,
27 PORT_SATA = 2,
28} port_type;
29
30/**
31 * jmicron_pre_reset - check for 40/80 pin
32 * @ap: Port
33 *
34 * Perform the PATA port setup we need.
35 *
36 * On the Jmicron 361/363 there is a single PATA port that can be mapped
37 * either as primary or secondary (or neither). We don't do any policy
38 * and setup here. We assume that has been done by init_one and the
39 * BIOS.
40 */
41
42static int jmicron_pre_reset(struct ata_port *ap)
43{
44 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
45 u32 control;
46 u32 control5;
47	int port_mask = 1 << (4 * ap->port_no);
48 int port = ap->port_no;
49 port_type port_map[2];
50
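	/* The per-port enable flags sit at bits 0 (port 0) and 4 (port 1)
	   of config dword 0x40, at least as this driver reads it */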
51 /* Check if our port is enabled */
52 pci_read_config_dword(pdev, 0x40, &control);
53 if ((control & port_mask) == 0)
54 return 0;
55
56 /* There are two basic mappings. One has the two SATA ports merged
57 as master/slave and the secondary as PATA, the other has only the
58 SATA port mapped */
59 if (control & (1 << 23)) {
60 port_map[0] = PORT_SATA;
61 port_map[1] = PORT_PATA0;
62 } else {
63 port_map[0] = PORT_SATA;
64 port_map[1] = PORT_SATA;
65 }
66
67 /* The 365/366 may have this bit set to map the second PATA port
68 as the internal primary channel */
69 pci_read_config_dword(pdev, 0x80, &control5);
70 if (control5 & (1<<24))
71 port_map[0] = PORT_PATA1;
72
73 /* The two ports may then be logically swapped by the firmware */
74 if (control & (1 << 22))
75 port = port ^ 1;
76
77 /*
78 * Now we know which physical port we are talking about we can
79 * actually do our cable checking etc. Thankfully we don't need
80 * to do the plumbing for other cases.
81 */
82	switch (port_map[port]) {
84 case PORT_PATA0:
85 if (control & (1 << 5))
86 return 0;
87 if (control & (1 << 3)) /* 40/80 pin primary */
88 ap->cbl = ATA_CBL_PATA40;
89 else
90 ap->cbl = ATA_CBL_PATA80;
91 break;
92 case PORT_PATA1:
93 /* Bit 21 is set if the port is enabled */
94 if ((control5 & (1 << 21)) == 0)
95 return 0;
96 if (control5 & (1 << 19)) /* 40/80 pin secondary */
97 ap->cbl = ATA_CBL_PATA40;
98 else
99 ap->cbl = ATA_CBL_PATA80;
100 break;
101 case PORT_SATA:
102 ap->cbl = ATA_CBL_SATA;
103 break;
104 }
105 return ata_std_prereset(ap);
106}
107
108/**
109 *	jmicron_error_handler - Setup an error handler
110 * @ap: Port to handle
111 *
112 * LOCKING:
113 * None (inherited from caller).
114 */
115
116static void jmicron_error_handler(struct ata_port *ap)
117{
118 return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
119}
120
121/* No PIO or DMA methods needed for this device */
122
123static struct scsi_host_template jmicron_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 /* Special handling needed if you have sector or LBA48 limits */
132 .max_sectors = ATA_MAX_SECTORS,
133 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
134 .emulated = ATA_SHT_EMULATED,
135 .use_clustering = ATA_SHT_USE_CLUSTERING,
136 .proc_name = DRV_NAME,
137 .dma_boundary = ATA_DMA_BOUNDARY,
138 .slave_configure = ata_scsi_slave_config,
139 /* Use standard CHS mapping rules */
140 .bios_param = ata_std_bios_param,
141};
142
143static const struct ata_port_operations jmicron_ops = {
144 .port_disable = ata_port_disable,
145
146 /* Task file is PCI ATA format, use helpers */
147 .tf_load = ata_tf_load,
148 .tf_read = ata_tf_read,
149 .check_status = ata_check_status,
150 .exec_command = ata_exec_command,
151 .dev_select = ata_std_dev_select,
152
153 .freeze = ata_bmdma_freeze,
154 .thaw = ata_bmdma_thaw,
155 .error_handler = jmicron_error_handler,
156 .post_internal_cmd = ata_bmdma_post_internal_cmd,
157
158 /* BMDMA handling is PCI ATA format, use helpers */
159 .bmdma_setup = ata_bmdma_setup,
160 .bmdma_start = ata_bmdma_start,
161 .bmdma_stop = ata_bmdma_stop,
162 .bmdma_status = ata_bmdma_status,
163 .qc_prep = ata_qc_prep,
164 .qc_issue = ata_qc_issue_prot,
165 .data_xfer = ata_pio_data_xfer,
166
167 /* Timeout handling. Special recovery hooks here */
168 .eng_timeout = ata_eng_timeout,
169 .irq_handler = ata_interrupt,
170 .irq_clear = ata_bmdma_irq_clear,
171
172 /* Generic PATA PCI ATA helpers */
173 .port_start = ata_port_start,
174 .port_stop = ata_port_stop,
175 .host_stop = ata_host_stop,
176};
177
178
179/**
180 * jmicron_init_one - Register Jmicron ATA PCI device with kernel services
181 * @pdev: PCI device to register
182 * @ent: Entry in jmicron_pci_tbl matching with @pdev
183 *
184 * Called from kernel PCI layer.
185 *
186 * LOCKING:
187 * Inherited from PCI layer (may sleep).
188 *
189 * RETURNS:
190 * Zero on success, or -ERRNO value.
191 */
192
193static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
194{
195 static struct ata_port_info info = {
196 .sht = &jmicron_sht,
197 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
198
199 .pio_mask = 0x1f,
200 .mwdma_mask = 0x07,
201 .udma_mask = 0x3f,
202
203 .port_ops = &jmicron_ops,
204 };
205 struct ata_port_info *port_info[2] = { &info, &info };
206
207 u32 reg;
208
209 if (id->driver_data != 368) {
210 /* Put the controller into AHCI mode in case the AHCI driver
211 has not yet been loaded. This can be done with either
212 function present */
213
214 /* FIXME: We may want a way to override this in future */
215 pci_write_config_byte(pdev, 0x41, 0xa1);
216 }
217
218 /* PATA controller is fn 1, AHCI is fn 0 */
219 if (PCI_FUNC(pdev->devfn) != 1)
220 return -ENODEV;
221
222	if (id->driver_data == 365 || id->driver_data == 366) {
223 /* The 365/66 have two PATA channels, redirect the second */
224 pci_read_config_dword(pdev, 0x80, &reg);
225 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
226 pci_write_config_dword(pdev, 0x80, reg);
227 }
228
229 return ata_pci_init_one(pdev, port_info, 2);
230}
231
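/* driver_data carries the JMicron model number (361-368);
   jmicron_init_one above uses it to pick the per-chip setup */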
232static const struct pci_device_id jmicron_pci_tbl[] = {
233 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
234 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
235 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365},
236 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366},
237 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368},
238 { } /* terminate list */
239};
240
241static struct pci_driver jmicron_pci_driver = {
242 .name = DRV_NAME,
243 .id_table = jmicron_pci_tbl,
244 .probe = jmicron_init_one,
245 .remove = ata_pci_remove_one,
246};
247
248static int __init jmicron_init(void)
249{
250 return pci_register_driver(&jmicron_pci_driver);
251}
252
253static void __exit jmicron_exit(void)
254{
255 pci_unregister_driver(&jmicron_pci_driver);
256}
257
258module_init(jmicron_init);
259module_exit(jmicron_exit);
260
261MODULE_AUTHOR("Alan Cox");
262MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
263MODULE_LICENSE("GPL");
264MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
265MODULE_VERSION(DRV_VERSION);
266
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
new file mode 100644
index 000000000000..ad37c220bb2c
--- /dev/null
+++ b/drivers/ata/pata_legacy.c
@@ -0,0 +1,949 @@
1/*
2 * pata-legacy.c - Legacy port PATA/SATA controller driver.
3 * Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * An ATA driver for the legacy ATA ports.
20 *
21 * Data Sources:
22 * Opti 82C465/82C611 support: Data sheets at opti-inc.com
23 * HT6560 series:
24 * Promise 20230/20620:
25 * http://www.ryston.cz/petr/vlb/pdc20230b.html
26 * http://www.ryston.cz/petr/vlb/pdc20230c.html
27 * http://www.ryston.cz/petr/vlb/pdc20630.html
28 *
29 * Unsupported but docs exist:
30 * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
31 * Winbond W83759A
32 *
33 * This driver handles legacy (that is "ISA/VLB side") IDE ports found
34 * on PC class systems. There are three hybrid devices that are exceptions
35 * The Cyrix 5510/5520 where a pre SFF ATA device is on the bridge and
36 * the MPIIX where the tuning is PCI side but the IDE is "ISA side".
37 *
38 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
39 * opti82c465mv/promise 20230c/20630
40 *
41 * Use the autospeed and pio_mask options with:
42 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
43 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
44 * Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
45 * Winbond W83759A, Promise PDC20230-B
46 *
47 * For now use autospeed and pio_mask as above with the W83759A. This may
48 * change.
49 *
50 * TODO
51 * Merge existing pata_qdi driver
52 *
53 */
54
55#include <linux/kernel.h>
56#include <linux/module.h>
57#include <linux/pci.h>
58#include <linux/init.h>
59#include <linux/blkdev.h>
60#include <linux/delay.h>
61#include <scsi/scsi_host.h>
62#include <linux/ata.h>
63#include <linux/libata.h>
64#include <linux/platform_device.h>
65
66#define DRV_NAME "pata_legacy"
67#define DRV_VERSION "0.5.3"
68
69#define NR_HOST 6
70
71static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
72static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
73
74struct legacy_data {
75 unsigned long timing;
76 u8 clock[2];
77 u8 last;
78 int fast;
79 struct platform_device *platform_dev;
80
81};
82
83static struct legacy_data legacy_data[NR_HOST];
84static struct ata_host *legacy_host[NR_HOST];
85static int nr_legacy_host;
86
87
88static int probe_all; /* Set to check all ISA port ranges */
89static int ht6560a; /* HT 6560A on primary 1, secondary 2, both 3 */
90 static int ht6560b;		/* HT 6560B on primary 1, secondary 2, both 3 */
91static int opti82c611a; /* Opti82c611A on primary 1, secondary 2, both 3 */
92static int opti82c46x; /* Opti 82c465MV present (pri/sec autodetect) */
93static int autospeed; /* Chip present which snoops speed changes */
94static int pio_mask = 0x1F; /* PIO range for autospeed devices */
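/*
 * The chip flags above are intended as module options (see the header
 * comment) and take a channel bitmask: 1 = primary, 2 = secondary,
 * 3 = both. A hypothetical invocation: "modprobe pata_legacy ht6560a=1".
 */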
95
96/**
97 * legacy_set_mode - mode setting
98 * @ap: IDE interface
99 *
100 * Use a non standard set_mode function. We don't want to be tuned.
101 *
102 * The BIOS configured everything. Our job is not to fiddle. Just use
103 * whatever PIO the hardware is using and leave it at that. When we
104 * get some kind of nice user driven API for control then we can
105 * expand on this as per hdparm in the base kernel.
106 */
107
108static void legacy_set_mode(struct ata_port *ap)
109{
110 int i;
111
112 for (i = 0; i < ATA_MAX_DEVICES; i++) {
113 struct ata_device *dev = &ap->device[i];
114 if (ata_dev_enabled(dev)) {
115 dev->pio_mode = XFER_PIO_0;
116 dev->xfer_mode = XFER_PIO_0;
117 dev->xfer_shift = ATA_SHIFT_PIO;
118 dev->flags |= ATA_DFLAG_PIO;
119 }
120 }
121}
122
123static struct scsi_host_template legacy_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING,
135 .proc_name = DRV_NAME,
136 .dma_boundary = ATA_DMA_BOUNDARY,
137 .slave_configure = ata_scsi_slave_config,
138 .bios_param = ata_std_bios_param,
139};
140
141/*
142 * These ops are used if the user indicates the hardware
143 * snoops the commands to decide on the mode and handles the
144 * mode selection "magically" itself. Several legacy controllers
145 * do this. The mode range can be set if it is not 0x1F by setting
146 * pio_mask as well.
147 */
148
149static struct ata_port_operations simple_port_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .exec_command = ata_exec_command,
155 .dev_select = ata_std_dev_select,
156
157 .freeze = ata_bmdma_freeze,
158 .thaw = ata_bmdma_thaw,
159 .error_handler = ata_bmdma_error_handler,
160 .post_internal_cmd = ata_bmdma_post_internal_cmd,
161
162 .qc_prep = ata_qc_prep,
163 .qc_issue = ata_qc_issue_prot,
164 .eng_timeout = ata_eng_timeout,
165 .data_xfer = ata_pio_data_xfer_noirq,
166
167 .irq_handler = ata_interrupt,
168 .irq_clear = ata_bmdma_irq_clear,
169
170 .port_start = ata_port_start,
171 .port_stop = ata_port_stop,
172 .host_stop = ata_host_stop
173};
174
175static struct ata_port_operations legacy_port_ops = {
176 .set_mode = legacy_set_mode,
177
178 .port_disable = ata_port_disable,
179 .tf_load = ata_tf_load,
180 .tf_read = ata_tf_read,
181 .check_status = ata_check_status,
182 .exec_command = ata_exec_command,
183 .dev_select = ata_std_dev_select,
184
185 .error_handler = ata_bmdma_error_handler,
186
187 .qc_prep = ata_qc_prep,
188 .qc_issue = ata_qc_issue_prot,
189 .eng_timeout = ata_eng_timeout,
190 .data_xfer = ata_pio_data_xfer_noirq,
191
192 .irq_handler = ata_interrupt,
193 .irq_clear = ata_bmdma_irq_clear,
194
195 .port_start = ata_port_start,
196 .port_stop = ata_port_stop,
197 .host_stop = ata_host_stop
198};
199
200/*
201 * Promise 20230C and 20620 support
202 *
203 * This controller supports PIO0 to PIO2. We set PIO timings conservatively to
204 * allow for 50MHz Vesa Local Bus. The 20620's DMA support is weird: data is
205 * DMA'd to the controller but PIO'd to the host, so it is not supported.
206 */
207
208static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
209{
210 int tries = 5;
211 int pio = adev->pio_mode - XFER_PIO_0;
212 u8 rt;
213 unsigned long flags;
214
215 /* Safe as UP only. Force I/Os to occur together */
216
217 local_irq_save(flags);
218
219 /* Unlock the control interface */
220	do {
222 inb(0x1F5);
223 outb(inb(0x1F2) | 0x80, 0x1F2);
224 inb(0x1F2);
225 inb(0x3F6);
226 inb(0x3F6);
227 inb(0x1F2);
228 inb(0x1F2);
229 }
230	while ((inb(0x1F2) & 0x80) && --tries);
231
232 local_irq_restore(flags);
233
234 outb(inb(0x1F4) & 0x07, 0x1F4);
235
236 rt = inb(0x1F3);
237	rt &= ~(0x07 << (3 * adev->devno));	/* clear our drive's 3-bit field */
238	if (pio)
239		rt |= (1 + 3 * pio) << (3 * adev->devno);
	outb(rt, 0x1F3);			/* write the modified rate back */
240
241 udelay(100);
242 outb(inb(0x1F2) | 0x01, 0x1F2);
243 udelay(100);
244 inb(0x1F5);
245
246}
247
248static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
249{
250 struct ata_port *ap = adev->ap;
251 int slop = buflen & 3;
252 unsigned long flags;
253
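	/* "slop" is the 0-3 tail bytes left over after whole 32-bit words;
	   they are padded into one final dword transfer below */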
254 if (ata_id_has_dword_io(adev->id)) {
255 local_irq_save(flags);
256
257 /* Perform the 32bit I/O synchronization sequence */
258 inb(ap->ioaddr.nsect_addr);
259 inb(ap->ioaddr.nsect_addr);
260 inb(ap->ioaddr.nsect_addr);
261
262 /* Now the data */
263
264 if (write_data)
265 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
266 else
267 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
268
269 if (unlikely(slop)) {
270 u32 pad;
271 if (write_data) {
272 memcpy(&pad, buf + buflen - slop, slop);
273 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
274 } else {
275				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
276 memcpy(buf + buflen - slop, &pad, slop);
277 }
278 }
279 local_irq_restore(flags);
280	} else
282 ata_pio_data_xfer_noirq(adev, buf, buflen, write_data);
283}
284
285static struct ata_port_operations pdc20230_port_ops = {
286 .set_piomode = pdc20230_set_piomode,
287
288 .port_disable = ata_port_disable,
289 .tf_load = ata_tf_load,
290 .tf_read = ata_tf_read,
291 .check_status = ata_check_status,
292 .exec_command = ata_exec_command,
293 .dev_select = ata_std_dev_select,
294
295 .error_handler = ata_bmdma_error_handler,
296
297 .qc_prep = ata_qc_prep,
298 .qc_issue = ata_qc_issue_prot,
299 .eng_timeout = ata_eng_timeout,
300 .data_xfer = pdc_data_xfer_vlb,
301
302 .irq_handler = ata_interrupt,
303 .irq_clear = ata_bmdma_irq_clear,
304
305 .port_start = ata_port_start,
306 .port_stop = ata_port_stop,
307 .host_stop = ata_host_stop
308};
309
310/*
311 * Holtek 6560A support
312 *
313 * This controller supports PIO0 to PIO2 (no IORDY even though higher timings
314 * can be loaded).
315 */
316
317static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
318{
319 u8 active, recover;
320 struct ata_timing t;
321
322	/* Get the timing data in cycles. For now play safe at 50MHz */
323 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
324
325 active = FIT(t.active, 2, 15);
326 recover = FIT(t.recover, 4, 15);
327
328 inb(0x3E6);
329 inb(0x3E6);
330 inb(0x3E6);
331 inb(0x3E6);
332
333 outb(recover << 4 | active, ap->ioaddr.device_addr);
334 inb(ap->ioaddr.status_addr);
335}
336
337static struct ata_port_operations ht6560a_port_ops = {
338 .set_piomode = ht6560a_set_piomode,
339
340 .port_disable = ata_port_disable,
341 .tf_load = ata_tf_load,
342 .tf_read = ata_tf_read,
343 .check_status = ata_check_status,
344 .exec_command = ata_exec_command,
345 .dev_select = ata_std_dev_select,
346
347 .error_handler = ata_bmdma_error_handler,
348
349 .qc_prep = ata_qc_prep,
350 .qc_issue = ata_qc_issue_prot,
351 .eng_timeout = ata_eng_timeout,
352 .data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */
353
354 .irq_handler = ata_interrupt,
355 .irq_clear = ata_bmdma_irq_clear,
356
357 .port_start = ata_port_start,
358 .port_stop = ata_port_stop,
359 .host_stop = ata_host_stop
360};
361
362/*
363 * Holtek 6560B support
364 *
365 * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
366 * unless we see an ATAPI device in which case we force it off.
367 *
368 * FIXME: need to implement 2nd channel support.
369 */
370
371static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
372{
373 u8 active, recover;
374 struct ata_timing t;
375
376	/* Get the timing data in cycles. For now play safe at 50MHz */
377 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
378
379 active = FIT(t.active, 2, 15);
380 recover = FIT(t.recover, 2, 16);
381	recover &= 0x0F;	/* a recover of 16 encodes as 0 in the 4-bit field */
382
383 inb(0x3E6);
384 inb(0x3E6);
385 inb(0x3E6);
386 inb(0x3E6);
387
388 outb(recover << 4 | active, ap->ioaddr.device_addr);
389
390 if (adev->class != ATA_DEV_ATA) {
391 u8 rconf = inb(0x3E6);
392 if (rconf & 0x24) {
393 rconf &= ~ 0x24;
394 outb(rconf, 0x3E6);
395 }
396 }
397 inb(ap->ioaddr.status_addr);
398}
399
400static struct ata_port_operations ht6560b_port_ops = {
401 .set_piomode = ht6560b_set_piomode,
402
403 .port_disable = ata_port_disable,
404 .tf_load = ata_tf_load,
405 .tf_read = ata_tf_read,
406 .check_status = ata_check_status,
407 .exec_command = ata_exec_command,
408 .dev_select = ata_std_dev_select,
409
410 .error_handler = ata_bmdma_error_handler,
411
412 .qc_prep = ata_qc_prep,
413 .qc_issue = ata_qc_issue_prot,
414 .eng_timeout = ata_eng_timeout,
415 .data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */
416
417 .irq_handler = ata_interrupt,
418 .irq_clear = ata_bmdma_irq_clear,
419
420 .port_start = ata_port_start,
421 .port_stop = ata_port_stop,
422 .host_stop = ata_host_stop
423};
424
425/*
426 * Opti core chipset helpers
427 */
428
429/**
430 * opti_syscfg - read OPTI chipset configuration
431 * @reg: Configuration register to read
432 *
433 * Returns the value of an OPTI system board configuration register.
434 */
435
436static u8 opti_syscfg(u8 reg)
437{
438 unsigned long flags;
439 u8 r;
440
441	/* Uniprocessor chipset; we must force the two cycles to be adjacent */
442 local_irq_save(flags);
443 outb(reg, 0x22);
444 r = inb(0x24);
445 local_irq_restore(flags);
446 return r;
447}
448
449/*
450 * Opti 82C611A
451 *
452 * This controller supports PIO0 to PIO3.
453 */
454
455static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
456{
457 u8 active, recover, setup;
458 struct ata_timing t;
459 struct ata_device *pair = ata_dev_pair(adev);
460 int clock;
461 int khz[4] = { 50000, 40000, 33000, 25000 };
462 u8 rc;
463
464 /* Enter configuration mode */
465 inw(ap->ioaddr.error_addr);
466 inw(ap->ioaddr.error_addr);
467 outb(3, ap->ioaddr.nsect_addr);
468
469 /* Read VLB clock strapping */
470 clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03];
471
472 /* Get the timing data in cycles */
473 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
474
475 /* Setup timing is shared */
476 if (pair) {
477 struct ata_timing tp;
478 ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
479
480 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
481 }
482
483 active = FIT(t.active, 2, 17) - 2;
484 recover = FIT(t.recover, 1, 16) - 1;
485 setup = FIT(t.setup, 1, 4) - 1;
486
487 /* Select the right timing bank for write timing */
488 rc = inb(ap->ioaddr.lbal_addr);
489 rc &= 0x7F;
490 rc |= (adev->devno << 7);
491 outb(rc, ap->ioaddr.lbal_addr);
492
493 /* Write the timings */
494 outb(active << 4 | recover, ap->ioaddr.error_addr);
495
496 /* Select the right bank for read timings, also
497 load the shared timings for address */
498 rc = inb(ap->ioaddr.device_addr);
499 rc &= 0xC0;
500 rc |= adev->devno; /* Index select */
501 rc |= (setup << 4) | 0x04;
502 outb(rc, ap->ioaddr.device_addr);
503
504 /* Load the read timings */
505 outb(active << 4 | recover, ap->ioaddr.data_addr);
506
507 /* Ensure the timing register mode is right */
508	rc = inb(ap->ioaddr.lbal_addr);
509 rc &= 0x73;
510 rc |= 0x84;
511 outb(rc, ap->ioaddr.lbal_addr);
512
513 /* Exit command mode */
514 outb(0x83, ap->ioaddr.nsect_addr);
515}
516
517
518static struct ata_port_operations opti82c611a_port_ops = {
519 .set_piomode = opti82c611a_set_piomode,
520
521 .port_disable = ata_port_disable,
522 .tf_load = ata_tf_load,
523 .tf_read = ata_tf_read,
524 .check_status = ata_check_status,
525 .exec_command = ata_exec_command,
526 .dev_select = ata_std_dev_select,
527
528 .error_handler = ata_bmdma_error_handler,
529
530 .qc_prep = ata_qc_prep,
531 .qc_issue = ata_qc_issue_prot,
532 .eng_timeout = ata_eng_timeout,
533 .data_xfer = ata_pio_data_xfer,
534
535 .irq_handler = ata_interrupt,
536 .irq_clear = ata_bmdma_irq_clear,
537
538 .port_start = ata_port_start,
539 .port_stop = ata_port_stop,
540 .host_stop = ata_host_stop
541};
542
543/*
544 * Opti 82C465MV
545 *
546 * This controller supports PIO0 to PIO3. Unlike the 611A the MVB
547 * version is dual channel but doesn't have a lot of unique registers.
548 */
549
550static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
551{
552 u8 active, recover, setup;
553 struct ata_timing t;
554 struct ata_device *pair = ata_dev_pair(adev);
555 int clock;
556 int khz[4] = { 50000, 40000, 33000, 25000 };
557 u8 rc;
558 u8 sysclk;
559
560 /* Get the clock */
561 sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */
562
563 /* Enter configuration mode */
564 inw(ap->ioaddr.error_addr);
565 inw(ap->ioaddr.error_addr);
566 outb(3, ap->ioaddr.nsect_addr);
567
568 /* Read VLB clock strapping */
569 clock = 1000000000 / khz[sysclk];
570
571 /* Get the timing data in cycles */
572 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
573
574 /* Setup timing is shared */
575 if (pair) {
576 struct ata_timing tp;
577 ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
578
579 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
580 }
581
582 active = FIT(t.active, 2, 17) - 2;
583 recover = FIT(t.recover, 1, 16) - 1;
584 setup = FIT(t.setup, 1, 4) - 1;
585
586 /* Select the right timing bank for write timing */
587 rc = inb(ap->ioaddr.lbal_addr);
588 rc &= 0x7F;
589 rc |= (adev->devno << 7);
590 outb(rc, ap->ioaddr.lbal_addr);
591
592 /* Write the timings */
593 outb(active << 4 | recover, ap->ioaddr.error_addr);
594
595 /* Select the right bank for read timings, also
596 load the shared timings for address */
597 rc = inb(ap->ioaddr.device_addr);
598 rc &= 0xC0;
599 rc |= adev->devno; /* Index select */
600 rc |= (setup << 4) | 0x04;
601 outb(rc, ap->ioaddr.device_addr);
602
603 /* Load the read timings */
604 outb(active << 4 | recover, ap->ioaddr.data_addr);
605
606 /* Ensure the timing register mode is right */
607	rc = inb(ap->ioaddr.lbal_addr);
608 rc &= 0x73;
609 rc |= 0x84;
610 outb(rc, ap->ioaddr.lbal_addr);
611
612 /* Exit command mode */
613 outb(0x83, ap->ioaddr.nsect_addr);
614
615	/* Remember which port last loaded timings; we need this for the
	   quad device MVB */
616 ap->host->private_data = ap;
617}
618
619/**
620 * opt82c465mv_qc_issue_prot - command issue
621 * @qc: command pending
622 *
623 * Called when the libata layer is about to issue a command. We wrap
624 * this interface so that we can load the correct ATA timings. The
625 * MVB has a single set of timing registers and these are shared
626 * across channels. As there are two registers we really ought to
627 * track the last two used values as a sort of register window. For
628 * now we just reload on a channel switch. On the single channel
629 * setup this condition never fires so we do nothing extra.
630 *
631 * FIXME: dual channel needs ->serialize support
632 */
633
634static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
635{
636 struct ata_port *ap = qc->ap;
637 struct ata_device *adev = qc->dev;
638
639 /* If timings are set and for the wrong channel (2nd test is
640 due to a libata shortcoming and will eventually go I hope) */
641	if (ap->host->private_data != ap
642 && ap->host->private_data != NULL)
643 opti82c46x_set_piomode(ap, adev);
644
645 return ata_qc_issue_prot(qc);
646}
647
648static struct ata_port_operations opti82c46x_port_ops = {
649 .set_piomode = opti82c46x_set_piomode,
650
651 .port_disable = ata_port_disable,
652 .tf_load = ata_tf_load,
653 .tf_read = ata_tf_read,
654 .check_status = ata_check_status,
655 .exec_command = ata_exec_command,
656 .dev_select = ata_std_dev_select,
657
658 .error_handler = ata_bmdma_error_handler,
659
660 .qc_prep = ata_qc_prep,
661 .qc_issue = opti82c46x_qc_issue_prot,
662 .eng_timeout = ata_eng_timeout,
663 .data_xfer = ata_pio_data_xfer,
664
665 .irq_handler = ata_interrupt,
666 .irq_clear = ata_bmdma_irq_clear,
667
668 .port_start = ata_port_start,
669 .port_stop = ata_port_stop,
670 .host_stop = ata_host_stop
671};
672
673
674/**
675 * legacy_init_one - attach a legacy interface
676 * @port: port number
677 * @io: I/O port start
678 * @ctrl: control port
679 * @irq: interrupt line
680 *
681 * Register an ISA bus IDE interface. Such interfaces are PIO-only and
682 * are assumed not to support IRQ sharing.
683 */
684
685static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
686{
687 struct legacy_data *ld = &legacy_data[nr_legacy_host];
688 struct ata_probe_ent ae;
689 struct platform_device *pdev;
690 int ret = -EBUSY;
691 struct ata_port_operations *ops = &legacy_port_ops;
692 int pio_modes = pio_mask;
693 u32 mask = (1 << port);
694
695 if (request_region(io, 8, "pata_legacy") == NULL)
696 return -EBUSY;
697 if (request_region(ctrl, 1, "pata_legacy") == NULL)
698 goto fail_io;
699
700 pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
701 if (pdev == NULL)
702 goto fail_dev;
703
704 if (ht6560a & mask) {
705 ops = &ht6560a_port_ops;
706 pio_modes = 0x07;
707 }
708 if (ht6560b & mask) {
709 ops = &ht6560b_port_ops;
710 pio_modes = 0x1F;
711 }
712 if (opti82c611a & mask) {
713 ops = &opti82c611a_port_ops;
714 pio_modes = 0x0F;
715 }
716 if (opti82c46x & mask) {
717 ops = &opti82c46x_port_ops;
718 pio_modes = 0x0F;
719 }
720
721 /* Probe for automatically detectable controllers */
722
723 if (io == 0x1F0 && ops == &legacy_port_ops) {
724 unsigned long flags;
725
726 local_irq_save(flags);
727
728 /* Probes */
729 inb(0x1F5);
730 outb(inb(0x1F2) | 0x80, 0x1F2);
731 inb(0x1F2);
732 inb(0x3F6);
733 inb(0x3F6);
734 inb(0x1F2);
735 inb(0x1F2);
736
737 if ((inb(0x1F2) & 0x80) == 0) {
738 /* PDC20230c or 20630 ? */
739 printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
740 pio_modes = 0x07;
741 ops = &pdc20230_port_ops;
742 udelay(100);
743 inb(0x1F5);
744 } else {
745 outb(0x55, 0x1F2);
746 inb(0x1F2);
747 inb(0x1F2);
748 if (inb(0x1F2) == 0x00) {
749 printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
750 }
751 }
752 local_irq_restore(flags);
753 }
754
755
756 /* Chip does mode setting by command snooping */
757 if (ops == &legacy_port_ops && (autospeed & mask))
758 ops = &simple_port_ops;
759 memset(&ae, 0, sizeof(struct ata_probe_ent));
760 INIT_LIST_HEAD(&ae.node);
761 ae.dev = &pdev->dev;
762 ae.port_ops = ops;
763 ae.sht = &legacy_sht;
764 ae.n_ports = 1;
765 ae.pio_mask = pio_modes;
766 ae.irq = irq;
767 ae.irq_flags = 0;
768 ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
769 ae.port[0].cmd_addr = io;
770 ae.port[0].altstatus_addr = ctrl;
771 ae.port[0].ctl_addr = ctrl;
772 ata_std_ports(&ae.port[0]);
773 ae.private_data = ld;
774
775 ret = ata_device_add(&ae);
776 if (ret == 0) {
777 ret = -ENODEV;
778 goto fail;
779 }
780 legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
781 ld->platform_dev = pdev;
782 return 0;
783
784fail:
785 platform_device_unregister(pdev);
786fail_dev:
787 release_region(ctrl, 1);
788fail_io:
789 release_region(io, 8);
790 return ret;
791}
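
/*
 * A user-space model of the error unwind above: acquire resources in
 * order, and on failure jump to the label that releases everything
 * acquired so far, in reverse order. The malloc() calls merely stand
 * in for request_region()/platform_device_register_simple().
 */
#include <stdio.h>
#include <stdlib.h>

static int init_one_model(int fail_at)
{
	void *io = NULL, *ctrl = NULL, *pdev = NULL;

	if (fail_at == 0 || (io = malloc(8)) == NULL)
		return -1;
	if (fail_at == 1 || (ctrl = malloc(1)) == NULL)
		goto fail_io;
	if (fail_at == 2 || (pdev = malloc(16)) == NULL)
		goto fail_dev;

	free(pdev);		/* normal teardown */
	free(ctrl);
	free(io);
	return 0;		/* registered */

fail_dev:
	free(ctrl);
fail_io:
	free(io);
	return -1;
}

int main(void)
{
	for (int i = 0; i <= 3; i++)
		printf("fail_at=%d -> %d\n", i, init_one_model(i));
	return 0;
}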
792
793/**
794 * legacy_check_special_cases - ATA special cases
795 * @p: PCI device to check
796 * @primary: set this if we find an ATA primary
797 * @secondary: set this if we find an ATA secondary
798 *
799 * A small number of vendors implemented early PCI ATA interfaces on bridge logic
800 * without the ATA interface being PCI visible. Where we have a matching PCI driver
801 * we must skip the relevant device here. If we don't know about it then the legacy
802 * driver is the right driver anyway.
803 */
804
805static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
806{
807 /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
808 if (p->vendor == 0x1078 && p->device == 0x0000) {
809 *primary = *secondary = 1;
810 return;
811 }
812 /* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
813 if (p->vendor == 0x1078 && p->device == 0x0002) {
814 *primary = *secondary = 1;
815 return;
816 }
817 /* Intel MPIIX - PIO ATA on non PCI side of bridge */
818 if (p->vendor == 0x8086 && p->device == 0x1234) {
819 u16 r;
820 pci_read_config_word(p, 0x6C, &r);
821 if (r & 0x8000) { /* ATA port enabled */
822 if (r & 0x4000)
823 *secondary = 1;
824 else
825 *primary = 1;
826 }
827 return;
828 }
829}
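
/*
 * A standalone decode of the MPIIX test above: in config word 0x6C,
 * bit 15 says the ATA port is enabled at all and bit 14 selects the
 * secondary legacy channel. The sample register values are made up.
 */
#include <stdio.h>

int main(void)
{
	const unsigned short samples[] = { 0x0000, 0x8000, 0xC000 };

	for (int i = 0; i < 3; i++) {
		unsigned short r = samples[i];
		int primary = 0, secondary = 0;

		if (r & 0x8000) {	/* ATA port enabled */
			if (r & 0x4000)
				secondary = 1;
			else
				primary = 1;
		}
		printf("0x%04X -> primary=%d secondary=%d\n",
		       r, primary, secondary);
	}
	return 0;
}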
830
831
832/**
833 * legacy_init - attach legacy interfaces
834 *
835 * Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
836 * Right now we do not scan the ide0 and ide1 addresses but should do so
837 * for non PCI systems or systems with no PCI IDE legacy mode devices.
838 * If you fix that note there are special cases to consider like VLB
839 * drivers and CS5510/20.
840 */
841
842static __init int legacy_init(void)
843{
844 int i;
845 int ct = 0;
846 int primary = 0;
847 int secondary = 0;
848 int last_port = NR_HOST;
849
850 struct pci_dev *p = NULL;
851
852 for_each_pci_dev(p) {
853 int r;
854 /* Check for any overlap of the system ATA mappings. Native mode controllers
855 stuck on these addresses or some devices in 'raid' mode won't be found by
856 the storage class test */
857 for (r = 0; r < 6; r++) {
858 if (pci_resource_start(p, r) == 0x1f0)
859 primary = 1;
860 if (pci_resource_start(p, r) == 0x170)
861 secondary = 1;
862 }
863 /* Check for special cases */
864 legacy_check_special_cases(p, &primary, &secondary);
865
866 /* If PCI bus is present then don't probe for tertiary legacy ports */
867 if (probe_all == 0)
868 last_port = 2;
869 }
870
871 /* If an OPTI 82C46X is present find out where the channels are */
872 if (opti82c46x) {
873 static const char *optis[4] = {
874 "3/463MV", "5MV",
875 "5MVA", "5MVB"
876 };
877 u8 chans = 1;
878 u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
879
880 opti82c46x = 3; /* Assume master and slave first */
881 printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
882 if (ctrl == 3)
883 chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
884 ctrl = opti_syscfg(0xAC);
885 /* Check enabled and this port is the 465MV port. On the
886 MVB we may have two channels */
887 if (ctrl & 8) {
888 if (ctrl & 4)
889 opti82c46x = 2; /* Slave */
890 else
891 opti82c46x = 1; /* Master */
892 if (chans == 2)
893 opti82c46x = 3; /* Master and Slave */
894 } /* Slave only */
895 else if (chans == 1)
896 opti82c46x = 1;
897 }
898
899 for (i = 0; i < last_port; i++) {
900 /* Skip primary if we have seen a PCI one */
901 if (i == 0 && primary == 1)
902 continue;
903 /* Skip secondary if we have seen a PCI one */
904 if (i == 1 && secondary == 1)
905 continue;
906 if (legacy_init_one(i, legacy_port[i],
907 legacy_port[i] + 0x0206,
908 legacy_irq[i]) == 0)
909 ct++;
910 }
911 if (ct != 0)
912 return 0;
913 return -ENODEV;
914}
915
916static __exit void legacy_exit(void)
917{
918 int i;
919
920 for (i = 0; i < nr_legacy_host; i++) {
921 struct legacy_data *ld = &legacy_data[i];
922 struct ata_port *ap = legacy_host[i]->ports[0];
923 unsigned long io = ap->ioaddr.cmd_addr;
924 unsigned long ctrl = ap->ioaddr.ctl_addr;
925 ata_host_remove(legacy_host[i]);
926 platform_device_unregister(ld->platform_dev);
927 if (ld->timing)
928 release_region(ld->timing, 2);
929 release_region(io, 8);
930 release_region(ctrl, 1);
931 }
932}
933
934MODULE_AUTHOR("Alan Cox");
935MODULE_DESCRIPTION("low-level driver for legacy ATA");
936MODULE_LICENSE("GPL");
937MODULE_VERSION(DRV_VERSION);
938
939module_param(probe_all, int, 0);
940module_param(autospeed, int, 0);
941module_param(ht6560a, int, 0);
942module_param(ht6560b, int, 0);
943module_param(opti82c611a, int, 0);
944module_param(opti82c46x, int, 0);
945module_param(pio_mask, int, 0);
946
947module_init(legacy_init);
948module_exit(legacy_exit);
949
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
new file mode 100644
index 000000000000..1958c4ed09a8
--- /dev/null
+++ b/drivers/ata/pata_mpiix.c
@@ -0,0 +1,313 @@
1/*
2 * pata_mpiix.c - Intel MPIIX PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * The MPIIX is different enough to the PIIX4 and friends that we give it
7 * a separate driver. The old ide/pci code handles this by just not tuning
8 * MPIIX at all.
9 *
10 * The MPIIX also differs in another important way from the majority of PIIX
11 * devices. The chip is a bridge (pardon the pun) between the old world of
12 * ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
13 * IDE controller is not decoded in PCI space and the chip does not claim to
14 * be IDE class PCI. This requires slightly non-standard probe logic compared
15 * with PCI IDE and also that we do not disable the device when our driver is
16 * unloaded (as it has many other functions).
17 *
18 * The driver consciously keeps this logic internally to avoid pushing quirky
19 * PATA history into the clean libata layer.
20 *
21 * Thinkpad specific note: If you boot an MPIIX-based Thinkpad with a PCMCIA
22 * hard disk present this driver will not detect it. This is not a bug. In this
23 * configuration the secondary port of the MPIIX is disabled and the addresses
24 * are decoded by the PCMCIA bridge, and are therefore left for a generic IDE
25 * driver to operate.
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <scsi/scsi_host.h>
35#include <linux/libata.h>
36
37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.1"
39
40enum {
41 IDETIM = 0x6C, /* IDE timing register */
42 IORDY = (1 << 1),
43 PPE = (1 << 2),
44 FTIM = (1 << 0),
45 ENABLED = (1 << 15),
46 SECONDARY = (1 << 14)
47};
48
49static int mpiix_pre_reset(struct ata_port *ap)
50{
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 static const struct pci_bits mpiix_enable_bits[] = {
53 { 0x6D, 1, 0x80, 0x80 },
54 { 0x6F, 1, 0x80, 0x80 }
55 };
56
57 if (!pci_test_config_bits(pdev, &mpiix_enable_bits[ap->port_no])) {
58 ata_port_disable(ap);
59 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
60 return 0;
61 }
62 ap->cbl = ATA_CBL_PATA40;
63 return ata_std_prereset(ap);
64}
65
66/**
67 * mpiix_error_handler - probe reset
68 * @ap: ATA port
69 *
70 * Perform the ATA probe and bus reset sequence plus specific handling
71 * for this hardware. The MPIIX has the enable bits in a different place
72 * to PIIX4 and friends. As a pure PIO device it has no cable detect
73 */
74
75static void mpiix_error_handler(struct ata_port *ap)
76{
77 ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
78}
79
80/**
81 * mpiix_set_piomode - set initial PIO mode data
82 * @ap: ATA interface
83 * @adev: ATA device
84 *
85 * Called to do the PIO mode setup. The MPIIX allows us to program the
86 * IORDY sample point (2-5 clocks), recovery 1-4 clocks and whether
87 * prefetching or iordy are used.
88 *
89 * This would get very ugly because we can only program timing for one
90 * device at a time; the other gets PIO0. Fortunately libata calls
91 * our qc_issue_prot command before a command is issued so we can
92 * flip the timings back and forth to reduce the pain.
93 */
94
95static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
96{
97 int control = 0;
98 int pio = adev->pio_mode - XFER_PIO_0;
99 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
100 u16 idetim;
101 static const /* ISP RTC */
102 u8 timings[][2] = { { 0, 0 },
103 { 0, 0 },
104 { 1, 0 },
105 { 2, 1 },
106 { 2, 3 }, };
107
108 pci_read_config_word(pdev, IDETIM, &idetim);
109 /* Mask the IORDY/TIME/PPE0 bank for this device */
110 if (adev->class == ATA_DEV_ATA)
111 control |= PPE; /* PPE enable for disk */
112 if (ata_pio_need_iordy(adev))
113 control |= IORDY; /* IORDY */
114 if (pio > 0)
115 control |= FTIM; /* This drive is on the fast timing bank */
116
117 /* Mask out timing and clear both TIME bank selects */
118 idetim &= 0xCCEE;
119 idetim &= ~(0x07 << (2 * adev->devno));
120 idetim |= (control << (2 * adev->devno));
121
122 idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
123 pci_write_config_word(pdev, IDETIM, idetim);
124
125 /* We use ap->private_data as a pointer to the device currently
126 loaded for timing */
127 ap->private_data = adev;
128}
129
130/**
131 * mpiix_qc_issue_prot - command issue
132 * @qc: command pending
133 *
134 * Called when the libata layer is about to issue a command. We wrap
135 * this interface so that we can load the correct ATA timings if
136 * necessary. Our logic also clears TIME0/TIME1 for the other device so
137 * that, even if we get this wrong, cycles to the other device will
138 * be made PIO0.
139 */
140
141static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc)
142{
143 struct ata_port *ap = qc->ap;
144 struct ata_device *adev = qc->dev;
145
146 /* If modes have been configured and the channel data is not loaded
147 then load it. We have to check if pio_mode is set as the core code
148 does not set adev->pio_mode to XFER_PIO_0 while probing as would be
149 logical */
150
151 if (adev->pio_mode && adev != ap->private_data)
152 mpiix_set_piomode(ap, adev);
153
154 return ata_qc_issue_prot(qc);
155}
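
/*
 * The reload-on-switch idiom above, modelled standalone: remember
 * which device the shared timing registers were last programmed for
 * (the driver uses ap->private_data) and reprogram only when a
 * command targets a different, already-configured device.
 */
#include <stdio.h>

struct dev {
	int id;
	int pio_mode;			/* 0 = not yet configured */
};

static const struct dev *loaded;	/* last device programmed */

static void load_timings(const struct dev *d)
{
	printf("reloading shared timings for device %d\n", d->id);
	loaded = d;
}

static void issue(const struct dev *d)
{
	if (d->pio_mode && d != loaded)
		load_timings(d);
	printf("command issued to device %d\n", d->id);
}

int main(void)
{
	struct dev master = { 0, 3 }, slave = { 1, 4 };

	issue(&master);		/* loads timings */
	issue(&master);		/* no reload */
	issue(&slave);		/* switches the bank */
	return 0;
}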
156
157static struct scsi_host_template mpiix_sht = {
158 .module = THIS_MODULE,
159 .name = DRV_NAME,
160 .ioctl = ata_scsi_ioctl,
161 .queuecommand = ata_scsi_queuecmd,
162 .can_queue = ATA_DEF_QUEUE,
163 .this_id = ATA_SHT_THIS_ID,
164 .sg_tablesize = LIBATA_MAX_PRD,
165 .max_sectors = ATA_MAX_SECTORS,
166 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
167 .emulated = ATA_SHT_EMULATED,
168 .use_clustering = ATA_SHT_USE_CLUSTERING,
169 .proc_name = DRV_NAME,
170 .dma_boundary = ATA_DMA_BOUNDARY,
171 .slave_configure = ata_scsi_slave_config,
172 .bios_param = ata_std_bios_param,
173};
174
175static struct ata_port_operations mpiix_port_ops = {
176 .port_disable = ata_port_disable,
177 .set_piomode = mpiix_set_piomode,
178
179 .tf_load = ata_tf_load,
180 .tf_read = ata_tf_read,
181 .check_status = ata_check_status,
182 .exec_command = ata_exec_command,
183 .dev_select = ata_std_dev_select,
184
185 .freeze = ata_bmdma_freeze,
186 .thaw = ata_bmdma_thaw,
187 .error_handler = mpiix_error_handler,
188 .post_internal_cmd = ata_bmdma_post_internal_cmd,
189
190 .qc_prep = ata_qc_prep,
191 .qc_issue = mpiix_qc_issue_prot,
192 .data_xfer = ata_pio_data_xfer,
193
194 .irq_handler = ata_interrupt,
195 .irq_clear = ata_bmdma_irq_clear,
196
197 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_host_stop
200};
201
202static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
203{
204 /* Single threaded by the PCI probe logic */
205 static struct ata_probe_ent probe[2];
206 static int printed_version;
207 u16 idetim;
208 int enabled;
209
210 if (!printed_version++)
211 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
212
213 /* MPIIX has many functions which can be turned on or off according
214 to other devices present. Make sure IDE is enabled before we try
215 and use it */
216
217 pci_read_config_word(dev, IDETIM, &idetim);
218 if (!(idetim & ENABLED))
219 return -ENODEV;
220
221 /* We do our own plumbing to avoid leaking special cases for whacko
222 ancient hardware into the core code. There are two issues to
223 worry about. #1 The chip is a bridge, so in legacy mode with no
224 BARs set it fools the standard setup. #2 If you pci_disable_device
225 the MPIIX your box goes castors up */
226
227 INIT_LIST_HEAD(&probe[0].node);
228 probe[0].dev = pci_dev_to_dev(dev);
229 probe[0].port_ops = &mpiix_port_ops;
230 probe[0].sht = &mpiix_sht;
231 probe[0].pio_mask = 0x1F;
232 probe[0].irq = 14;
233 probe[0].irq_flags = SA_SHIRQ;
234 probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
235 probe[0].n_ports = 1;
236 probe[0].port[0].cmd_addr = 0x1F0;
237 probe[0].port[0].ctl_addr = 0x3F6;
238 probe[0].port[0].altstatus_addr = 0x3F6;
239
240 /* The secondary lurks at different addresses but is otherwise
241 the same beastie */
242
243 INIT_LIST_HEAD(&probe[1].node);
244 probe[1] = probe[0];
245 probe[1].irq = 15;
246 probe[1].port[0].cmd_addr = 0x170;
247 probe[1].port[0].ctl_addr = 0x376;
248 probe[1].port[0].altstatus_addr = 0x376;
249
250 /* Let libata fill in the port details */
251 ata_std_ports(&probe[0].port[0]);
252 ata_std_ports(&probe[1].port[0]);
253
254 /* Now add the port that is active */
255 enabled = (idetim & SECONDARY) ? 1 : 0;
256
257 if (ata_device_add(&probe[enabled]))
258 return 0;
259 return -ENODEV;
260}
261
262/**
263 * mpiix_remove_one - device unload
264 * @pdev: PCI device being removed
265 *
266 * Handle an unplug/unload event for a PCI device. Unload the
267 * PCI driver but do not use the default handler as we *MUST NOT*
268 * disable the device as it has other functions.
269 */
270
271static void __devexit mpiix_remove_one(struct pci_dev *pdev)
272{
273 struct device *dev = pci_dev_to_dev(pdev);
274 struct ata_host *host = dev_get_drvdata(dev);
275
276 ata_host_remove(host);
277 dev_set_drvdata(dev, NULL);
278}
279
280
281
282static const struct pci_device_id mpiix[] = {
283 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
284 { 0, },
285};
286
287static struct pci_driver mpiix_pci_driver = {
288 .name = DRV_NAME,
289 .id_table = mpiix,
290 .probe = mpiix_init_one,
291 .remove = mpiix_remove_one
292};
293
294static int __init mpiix_init(void)
295{
296 return pci_register_driver(&mpiix_pci_driver);
297}
298
299
300static void __exit mpiix_exit(void)
301{
302 pci_unregister_driver(&mpiix_pci_driver);
303}
304
305
306MODULE_AUTHOR("Alan Cox");
307MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
308MODULE_LICENSE("GPL");
309MODULE_DEVICE_TABLE(pci, mpiix);
310MODULE_VERSION(DRV_VERSION);
311
312module_init(mpiix_init);
313module_exit(mpiix_exit);
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
new file mode 100644
index 000000000000..16cb254cb973
--- /dev/null
+++ b/drivers/ata/pata_netcell.c
@@ -0,0 +1,175 @@
1/*
2 * pata_netcell.c - Netcell PATA driver
3 *
4 * (c) 2006 Red Hat <alan@redhat.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/pci.h>
10#include <linux/init.h>
11#include <linux/blkdev.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <scsi/scsi_host.h>
15#include <linux/libata.h>
16#include <linux/ata.h>
17
18#define DRV_NAME "pata_netcell"
19#define DRV_VERSION "0.1.5"
20
21/**
22 * netcell_pre_reset - check for 40/80 pin
23 * @ap: Port
24 *
25 * Cables are handled by the RAID controller. Report 80 pin.
26 */
27
28static int netcell_pre_reset(struct ata_port *ap)
29{
30 ap->cbl = ATA_CBL_PATA80;
31 return ata_std_prereset(ap);
32}
33
34/**
35 * netcell_error_handler - Probe specified port on PATA host controller
36 * @ap: Port to probe
37 *
38 * LOCKING:
39 * None (inherited from caller).
40 */
41
42static void netcell_error_handler(struct ata_port *ap)
43{
44 ata_bmdma_drive_eh(ap, netcell_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
45}
46
47/* No PIO or DMA methods needed for this device */
48
49static struct scsi_host_template netcell_sht = {
50 .module = THIS_MODULE,
51 .name = DRV_NAME,
52 .ioctl = ata_scsi_ioctl,
53 .queuecommand = ata_scsi_queuecmd,
54 .can_queue = ATA_DEF_QUEUE,
55 .this_id = ATA_SHT_THIS_ID,
56 .sg_tablesize = LIBATA_MAX_PRD,
57 /* Special handling needed if you have sector or LBA48 limits */
58 .max_sectors = ATA_MAX_SECTORS,
59 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
60 .emulated = ATA_SHT_EMULATED,
61 .use_clustering = ATA_SHT_USE_CLUSTERING,
62 .proc_name = DRV_NAME,
63 .dma_boundary = ATA_DMA_BOUNDARY,
64 .slave_configure = ata_scsi_slave_config,
65 /* Use standard CHS mapping rules */
66 .bios_param = ata_std_bios_param,
67};
68
69static const struct ata_port_operations netcell_ops = {
70 .port_disable = ata_port_disable,
71
72 /* Task file is PCI ATA format, use helpers */
73 .tf_load = ata_tf_load,
74 .tf_read = ata_tf_read,
75 .check_status = ata_check_status,
76 .exec_command = ata_exec_command,
77 .dev_select = ata_std_dev_select,
78
79 .freeze = ata_bmdma_freeze,
80 .thaw = ata_bmdma_thaw,
81 .error_handler = netcell_error_handler,
82 .post_internal_cmd = ata_bmdma_post_internal_cmd,
83
84 /* BMDMA handling is PCI ATA format, use helpers */
85 .bmdma_setup = ata_bmdma_setup,
86 .bmdma_start = ata_bmdma_start,
87 .bmdma_stop = ata_bmdma_stop,
88 .bmdma_status = ata_bmdma_status,
89 .qc_prep = ata_qc_prep,
90 .qc_issue = ata_qc_issue_prot,
91 .data_xfer = ata_pio_data_xfer,
92
93 /* Timeout handling. Special recovery hooks here */
94 .eng_timeout = ata_eng_timeout,
95 .irq_handler = ata_interrupt,
96 .irq_clear = ata_bmdma_irq_clear,
97
98 /* Generic PATA PCI ATA helpers */
99 .port_start = ata_port_start,
100 .port_stop = ata_port_stop,
101 .host_stop = ata_host_stop,
102};
103
104
105/**
106 * netcell_init_one - Register Netcell ATA PCI device with kernel services
107 * @pdev: PCI device to register
108 * @ent: Entry in netcell_pci_tbl matching with @pdev
109 *
110 * Called from kernel PCI layer.
111 *
112 * LOCKING:
113 * Inherited from PCI layer (may sleep).
114 *
115 * RETURNS:
116 * Zero on success, or -ERRNO value.
117 */
118
119static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
120{
121 static int printed_version;
122 static struct ata_port_info info = {
123 .sht = &netcell_sht,
124 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
125 /* Actually we don't really care about these as the
126 firmware deals with it */
127 .pio_mask = 0x1f, /* pio0-4 */
128 .mwdma_mask = 0x07, /* mwdma0-2 */
129 .udma_mask = 0x3f, /* udma0-5 */
130 .port_ops = &netcell_ops,
131 };
132 static struct ata_port_info *port_info[2] = { &info, &info };
133
134 if (!printed_version++)
135 dev_printk(KERN_DEBUG, &pdev->dev,
136 "version " DRV_VERSION "\n");
137
138 /* Any chip specific setup/optimisation/messages here */
139 ata_pci_clear_simplex(pdev);
140
141 /* And let the library code do the work */
142 return ata_pci_init_one(pdev, port_info, 2);
143}
144
145static const struct pci_device_id netcell_pci_tbl[] = {
146 { PCI_DEVICE(PCI_VENDOR_ID_NETCELL, PCI_DEVICE_ID_REVOLUTION), },
147 { } /* terminate list */
148};
149
150static struct pci_driver netcell_pci_driver = {
151 .name = DRV_NAME,
152 .id_table = netcell_pci_tbl,
153 .probe = netcell_init_one,
154 .remove = ata_pci_remove_one,
155};
156
157static int __init netcell_init(void)
158{
159 return pci_register_driver(&netcell_pci_driver);
160}
161
162static void __exit netcell_exit(void)
163{
164 pci_unregister_driver(&netcell_pci_driver);
165}
166
167module_init(netcell_init);
168module_exit(netcell_exit);
169
170MODULE_AUTHOR("Alan Cox");
171MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
172MODULE_LICENSE("GPL");
173MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
174MODULE_VERSION(DRV_VERSION);
175
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
new file mode 100644
index 000000000000..93d6646d2954
--- /dev/null
+++ b/drivers/ata/pata_ns87410.c
@@ -0,0 +1,236 @@
1/*
2 * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/blkdev.h>
26#include <linux/delay.h>
27#include <scsi/scsi_host.h>
28#include <linux/libata.h>
29
30#define DRV_NAME "pata_ns87410"
31#define DRV_VERSION "0.4.2"
32
33/**
34 * ns87410_pre_reset - probe begin
35 * @ap: ATA port
36 *
37 * Set up cable type and use generic probe init
38 */
39
40static int ns87410_pre_reset(struct ata_port *ap)
41{
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43 static const struct pci_bits ns87410_enable_bits[] = {
44 { 0x43, 1, 0x08, 0x08 },
45 { 0x47, 1, 0x08, 0x08 }
46 };
47
48 if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) {
49 ata_port_disable(ap);
50 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
51 return 0;
52 }
53 ap->cbl = ATA_CBL_PATA40;
54 return ata_std_prereset(ap);
55}
56
57/**
58 * ns87410_error_handler - probe reset
59 * @ap: ATA port
60 *
61 * Perform the ATA probe and bus reset sequence plus specific handling
62 * for this hardware. The 87410 has its enable bits in a different place
63 * to PIIX4 and friends. As a pure PIO device it has no cable detect.
64 */
65
66static void ns87410_error_handler(struct ata_port *ap)
67{
68 ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
69}
70
71/**
72 * ns87410_set_piomode - set initial PIO mode data
73 * @ap: ATA interface
74 * @adev: ATA device
75 *
76 * Program timing data. This is kept per channel not per device,
77 * and only affects the data port.
78 */
79
80static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
81{
82 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
83 int port = 0x40 + 4 * ap->port_no;
84 u8 idetcr, idefr;
85 struct ata_timing at;
86
87 static const u8 activebits[15] = {
88 0, 1, 2, 3, 4,
89 5, 5, 6, 6, 6,
90 6, 7, 7, 7, 7
91 };
92
93 static const u8 recoverbits[12] = {
94 0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
95 };
96
97 pci_read_config_byte(pdev, port + 3, &idefr);
98
99 if (ata_pio_need_iordy(adev))
100 idefr |= 0x04; /* IORDY enable */
101 else
102 idefr &= ~0x04;
103
104 if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
105 dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
106 return;
107 }
108
109 at.active = FIT(at.active, 2, 16) - 2;
110 at.setup = FIT(at.setup, 1, 4) - 1;
111 at.recover = FIT(at.recover, 1, 12) - 1;
112
113 idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
114
115 pci_write_config_byte(pdev, port, idetcr);
116 pci_write_config_byte(pdev, port + 3, idefr);
117 /* We use ap->private_data as a pointer to the device currently
118 loaded for timing */
119 ap->private_data = adev;
120}
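
/*
 * Where the magic 30303 above comes from: it is the 33MHz bus clock
 * period in picoseconds (1e9 / 33000kHz); ata_timing_compute() then
 * turns the mode's nanosecond timings into whole clocks of that
 * period. A quick standalone check of the arithmetic, using an
 * illustrative 180ns active phase rounded up to whole clocks:
 */
#include <stdio.h>

int main(void)
{
	int t_ps = 1000000000 / 33000;			/* -> 30303 */
	int active_ns = 180;
	int clocks = (active_ns * 1000 + t_ps - 1) / t_ps;

	printf("period %d ps; 180ns active = %d clocks\n", t_ps, clocks);
	return 0;
}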
121
122/**
123 * ns87410_qc_issue_prot - command issue
124 * @qc: command pending
125 *
126 * Called when the libata layer is about to issue a command. We wrap
127 * this interface so that we can load the correct ATA timings if
128 * necessary.
129 */
130
131static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
132{
133 struct ata_port *ap = qc->ap;
134 struct ata_device *adev = qc->dev;
135
136 /* If modes have been configured and the channel data is not loaded
137 then load it. We have to check if pio_mode is set as the core code
138 does not set adev->pio_mode to XFER_PIO_0 while probing as would be
139 logical */
140
141 if (adev->pio_mode && adev != ap->private_data)
142 ns87410_set_piomode(ap, adev);
143
144 return ata_qc_issue_prot(qc);
145}
146
147static struct scsi_host_template ns87410_sht = {
148 .module = THIS_MODULE,
149 .name = DRV_NAME,
150 .ioctl = ata_scsi_ioctl,
151 .queuecommand = ata_scsi_queuecmd,
152 .can_queue = ATA_DEF_QUEUE,
153 .this_id = ATA_SHT_THIS_ID,
154 .sg_tablesize = LIBATA_MAX_PRD,
155 .max_sectors = ATA_MAX_SECTORS,
156 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
157 .emulated = ATA_SHT_EMULATED,
158 .use_clustering = ATA_SHT_USE_CLUSTERING,
159 .proc_name = DRV_NAME,
160 .dma_boundary = ATA_DMA_BOUNDARY,
161 .slave_configure = ata_scsi_slave_config,
162 .bios_param = ata_std_bios_param,
163};
164
165static struct ata_port_operations ns87410_port_ops = {
166 .port_disable = ata_port_disable,
167 .set_piomode = ns87410_set_piomode,
168
169 .tf_load = ata_tf_load,
170 .tf_read = ata_tf_read,
171 .check_status = ata_check_status,
172 .exec_command = ata_exec_command,
173 .dev_select = ata_std_dev_select,
174
175 .freeze = ata_bmdma_freeze,
176 .thaw = ata_bmdma_thaw,
177 .error_handler = ns87410_error_handler,
178 .post_internal_cmd = ata_bmdma_post_internal_cmd,
179
180 .qc_prep = ata_qc_prep,
181 .qc_issue = ns87410_qc_issue_prot,
182 .eng_timeout = ata_eng_timeout,
183 .data_xfer = ata_pio_data_xfer,
184
185 .irq_handler = ata_interrupt,
186 .irq_clear = ata_bmdma_irq_clear,
187
188 .port_start = ata_port_start,
189 .port_stop = ata_port_stop,
190 .host_stop = ata_host_stop
191};
192
193static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
194{
195 static struct ata_port_info info = {
196 .sht = &ns87410_sht,
197 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
198 .pio_mask = 0x0F,
199 .port_ops = &ns87410_port_ops
200 };
201 static struct ata_port_info *port_info[2] = {&info, &info};
202 return ata_pci_init_one(dev, port_info, 2);
203}
204
205static const struct pci_device_id ns87410[] = {
206 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410), },
207 { 0, },
208};
209
210static struct pci_driver ns87410_pci_driver = {
211 .name = DRV_NAME,
212 .id_table = ns87410,
213 .probe = ns87410_init_one,
214 .remove = ata_pci_remove_one
215};
216
217static int __init ns87410_init(void)
218{
219 return pci_register_driver(&ns87410_pci_driver);
220}
221
222
223static void __exit ns87410_exit(void)
224{
225 pci_unregister_driver(&ns87410_pci_driver);
226}
227
228
229MODULE_AUTHOR("Alan Cox");
230MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
231MODULE_LICENSE("GPL");
232MODULE_DEVICE_TABLE(pci, ns87410);
233MODULE_VERSION(DRV_VERSION);
234
235module_init(ns87410_init);
236module_exit(ns87410_exit);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
new file mode 100644
index 000000000000..04c618a2664b
--- /dev/null
+++ b/drivers/ata/pata_oldpiix.c
@@ -0,0 +1,339 @@
1/*
2 * pata_oldpiix.c - Intel early PIIX PATA controllers
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * Early PIIX differs significantly from the later PIIX as it lacks
9 * SITRE and the slave timing registers. This means that you have to
10 * set timing per channel, or be clever. Libata tells us whenever it
11 * does drive selection and we use this to reload the timings.
12 *
13 * Because of these behaviour differences the early PIIX gets its own driver module.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/blkdev.h>
21#include <linux/delay.h>
22#include <linux/device.h>
23#include <scsi/scsi_host.h>
24#include <linux/libata.h>
25#include <linux/ata.h>
26
27#define DRV_NAME "pata_oldpiix"
28#define DRV_VERSION "0.5.1"
29
30/**
31 * oldpiix_pre_reset - probe begin
32 * @ap: ATA port
33 *
34 * Set up cable type and use generic probe init
35 */
36
37static int oldpiix_pre_reset(struct ata_port *ap)
38{
39 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
40 static const struct pci_bits oldpiix_enable_bits[] = {
41 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
42 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
43 };
44
45 if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) {
46 ata_port_disable(ap);
47 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
48 return 0;
49 }
50 ap->cbl = ATA_CBL_PATA40;
51 return ata_std_prereset(ap);
52}
53
54/**
55 * oldpiix_pata_error_handler - Probe specified port on PATA host controller
56 * @ap: Port to probe
58 *
59 * LOCKING:
60 * None (inherited from caller).
61 */
62
63static void oldpiix_pata_error_handler(struct ata_port *ap)
64{
65 ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
66}
67
68/**
69 * oldpiix_set_piomode - Initialize host controller PATA PIO timings
70 * @ap: Port whose timings we are configuring
71 * @adev: Device whose timings we are configuring
72 *
73 * Set PIO mode for device, in host controller PCI config space.
74 *
75 * LOCKING:
76 * None (inherited from caller).
77 */
78
79static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
80{
81 unsigned int pio = adev->pio_mode - XFER_PIO_0;
82 struct pci_dev *dev = to_pci_dev(ap->host->dev);
83 unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
84 u16 idetm_data;
85 int control = 0;
86
87 /*
88 * See Intel Document 298600-004 for the timing programming rules
89 * for PIIX/ICH. Note that the early PIIX does not have the slave
90 * timing port at 0x44.
91 */
92
93 static const /* ISP RTC */
94 u8 timings[][2] = { { 0, 0 },
95 { 0, 0 },
96 { 1, 0 },
97 { 2, 1 },
98 { 2, 3 }, };
99
100 if (pio > 2)
101 control |= 1; /* TIME1 enable */
102 if (ata_pio_need_iordy(adev))
103 control |= 2; /* IE IORDY */
104
105 /* Intel specifies that the PPE functionality is for disk only */
106 if (adev->class == ATA_DEV_ATA)
107 control |= 4; /* PPE enable */
108
109 pci_read_config_word(dev, idetm_port, &idetm_data);
110
111 /* Enable PPE, IE and TIME as appropriate. Clear the other
112 drive timing bits */
113 if (adev->devno == 0) {
114 idetm_data &= 0xCCE0;
115 idetm_data |= control;
116 } else {
117 idetm_data &= 0xCC0E;
118 idetm_data |= (control << 4);
119 }
120 idetm_data |= (timings[pio][0] << 12) |
121 (timings[pio][1] << 8);
122 pci_write_config_word(dev, idetm_port, idetm_data);
123
124 /* Track which port is configured */
125 ap->private_data = adev;
126}
127
128/**
129 * oldpiix_set_dmamode - Initialize host controller PATA DMA timings
130 * @ap: Port whose timings we are configuring
131 * @adev: Device to program
133 *
134 * Set MWDMA mode for device, in host controller PCI config space.
135 *
136 * LOCKING:
137 * None (inherited from caller).
138 */
139
140static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
141{
142 struct pci_dev *dev = to_pci_dev(ap->host->dev);
143 u8 idetm_port = ap->port_no ? 0x42 : 0x40;
144 u16 idetm_data;
145
146 static const /* ISP RTC */
147 u8 timings[][2] = { { 0, 0 },
148 { 0, 0 },
149 { 1, 0 },
150 { 2, 1 },
151 { 2, 3 }, };
152
153 /*
154 * MWDMA is driven by the PIO timings. We must also enable
155 * IORDY unconditionally along with TIME1. PPE has already
156 * been set when the PIO timing was set.
157 */
158
159 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
160 unsigned int control;
161 const unsigned int needed_pio[3] = {
162 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
163 };
164 int pio = needed_pio[mwdma] - XFER_PIO_0;
165
166 pci_read_config_word(dev, idetm_port, &idetm_data);
167
168 control = 3; /* IORDY|TIME0 */
169 /* Intel specifies that the PPE functionality is for disk only */
170 if (adev->class == ATA_DEV_ATA)
171 control |= 4; /* PPE enable */
172
173 /* If the drive MWDMA is faster than it can do PIO then
174 we must force PIO into PIO0 */
175
176 if (adev->pio_mode < needed_pio[mwdma])
177 /* Enable DMA timing only */
178 control |= 8; /* PIO cycles in PIO0 */
179
180 /* Mask out the relevant control and timing bits we will load. Also
181 clear the other drive TIME register as a precaution */
182 if (adev->devno == 0) {
183 idetm_data &= 0xCCE0;
184 idetm_data |= control;
185 } else {
186 idetm_data &= 0xCC0E;
187 idetm_data |= (control << 4);
188 }
189 idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
190 pci_write_config_word(dev, idetm_port, idetm_data);
191
192 /* Track which port is configured */
193 ap->private_data = adev;
194}
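
/*
 * The "MWDMA is driven by the PIO timings" rule above, in isolation:
 * each MWDMA mode needs the clock counts of a minimum PIO mode, and
 * if the drive's own PIO mode is slower the chip is told to run PIO
 * cycles at PIO0 speed while keeping the fast timing for DMA. The
 * control bit values mirror the function above.
 */
#include <stdio.h>

int main(void)
{
	const int needed_pio[3] = { 0, 3, 4 };	/* per MWDMA mode */
	int mwdma = 2;				/* drive wants MWDMA2 */
	int drive_pio = 2;			/* but only does PIO2 */
	unsigned control = 3;			/* IORDY|TIME0 */

	if (drive_pio < needed_pio[mwdma])
		control |= 8;			/* PIO cycles in PIO0 */

	printf("mwdma%d, pio%d -> control %u\n", mwdma, drive_pio, control);
	return 0;
}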
195
196/**
197 * oldpiix_qc_issue_prot - command issue
198 * @qc: command pending
199 *
200 * Called when the libata layer is about to issue a command. We wrap
201 * this interface so that we can load the correct ATA timings if
202 * necessary. Our logic also clears TIME0/TIME1 for the other device so
203 * that, even if we get this wrong, cycles to the other device will
204 * be made PIO0.
205 */
206
207static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
208{
209 struct ata_port *ap = qc->ap;
210 struct ata_device *adev = qc->dev;
211
212 if (adev != ap->private_data) {
213 if (adev->dma_mode)
214 oldpiix_set_dmamode(ap, adev);
215 else if (adev->pio_mode)
216 oldpiix_set_piomode(ap, adev);
217 }
218 return ata_qc_issue_prot(qc);
219}
220
221
222static struct scsi_host_template oldpiix_sht = {
223 .module = THIS_MODULE,
224 .name = DRV_NAME,
225 .ioctl = ata_scsi_ioctl,
226 .queuecommand = ata_scsi_queuecmd,
227 .can_queue = ATA_DEF_QUEUE,
228 .this_id = ATA_SHT_THIS_ID,
229 .sg_tablesize = LIBATA_MAX_PRD,
230 .max_sectors = ATA_MAX_SECTORS,
231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
232 .emulated = ATA_SHT_EMULATED,
233 .use_clustering = ATA_SHT_USE_CLUSTERING,
234 .proc_name = DRV_NAME,
235 .dma_boundary = ATA_DMA_BOUNDARY,
236 .slave_configure = ata_scsi_slave_config,
237 .bios_param = ata_std_bios_param,
238};
239
240static const struct ata_port_operations oldpiix_pata_ops = {
241 .port_disable = ata_port_disable,
242 .set_piomode = oldpiix_set_piomode,
243 .set_dmamode = oldpiix_set_dmamode,
244 .mode_filter = ata_pci_default_filter,
245
246 .tf_load = ata_tf_load,
247 .tf_read = ata_tf_read,
248 .check_status = ata_check_status,
249 .exec_command = ata_exec_command,
250 .dev_select = ata_std_dev_select,
251
252 .freeze = ata_bmdma_freeze,
253 .thaw = ata_bmdma_thaw,
254 .error_handler = oldpiix_pata_error_handler,
255 .post_internal_cmd = ata_bmdma_post_internal_cmd,
256
257 .bmdma_setup = ata_bmdma_setup,
258 .bmdma_start = ata_bmdma_start,
259 .bmdma_stop = ata_bmdma_stop,
260 .bmdma_status = ata_bmdma_status,
261 .qc_prep = ata_qc_prep,
262 .qc_issue = oldpiix_qc_issue_prot,
263 .data_xfer = ata_pio_data_xfer,
264
265 .irq_handler = ata_interrupt,
266 .irq_clear = ata_bmdma_irq_clear,
267
268 .port_start = ata_port_start,
269 .port_stop = ata_port_stop,
270 .host_stop = ata_host_stop,
271};
272
273
274/**
275 * oldpiix_init_one - Register PIIX ATA PCI device with kernel services
276 * @pdev: PCI device to register
277 * @ent: Entry in oldpiix_pci_tbl matching with @pdev
278 *
279 * Called from kernel PCI layer. We probe for combined mode (sigh),
280 * and then hand over control to libata, for it to do the rest.
281 *
282 * LOCKING:
283 * Inherited from PCI layer (may sleep).
284 *
285 * RETURNS:
286 * Zero on success, or -ERRNO value.
287 */
288
289static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
290{
291 static int printed_version;
292 static struct ata_port_info info = {
293 .sht = &oldpiix_sht,
294 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
295 .pio_mask = 0x1f, /* pio0-4 */
296 .mwdma_mask = 0x07, /* mwdma0-2 */
297 .port_ops = &oldpiix_pata_ops,
298 };
299 static struct ata_port_info *port_info[2] = { &info, &info };
300
301 if (!printed_version++)
302 dev_printk(KERN_DEBUG, &pdev->dev,
303 "version " DRV_VERSION "\n");
304
305 return ata_pci_init_one(pdev, port_info, 2);
306}
307
308static const struct pci_device_id oldpiix_pci_tbl[] = {
309 { PCI_DEVICE(0x8086, 0x1230), },
310 { } /* terminate list */
311};
312
313static struct pci_driver oldpiix_pci_driver = {
314 .name = DRV_NAME,
315 .id_table = oldpiix_pci_tbl,
316 .probe = oldpiix_init_one,
317 .remove = ata_pci_remove_one,
318};
319
320static int __init oldpiix_init(void)
321{
322 return pci_register_driver(&oldpiix_pci_driver);
323}
324
325static void __exit oldpiix_exit(void)
326{
327 pci_unregister_driver(&oldpiix_pci_driver);
328}
329
330
331module_init(oldpiix_init);
332module_exit(oldpiix_exit);
333
334MODULE_AUTHOR("Alan Cox");
335MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
336MODULE_LICENSE("GPL");
337MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
338MODULE_VERSION(DRV_VERSION);
339
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
new file mode 100644
index 000000000000..c3d01325e0e2
--- /dev/null
+++ b/drivers/ata/pata_opti.c
@@ -0,0 +1,292 @@
1/*
2 * pata_opti.c - Opti PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on
7 * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
8 *
9 * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
10 *
11 * Authors:
12 * Jaromir Koutek <miri@punknet.cz>,
13 * Jan Harkes <jaharkes@cwi.nl>,
14 * Mark Lord <mlord@pobox.com>
15 * Some parts of code are from ali14xx.c and from rz1000.c.
16 *
17 * Also consulted the FreeBSD prototype driver by Kevin Day to try
18 * and resolve some confusions. Further documentation can be found in
19 * Ralf Brown's interrupt list
20 *
21 * If you have other variants of the Opti range (Viper/Vendetta) please
22 * try this driver with those PCI idents and report back. For the later
23 * chips see the pata_optidma driver.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35
36#define DRV_NAME "pata_opti"
37#define DRV_VERSION "0.2.4"
38
39enum {
40 READ_REG = 0, /* index of Read cycle timing register */
41 WRITE_REG = 1, /* index of Write cycle timing register */
42 CNTRL_REG = 3, /* index of Control register */
43 STRAP_REG = 5, /* index of Strap register */
44 MISC_REG = 6 /* index of Miscellaneous register */
45};
46
47/**
48 * opti_pre_reset - probe begin
49 * @ap: ATA port
50 *
51 * Set up cable type and use generic probe init
52 */
53
54static int opti_pre_reset(struct ata_port *ap)
55{
56 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
57 static const struct pci_bits opti_enable_bits[] = {
58 { 0x45, 1, 0x80, 0x00 },
59 { 0x40, 1, 0x08, 0x00 }
60 };
61
62 if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) {
63 ata_port_disable(ap);
64 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
65 return 0;
66 }
67 ap->cbl = ATA_CBL_PATA40;
68 return ata_std_prereset(ap);
69}
70
71/**
72 * opti_error_handler - probe reset
73 * @ap: ATA port
74 *
75 * Perform the ATA probe and bus reset sequence plus specific handling
76 * for this hardware. The Opti needs little handling - we have no UDMA66
77 * capability that needs cable detection. All we must do is check the port
78 * is enabled.
79 */
80
81static void opti_error_handler(struct ata_port *ap)
82{
83 ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
84}
85
86/**
87 * opti_write_reg - control register setup
88 * @ap: ATA port
89 * @value: value
90 * @reg: control register number
91 *
92 * The Opti uses magic 'trapdoor' register accesses to do configuration
93 * rather than using PCI space as other controllers do. The double inw
94 * on the error register activates configuration mode. We can then write
95 * the control register
96 */
97
98static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
99{
100 unsigned long regio = ap->ioaddr.cmd_addr;
101
102 /* These 3 unlock the control register access */
103 inw(regio + 1);
104 inw(regio + 1);
105 outb(3, regio + 2);
106
107 /* Do the I/O */
108 outb(val, regio + reg);
109
110 /* Relock */
111 outb(0x83, regio + 2);
112}
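
/*
 * A toy model of the trapdoor sequence above, paraphrased from the
 * comment rather than a datasheet: two reads of the error register
 * arm configuration mode, a 3 written to the sector count register
 * opens it, and 0x83 closes it again.
 */
#include <stdio.h>

static int armed, unlocked;

static void model_inw(int reg)
{
	if (reg == 1 && armed < 2)	/* error register */
		armed++;
}

static void model_outb(unsigned char val, int reg)
{
	if (reg == 2) {			/* sector count register */
		if (val == 3 && armed == 2)
			unlocked = 1;
		else if (val == 0x83)
			unlocked = 0;
		armed = 0;
	} else if (unlocked) {
		printf("config write: reg %d <- 0x%02X\n", reg, val);
	}
}

int main(void)
{
	model_inw(1);
	model_inw(1);
	model_outb(3, 2);	/* unlock */
	model_outb(0x85, 3);	/* lands in CNTRL_REG, not the task file */
	model_outb(0x83, 2);	/* relock */
	return 0;
}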
113
114#if 0
115/**
116 * opti_read_reg - control register read
117 * @ap: ATA port
118 * @reg: control register number
119 *
120 * The Opti uses magic 'trapdoor' register accesses to do configuration
121 * rather than using PCI space as other controllers do. The double inw
122 * on the error register activates configuration mode. We can then read
123 * the control register
124 */
125
126static u8 opti_read_reg(struct ata_port *ap, int reg)
127{
128 unsigned long regio = ap->ioaddr.cmd_addr;
129 u8 ret;
130 inw(regio + 1);
131 inw(regio + 1);
132 outb(3, regio + 2);
133 ret = inb(regio + reg);
134 outb(0x83, regio + 2);
135 return ret;
136}
136#endif
137
138/**
139 * opti_set_piomode - set initial PIO mode data
140 * @ap: ATA interface
141 * @adev: ATA device
142 *
143 * Called to do the PIO mode setup. Timing numbers are taken from
144 * the FreeBSD driver then pre computed to keep the code clean. There
145 * are two tables depending on the hardware clock speed.
146 */
147
148static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
149{
150 struct ata_device *pair = ata_dev_pair(adev);
151 int clock;
152 int pio = adev->pio_mode - XFER_PIO_0;
153 unsigned long regio = ap->ioaddr.cmd_addr;
154 u8 addr;
155
156 /* Address table precomputed with prefetch off and a DCLK of 2 */
157 static const u8 addr_timing[2][5] = {
158 { 0x30, 0x20, 0x20, 0x10, 0x10 },
159 { 0x20, 0x20, 0x10, 0x10, 0x10 }
160 };
161 static const u8 data_rec_timing[2][5] = {
162 { 0x6B, 0x56, 0x42, 0x32, 0x31 },
163 { 0x58, 0x44, 0x32, 0x22, 0x21 }
164 };
165
166 outb(0xff, regio + 5);
167 clock = inw(regio + 5) & 1;
168
169 /*
170 * As with many controllers the address setup time is shared
171 * and must suit both devices if present.
172 */
173
174 addr = addr_timing[clock][pio];
175 if (pair) {
176 /* Hardware constraint */
177 u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
178 if (pair_addr > addr)
179 addr = pair_addr;
180 }
181
182 /* Commence primary programming sequence */
183 opti_write_reg(ap, adev->devno, MISC_REG);
184 opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
185 opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
186 opti_write_reg(ap, addr, MISC_REG);
187
188 /* Programming sequence complete, override strapping */
189 opti_write_reg(ap, 0x85, CNTRL_REG);
190}
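
/*
 * The shared address setup rule above, standalone: both devices on
 * the channel see one value, so the slower (numerically larger)
 * requirement wins. The table row is addr_timing[0][] from above.
 */
#include <stdio.h>

int main(void)
{
	const unsigned char addr_timing[5] = {
		0x30, 0x20, 0x20, 0x10, 0x10
	};
	int pio = 4, pair_pio = 1;
	unsigned char addr = addr_timing[pio];

	if (addr_timing[pair_pio] > addr)	/* slower device wins */
		addr = addr_timing[pair_pio];

	printf("pio%d paired with pio%d -> setup 0x%02X\n",
	       pio, pair_pio, addr);
	return 0;
}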
191
192static struct scsi_host_template opti_sht = {
193 .module = THIS_MODULE,
194 .name = DRV_NAME,
195 .ioctl = ata_scsi_ioctl,
196 .queuecommand = ata_scsi_queuecmd,
197 .can_queue = ATA_DEF_QUEUE,
198 .this_id = ATA_SHT_THIS_ID,
199 .sg_tablesize = LIBATA_MAX_PRD,
200 .max_sectors = ATA_MAX_SECTORS,
201 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
202 .emulated = ATA_SHT_EMULATED,
203 .use_clustering = ATA_SHT_USE_CLUSTERING,
204 .proc_name = DRV_NAME,
205 .dma_boundary = ATA_DMA_BOUNDARY,
206 .slave_configure = ata_scsi_slave_config,
207 .bios_param = ata_std_bios_param,
208};
209
210static struct ata_port_operations opti_port_ops = {
211 .port_disable = ata_port_disable,
212 .set_piomode = opti_set_piomode,
213/* .set_dmamode = opti_set_dmamode, */
214 .tf_load = ata_tf_load,
215 .tf_read = ata_tf_read,
216 .check_status = ata_check_status,
217 .exec_command = ata_exec_command,
218 .dev_select = ata_std_dev_select,
219
220 .freeze = ata_bmdma_freeze,
221 .thaw = ata_bmdma_thaw,
222 .error_handler = opti_error_handler,
223 .post_internal_cmd = ata_bmdma_post_internal_cmd,
224
225 .bmdma_setup = ata_bmdma_setup,
226 .bmdma_start = ata_bmdma_start,
227 .bmdma_stop = ata_bmdma_stop,
228 .bmdma_status = ata_bmdma_status,
229
230 .qc_prep = ata_qc_prep,
231 .qc_issue = ata_qc_issue_prot,
232 .eng_timeout = ata_eng_timeout,
233 .data_xfer = ata_pio_data_xfer,
234
235 .irq_handler = ata_interrupt,
236 .irq_clear = ata_bmdma_irq_clear,
237
238 .port_start = ata_port_start,
239 .port_stop = ata_port_stop,
240 .host_stop = ata_host_stop
241};
242
243static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
244{
245 static struct ata_port_info info = {
246 .sht = &opti_sht,
247 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
248 .pio_mask = 0x1f,
249 .port_ops = &opti_port_ops
250 };
251 static struct ata_port_info *port_info[2] = { &info, &info };
252 static int printed_version;
253
254 if (!printed_version++)
255 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
256
257 return ata_pci_init_one(dev, port_info, 2);
258}
259
260static const struct pci_device_id opti[] = {
261 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
262 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
263 { 0, },
264};
265
266static struct pci_driver opti_pci_driver = {
267 .name = DRV_NAME,
268 .id_table = opti,
269 .probe = opti_init_one,
270 .remove = ata_pci_remove_one
271};
272
273static int __init opti_init(void)
274{
275 return pci_register_driver(&opti_pci_driver);
276}
277
278
279static void __exit opti_exit(void)
280{
281 pci_unregister_driver(&opti_pci_driver);
282}
283
284
285MODULE_AUTHOR("Alan Cox");
286MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
287MODULE_LICENSE("GPL");
288MODULE_DEVICE_TABLE(pci, opti);
289MODULE_VERSION(DRV_VERSION);
290
291module_init(opti_init);
292module_exit(opti_exit);
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
new file mode 100644
index 000000000000..177a455f4251
--- /dev/null
+++ b/drivers/ata/pata_optidma.c
@@ -0,0 +1,547 @@
1/*
2 * pata_optidma.c - Opti DMA PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * The Opti DMA controllers are related to the older PIO PCI controllers
7 * and indeed the VLB ones. The main differences are that the timing
8 * numbers are now based off PCI clocks not VLB and differ, and that
9 * MWDMA is supported.
10 *
11 * This driver should support Viper-N+, FireStar, FireStar Plus.
12 *
13 * These devices support virtual DMA for read (as does the CS5520). Later
14 * chips support UDMA33, but only if the rest of the board logic does,
15 * so you have to get this right. We don't support the virtual DMA
16 * but we do handle UDMA.
17 *
18 * Bits that are worth knowing
19 * Most control registers are shadowed into I/O registers
20 * 0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25MHz
21 * Virtual DMA registers *move* between rev 0x02 and rev 0x10
22 * UDMA requires a 66MHz FSB
23 *
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34
35#define DRV_NAME "pata_optidma"
36#define DRV_VERSION "0.2.1"
37
38enum {
39 READ_REG = 0, /* index of Read cycle timing register */
40 WRITE_REG = 1, /* index of Write cycle timing register */
41 CNTRL_REG = 3, /* index of Control register */
42 STRAP_REG = 5, /* index of Strap register */
43 MISC_REG = 6 /* index of Miscellaneous register */
44};
45
46 static int pci_clock; /* 0 = 33MHz, 1 = 25MHz */
47
48/**
49 * optidma_pre_reset - probe begin
50 * @ap: ATA port
51 *
52 * Set up cable type and use generic probe init
53 */
54
55static int optidma_pre_reset(struct ata_port *ap)
56{
57 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
58 static const struct pci_bits optidma_enable_bits = {
59 0x40, 1, 0x08, 0x00
60 };
61
62 if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) {
63 ata_port_disable(ap);
64 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
65 return 0;
66 }
67 ap->cbl = ATA_CBL_PATA40;
68 return ata_std_prereset(ap);
69}
70
71/**
72 * optidma_error_handler - probe reset
73 * @ap: ATA port
74 *
75 * Perform the ATA probe and bus reset sequence plus specific handling
76 * for this hardware. The Opti needs little handling - we have no UDMA66
77 * capability that needs cable detection. All we must do is check the port
78 * is enabled.
79 */
80
81static void optidma_error_handler(struct ata_port *ap)
82{
83 ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
84}
85
86/**
87 * optidma_unlock - unlock control registers
88 * @ap: ATA port
89 *
90 * Unlock the control register block for this adapter. Registers must not
91 * be unlocked in a situation where libata might look at them.
92 */
93
94static void optidma_unlock(struct ata_port *ap)
95{
96 unsigned long regio = ap->ioaddr.cmd_addr;
97
98 /* These 3 unlock the control register access */
99 inw(regio + 1);
100 inw(regio + 1);
101 outb(3, regio + 2);
102}
103
104/**
105 * optidma_lock - issue temporary relock
106 * @ap: ATA port
107 *
108 * Re-lock the configuration register settings.
109 */
110
111static void optidma_lock(struct ata_port *ap)
112{
113 unsigned long regio = ap->ioaddr.cmd_addr;
114
115 /* Relock */
116 outb(0x83, regio + 2);
117}
118
119/**
120 * optidma_set_mode - set mode data
121 * @ap: ATA interface
122 * @adev: ATA device
123 * @mode: Mode to set
124 *
125 * Called to do the DMA or PIO mode setup. Timing numbers are all
126 * pre computed to keep the code clean. There are two tables depending
127 * on the hardware clock speed.
128 *
129 * WARNING: While we do this the IDE registers vanish. If we take an
130 * IRQ here we depend on the host set locking to avoid catastrophe.
131 */
132
133static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
134{
135 struct ata_device *pair = ata_dev_pair(adev);
136 int pio = adev->pio_mode - XFER_PIO_0;
137 int dma = adev->dma_mode - XFER_MW_DMA_0;
138 unsigned long regio = ap->ioaddr.cmd_addr;
139 u8 addr;
140
141 /* Address table precomputed with a DCLK of 2 */
142 static const u8 addr_timing[2][5] = {
143 { 0x30, 0x20, 0x20, 0x10, 0x10 },
144 { 0x20, 0x20, 0x10, 0x10, 0x10 }
145 };
146 static const u8 data_rec_timing[2][5] = {
147 { 0x59, 0x46, 0x30, 0x20, 0x20 },
148 { 0x46, 0x32, 0x20, 0x20, 0x10 }
149 };
150 static const u8 dma_data_rec_timing[2][3] = {
151 { 0x76, 0x20, 0x20 },
152 { 0x54, 0x20, 0x10 }
153 };
154
155 /* Switch from IDE to control mode */
156 optidma_unlock(ap);
157
158
159 /*
160 * As with many controllers the address setup time is shared
161 * and must suit both devices if present. FIXME: Check if we
162 * need to look at slowest of PIO/DMA mode of either device
163 */
164
165 if (mode >= XFER_MW_DMA_0)
166 addr = 0;
167 else
168 addr = addr_timing[pci_clock][pio];
169
170 if (pair) {
171 u8 pair_addr;
172 /* Hardware constraint */
173 if (pair->dma_mode)
174 pair_addr = 0;
175 else
176 pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
177 if (pair_addr > addr)
178 addr = pair_addr;
179 }
180
181 /* Commence primary programming sequence */
182 /* First we load the device number into the timing select */
183 outb(adev->devno, regio + MISC_REG);
184 /* Now we load the data timings into read data/write data */
185 if (mode < XFER_MW_DMA_0) {
186 outb(data_rec_timing[pci_clock][pio], regio + READ_REG);
187 outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
188 } else if (mode < XFER_UDMA_0) {
189 outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
190 outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
191 }
192 /* Finally we load the address setup into the misc register */
193 outb(addr | adev->devno, regio + MISC_REG);
194
195 /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
196 outb(0x85, regio + CNTRL_REG);
197
198 /* Switch back to IDE mode */
199 optidma_lock(ap);
200
201 /* Note: at this point our programming is incomplete. We are
202 not supposed to program PCI 0x43 "things we hacked onto the chip"
203 until we've done both sets of PIO/DMA timings */
204}
205
206/**
207 * optiplus_set_mode - DMA setup for Firestar Plus
208 * @ap: ATA port
209 * @adev: device
210 * @mode: desired mode
211 *
212 * The Firestar plus has additional UDMA functionality for UDMA0-2 and
213 * requires we do some additional work. Because the base work we must do
214 * is mostly shared we wrap the Firestar setup functionality in this
215 * one
216 */
217
218static void optiplus_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
221 u8 udcfg;
222 u8 udslave;
223 int dev2 = 2 * adev->devno;
224 int unit = 2 * ap->port_no + adev->devno;
225 int udma = mode - XFER_UDMA_0;
226
227 pci_read_config_byte(pdev, 0x44, &udcfg);
228 if (mode <= XFER_UDMA_0) {
229 udcfg &= ~(1 << unit);
230 optidma_set_mode(ap, adev, adev->dma_mode);
231 } else {
232 udcfg |= (1 << unit);
233 if (ap->port_no) {
234 pci_read_config_byte(pdev, 0x45, &udslave);
235 udslave &= ~(0x03 << dev2);
236 udslave |= (udma << dev2);
237 pci_write_config_byte(pdev, 0x45, udslave);
238 } else {
239 udcfg &= ~(0x30 << dev2);
240 udcfg |= (udma << dev2);
241 }
242 }
243 pci_write_config_byte(pdev, 0x44, udcfg);
244}
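
/*
 * The per-drive UDMA enable bookkeeping above, modelled standalone:
 * register 0x44 keeps one enable bit per drive, indexed by
 * unit = 2 * port + devno. The layout is inferred from the code, not
 * from a datasheet.
 */
#include <stdio.h>

static unsigned char set_udma(unsigned char udcfg, int port, int devno,
			      int on)
{
	int unit = 2 * port + devno;

	return on ? udcfg | (1 << unit) : udcfg & ~(1 << unit);
}

int main(void)
{
	unsigned char udcfg = 0;

	udcfg = set_udma(udcfg, 0, 0, 1);	/* primary master on */
	udcfg = set_udma(udcfg, 1, 1, 1);	/* secondary slave on */
	udcfg = set_udma(udcfg, 0, 0, 0);	/* primary master off */
	printf("0x44 = 0x%02X\n", udcfg);	/* -> 0x08 */
	return 0;
}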
245
246/**
247 * optidma_set_pio_mode - PIO setup callback
248 * @ap: ATA port
249 * @adev: Device
250 *
251 * The libata core provides separate functions for handling PIO and
252 * DMA programming. The architecture of the Firestar makes it easier
253 * for us to have a common function so we provide wrappers
254 */
255
256static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
257{
258 optidma_set_mode(ap, adev, adev->pio_mode);
259}
260
261/**
262 * optidma_set_dma_mode - DMA setup callback
263 * @ap: ATA port
264 * @adev: Device
265 *
266 * The libata core provides separate functions for handling PIO and
267 * DMA programming. The architecture of the Firestar makes it easier
268 * for us to have a common function so we provide wrappers
269 */
270
271static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
272{
273 optidma_set_mode(ap, adev, adev->dma_mode);
274}
275
276/**
277 * optiplus_set_pio_mode - PIO setup callback
278 * @ap: ATA port
279 * @adev: Device
280 *
281 * The libata core provides separate functions for handling PIO and
282 * DMA programming. The architecture of the Firestar makes it easier
283 * for us to have a common function so we provide wrappers
284 */
285
286static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
287{
288 optiplus_set_mode(ap, adev, adev->pio_mode);
289}
290
291/**
292 * optiplus_set_dma_mode - DMA setup callback
293 * @ap: ATA port
294 * @adev: Device
295 *
296 * The libata core provides separate functions for handling PIO and
297 * DMA programming. The architecture of the Firestar makes it easier
298 * for us to have a common function so we provide wrappers
299 */
300
301static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
302{
303 optiplus_set_mode(ap, adev, adev->dma_mode);
304}
305
306/**
307 * optidma_make_bits43 - PCI setup helper
308 * @adev: ATA device
309 *
310 * Turn the ATA device setup into PCI configuration bits
311 * for register 0x43 and return the two bits needed.
312 */
313
314static u8 optidma_make_bits43(struct ata_device *adev)
315{
316 static const u8 bits43[5] = {
317 0, 0, 0, 1, 2
318 };
319 if (!ata_dev_enabled(adev))
320 return 0;
321 if (adev->dma_mode)
322 return adev->dma_mode - XFER_MW_DMA_0;
323 return bits43[adev->pio_mode - XFER_PIO_0];
324}
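
The 2-bit encoding is compact enough to spell out: a disabled device yields 0, MWDMA modes map directly onto 0-2, and PIO 0-4 collapse to 0/0/0/1/2 via the table. A user-space sketch of the same mapping (the dma/pio inputs are stand-ins for the XFER_* offsets, not driver API):

#include <stdio.h>

/* PIO 0-4 collapse onto three timing classes */
static const unsigned char bits43[5] = { 0, 0, 0, 1, 2 };

/* enabled: device present; dma: MWDMA mode + 1 (0 = PIO); pio: PIO mode */
static unsigned char make_bits43(int enabled, int dma, int pio)
{
	if (!enabled)
		return 0;
	if (dma)
		return dma - 1;		/* MWDMA0-2 -> 0-2 */
	return bits43[pio];		/* PIO0-4 -> 0/0/0/1/2 */
}

int main(void)
{
	printf("PIO4 -> %d, MWDMA2 -> %d, absent -> %d\n",
	       make_bits43(1, 0, 4), make_bits43(1, 3, 0),
	       make_bits43(0, 0, 0));
	return 0;
}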
325
326/**
327 * optidma_post_set_mode - finalize PCI setup
328 * @ap: port to set up
329 *
330 * Finalise the configuration by writing the nibble of extra bits
331 * of data into the chip.
332 */
333
334static void optidma_post_set_mode(struct ata_port *ap)
335{
336 u8 r;
337 int nybble = 4 * ap->port_no;
338 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
339
340 pci_read_config_byte(pdev, 0x43, &r);
341
342 r &= ~(0x0F << nybble);	/* clear the nibble we are about to program */
343 r |= (optidma_make_bits43(&ap->device[0]) +
344 (optidma_make_bits43(&ap->device[1]) << 2)) << nybble;
345
346 pci_write_config_byte(pdev, 0x43, r);
347}
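
Register 0x43 therefore holds one nibble per channel, with the master's 2-bit code in the low bits of the nibble and the slave's shifted up by two; the read-modify-write clears only this channel's nibble. A sketch of the packing (a stand-alone illustration, not driver code):

#include <stdio.h>

/* Pack master/slave 2-bit codes into the per-channel nibble of reg 0x43 */
static unsigned char pack_reg43(unsigned char old, int port,
				unsigned char dev0, unsigned char dev1)
{
	int nybble = 4 * port;

	old &= ~(0x0F << nybble);		/* clear this channel's nibble */
	old |= (dev0 | (dev1 << 2)) << nybble;	/* master low, slave high */
	return old;
}

int main(void)
{
	/* Channel 1: master code 2, slave code 1 -> high nibble 0x6 */
	printf("%#04x\n", pack_reg43(0x00, 1, 2, 1));
	return 0;
}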
348
349static struct scsi_host_template optidma_sht = {
350 .module = THIS_MODULE,
351 .name = DRV_NAME,
352 .ioctl = ata_scsi_ioctl,
353 .queuecommand = ata_scsi_queuecmd,
354 .can_queue = ATA_DEF_QUEUE,
355 .this_id = ATA_SHT_THIS_ID,
356 .sg_tablesize = LIBATA_MAX_PRD,
357 .max_sectors = ATA_MAX_SECTORS,
358 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
359 .emulated = ATA_SHT_EMULATED,
360 .use_clustering = ATA_SHT_USE_CLUSTERING,
361 .proc_name = DRV_NAME,
362 .dma_boundary = ATA_DMA_BOUNDARY,
363 .slave_configure = ata_scsi_slave_config,
364 .bios_param = ata_std_bios_param,
365};
366
367static struct ata_port_operations optidma_port_ops = {
368 .port_disable = ata_port_disable,
369 .set_piomode = optidma_set_pio_mode,
370 .set_dmamode = optidma_set_dma_mode,
371
372 .tf_load = ata_tf_load,
373 .tf_read = ata_tf_read,
374 .check_status = ata_check_status,
375 .exec_command = ata_exec_command,
376 .dev_select = ata_std_dev_select,
377
378 .freeze = ata_bmdma_freeze,
379 .thaw = ata_bmdma_thaw,
380 .post_internal_cmd = ata_bmdma_post_internal_cmd,
381 .error_handler = optidma_error_handler,
382 .post_set_mode = optidma_post_set_mode,
383
384 .bmdma_setup = ata_bmdma_setup,
385 .bmdma_start = ata_bmdma_start,
386 .bmdma_stop = ata_bmdma_stop,
387 .bmdma_status = ata_bmdma_status,
388
389 .qc_prep = ata_qc_prep,
390 .qc_issue = ata_qc_issue_prot,
391 .eng_timeout = ata_eng_timeout,
392 .data_xfer = ata_pio_data_xfer,
393
394 .irq_handler = ata_interrupt,
395 .irq_clear = ata_bmdma_irq_clear,
396
397 .port_start = ata_port_start,
398 .port_stop = ata_port_stop,
399 .host_stop = ata_host_stop
400};
401
402static struct ata_port_operations optiplus_port_ops = {
403 .port_disable = ata_port_disable,
404 .set_piomode = optiplus_set_pio_mode,
405 .set_dmamode = optiplus_set_dma_mode,
406
407 .tf_load = ata_tf_load,
408 .tf_read = ata_tf_read,
409 .check_status = ata_check_status,
410 .exec_command = ata_exec_command,
411 .dev_select = ata_std_dev_select,
412
413 .freeze = ata_bmdma_freeze,
414 .thaw = ata_bmdma_thaw,
415 .post_internal_cmd = ata_bmdma_post_internal_cmd,
416 .error_handler = optidma_error_handler,
417 .post_set_mode = optidma_post_set_mode,
418
419 .bmdma_setup = ata_bmdma_setup,
420 .bmdma_start = ata_bmdma_start,
421 .bmdma_stop = ata_bmdma_stop,
422 .bmdma_status = ata_bmdma_status,
423
424 .qc_prep = ata_qc_prep,
425 .qc_issue = ata_qc_issue_prot,
426 .eng_timeout = ata_eng_timeout,
427 .data_xfer = ata_pio_data_xfer,
428
429 .irq_handler = ata_interrupt,
430 .irq_clear = ata_bmdma_irq_clear,
431
432 .port_start = ata_port_start,
433 .port_stop = ata_port_stop,
434 .host_stop = ata_host_stop
435};
436
437/**
438 * optiplus_with_udma - Look for UDMA capable setup
439 * @pdev: ATA controller
440 */
441
442static int optiplus_with_udma(struct pci_dev *pdev)
443{
444 u8 r;
445 int ret = 0;
446 int ioport = 0x22;
447 struct pci_dev *dev1;
448
449 /* Find function 1 */
450 dev1 = pci_get_device(0x1045, 0xC701, NULL);
451 if (dev1 == NULL)
452 return 0;
453
454 /* Rev must be >= 0x10 */
455 pci_read_config_byte(dev1, 0x08, &r);
456 if (r < 0x10)
457 goto done_nomsg;
458 /* Read the chipset system configuration to check our mode */
459 pci_read_config_byte(dev1, 0x5F, &r);
460 ioport |= (r << 8);
461 outb(0x10, ioport);
462 /* Must be 66MHz sync */
463 if ((inb(ioport + 2) & 1) == 0)
464 goto done;
465
466 /* Check the ATA arbitration/timing is suitable */
467 pci_read_config_byte(pdev, 0x42, &r);
468 if ((r & 0x36) != 0x36)
469 goto done;
470 pci_read_config_byte(dev1, 0x52, &r);
471 if (r & 0x80) /* IDEDIR disabled */
472 ret = 1;
473done:
474 if (ret == 0) printk(KERN_WARNING "UDMA not supported in this configuration.\n");
475done_nomsg: /* Wrong chip revision */
476 pci_dev_put(dev1);
477 return ret;
478}
479
480static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
481{
482 static struct ata_port_info info_82c700 = {
483 .sht = &optidma_sht,
484 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
485 .pio_mask = 0x1f,
486 .mwdma_mask = 0x07,
487 .port_ops = &optidma_port_ops
488 };
489 static struct ata_port_info info_82c700_udma = {
490 .sht = &optidma_sht,
491 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
492 .pio_mask = 0x1f,
493 .mwdma_mask = 0x07,
494 .udma_mask = 0x07,
495 .port_ops = &optiplus_port_ops
496 };
497 static struct ata_port_info *port_info[2];
498 struct ata_port_info *info = &info_82c700;
499 static int printed_version;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
503
504 /* Fixed location chipset magic */
505 inw(0x1F1);
506 inw(0x1F1);
507 pci_clock = inb(0x1F5) & 1; /* 0 = 33MHz, 1 = 25MHz */
508
509 if (optiplus_with_udma(dev))
510 info = &info_82c700_udma;
511
512 port_info[0] = port_info[1] = info;
513 return ata_pci_init_one(dev, port_info, 2);
514}
515
516static const struct pci_device_id optidma[] = {
517 { PCI_DEVICE(0x1045, 0xD568), }, /* Opti 82C700 */
518 { 0, },
519};
520
521static struct pci_driver optidma_pci_driver = {
522 .name = DRV_NAME,
523 .id_table = optidma,
524 .probe = optidma_init_one,
525 .remove = ata_pci_remove_one
526};
527
528static int __init optidma_init(void)
529{
530 return pci_register_driver(&optidma_pci_driver);
531}
532
533
534static void __exit optidma_exit(void)
535{
536 pci_unregister_driver(&optidma_pci_driver);
537}
538
539
540MODULE_AUTHOR("Alan Cox");
541MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
542MODULE_LICENSE("GPL");
543MODULE_DEVICE_TABLE(pci, optidma);
544MODULE_VERSION(DRV_VERSION);
545
546module_init(optidma_init);
547module_exit(optidma_exit);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
new file mode 100644
index 000000000000..62b25cda409b
--- /dev/null
+++ b/drivers/ata/pata_pcmcia.c
@@ -0,0 +1,393 @@
1/*
2 * pata_pcmcia.c - PCMCIA PATA controller driver.
3 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
4 * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
5 * <openembedded@hrw.one.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Heavily based upon ide-cs.c
22 * The initial developer of the original code is David A. Hinds
23 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
24 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/ata.h>
34#include <linux/libata.h>
35
36#include <pcmcia/cs_types.h>
37#include <pcmcia/cs.h>
38#include <pcmcia/cistpl.h>
39#include <pcmcia/ds.h>
40#include <pcmcia/cisreg.h>
41#include <pcmcia/ciscode.h>
42
43
44#define DRV_NAME "pata_pcmcia"
45#define DRV_VERSION "0.2.9"
46
47/*
48 * Private data structure to glue stuff together
49 */
50
51struct ata_pcmcia_info {
52 struct pcmcia_device *pdev;
53 int ndev;
54 dev_node_t node;
55};
56
57static struct scsi_host_template pcmcia_sht = {
58 .module = THIS_MODULE,
59 .name = DRV_NAME,
60 .ioctl = ata_scsi_ioctl,
61 .queuecommand = ata_scsi_queuecmd,
62 .can_queue = ATA_DEF_QUEUE,
63 .this_id = ATA_SHT_THIS_ID,
64 .sg_tablesize = LIBATA_MAX_PRD,
65 .max_sectors = ATA_MAX_SECTORS,
66 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
67 .emulated = ATA_SHT_EMULATED,
68 .use_clustering = ATA_SHT_USE_CLUSTERING,
69 .proc_name = DRV_NAME,
70 .dma_boundary = ATA_DMA_BOUNDARY,
71 .slave_configure = ata_scsi_slave_config,
72 .bios_param = ata_std_bios_param,
73};
74
75static struct ata_port_operations pcmcia_port_ops = {
76 .port_disable = ata_port_disable,
77 .tf_load = ata_tf_load,
78 .tf_read = ata_tf_read,
79 .check_status = ata_check_status,
80 .exec_command = ata_exec_command,
81 .dev_select = ata_std_dev_select,
82
83 .freeze = ata_bmdma_freeze,
84 .thaw = ata_bmdma_thaw,
85 .error_handler = ata_bmdma_error_handler,
86 .post_internal_cmd = ata_bmdma_post_internal_cmd,
87
88 .qc_prep = ata_qc_prep,
89 .qc_issue = ata_qc_issue_prot,
90 .eng_timeout = ata_eng_timeout,
91 .data_xfer = ata_pio_data_xfer_noirq,
92
93 .irq_handler = ata_interrupt,
94 .irq_clear = ata_bmdma_irq_clear,
95
96 .port_start = ata_port_start,
97 .port_stop = ata_port_stop,
98 .host_stop = ata_host_stop
99};
100
101#define CS_CHECK(fn, ret) \
102do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
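
CS_CHECK packs the error plumbing into one line: it records which Card Services call failed and its return code, then jumps to the cs_failed label for a single cs_error() report. A runnable user-space demo of the same pattern (the enum values and fake_call are stand-ins, not the PCMCIA API):

#include <stdio.h>

#define CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto failed; } while (0)

enum { GetFirstTuple = 1, RequestIRQ = 2 };

static int fake_call(int rc) { return rc; }

int main(void)
{
	int last_fn = 0, last_ret = 0;

	CHECK(GetFirstTuple, fake_call(0));	/* succeeds, falls through */
	CHECK(RequestIRQ, fake_call(-5));	/* fails, jumps to failed: */
	return 0;
failed:
	printf("call %d failed with %d\n", last_fn, last_ret);
	return 1;
}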
103
104/**
105 * pcmcia_init_one - attach a PCMCIA interface
106 * @pdev: pcmcia device
107 *
108 * Register a PCMCIA IDE interface. Such interfaces run at PIO 0 and
109 * use a shared IRQ.
110 */
111
112static int pcmcia_init_one(struct pcmcia_device *pdev)
113{
114 struct ata_probe_ent ae;
115 struct ata_pcmcia_info *info;
116 tuple_t tuple;
117 struct {
118 unsigned short buf[128];
119 cisparse_t parse;
120 config_info_t conf;
121 cistpl_cftable_entry_t dflt;
122 } *stk = NULL;
123 cistpl_cftable_entry_t *cfg;
124 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
125 unsigned long io_base, ctl_base;
126
127 info = kzalloc(sizeof(*info), GFP_KERNEL);
128 if (info == NULL)
129 return -ENOMEM;
130
131 /* Glue stuff together. FIXME: We may be able to get rid of info with care */
132 info->pdev = pdev;
133 pdev->priv = info;
134
135 /* Set up attributes in order to probe card and get resources */
136 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
137 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
138 pdev->io.IOAddrLines = 3;
139 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
140 pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
141 pdev->conf.Attributes = CONF_ENABLE_IRQ;
142 pdev->conf.IntType = INT_MEMORY_AND_IO;
143
144 /* Allocate resource probing structures */
145
146 stk = kzalloc(sizeof(*stk), GFP_KERNEL);
147 if (!stk)
148 goto out1;
149
150 cfg = &stk->parse.cftable_entry;
151
152 /* Tuples we are walking */
153 tuple.TupleData = (cisdata_t *)&stk->buf;
154 tuple.TupleOffset = 0;
155 tuple.TupleDataMax = 255;
156 tuple.Attributes = 0;
157 tuple.DesiredTuple = CISTPL_CONFIG;
158
159 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
160 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
161 CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
162 pdev->conf.ConfigBase = stk->parse.config.base;
163 pdev->conf.Present = stk->parse.config.rmask[0];
164
165 /* See if we have a manufacturer identifier. Use it to set is_kme for
166 vendor quirks */
167 tuple.DesiredTuple = CISTPL_MANFID;
168 if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse))
169 is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
170
171 /* Not sure if this is right... look up the current Vcc */
172 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
173/* link->conf.Vcc = stk->conf.Vcc; */
174
175 pass = io_base = ctl_base = 0;
176 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
177 tuple.Attributes = 0;
178 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
179
180 /* Now munch the resources looking for a suitable set */
181 while (1) {
182 if (pcmcia_get_tuple_data(pdev, &tuple) != 0)
183 goto next_entry;
184 if (pcmcia_parse_tuple(pdev, &tuple, &stk->parse) != 0)
185 goto next_entry;
186 /* Check for matching Vcc, unless we're desperate */
187 if (!pass) {
188 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
189 if (stk->conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
190 goto next_entry;
191 } else if (stk->dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
192 if (stk->conf.Vcc != stk->dflt.vcc.param[CISTPL_POWER_VNOM] / 10000)
193 goto next_entry;
194 }
195 }
196
197 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
198 pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
199 else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
200 pdev->conf.Vpp = stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
201
202 if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
203 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &stk->dflt.io;
204 pdev->conf.ConfigIndex = cfg->index;
205 pdev->io.BasePort1 = io->win[0].base;
206 pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
207 if (!(io->flags & CISTPL_IO_16BIT))
208 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
209 if (io->nwin == 2) {
210 pdev->io.NumPorts1 = 8;
211 pdev->io.BasePort2 = io->win[1].base;
212 pdev->io.NumPorts2 = (is_kme) ? 2 : 1;
213 if (pcmcia_request_io(pdev, &pdev->io) != 0)
214 goto next_entry;
215 io_base = pdev->io.BasePort1;
216 ctl_base = pdev->io.BasePort2;
217 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
218 pdev->io.NumPorts1 = io->win[0].len;
219 pdev->io.NumPorts2 = 0;
220 if (pcmcia_request_io(pdev, &pdev->io) != 0)
221 goto next_entry;
222 io_base = pdev->io.BasePort1;
223 ctl_base = pdev->io.BasePort1 + 0x0e;
224 } else goto next_entry;
225 /* If we've got this far, we're done */
226 break;
227 }
228next_entry:
229 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
230 memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
231 if (pass) {
232 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(pdev, &tuple));
233 } else if (pcmcia_get_next_tuple(pdev, &tuple) != 0) {
234 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
235 memset(&stk->dflt, 0, sizeof(stk->dflt));
236 pass++;
237 }
238 }
239
240 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
241 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
242
243 /* Success. Disable the IRQ nIEN line, do quirks */
244 outb(0x02, ctl_base);
245 if (is_kme)
246 outb(0x81, ctl_base + 0x01);
247
248 /* FIXME: Could be more ports at base + 0x10 but we only deal with
249 one right now */
250 if (pdev->io.NumPorts1 >= 0x20)
251 printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
252
253 /*
254 * Having done the PCMCIA plumbing the ATA side is relatively
255 * sane.
256 */
257
258 memset(&ae, 0, sizeof(struct ata_probe_ent));
259 INIT_LIST_HEAD(&ae.node);
260 ae.dev = &pdev->dev;
261 ae.port_ops = &pcmcia_port_ops;
262 ae.sht = &pcmcia_sht;
263 ae.n_ports = 1;
264 ae.pio_mask = 1; /* ISA so PIO 0 cycles */
265 ae.irq = pdev->irq.AssignedIRQ;
266 ae.irq_flags = SA_SHIRQ;
267 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
268 ae.port[0].cmd_addr = io_base;
269 ae.port[0].altstatus_addr = ctl_base;
270 ae.port[0].ctl_addr = ctl_base;
271 ata_std_ports(&ae.port[0]);
272
273 if (ata_device_add(&ae) == 0)
274 goto failed;
275
276 info->ndev = 1;
277 kfree(stk);
278 return 0;
279
280cs_failed:
281 cs_error(pdev, last_fn, last_ret);
282failed:
283 kfree(stk);
284 info->ndev = 0;
285 pcmcia_disable_device(pdev);
286out1:
287 kfree(info);
288 return ret;
289}
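
The tuple loop above is a two-pass scan: the first pass insists that the card's nominal Vcc matches the socket, and only if every entry is rejected does it restart with pass incremented and the Vcc check skipped. The control flow, reduced to a user-space sketch (the entries and match rule are stand-ins for the CIS data):

#include <stdio.h>

struct entry { int vcc; int io_ok; };

int main(void)
{
	/* Stand-in CIS entries: Vcc in tenths of a volt, I/O window fit */
	static const struct entry ent[] = { { 33, 1 }, { 50, 1 } };
	const int socket_vcc = 50;
	int pass, i, chosen = -1;

	for (pass = 0; pass < 2 && chosen < 0; pass++) {
		for (i = 0; i < 2; i++) {
			/* Pass 0: strict Vcc match; pass 1: desperate */
			if (!pass && ent[i].vcc != socket_vcc)
				continue;
			if (ent[i].io_ok) {
				chosen = i;
				break;
			}
		}
	}
	printf("chose entry %d\n", chosen);
	return 0;
}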
290
291/**
292 * pcmcia_remove_one - unplug a PCMCIA interface
293 * @pdev: pcmcia device
294 *
295 * A PCMCIA ATA device has been unplugged. Perform the needed
296 * cleanup. Also called on module unload for any active devices.
297 */
298
299static void pcmcia_remove_one(struct pcmcia_device *pdev)
300{
301 struct ata_pcmcia_info *info = pdev->priv;
302 struct device *dev = &pdev->dev;
303
304 if (info != NULL) {
305 /* If we have attached the device to the ATA layer, detach it */
306 if (info->ndev) {
307 struct ata_host *host = dev_get_drvdata(dev);
308 ata_host_remove(host);
309 dev_set_drvdata(dev, NULL);
310 }
311 info->ndev = 0;
312 pdev->priv = NULL;
313 }
314 pcmcia_disable_device(pdev);
315 kfree(info);
316}
317
318static struct pcmcia_device_id pcmcia_devices[] = {
319 PCMCIA_DEVICE_FUNC_ID(4),
320 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
321 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
322 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
323 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
324 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
325 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
326 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
327 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
328 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */
329 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
330 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
331 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
332 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
333 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
334 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
335 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
336 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
337 PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
338 PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
339 PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
340 PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
341 PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
342 PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
343 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
344 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
345 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
346 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
347 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
348 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
349 PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
350 PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
351 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
352 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
353 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
354 PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
355 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
356 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
357 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
358 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
359 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
360 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
361 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
362 PCMCIA_DEVICE_NULL,
363};
364
365MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
366
367static struct pcmcia_driver pcmcia_driver = {
368 .owner = THIS_MODULE,
369 .drv = {
370 .name = DRV_NAME,
371 },
372 .id_table = pcmcia_devices,
373 .probe = pcmcia_init_one,
374 .remove = pcmcia_remove_one,
375};
376
377static int __init pcmcia_init(void)
378{
379 return pcmcia_register_driver(&pcmcia_driver);
380}
381
382static void __exit pcmcia_exit(void)
383{
384 pcmcia_unregister_driver(&pcmcia_driver);
385}
386
387MODULE_AUTHOR("Alan Cox");
388MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
389MODULE_LICENSE("GPL");
390MODULE_VERSION(DRV_VERSION);
391
392module_init(pcmcia_init);
393module_exit(pcmcia_exit);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
new file mode 100644
index 000000000000..56b8c1ee2937
--- /dev/null
+++ b/drivers/ata/pata_pdc2027x.c
@@ -0,0 +1,869 @@
1/*
2 * Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Ported to libata by:
10 * Albert Lee <albertcc@tw.ibm.com> IBM Corporation
11 *
12 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
13 * Portions Copyright (C) 1999 Promise Technology, Inc.
14 *
15 * Author: Frank Tiernan (frankt@promise.com)
16 * Released under terms of General Public License
17 *
18 *
19 * libata documentation is available via 'make {ps|pdf}docs',
20 * as Documentation/DocBook/libata.*
21 *
22 * Hardware information only available under NDA.
23 *
24 */
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_cmnd.h>
35#include <linux/libata.h>
36#include <asm/io.h>
37
38#define DRV_NAME "pata_pdc2027x"
39#define DRV_VERSION "0.74-ac3"
40#undef PDC_DEBUG
41
42#ifdef PDC_DEBUG
43#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
44#else
45#define PDPRINTK(fmt, args...)
46#endif
47
48enum {
49 PDC_UDMA_100 = 0,
50 PDC_UDMA_133 = 1,
51
52 PDC_100_MHZ = 100000000,
53 PDC_133_MHZ = 133333333,
54
55 PDC_SYS_CTL = 0x1100,
56 PDC_ATA_CTL = 0x1104,
57 PDC_GLOBAL_CTL = 0x1108,
58 PDC_CTCR0 = 0x110C,
59 PDC_CTCR1 = 0x1110,
60 PDC_BYTE_COUNT = 0x1120,
61 PDC_PLL_CTL = 0x1202,
62};
63
64static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
65static void pdc2027x_remove_one(struct pci_dev *pdev);
66static void pdc2027x_error_handler(struct ata_port *ap);
67static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
68static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
69static void pdc2027x_post_set_mode(struct ata_port *ap);
70static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
71
72/*
73 * ATA Timing Tables based on 133MHz controller clock.
74 * These tables are only used when the controller runs on a 133MHz clock.
75 * If the controller runs on a 100MHz clock, the ASIC hardware will
76 * set the timing registers automatically when a "set feature" command
77 * is issued to the device. However, if the controller clock is 133MHz,
78 * the following tables must be used.
79 */
80static struct pdc2027x_pio_timing {
81 u8 value0, value1, value2;
82} pdc2027x_pio_timing_tbl [] = {
83 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
84 { 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
85 { 0x23, 0x26, 0x64 }, /* PIO mode 2 */
86 { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
87 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
88};
89
90static struct pdc2027x_mdma_timing {
91 u8 value0, value1;
92} pdc2027x_mdma_timing_tbl [] = {
93 { 0xdf, 0x5f }, /* MDMA mode 0 */
94 { 0x6b, 0x27 }, /* MDMA mode 1 */
95 { 0x69, 0x25 }, /* MDMA mode 2 */
96};
97
98static struct pdc2027x_udma_timing {
99 u8 value0, value1, value2;
100} pdc2027x_udma_timing_tbl [] = {
101 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
102 { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
103 { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
104 { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
105 { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
106 { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
107 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
108};
109
110static const struct pci_device_id pdc2027x_pci_tbl[] = {
111 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20268, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
112 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20269, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
113 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
114 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20271, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
115 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20275, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
116 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20276, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
117 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20277, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
118 { } /* terminate list */
119};
120
121static struct pci_driver pdc2027x_pci_driver = {
122 .name = DRV_NAME,
123 .id_table = pdc2027x_pci_tbl,
124 .probe = pdc2027x_init_one,
125 .remove = __devexit_p(pdc2027x_remove_one),
126};
127
128static struct scsi_host_template pdc2027x_sht = {
129 .module = THIS_MODULE,
130 .name = DRV_NAME,
131 .ioctl = ata_scsi_ioctl,
132 .queuecommand = ata_scsi_queuecmd,
133 .can_queue = ATA_DEF_QUEUE,
134 .this_id = ATA_SHT_THIS_ID,
135 .sg_tablesize = LIBATA_MAX_PRD,
136 .max_sectors = ATA_MAX_SECTORS,
137 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
138 .emulated = ATA_SHT_EMULATED,
139 .use_clustering = ATA_SHT_USE_CLUSTERING,
140 .proc_name = DRV_NAME,
141 .dma_boundary = ATA_DMA_BOUNDARY,
142 .slave_configure = ata_scsi_slave_config,
143 .bios_param = ata_std_bios_param,
144};
145
146static struct ata_port_operations pdc2027x_pata100_ops = {
147 .port_disable = ata_port_disable,
148
149 .tf_load = ata_tf_load,
150 .tf_read = ata_tf_read,
151 .check_status = ata_check_status,
152 .exec_command = ata_exec_command,
153 .dev_select = ata_std_dev_select,
154
155 .check_atapi_dma = pdc2027x_check_atapi_dma,
156 .bmdma_setup = ata_bmdma_setup,
157 .bmdma_start = ata_bmdma_start,
158 .bmdma_stop = ata_bmdma_stop,
159 .bmdma_status = ata_bmdma_status,
160 .qc_prep = ata_qc_prep,
161 .qc_issue = ata_qc_issue_prot,
162 .data_xfer = ata_mmio_data_xfer,
163
164 .freeze = ata_bmdma_freeze,
165 .thaw = ata_bmdma_thaw,
166 .error_handler = pdc2027x_error_handler,
167 .post_internal_cmd = ata_bmdma_post_internal_cmd,
168
169 .irq_handler = ata_interrupt,
170 .irq_clear = ata_bmdma_irq_clear,
171
172 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_pci_host_stop,
175};
176
177static struct ata_port_operations pdc2027x_pata133_ops = {
178 .port_disable = ata_port_disable,
179 .set_piomode = pdc2027x_set_piomode,
180 .set_dmamode = pdc2027x_set_dmamode,
181 .post_set_mode = pdc2027x_post_set_mode,
182
183 .tf_load = ata_tf_load,
184 .tf_read = ata_tf_read,
185 .check_status = ata_check_status,
186 .exec_command = ata_exec_command,
187 .dev_select = ata_std_dev_select,
188
189 .check_atapi_dma = pdc2027x_check_atapi_dma,
190 .bmdma_setup = ata_bmdma_setup,
191 .bmdma_start = ata_bmdma_start,
192 .bmdma_stop = ata_bmdma_stop,
193 .bmdma_status = ata_bmdma_status,
194 .qc_prep = ata_qc_prep,
195 .qc_issue = ata_qc_issue_prot,
196 .data_xfer = ata_mmio_data_xfer,
197
198 .freeze = ata_bmdma_freeze,
199 .thaw = ata_bmdma_thaw,
200 .error_handler = pdc2027x_error_handler,
201 .post_internal_cmd = ata_bmdma_post_internal_cmd,
202
203 .irq_handler = ata_interrupt,
204 .irq_clear = ata_bmdma_irq_clear,
205
206 .port_start = ata_port_start,
207 .port_stop = ata_port_stop,
208 .host_stop = ata_pci_host_stop,
209};
210
211static struct ata_port_info pdc2027x_port_info[] = {
212 /* PDC_UDMA_100 */
213 {
214 .sht = &pdc2027x_sht,
215 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
216 ATA_FLAG_MMIO,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = ATA_UDMA5, /* udma0-5 */
220 .port_ops = &pdc2027x_pata100_ops,
221 },
222 /* PDC_UDMA_133 */
223 {
224 .sht = &pdc2027x_sht,
225 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
226 ATA_FLAG_MMIO,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = ATA_UDMA6, /* udma0-6 */
230 .port_ops = &pdc2027x_pata133_ops,
231 },
232};
233
234MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
235MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
236MODULE_LICENSE("GPL");
237MODULE_VERSION(DRV_VERSION);
238MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
239
240/**
241 * port_mmio - Get the MMIO address of PDC2027x extended registers
242 * @ap: Port
243 * @offset: offset from mmio base
244 */
245static inline void* port_mmio(struct ata_port *ap, unsigned int offset)
246{
247 return ap->host->mmio_base + ap->port_no * 0x100 + offset;
248}
249
250/**
251 * dev_mmio - Get the MMIO address of PDC2027x extended registers
252 * @ap: Port
253 * @adev: device
254 * @offset: offset from mmio base
255 */
256static inline void* dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
257{
258 u8 adj = (adev->devno) ? 0x08 : 0x00;
259 return port_mmio(ap, offset) + adj;
260}
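
The extended register file is laid out as one 0x100-byte window per port, with the slave device's registers a further 0x08 above the master's. A sketch of the address arithmetic (the base value is arbitrary, for illustration only):

#include <stdio.h>

static unsigned long dev_reg(unsigned long mmio_base, int port_no,
			     int devno, unsigned int offset)
{
	/* 0x100 bytes per port, slave registers 0x08 above the master's */
	return mmio_base + port_no * 0x100 + offset + (devno ? 0x08 : 0x00);
}

int main(void)
{
	unsigned long base = 0xf0000000UL;	/* illustrative base only */

	/* PDC_CTCR0 (0x110C) for port 1, device 1 */
	printf("%#lx\n", dev_reg(base, 1, 1, 0x110C));
	return 0;
}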
261
262/**
263 * pdc2027x_cbl_detect - Probe host controller cable detect info
264 * @ap: Port for which cable detect info is desired
265 *
266 * Read 80c cable indicator from Promise extended register.
267 * This register is latched when the system is reset.
268 *
269 * LOCKING:
270 * None (inherited from caller).
271 */
272static void pdc2027x_cbl_detect(struct ata_port *ap)
273{
274 u32 cgcr;
275
276 /* check cable detect results */
277 cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL));
278 if (cgcr & (1 << 26))
279 goto cbl40;
280
281 PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
282
283 ap->cbl = ATA_CBL_PATA80;
284 return;
285
286cbl40:
287 printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
288 ap->cbl = ATA_CBL_PATA40;
289 ap->udma_mask &= ATA_UDMA_MASK_40C;
290}
291
292/**
293 * pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
294 * @ap: Port to check
295 */
296static inline int pdc2027x_port_enabled(struct ata_port *ap)
297{
298 return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
299}
300
301/**
302 * pdc2027x_prereset - prereset for PATA host controller
303 * @ap: Target port
304 *
305 * Probe init including cable detection.
306 *
307 * LOCKING:
308 * None (inherited from caller).
309 */
310
311static int pdc2027x_prereset(struct ata_port *ap)
312{
313 /* Check whether port enabled */
314 if (!pdc2027x_port_enabled(ap)) {
315 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
316 return 0;
317 }
318 pdc2027x_cbl_detect(ap);
319 return ata_std_prereset(ap);
320}
321
322/**
323 * pdc2027x_error_handler - Perform reset on PATA port and classify
324 * @ap: Port to reset
325 *
326 * Reset PATA phy and classify attached devices.
327 *
328 * LOCKING:
329 * None (inherited from caller).
330 */
331
332static void pdc2027x_error_handler(struct ata_port *ap)
333{
334 ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
335}
336
337/**
338 * pdc2027x_set_piomode - Initialize host controller PATA PIO timings
339 * @ap: Port to configure
340 * @adev: device to configure; the PIO mode (0 - 4) is
341 * taken from @adev->pio_mode
342 *
343 * Set PIO mode for device.
344 *
345 * LOCKING:
346 * None (inherited from caller).
347 */
348
349static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
350{
351 unsigned int pio = adev->pio_mode - XFER_PIO_0;
352 u32 ctcr0, ctcr1;
353
354 PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
355
356 /* Sanity check */
357 if (pio > 4) {
358 printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
359 return;
360
361 }
362
363 /* Set the PIO timing registers using value table for 133MHz */
364 PDPRINTK("Set pio regs... \n");
365
366 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
367 ctcr0 &= 0xffff0000;
368 ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
369 (pdc2027x_pio_timing_tbl[pio].value1 << 8);
370 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
371
372 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
373 ctcr1 &= 0x00ffffff;
374 ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
375 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
376
377 PDPRINTK("Set pio regs done\n");
378
379 PDPRINTK("Set to pio mode[%u] \n", pio);
380}
381
382/**
383 * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
384 * @ap: Port to configure
385 * @adev: device to configure; the DMA mode, XFER_MW_DMA_0 to
386 * XFER_UDMA_6, is taken from @adev->dma_mode
387 *
388 * Set UDMA mode for device.
389 *
390 * LOCKING:
391 * None (inherited from caller).
392 */
393static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
394{
395 unsigned int dma_mode = adev->dma_mode;
396 u32 ctcr0, ctcr1;
397
398 if ((dma_mode >= XFER_UDMA_0) &&
399 (dma_mode <= XFER_UDMA_6)) {
400 /* Set the UDMA timing registers with value table for 133MHz */
401 unsigned int udma_mode = dma_mode & 0x07;
402
403 if (dma_mode == XFER_UDMA_2) {
404 /*
405 * Turn off tHOLD.
406 * If tHOLD is '1', the hardware will add half clock for data hold time.
407 * This code segment seems to have no effect: tHOLD will be overwritten below.
408 */
409 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
410 writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
411 }
412
413 PDPRINTK("Set udma regs... \n");
414
415 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
416 ctcr1 &= 0xff000000;
417 ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
418 (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
419 (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
420 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
421
422 PDPRINTK("Set udma regs done\n");
423
424 PDPRINTK("Set to udma mode[%u] \n", udma_mode);
425
426 } else if ((dma_mode >= XFER_MW_DMA_0) &&
427 (dma_mode <= XFER_MW_DMA_2)) {
428 /* Set the MDMA timing registers with value table for 133MHz */
429 unsigned int mdma_mode = dma_mode & 0x07;
430
431 PDPRINTK("Set mdma regs... \n");
432 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
433
434 ctcr0 &= 0x0000ffff;
435 ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
436 (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
437
438 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
439 PDPRINTK("Set mdma regs done\n");
440
441 PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
442 } else {
443 printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
444 }
445}
446
447/**
448 * pdc2027x_post_set_mode - Set the timing registers back to correct values.
449 * @ap: Port to configure
450 *
451 * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
452 * automatically. The values set by the hardware might be incorrect under a 133MHz PLL.
453 * This function overwrites those possibly incorrect values with correct ones.
454 */
455static void pdc2027x_post_set_mode(struct ata_port *ap)
456{
457 int i;
458
459 for (i = 0; i < ATA_MAX_DEVICES; i++) {
460 struct ata_device *dev = &ap->device[i];
461
462 if (ata_dev_enabled(dev)) {
463
464 pdc2027x_set_piomode(ap, dev);
465
466 /*
467 * Enable prefetch if the device supports PIO only.
468 */
469 if (dev->xfer_shift == ATA_SHIFT_PIO) {
470 u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1));
471 ctcr1 |= (1 << 25);
472 writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
473
474 PDPRINTK("Turn on prefetch\n");
475 } else {
476 pdc2027x_set_dmamode(ap, dev);
477 }
478 }
479 }
480}
481
482/**
483 * pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
484 * @qc: Metadata associated with taskfile to check
485 *
486 * LOCKING:
487 * None (inherited from caller).
488 *
489 * RETURNS: 0 when ATAPI DMA can be used
490 * 1 otherwise
491 */
492static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
493{
494 struct scsi_cmnd *cmd = qc->scsicmd;
495 u8 *scsicmd = cmd->cmnd;
496 int rc = 1; /* atapi dma off by default */
497
498 /*
499 * This workaround is from Promise's GPL driver.
500 * If ATAPI DMA is used for commands not in the
501 * following white list, say MODE_SENSE and REQUEST_SENSE,
502 * pdc2027x might hit the irq lost problem.
503 */
504 switch (scsicmd[0]) {
505 case READ_10:
506 case WRITE_10:
507 case READ_12:
508 case WRITE_12:
509 case READ_6:
510 case WRITE_6:
511 case 0xad: /* READ_DVD_STRUCTURE */
512 case 0xbe: /* READ_CD */
513 /* ATAPI DMA is ok */
514 rc = 0;
515 break;
516 default:
517 ;
518 }
519
520 return rc;
521}
522
523/**
524 * pdc_read_counter - Read the byte-count counter
525 * @probe_ent: for the port address
526 */
527
528static long pdc_read_counter(struct ata_probe_ent *probe_ent)
529{
530 long counter;
531 int retry = 1;
532 u32 bccrl, bccrh, bccrlv, bccrhv;
533
534retry:
535 bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0x7fff;
536 bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
537 rmb();
538
539 /* Read the counter values again for verification */
540 bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0x7fff;
541 bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
542 rmb();
543
544 counter = (bccrh << 15) | bccrl;
545
546 PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
547 PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
548
549 /*
550 * The 30-bit decreasing counter is read in two 15-bit pieces.
551 * An incorrect value may be read while both bccrh and bccrl are changing.
552 * Ex. when 7900 decreases to 78FF, the wrong value 7800 might be read.
553 */
554 if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
555 retry--;
556 PDPRINTK("rereading counter\n");
557 goto retry;
558 }
559
560 return counter;
561}
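
Reading a live 30-bit counter through two 15-bit halves can tear: if the low half wraps between the two reads, the combined value is wrong. The defence above is to sample both halves twice and retry once unless the second sample is consistent (same high half, low half not increased, since the counter only decreases). A user-space model of that consistency check:

#include <stdio.h>

/* One sample is the two 15-bit halves of a decreasing 30-bit counter */
struct sample { unsigned int hi, lo; };

/* Returns 1 if the pair of samples may be torn and should be re-read */
static int torn(struct sample a, struct sample b)
{
	/* Consistent: high halves agree and the counter only decreased */
	return !(a.hi == b.hi && a.lo >= b.lo);
}

int main(void)
{
	struct sample a = { 0x79, 0x00 };	/* read as 7900 ... */
	struct sample b = { 0x78, 0xff };	/* ... while it became 78FF */

	printf("torn read: %d\n", torn(a, b));	/* 1 -> retry */
	return 0;
}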
562
563/**
564 * pdc_adjust_pll - Adjust the PLL input clock in Hz.
565 *
566 * @probe_ent: for the port address
567 * @pll_clock: the input clock of the PLL, in Hz
568 * @board_idx: board index selecting the 100MHz or 133MHz PLL output
569 */
570static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx)
571{
572
573 u16 pll_ctl;
574 long pll_clock_khz = pll_clock / 1000;
575 long pout_required = board_idx ? PDC_133_MHZ : PDC_100_MHZ;
576 long ratio = pout_required / pll_clock_khz;
577 int F, R;
578
579 /* Sanity check */
580 if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
581 printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
582 return;
583 }
584
585#ifdef PDC_DEBUG
586 PDPRINTK("pout_required is %ld\n", pout_required);
587
588 /* Show the current clock value of PLL control register
589 * (maybe already configured by the firmware)
590 */
591 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
592
593 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
594#endif
595
596 /*
597 * Calculate the ratio of F, R and OD
598 * POUT = (F + 2) / (( R + 2) * NO)
599 */
600 if (ratio < 8600L) { /* 8.6x */
601 /* Using NO = 0x01, R = 0x0D */
602 R = 0x0d;
603 } else if (ratio < 12900L) { /* 12.9x */
604 /* Using NO = 0x01, R = 0x08 */
605 R = 0x08;
606 } else if (ratio < 16100L) { /* 16.1x */
607 /* Using NO = 0x01, R = 0x06 */
608 R = 0x06;
609 } else if (ratio < 64000L) { /* 64x */
610 R = 0x00;
611 } else {
612 /* Invalid ratio */
613 printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
614 return;
615 }
616
617 F = (ratio * (R+2)) / 1000 - 2;
618
619 if (unlikely(F < 0 || F > 127)) {
620 /* Invalid F */
621 printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
622 return;
623 }
624
625 PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
626
627 pll_ctl = (R << 8) | F;
628
629 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
630
631 writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL);
632 readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */
633
634 /* Wait for the PLL circuit to stabilize */
635 mdelay(30);
636
637#ifdef PDC_DEBUG
638 /*
639 * Show the current clock value of PLL control register
640 * (maybe configured by the firmware)
641 */
642 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
643
644 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
645#endif
646
647 return;
648}
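
A worked example makes the F/R selection concrete. Take the 16949kHz input quoted below for a 33MHz PCI bus and a 133MHz target: ratio = 133333333 / 16949 = 7866 (i.e. 7.87x), which selects R = 0x0d; then F = (7866 * 15) / 1000 - 2 = 115, giving POUT = 16949kHz * (115 + 2) / ((13 + 2) * 1) ≈ 132.2MHz, within about 1% of target (the shortfall is integer truncation in the ratio and F computations). The same arithmetic as a checkable sketch:

#include <stdio.h>

int main(void)
{
	long pll_clock_khz = 16949;		/* example input clock */
	long pout_required = 133333333;		/* 133MHz target, in Hz */
	long ratio = pout_required / pll_clock_khz;	/* x1000 fixed point */
	int R = 0x0d;				/* chosen since ratio < 8600 */
	int F = (ratio * (R + 2)) / 1000 - 2;
	long pout = pll_clock_khz * (F + 2) / ((R + 2) * 1);	/* NO = 1 */

	printf("ratio=%ld F=%d POUT=%ldkHz\n", ratio, F, pout);
	return 0;
}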
649
650/**
651 * pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
652 * @probe_ent: for the port address
653 * Ex. 16949000Hz on a 33MHz PCI bus for the pdc20275:
654 * half of the PCI clock.
655 */
656static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
657{
658 u32 scr;
659 long start_count, end_count;
660 long pll_clock;
661
662 /* Read current counter value */
663 start_count = pdc_read_counter(probe_ent);
664
665 /* Start the test mode */
666 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
667 PDPRINTK("scr[%X]\n", scr);
668 writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
669 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
670
671 /* Let the counter run for 100 ms. */
672 mdelay(100);
673
674 /* Read the counter values again */
675 end_count = pdc_read_counter(probe_ent);
676
677 /* Stop the test mode */
678 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
679 PDPRINTK("scr[%X]\n", scr);
680 writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
681 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
682
683 /* calculate the input clock in Hz */
684 pll_clock = (start_count - end_count) * 10;
685
686 PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
687 PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
688
689 return pll_clock;
690}
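
The detection is plain frequency counting: let the counter run for 100ms, take the difference, and multiply by ten to scale counts-per-100ms to counts-per-second (Hz). E.g. a 16949000Hz input decrements the counter by 1694900 in 100ms. A one-line check (the counter values are made up for illustration):

#include <stdio.h>

int main(void)
{
	long start = 20000000, end = 18305100;	/* reads taken 100ms apart */
	long pll_clock = (start - end) * 10;	/* per-100ms -> per-second */

	printf("%ld Hz\n", pll_clock);		/* 16949000 */
	return 0;
}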
691
692/**
693 * pdc_hardware_init - Initialize the hardware.
694 * @pdev: instance of pci_dev found
695 * @pe: probe entry, for the port address
696 * @board_idx: board index (controller specific information)
697 */
698static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, unsigned int board_idx)
699{
700 long pll_clock;
701
702 /*
703 * Detect PLL input clock rate.
704 * On some systems the PCI bus runs at a non-standard clock rate,
705 * e.g. 25MHz or 40MHz, so we have to adjust the cycle time.
706 * The pdc20275 controller employs a PLL circuit to help correct the timing register settings.
707 */
708 pll_clock = pdc_detect_pll_input_clock(pe);
709
710 if (pll_clock < 0) /* counter overflow? Try again. */
711 pll_clock = pdc_detect_pll_input_clock(pe);
712
713 dev_printk(KERN_INFO, &pdev->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
714
715 /* Adjust PLL control register */
716 pdc_adjust_pll(pe, pll_clock, board_idx);
717
718 return 0;
719}
720
721/**
722 * pdc_ata_setup_port - setup the mmio address
723 * @port: ata ioports to setup
724 * @base: base address
725 */
726static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
727{
728 port->cmd_addr =
729 port->data_addr = base;
730 port->feature_addr =
731 port->error_addr = base + 0x05;
732 port->nsect_addr = base + 0x0a;
733 port->lbal_addr = base + 0x0f;
734 port->lbam_addr = base + 0x10;
735 port->lbah_addr = base + 0x15;
736 port->device_addr = base + 0x1a;
737 port->command_addr =
738 port->status_addr = base + 0x1f;
739 port->altstatus_addr =
740 port->ctl_addr = base + 0x81a;
741}
742
743/**
744 * pdc2027x_init_one - PCI probe function
745 * Called when an instance of PCI adapter is inserted.
746 * This function checks whether the hardware is supported,
747 * initializes the hardware and registers an instance of ata_host with
748 * libata by filling a struct ata_probe_ent and calling ata_device_add().
749 * (implements struct pci_driver.probe() )
750 *
751 * @pdev: instance of pci_dev found
752 * @ent: matching entry in the id_tbl[]
753 */
754static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
755{
756 static int printed_version;
757 unsigned int board_idx = (unsigned int) ent->driver_data;
758
759 struct ata_probe_ent *probe_ent = NULL;
760 unsigned long base;
761 void *mmio_base;
762 int rc;
763
764 if (!printed_version++)
765 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
766
767 rc = pci_enable_device(pdev);
768 if (rc)
769 return rc;
770
771 rc = pci_request_regions(pdev, DRV_NAME);
772 if (rc)
773 goto err_out;
774
775 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
776 if (rc)
777 goto err_out_regions;
778
779 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
780 if (rc)
781 goto err_out_regions;
782
783 /* Prepare the probe entry */
784 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
785 if (probe_ent == NULL) {
786 rc = -ENOMEM;
787 goto err_out_regions;
788 }
789
790 probe_ent->dev = pci_dev_to_dev(pdev);
791 INIT_LIST_HEAD(&probe_ent->node);
792
793 mmio_base = pci_iomap(pdev, 5, 0);
794 if (!mmio_base) {
795 rc = -ENOMEM;
796 goto err_out_free_ent;
797 }
798
799 base = (unsigned long) mmio_base;
800
801 probe_ent->sht = pdc2027x_port_info[board_idx].sht;
802 probe_ent->port_flags = pdc2027x_port_info[board_idx].flags;
803 probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask;
804 probe_ent->mwdma_mask = pdc2027x_port_info[board_idx].mwdma_mask;
805 probe_ent->udma_mask = pdc2027x_port_info[board_idx].udma_mask;
806 probe_ent->port_ops = pdc2027x_port_info[board_idx].port_ops;
807
808 probe_ent->irq = pdev->irq;
809 probe_ent->irq_flags = SA_SHIRQ;
810 probe_ent->mmio_base = mmio_base;
811
812 pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0);
813 probe_ent->port[0].bmdma_addr = base + 0x1000;
814 pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0);
815 probe_ent->port[1].bmdma_addr = base + 0x1008;
816
817 probe_ent->n_ports = 2;
818
819 pci_set_master(pdev);
820 //pci_enable_intx(pdev);
821
822 /* initialize adapter */
823 if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0)
824 goto err_out_free_ent;
825
826 ata_device_add(probe_ent);
827 kfree(probe_ent);
828
829 return 0;
830
831err_out_free_ent:
832 kfree(probe_ent);
833err_out_regions:
834 pci_release_regions(pdev);
835err_out:
836 pci_disable_device(pdev);
837 return rc;
838}
839
840/**
841 * pdc2027x_remove_one - Called to remove a single instance of the
842 * adapter.
843 *
844 * @pdev: The PCI device to remove.
845 * FIXME: module load/unload not working yet
846 */
847static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
848{
849 ata_pci_remove_one(pdev);
850}
851
852/**
853 * pdc2027x_init - Called after this module is loaded into the kernel.
854 */
855static int __init pdc2027x_init(void)
856{
857 return pci_module_init(&pdc2027x_pci_driver);
858}
859
860/**
861 * pdc2027x_exit - Called before this module unloaded from the kernel
862 */
863static void __exit pdc2027x_exit(void)
864{
865 pci_unregister_driver(&pdc2027x_pci_driver);
866}
867
868module_init(pdc2027x_init);
869module_exit(pdc2027x_exit);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
new file mode 100644
index 000000000000..48f43432764e
--- /dev/null
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -0,0 +1,423 @@
1/*
2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
7 *
8 * First cut with LBA48/ATAPI
9 *
10 * TODO:
11 * Channel interlock/reset on both required ?
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/init.h>
18#include <linux/blkdev.h>
19#include <linux/delay.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22
23#define DRV_NAME "pata_pdc202xx_old"
24#define DRV_VERSION "0.2.1"
25
26/**
27 * pdc2024x_pre_reset - probe begin
28 * @ap: ATA port
29 *
30 * Set up cable type and use generic probe init
31 */
32
33static int pdc2024x_pre_reset(struct ata_port *ap)
34{
35 ap->cbl = ATA_CBL_PATA40;
36 return ata_std_prereset(ap);
37}
38
39
40static void pdc2024x_error_handler(struct ata_port *ap)
41{
42 ata_bmdma_drive_eh(ap, pdc2024x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
43}
44
45
46static int pdc2026x_pre_reset(struct ata_port *ap)
47{
48 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
49 u16 cis;
50
51 pci_read_config_word(pdev, 0x50, &cis);
52 if (cis & (1 << (10 + ap->port_no)))
53 ap->cbl = ATA_CBL_PATA80;
54 else
55 ap->cbl = ATA_CBL_PATA40;
56
57 return ata_std_prereset(ap);
58}
59
60static void pdc2026x_error_handler(struct ata_port *ap)
61{
62 ata_bmdma_drive_eh(ap, pdc2026x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
63}
64
65/**
66 * pdc_configure_piomode - set chip PIO timing
67 * @ap: ATA interface
68 * @adev: ATA device
69 * @pio: PIO mode
70 *
71 * Called to do the PIO mode setup. Our timing registers are shared
72 * so a configure_dmamode call will undo any work we do here and vice
73 * versa
74 */
75
76static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
77{
78 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
80 static u16 pio_timing[5] = {
81 0x0913, 0x050C, 0x0308, 0x0206, 0x0104
82 };
83 u8 r_ap, r_bp;
84
85 pci_read_config_byte(pdev, port, &r_ap);
86 pci_read_config_byte(pdev, port + 1, &r_bp);
87 r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
88 r_bp &= ~0x07;
89 r_ap |= (pio_timing[pio] >> 8);
90 r_bp |= (pio_timing[pio] & 0xFF);
91
92 if (ata_pio_need_iordy(adev))
93 r_ap |= 0x20; /* IORDY enable */
94 if (adev->class == ATA_DEV_ATA)
95 r_ap |= 0x10; /* FIFO enable */
96 pci_write_config_byte(pdev, port, r_ap);
97 pci_write_config_byte(pdev, port + 1, r_bp);
98}
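
Each pio_timing[] entry is one 16-bit word whose high byte lands in the "AP" register (low 6 bits, preserving ERRDY_EN/SYNC_IN) and whose low byte lands in "BP". A sketch of the split for PIO4 (register contents are pretend values; names follow the driver's r_ap/r_bp locals):

#include <stdio.h>

int main(void)
{
	static const unsigned short pio_timing[5] = {
		0x0913, 0x050C, 0x0308, 0x0206, 0x0104
	};
	int pio = 4;
	unsigned char r_ap = 0xC0, r_bp = 0xF0;	/* pretend current contents */

	r_ap = (r_ap & ~0x3F) | (pio_timing[pio] >> 8);	  /* high byte -> AP */
	r_bp = (r_bp & ~0x07) | (pio_timing[pio] & 0xFF); /* low byte -> BP */
	printf("AP=%#04x BP=%#04x\n", r_ap, r_bp);	  /* AP=0xc1 BP=0xf4 */
	return 0;
}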
99
100/**
101 * pdc_set_piomode - set initial PIO mode data
102 * @ap: ATA interface
103 * @adev: ATA device
104 *
105 * Called to do the PIO mode setup. Our timing registers are shared
106 * but we want to set the PIO timing by default.
107 */
108
109static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
110{
111 pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
112}
113
114/**
115 * pdc_set_dmamode - set DMA mode in chip
116 * @ap: ATA interface
117 * @adev: ATA device
118 *
119 * Load DMA cycle times into the chip ready for a DMA transfer
120 * to occur.
121 */
122
123static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
124{
125 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
127 static u8 udma_timing[6][2] = {
128 { 0x60, 0x03 }, /* 33MHz clock */
129 { 0x40, 0x02 },
130 { 0x20, 0x01 },
131 { 0x40, 0x02 }, /* 66MHz clock */
132 { 0x20, 0x01 },
133 { 0x20, 0x01 }
134 };
135 u8 r_bp, r_cp;
136
137 pci_read_config_byte(pdev, port + 1, &r_bp);
138 pci_read_config_byte(pdev, port + 2, &r_cp);
139
140 r_bp &= ~0xF0;
141 r_cp &= ~0x0F;
142
143 if (adev->dma_mode >= XFER_UDMA_0) {
144 int speed = adev->dma_mode - XFER_UDMA_0;
145 r_bp |= udma_timing[speed][0];
146 r_cp |= udma_timing[speed][1];
147
148 } else {
149 int speed = adev->dma_mode - XFER_MW_DMA_0;
150 r_bp |= 0x60;
151 r_cp |= (5 - speed);
152 }
153 pci_write_config_byte(pdev, port + 1, r_bp);
154 pci_write_config_byte(pdev, port + 2, r_cp);
155
156}
157
158/**
159 * pdc2026x_bmdma_start - DMA engine begin
160 * @qc: ATA command
161 *
162 * In UDMA3 or higher we have to switch the clock for the duration of the
163 * DMA transfer sequence.
164 */
165
166static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
167{
168 struct ata_port *ap = qc->ap;
169 struct ata_device *adev = qc->dev;
170 struct ata_taskfile *tf = &qc->tf;
171 int sel66 = ap->port_no ? 0x08: 0x02;
172
173 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
174 unsigned long clock = master + 0x11;
175 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
176
177 u32 len;
178
179 /* Check we keep host level locking here */
180 if (adev->dma_mode > XFER_UDMA_2) /* 66MHz clock needed for UDMA3+ only */
181 outb(inb(clock) | sel66, clock);
182 else
183 outb(inb(clock) & ~sel66, clock);
184
185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
186 and move to qc_issue ? */
187 pdc_set_dmamode(ap, qc->dev);
188
189 /* Cases the state machine will not complete correctly without help */
190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
191 {
192 if (tf->flags & ATA_TFLAG_LBA48)
193 len = qc->nsect * 512;
194 else
195 len = qc->nbytes;
196
197 if (tf->flags & ATA_TFLAG_WRITE)
198 len |= 0x06000000;
199 else
200 len |= 0x05000000;
201
202 outl(len, atapi_reg);
203 }
204
205 /* Activate DMA */
206 ata_bmdma_start(qc);
207}
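
The ATAPI/LBA48 assist register takes the transfer byte count in the low bits with a direction code in the top byte: 0x06 for writes, 0x05 for reads. For a one-sector (512-byte) LBA48 write that is 0x06000200. A sketch of the word the code above builds (field layout inferred from the code, not from documentation):

#include <stdio.h>

static unsigned int atapi_len_word(unsigned int bytes, int is_write)
{
	/* direction code in the top byte, byte count below */
	return bytes | (is_write ? 0x06000000u : 0x05000000u);
}

int main(void)
{
	printf("%#010x\n", atapi_len_word(512, 1));	/* 0x06000200 */
	return 0;
}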
208
209/**
210 * pdc2026x_bmdma_stop - DMA engine stop
211 * @qc: ATA command
212 *
213 * After a DMA completes we need to put the clock back to 33MHz for
214 * PIO timings.
215 */
216
217static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
218{
219 struct ata_port *ap = qc->ap;
220 struct ata_device *adev = qc->dev;
221 struct ata_taskfile *tf = &qc->tf;
222
223 int sel66 = ap->port_no ? 0x08: 0x02;
224 /* The clock bits are in the same register for both channels */
225 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
226 unsigned long clock = master + 0x11;
227 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
228
229 /* Cases the state machine will not complete correctly */
230 if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
231 outl(0, atapi_reg);
232 outb(inb(clock) & ~sel66, clock);
233 }
234 /* Check we keep host level locking here */
235 /* Flip back to 33MHz for PIO */
236 if (adev->dma_mode > XFER_UDMA_2)
237 outb(inb(clock) & ~sel66, clock);
238
239 ata_bmdma_stop(qc);
240}
241
242/**
243 * pdc2026x_dev_config - device setup hook
244 * @ap: ATA port
245 * @adev: newly found device
246 *
247 * Perform chip specific early setup. We need to lock the transfer
248 * sizes to an 8-bit sector count (256 sectors) to avoid making the
249 * state engine on the 2026x cards barf.
250 */
251
252static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
253{
254 adev->max_sectors = 256;
255}
256
257static struct scsi_host_template pdc_sht = {
258 .module = THIS_MODULE,
259 .name = DRV_NAME,
260 .ioctl = ata_scsi_ioctl,
261 .queuecommand = ata_scsi_queuecmd,
262 .can_queue = ATA_DEF_QUEUE,
263 .this_id = ATA_SHT_THIS_ID,
264 .sg_tablesize = LIBATA_MAX_PRD,
265 .max_sectors = ATA_MAX_SECTORS,
266 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
267 .emulated = ATA_SHT_EMULATED,
268 .use_clustering = ATA_SHT_USE_CLUSTERING,
269 .proc_name = DRV_NAME,
270 .dma_boundary = ATA_DMA_BOUNDARY,
271 .slave_configure = ata_scsi_slave_config,
272 .bios_param = ata_std_bios_param,
273};
274
275static struct ata_port_operations pdc2024x_port_ops = {
276 .port_disable = ata_port_disable,
277 .set_piomode = pdc_set_piomode,
278 .set_dmamode = pdc_set_dmamode,
279 .mode_filter = ata_pci_default_filter,
280 .tf_load = ata_tf_load,
281 .tf_read = ata_tf_read,
282 .check_status = ata_check_status,
283 .exec_command = ata_exec_command,
284 .dev_select = ata_std_dev_select,
285
286 .freeze = ata_bmdma_freeze,
287 .thaw = ata_bmdma_thaw,
288 .error_handler = pdc2024x_error_handler,
289 .post_internal_cmd = ata_bmdma_post_internal_cmd,
290
291 .bmdma_setup = ata_bmdma_setup,
292 .bmdma_start = ata_bmdma_start,
293 .bmdma_stop = ata_bmdma_stop,
294 .bmdma_status = ata_bmdma_status,
295
296 .qc_prep = ata_qc_prep,
297 .qc_issue = ata_qc_issue_prot,
298 .data_xfer = ata_pio_data_xfer,
299
300 .irq_handler = ata_interrupt,
301 .irq_clear = ata_bmdma_irq_clear,
302
303 .port_start = ata_port_start,
304 .port_stop = ata_port_stop,
305 .host_stop = ata_host_stop
306};
307
308static struct ata_port_operations pdc2026x_port_ops = {
309 .port_disable = ata_port_disable,
310 .set_piomode = pdc_set_piomode,
311 .set_dmamode = pdc_set_dmamode,
312 .mode_filter = ata_pci_default_filter,
313 .tf_load = ata_tf_load,
314 .tf_read = ata_tf_read,
315 .check_status = ata_check_status,
316 .exec_command = ata_exec_command,
317 .dev_select = ata_std_dev_select,
318 .dev_config = pdc2026x_dev_config,
319
320 .freeze = ata_bmdma_freeze,
321 .thaw = ata_bmdma_thaw,
322 .error_handler = pdc2026x_error_handler,
323 .post_internal_cmd = ata_bmdma_post_internal_cmd,
324
325 .bmdma_setup = ata_bmdma_setup,
326 .bmdma_start = pdc2026x_bmdma_start,
327 .bmdma_stop = pdc2026x_bmdma_stop,
328 .bmdma_status = ata_bmdma_status,
329
330 .qc_prep = ata_qc_prep,
331 .qc_issue = ata_qc_issue_prot,
332 .data_xfer = ata_pio_data_xfer,
333
334 .irq_handler = ata_interrupt,
335 .irq_clear = ata_bmdma_irq_clear,
336
337 .port_start = ata_port_start,
338 .port_stop = ata_port_stop,
339 .host_stop = ata_host_stop
340};
341
342static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
343{
344 static struct ata_port_info info[3] = {
345 {
346 .sht = &pdc_sht,
347 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
348 .pio_mask = 0x1f,
349 .mwdma_mask = 0x07,
350 .udma_mask = ATA_UDMA2,
351 .port_ops = &pdc2024x_port_ops
352 },
353 {
354 .sht = &pdc_sht,
355 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
356 .pio_mask = 0x1f,
357 .mwdma_mask = 0x07,
358 .udma_mask = ATA_UDMA4,
359 .port_ops = &pdc2026x_port_ops
360 },
361 {
362 .sht = &pdc_sht,
363 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
364 .pio_mask = 0x1f,
365 .mwdma_mask = 0x07,
366 .udma_mask = ATA_UDMA5,
367 .port_ops = &pdc2026x_port_ops
368 }
369
370 };
371 static struct ata_port_info *port_info[2];
372
373 port_info[0] = port_info[1] = &info[id->driver_data];
374
375 if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
376 struct pci_dev *bridge = dev->bus->self;
377 /* Don't grab anything behind a Promise I2O RAID */
378 if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
379			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
380				return -ENODEV;
381			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
382 return -ENODEV;
383 }
384 }
385 return ata_pci_init_one(dev, port_info, 2);
386}
387
388static struct pci_device_id pdc[] = {
389 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0},
390 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1},
391 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1},
392 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2},
393 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2},
394 { 0, },
395};
396
397static struct pci_driver pdc_pci_driver = {
398 .name = DRV_NAME,
399 .id_table = pdc,
400 .probe = pdc_init_one,
401 .remove = ata_pci_remove_one
402};
403
404static int __init pdc_init(void)
405{
406 return pci_register_driver(&pdc_pci_driver);
407}
408
409
410static void __exit pdc_exit(void)
411{
412 pci_unregister_driver(&pdc_pci_driver);
413}
414
415
416MODULE_AUTHOR("Alan Cox");
417MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
418MODULE_LICENSE("GPL");
419MODULE_DEVICE_TABLE(pci, pdc);
420MODULE_VERSION(DRV_VERSION);
421
422module_init(pdc_init);
423module_exit(pdc_exit);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
new file mode 100644
index 000000000000..35cfdf0ac3f0
--- /dev/null
+++ b/drivers/ata/pata_qdi.c
@@ -0,0 +1,403 @@
1/*
2 * pata_qdi.c - QDI VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com>
4 *
5 * This driver mostly exists as a proof of concept for non PCI devices under
6 * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
7 * useful.
8 *
9 * Tuning code written from the documentation at
10 * http://www.ryston.cz/petr/vlb/qd6500.html
11 * http://www.ryston.cz/petr/vlb/qd6580.html
12 *
13 * Probe code based on drivers/ide/legacy/qd65xx.c
14 * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
15 * Samuel Thibault <samuel.thibault@fnac.net>
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26#include <linux/platform_device.h>
27
28#define DRV_NAME "pata_qdi"
29#define DRV_VERSION "0.2.4"
30
31#define NR_HOST 4 /* Two 6580s */
32
33struct qdi_data {
34 unsigned long timing;
35 u8 clock[2];
36 u8 last;
37 int fast;
38 struct platform_device *platform_dev;
39
40};
41
42static struct ata_host *qdi_host[NR_HOST];
43static struct qdi_data qdi_data[NR_HOST];
44static int nr_qdi_host;
45
46#ifdef MODULE
47static int probe_qdi = 1;
48#else
49static int probe_qdi;
50#endif
51
52static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
53{
54 struct ata_timing t;
55 struct qdi_data *qdi = ap->host->private_data;
56 int active, recovery;
57 u8 timing;
58
59 /* Get the timing data in cycles */
60 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
61
62 if (qdi->fast) {
63 active = 8 - FIT(t.active, 1, 8);
64 recovery = 18 - FIT(t.recover, 3, 18);
65 } else {
66 active = 9 - FIT(t.active, 2, 9);
67 recovery = 15 - FIT(t.recover, 0, 15);
68 }
69 timing = (recovery << 4) | active | 0x08;
70
71 qdi->clock[adev->devno] = timing;
72
73 outb(timing, qdi->timing);
74}
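
/*
 * Worked example (hypothetical cycle counts): if ata_timing_compute()
 * returned t.active = 6 and t.recover = 10 clocks on a fast board, then
 * active = 8 - 6 = 2 and recovery = 18 - 10 = 8, so
 * timing = (8 << 4) | 2 | 0x08 = 0x8A. FIT() merely clamps the computed
 * counts into the ranges the register fields can hold.
 */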
75
76static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
77{
78 struct ata_timing t;
79 struct qdi_data *qdi = ap->host->private_data;
80 int active, recovery;
81 u8 timing;
82
83 /* Get the timing data in cycles */
84 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
85
86 if (qdi->fast) {
87 active = 8 - FIT(t.active, 1, 8);
88 recovery = 18 - FIT(t.recover, 3, 18);
89 } else {
90 active = 9 - FIT(t.active, 2, 9);
91 recovery = 15 - FIT(t.recover, 0, 15);
92 }
93 timing = (recovery << 4) | active | 0x08;
94
95 qdi->clock[adev->devno] = timing;
96
97 outb(timing, qdi->timing);
98
99 /* Clear the FIFO */
100 if (adev->class != ATA_DEV_ATA)
101 outb(0x5F, (qdi->timing & 0xFFF0) + 3);
102}
103
104/**
105 * qdi_qc_issue_prot - command issue
106 * @qc: command pending
107 *
108 * Called when the libata layer is about to issue a command. We wrap
109 * this interface so that we can load the correct ATA timings.
110 */
111
112static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
113{
114 struct ata_port *ap = qc->ap;
115 struct ata_device *adev = qc->dev;
116 struct qdi_data *qdi = ap->host->private_data;
117
118 if (qdi->clock[adev->devno] != qdi->last) {
119 if (adev->pio_mode) {
120 qdi->last = qdi->clock[adev->devno];
121 outb(qdi->clock[adev->devno], qdi->timing);
122 }
123 }
124 return ata_qc_issue_prot(qc);
125}
126
127static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
128{
129 struct ata_port *ap = adev->ap;
130 int slop = buflen & 3;
131
132 if (ata_id_has_dword_io(adev->id)) {
133 if (write_data)
134 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
135 else
136 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
137
138 if (unlikely(slop)) {
139 u32 pad;
140 if (write_data) {
141 memcpy(&pad, buf + buflen - slop, slop);
142 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
143 } else {
144				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
145 memcpy(buf + buflen - slop, &pad, slop);
146 }
147 }
148 } else
149 ata_pio_data_xfer(adev, buf, buflen, write_data);
150}
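
/*
 * Illustrative example: for a 511-byte transfer with 32-bit I/O enabled,
 * 127 dwords (508 bytes) move via insl()/outsl() and the remaining
 * slop = 511 & 3 = 3 bytes go through the single padded 32-bit transfer
 * above.
 */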
151
152static struct scsi_host_template qdi_sht = {
153 .module = THIS_MODULE,
154 .name = DRV_NAME,
155 .ioctl = ata_scsi_ioctl,
156 .queuecommand = ata_scsi_queuecmd,
157 .can_queue = ATA_DEF_QUEUE,
158 .this_id = ATA_SHT_THIS_ID,
159 .sg_tablesize = LIBATA_MAX_PRD,
160 .max_sectors = ATA_MAX_SECTORS,
161 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
162 .emulated = ATA_SHT_EMULATED,
163 .use_clustering = ATA_SHT_USE_CLUSTERING,
164 .proc_name = DRV_NAME,
165 .dma_boundary = ATA_DMA_BOUNDARY,
166 .slave_configure = ata_scsi_slave_config,
167 .bios_param = ata_std_bios_param,
168};
169
170static struct ata_port_operations qdi6500_port_ops = {
171 .port_disable = ata_port_disable,
172 .set_piomode = qdi6500_set_piomode,
173
174 .tf_load = ata_tf_load,
175 .tf_read = ata_tf_read,
176 .check_status = ata_check_status,
177 .exec_command = ata_exec_command,
178 .dev_select = ata_std_dev_select,
179
180 .freeze = ata_bmdma_freeze,
181 .thaw = ata_bmdma_thaw,
182 .error_handler = ata_bmdma_error_handler,
183 .post_internal_cmd = ata_bmdma_post_internal_cmd,
184
185 .qc_prep = ata_qc_prep,
186 .qc_issue = qdi_qc_issue_prot,
187 .eng_timeout = ata_eng_timeout,
188 .data_xfer = qdi_data_xfer,
189
190 .irq_handler = ata_interrupt,
191 .irq_clear = ata_bmdma_irq_clear,
192
193 .port_start = ata_port_start,
194 .port_stop = ata_port_stop,
195 .host_stop = ata_host_stop
196};
197
198static struct ata_port_operations qdi6580_port_ops = {
199 .port_disable = ata_port_disable,
200 .set_piomode = qdi6580_set_piomode,
201
202 .tf_load = ata_tf_load,
203 .tf_read = ata_tf_read,
204 .check_status = ata_check_status,
205 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select,
207
208 .freeze = ata_bmdma_freeze,
209 .thaw = ata_bmdma_thaw,
210 .error_handler = ata_bmdma_error_handler,
211 .post_internal_cmd = ata_bmdma_post_internal_cmd,
212
213 .qc_prep = ata_qc_prep,
214 .qc_issue = qdi_qc_issue_prot,
215 .eng_timeout = ata_eng_timeout,
216 .data_xfer = qdi_data_xfer,
217
218 .irq_handler = ata_interrupt,
219 .irq_clear = ata_bmdma_irq_clear,
220
221 .port_start = ata_port_start,
222 .port_stop = ata_port_stop,
223 .host_stop = ata_host_stop
224};
225
226/**
227 *	qdi_init_one		-	attach a qdi interface
228 *	@port: timing register port
229 *	@type: Type to display
230 *	@io: I/O port start
231 *	@irq: interrupt line
232 *	@fast: True if on a > 33MHz VLB
233 *	Register an ISA bus IDE interface. Such interfaces are PIO only
234 *	and we assume they do not support IRQ sharing.
235 */
236
237static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
238{
239 struct ata_probe_ent ae;
240 struct platform_device *pdev;
241 int ret;
242
243 unsigned long ctrl = io + 0x206;
244
245 /*
246 * Fill in a probe structure first of all
247 */
248
249 pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
250	if (IS_ERR(pdev))
251		return PTR_ERR(pdev);
252
253 memset(&ae, 0, sizeof(struct ata_probe_ent));
254 INIT_LIST_HEAD(&ae.node);
255 ae.dev = &pdev->dev;
256
257 if (type == 6580) {
258 ae.port_ops = &qdi6580_port_ops;
259 ae.pio_mask = 0x1F;
260 } else {
261 ae.port_ops = &qdi6500_port_ops;
262 ae.pio_mask = 0x07; /* Actually PIO3 !IORDY is possible */
263 }
264
265 ae.sht = &qdi_sht;
266 ae.n_ports = 1;
267 ae.irq = irq;
268 ae.irq_flags = 0;
269 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
270 ae.port[0].cmd_addr = io;
271 ae.port[0].altstatus_addr = ctrl;
272 ae.port[0].ctl_addr = ctrl;
273 ata_std_ports(&ae.port[0]);
274
275 /*
276 * Hook in a private data structure per channel
277 */
278 ae.private_data = &qdi_data[nr_qdi_host];
279
280 qdi_data[nr_qdi_host].timing = port;
281 qdi_data[nr_qdi_host].fast = fast;
282 qdi_data[nr_qdi_host].platform_dev = pdev;
283
284 printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
285 ret = ata_device_add(&ae);
286 if (ret == 0) {
287 platform_device_unregister(pdev);
288 return -ENODEV;
289 }
290
291 qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
292 return 0;
293}
294
295/**
296 * qdi_init - attach qdi interfaces
297 *
298 * Attach qdi IDE interfaces by scanning the ports it may occupy.
299 */
300
301static __init int qdi_init(void)
302{
303 unsigned long flags;
304 static const unsigned long qd_port[2] = { 0x30, 0xB0 };
305 static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
306 static const int ide_irq[2] = { 14, 15 };
307
308 int ct = 0;
309 int i;
310
311 if (probe_qdi == 0)
312 return -ENODEV;
313
314 /*
315 * Check each possible QD65xx base address
316 */
317
318 for (i = 0; i < 2; i++) {
319 unsigned long port = qd_port[i];
320 u8 r, res;
321
322
323 if (request_region(port, 2, "pata_qdi")) {
324 /* Check for a card */
325 local_irq_save(flags);
326 r = inb_p(port);
327 outb_p(0x19, port);
328 res = inb_p(port);
329 outb_p(r, port);
330 local_irq_restore(flags);
331
332 /* Fail */
333			if (res == 0x19) {
334
335 release_region(port, 2);
336 continue;
337 }
338
339 /* Passes the presence test */
340			r = inb_p(port + 1);	/* Check the reported base port matches the one we probed */
341 if ((r & 2) >> 1 != i) {
342 release_region(port, 2);
343 continue;
344 }
345
346 /* Check card type */
347 if ((r & 0xF0) == 0xC0) {
348 /* QD6500: single channel */
349 if (r & 8) {
350 /* Disabled ? */
351 release_region(port, 2);
352 continue;
353 }
354 ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
355 }
356 if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
357 /* QD6580: dual channel */
358				if (!request_region(port + 2, 2, "pata_qdi")) {
359
360 release_region(port, 2);
361 continue;
362 }
363 res = inb(port + 3);
364 if (res & 1) {
365 /* Single channel mode */
366 ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
367 } else {
368 /* Dual channel mode */
369 ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04);
370 ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04);
371 }
372 }
373 }
374 }
375 if (ct != 0)
376 return 0;
377 return -ENODEV;
378}
379
380static __exit void qdi_exit(void)
381{
382 int i;
383
384 for (i = 0; i < nr_qdi_host; i++) {
385 ata_host_remove(qdi_host[i]);
386 /* Free the control resource. The 6580 dual channel has the resources
387 * claimed as a pair of 2 byte resources so we need no special cases...
388 */
389 release_region(qdi_data[i].timing, 2);
390 platform_device_unregister(qdi_data[i].platform_dev);
391 }
392}
393
394MODULE_AUTHOR("Alan Cox");
395MODULE_DESCRIPTION("low-level driver for QDI VLB ATA controllers");
396MODULE_LICENSE("GPL");
397MODULE_VERSION(DRV_VERSION);
398
399module_init(qdi_init);
400module_exit(qdi_exit);
401
402module_param(probe_qdi, int, 0);
403
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
new file mode 100644
index 000000000000..277f8411b521
--- /dev/null
+++ b/drivers/ata/pata_radisys.c
@@ -0,0 +1,335 @@
1/*
2 * pata_radisys.c - Intel PATA/SATA controllers
3 *
4 * (C) 2006 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * A PIIX relative, this device has a single ATA channel and no
9 * slave timings, SITRE or PPE. In that sense it is a close relative
10 * of the original PIIX. It does however support UDMA 33/66 per channel
11 * although no other modes/timings. Also lacking is 32bit I/O on the ATA
12 * port.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include <scsi/scsi_host.h>
23#include <linux/libata.h>
24#include <linux/ata.h>
25
26#define DRV_NAME "pata_radisys"
27#define DRV_VERSION "0.4.1"
28
29/**
30 *	radisys_pre_reset	-	probe begin
31 *	@ap: ATA port
32 *
33 *	Set up cable type and use the standard prereset
34 */
35
36static int radisys_pre_reset(struct ata_port *ap)
37{
38 ap->cbl = ATA_CBL_PATA80;
39 return ata_std_prereset(ap);
40}
41
42
43/**
44 *	radisys_pata_error_handler - Probe specified port on PATA host controller
45 *	@ap: Port to probe
46 *
47 *	Perform the standard probe-time reset handling for the port.
48 * LOCKING:
49 * None (inherited from caller).
50 */
51
52static void radisys_pata_error_handler(struct ata_port *ap)
53{
54 ata_bmdma_drive_eh(ap, radisys_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
55}
56
57/**
58 * radisys_set_piomode - Initialize host controller PATA PIO timings
59 * @ap: Port whose timings we are configuring
60 *	@adev: Device to program
61 *
62 * Set PIO mode for device, in host controller PCI config space.
63 *
64 * LOCKING:
65 * None (inherited from caller).
66 */
67
68static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
69{
70 unsigned int pio = adev->pio_mode - XFER_PIO_0;
71 struct pci_dev *dev = to_pci_dev(ap->host->dev);
72 u16 idetm_data;
73 int control = 0;
74
75 /*
76	 * See Intel Document 298600-004 for the timing programming rules
77 * for PIIX/ICH. Note that the early PIIX does not have the slave
78 * timing port at 0x44. The Radisys is a relative of the PIIX
79 * but not the same so be careful.
80 */
81
82 static const /* ISP RTC */
83 u8 timings[][2] = { { 0, 0 }, /* Check me */
84 { 0, 0 },
85 { 1, 1 },
86 { 2, 2 },
87 { 3, 3 }, };
88
89 if (pio > 0)
90 control |= 1; /* TIME1 enable */
91 if (ata_pio_need_iordy(adev))
92 control |= 2; /* IE IORDY */
93
94 pci_read_config_word(dev, 0x40, &idetm_data);
95
96 /* Enable IE and TIME as appropriate. Clear the other
97 drive timing bits */
98 idetm_data &= 0xCCCC;
99 idetm_data |= (control << (4 * adev->devno));
100 idetm_data |= (timings[pio][0] << 12) |
101 (timings[pio][1] << 8);
102 pci_write_config_word(dev, 0x40, idetm_data);
103
104 /* Track which port is configured */
105 ap->private_data = adev;
106}
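
/*
 * Worked example (assuming an IORDY-capable drive): PIO4 on drive 0
 * gives control = TIME1 | IE = 3 and ISP/RTC = 3/3, so after the 0xCCCC
 * mask idetm_data gains 0x3 in bits 0-1 plus (3 << 12) | (3 << 8) in
 * the shared ISP/RTC timing fields.
 */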
107
108/**
109 * radisys_set_dmamode - Initialize host controller PATA DMA timings
110 * @ap: Port whose timings we are configuring
111 * @adev: Device to program
113 *
114 * Set MWDMA mode for device, in host controller PCI config space.
115 *
116 * LOCKING:
117 * None (inherited from caller).
118 */
119
120static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
121{
122 struct pci_dev *dev = to_pci_dev(ap->host->dev);
123 u16 idetm_data;
124 u8 udma_enable;
125
126 static const /* ISP RTC */
127 u8 timings[][2] = { { 0, 0 },
128 { 0, 0 },
129 { 1, 1 },
130 { 2, 2 },
131 { 3, 3 }, };
132
133 /*
134 * MWDMA is driven by the PIO timings. We must also enable
135 * IORDY unconditionally.
136 */
137
138 pci_read_config_word(dev, 0x40, &idetm_data);
139 pci_read_config_byte(dev, 0x48, &udma_enable);
140
141 if (adev->dma_mode < XFER_UDMA_0) {
142 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
143 const unsigned int needed_pio[3] = {
144 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
145 };
146 int pio = needed_pio[mwdma] - XFER_PIO_0;
147 int control = 3; /* IORDY|TIME0 */
148
149 /* If the drive MWDMA is faster than it can do PIO then
150 we must force PIO0 for PIO cycles. */
151
152 if (adev->pio_mode < needed_pio[mwdma])
153 control = 1;
154
155 /* Mask out the relevant control and timing bits we will load. Also
156 clear the other drive TIME register as a precaution */
157
158 idetm_data &= 0xCCCC;
159 idetm_data |= control << (4 * adev->devno);
160 idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
161
162 udma_enable &= ~(1 << adev->devno);
163 } else {
164 u8 udma_mode;
165
166 /* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
167
168 pci_read_config_byte(dev, 0x4A, &udma_mode);
169
170 if (adev->xfer_mode == XFER_UDMA_2)
171 udma_mode &= ~ (1 << adev->devno);
172 else /* UDMA 4 */
173 udma_mode |= (1 << adev->devno);
174
175 pci_write_config_byte(dev, 0x4A, udma_mode);
176
177 udma_enable |= (1 << adev->devno);
178 }
179 pci_write_config_word(dev, 0x40, idetm_data);
180 pci_write_config_byte(dev, 0x48, udma_enable);
181
182 /* Track which port is configured */
183 ap->private_data = adev;
184}
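
/*
 * Illustrative case: a drive programmed for MWDMA2 needs PIO4-class
 * ISP/RTC timings (needed_pio[2] = XFER_PIO_4). If the drive itself can
 * only manage PIO2, control drops to 1 so that plain PIO cycles fall
 * back to PIO0-safe timing while MWDMA2 keeps the fast values.
 */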
185
186/**
187 * radisys_qc_issue_prot - command issue
188 * @qc: command pending
189 *
190 * Called when the libata layer is about to issue a command. We wrap
191 * this interface so that we can load the correct ATA timings if
192 *	necessary. Our logic also clears TIME0/TIME1 for the other device so
193 * that, even if we get this wrong, cycles to the other device will
194 * be made PIO0.
195 */
196
197static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
198{
199 struct ata_port *ap = qc->ap;
200 struct ata_device *adev = qc->dev;
201
202 if (adev != ap->private_data) {
203 /* UDMA timing is not shared */
204 if (adev->dma_mode < XFER_UDMA_0) {
205 if (adev->dma_mode)
206 radisys_set_dmamode(ap, adev);
207 else if (adev->pio_mode)
208 radisys_set_piomode(ap, adev);
209 }
210 }
211 return ata_qc_issue_prot(qc);
212}
213
214
215static struct scsi_host_template radisys_sht = {
216 .module = THIS_MODULE,
217 .name = DRV_NAME,
218 .ioctl = ata_scsi_ioctl,
219 .queuecommand = ata_scsi_queuecmd,
220 .can_queue = ATA_DEF_QUEUE,
221 .this_id = ATA_SHT_THIS_ID,
222 .sg_tablesize = LIBATA_MAX_PRD,
223 .max_sectors = ATA_MAX_SECTORS,
224 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
225 .emulated = ATA_SHT_EMULATED,
226 .use_clustering = ATA_SHT_USE_CLUSTERING,
227 .proc_name = DRV_NAME,
228 .dma_boundary = ATA_DMA_BOUNDARY,
229 .slave_configure = ata_scsi_slave_config,
230 .bios_param = ata_std_bios_param,
231};
232
233static const struct ata_port_operations radisys_pata_ops = {
234 .port_disable = ata_port_disable,
235 .set_piomode = radisys_set_piomode,
236 .set_dmamode = radisys_set_dmamode,
237 .mode_filter = ata_pci_default_filter,
238
239 .tf_load = ata_tf_load,
240 .tf_read = ata_tf_read,
241 .check_status = ata_check_status,
242 .exec_command = ata_exec_command,
243 .dev_select = ata_std_dev_select,
244
245 .freeze = ata_bmdma_freeze,
246 .thaw = ata_bmdma_thaw,
247 .error_handler = radisys_pata_error_handler,
248 .post_internal_cmd = ata_bmdma_post_internal_cmd,
249
250 .bmdma_setup = ata_bmdma_setup,
251 .bmdma_start = ata_bmdma_start,
252 .bmdma_stop = ata_bmdma_stop,
253 .bmdma_status = ata_bmdma_status,
254 .qc_prep = ata_qc_prep,
255 .qc_issue = radisys_qc_issue_prot,
256 .data_xfer = ata_pio_data_xfer,
257
258 .eng_timeout = ata_eng_timeout,
259
260 .irq_handler = ata_interrupt,
261 .irq_clear = ata_bmdma_irq_clear,
262
263 .port_start = ata_port_start,
264 .port_stop = ata_port_stop,
265 .host_stop = ata_host_stop,
266};
267
268
269/**
270 * radisys_init_one - Register PIIX ATA PCI device with kernel services
271 * @pdev: PCI device to register
272 * @ent: Entry in radisys_pci_tbl matching with @pdev
273 *
274 * Called from kernel PCI layer. We probe for combined mode (sigh),
275 * and then hand over control to libata, for it to do the rest.
276 *
277 * LOCKING:
278 * Inherited from PCI layer (may sleep).
279 *
280 * RETURNS:
281 * Zero on success, or -ERRNO value.
282 */
283
284static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
285{
286 static int printed_version;
287 static struct ata_port_info info = {
288 .sht = &radisys_sht,
289 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
290 .pio_mask = 0x1f, /* pio0-4 */
291		.mwdma_mask = 0x07, /* mwdma0-2 */
292 .udma_mask = 0x14, /* UDMA33/66 only */
293 .port_ops = &radisys_pata_ops,
294 };
295 static struct ata_port_info *port_info[2] = { &info, &info };
296
297 if (!printed_version++)
298 dev_printk(KERN_DEBUG, &pdev->dev,
299 "version " DRV_VERSION "\n");
300
301 return ata_pci_init_one(pdev, port_info, 2);
302}
303
304static const struct pci_device_id radisys_pci_tbl[] = {
305 { 0x1331, 0x8201, PCI_ANY_ID, PCI_ANY_ID, },
306 { } /* terminate list */
307};
308
309static struct pci_driver radisys_pci_driver = {
310 .name = DRV_NAME,
311 .id_table = radisys_pci_tbl,
312 .probe = radisys_init_one,
313 .remove = ata_pci_remove_one,
314};
315
316static int __init radisys_init(void)
317{
318 return pci_register_driver(&radisys_pci_driver);
319}
320
321static void __exit radisys_exit(void)
322{
323 pci_unregister_driver(&radisys_pci_driver);
324}
325
326
327module_init(radisys_init);
328module_exit(radisys_exit);
329
330MODULE_AUTHOR("Alan Cox");
331MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
332MODULE_LICENSE("GPL");
333MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
334MODULE_VERSION(DRV_VERSION);
335
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
new file mode 100644
index 000000000000..3c6d84fd4312
--- /dev/null
+++ b/drivers/ata/pata_rz1000.c
@@ -0,0 +1,205 @@
1/*
2 * RZ1000/1001 driver based upon
3 *
4 * linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003
5 * Copyright (C) 1995-1998 Linus Torvalds & author (see below)
6 * Principal Author: mlord@pobox.com (Mark Lord)
7 *
8 * See linux/MAINTAINERS for address of current maintainer.
9 *
10 * This file provides support for disabling the buggy read-ahead
11 * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/init.h>
18#include <linux/blkdev.h>
19#include <linux/delay.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22
23#define DRV_NAME "pata_rz1000"
24#define DRV_VERSION "0.2.2"
25
26
27/**
28 * rz1000_prereset - probe begin
29 * @ap: ATA port
30 *
31 * Set up cable type and use generics
32 */
33
34static int rz1000_prereset(struct ata_port *ap)
35{
36 ap->cbl = ATA_CBL_PATA40;
37 return ata_std_prereset(ap);
38}
39
40/**
41 * rz1000_error_handler - probe reset
42 * @ap: ATA port
43 *
44 * Perform the ATA standard reset sequence
45 */
46
47static void rz1000_error_handler(struct ata_port *ap)
48{
49 ata_bmdma_drive_eh(ap, rz1000_prereset, ata_std_softreset, NULL, ata_std_postreset);
50}
51
52/**
53 * rz1000_set_mode - mode setting function
54 * @ap: ATA interface
55 *
56 *	Use a non-standard set_mode function. We don't want to be tuned. We
57 * would prefer to be BIOS generic but for the fact our hardware is
58 * whacked out.
59 */
60
61static void rz1000_set_mode(struct ata_port *ap)
62{
63 int i;
64
65 for (i = 0; i < ATA_MAX_DEVICES; i++) {
66 struct ata_device *dev = &ap->device[i];
67 if (ata_dev_enabled(dev)) {
68 /* We don't really care */
69 dev->pio_mode = XFER_PIO_0;
70 dev->xfer_mode = XFER_PIO_0;
71 dev->xfer_shift = ATA_SHIFT_PIO;
72 dev->flags |= ATA_DFLAG_PIO;
73 }
74 }
75}
76
77
78static struct scsi_host_template rz1000_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .max_sectors = ATA_MAX_SECTORS,
87 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
88 .emulated = ATA_SHT_EMULATED,
89 .use_clustering = ATA_SHT_USE_CLUSTERING,
90 .proc_name = DRV_NAME,
91 .dma_boundary = ATA_DMA_BOUNDARY,
92 .slave_configure = ata_scsi_slave_config,
93 .bios_param = ata_std_bios_param,
94};
95
96static struct ata_port_operations rz1000_port_ops = {
97 .set_mode = rz1000_set_mode,
98
99 .port_disable = ata_port_disable,
100 .tf_load = ata_tf_load,
101 .tf_read = ata_tf_read,
102 .check_status = ata_check_status,
103 .exec_command = ata_exec_command,
104 .dev_select = ata_std_dev_select,
105
108 .bmdma_setup = ata_bmdma_setup,
109 .bmdma_start = ata_bmdma_start,
110 .bmdma_stop = ata_bmdma_stop,
111 .bmdma_status = ata_bmdma_status,
112
113 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot,
115 .eng_timeout = ata_eng_timeout,
116 .data_xfer = ata_pio_data_xfer,
117
118 .freeze = ata_bmdma_freeze,
119 .thaw = ata_bmdma_thaw,
120 .error_handler = rz1000_error_handler,
121 .post_internal_cmd = ata_bmdma_post_internal_cmd,
122
123 .irq_handler = ata_interrupt,
124 .irq_clear = ata_bmdma_irq_clear,
125
126 .port_start = ata_port_start,
127 .port_stop = ata_port_stop,
128 .host_stop = ata_host_stop
129};
130
131/**
132 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
133 * @pdev: PCI device to register
134 * @ent: Entry in rz1000_pci_tbl matching with @pdev
135 *
136 * Configure an RZ1000 interface. This doesn't require much special
137 * handling except that we *MUST* kill the chipset readahead or the
138 * user may experience data corruption.
139 */
140
141static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
142{
143 static int printed_version;
144 struct ata_port_info *port_info[2];
145 u16 reg;
146 static struct ata_port_info info = {
147 .sht = &rz1000_sht,
148 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
149 .pio_mask = 0x1f,
150 .port_ops = &rz1000_port_ops
151 };
152
153 if (!printed_version++)
154 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
155
156 /* Be exceptionally paranoid as we must be sure to apply the fix */
157 if (pci_read_config_word(pdev, 0x40, &reg) != 0)
158 goto fail;
159 reg &= 0xDFFF;
160 if (pci_write_config_word(pdev, 0x40, reg) != 0)
161 goto fail;
162 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
163
164 port_info[0] = &info;
165 port_info[1] = &info;
166 return ata_pci_init_one(pdev, port_info, 2);
167fail:
168	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
169 /* Not safe to use so skip */
170 return -ENODEV;
171}
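
/*
 * Note on the fix above: 0xDFFF is ~(1 << 13), so the rewrite of config
 * word 0x40 clears bit 13, which this driver treats as the read-ahead
 * enable (the bit name is inferred from the driver's intent, not from a
 * datasheet quoted here).
 */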
172
173static struct pci_device_id pata_rz1000[] = {
174 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
175 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
176 { 0, },
177};
178
179static struct pci_driver rz1000_pci_driver = {
180 .name = DRV_NAME,
181 .id_table = pata_rz1000,
182 .probe = rz1000_init_one,
183 .remove = ata_pci_remove_one
184};
185
186
187static int __init rz1000_init(void)
188{
189 return pci_register_driver(&rz1000_pci_driver);
190}
191
192static void __exit rz1000_exit(void)
193{
194 pci_unregister_driver(&rz1000_pci_driver);
195}
196
197MODULE_AUTHOR("Alan Cox");
198MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
199MODULE_LICENSE("GPL");
200MODULE_DEVICE_TABLE(pci, pata_rz1000);
201MODULE_VERSION(DRV_VERSION);
202
203module_init(rz1000_init);
204module_exit(rz1000_exit);
205
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
new file mode 100644
index 000000000000..4166c1a8a9e8
--- /dev/null
+++ b/drivers/ata/pata_sc1200.c
@@ -0,0 +1,287 @@
1/*
2 * New ATA layer SC1200 driver Alan Cox <alan@redhat.com>
3 *
4 * TODO: Mode selection filtering
5 * TODO: Can't enable the second channel until the ATA core can serialize the channels
6 * TODO: Needs custom DMA cleanup code
7 *
8 * Based very heavily on
9 *
10 * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
11 *
12 * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
13 * May be copied or modified under the terms of the GNU General Public License
14 *
15 * Development of this chipset driver was funded
16 * by the nice folks at National Semiconductor.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <scsi/scsi_host.h>
40#include <linux/libata.h>
41
42#define DRV_NAME "sc1200"
43#define DRV_VERSION "0.2.3"
44
45#define SC1200_REV_A 0x00
46#define SC1200_REV_B1 0x01
47#define SC1200_REV_B3 0x02
48#define SC1200_REV_C1 0x03
49#define SC1200_REV_D1 0x04
50
51/**
52 * sc1200_clock - PCI clock
53 *
54 * Return the PCI bus clocking for the SC1200 chipset configuration
55 *	in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
56 */
57
58static int sc1200_clock(void)
59{
60 /* Magic registers that give us the chipset data */
61 u8 chip_id = inb(0x903C);
62 u8 silicon_rev = inb(0x903D);
63 u16 pci_clock;
64
65 if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
66 return 0; /* 33 MHz mode */
67
68	/* Clock generator configuration register 0x901E: bits 8/9 select the
69	   PCI clock; 0 or 3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
70
71 pci_clock = inw(0x901E);
72 pci_clock >>= 8;
73 pci_clock &= 0x03;
74 if (pci_clock == 3)
75 pci_clock = 0;
76 return pci_clock;
77}
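
/*
 * Worked example: if inw(0x901E) returned 0x0100, bits 8/9 give 1,
 * i.e. a 48MHz PCI clock (index 1). A raw value of 3 is folded back to
 * 0 since both encode 33MHz operation.
 */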
78
79/**
80 * sc1200_set_piomode - PIO setup
81 * @ap: ATA interface
82 * @adev: device on the interface
83 *
84 * Set our PIO requirements. This is fairly simple on the SC1200
85 */
86
87static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
88{
89 static const u32 pio_timings[4][5] = {
90		{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},	// format0, 33MHz
91		{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010},	// format1, 33MHz
92		{0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021},	// format1, 48MHz
93		{0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131}	// format1, 66MHz
94 };
95
96 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
97 u32 format;
98 unsigned int reg = 0x40 + 0x10 * ap->port_no;
99 int mode = adev->pio_mode - XFER_PIO_0;
100
101 pci_read_config_dword(pdev, reg + 4, &format);
102 format >>= 31;
103 format += sc1200_clock();
104 pci_write_config_dword(pdev, reg + 8 * adev->devno,
105 pio_timings[format][mode]);
106}
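
/*
 * Worked example: with the format bit (bit 31 of reg + 4) set and a
 * 48MHz clock, format = 1 + 1 = 2, so PIO3 on drive 1 of port 0 writes
 * pio_timings[2][3] = 0x31213121 to config dword 0x40 + 8 = 0x48.
 */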
107
108/**
109 * sc1200_set_dmamode - DMA timing setup
110 * @ap: ATA interface
111 * @adev: Device being configured
112 *
113 *	We cannot mix MWDMA and UDMA without reloading the timings on
114 *	each switch between master and slave.
115 */
116
117static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
118{
119 static const u32 udma_timing[3][3] = {
120 { 0x00921250, 0x00911140, 0x00911030 },
121 { 0x00932470, 0x00922260, 0x00922140 },
122 { 0x009436A1, 0x00933481, 0x00923261 }
123 };
124
125 static const u32 mwdma_timing[3][3] = {
126 { 0x00077771, 0x00012121, 0x00002020 },
127 { 0x000BBBB2, 0x00024241, 0x00013131 },
128 { 0x000FFFF3, 0x00035352, 0x00015151 }
129 };
130
131 int clock = sc1200_clock();
132 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
133 unsigned int reg = 0x40 + 0x10 * ap->port_no;
134 int mode = adev->dma_mode;
135 u32 format;
136
137 if (mode >= XFER_UDMA_0)
138 format = udma_timing[clock][mode - XFER_UDMA_0];
139 else
140 format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
141
142 if (adev->devno == 0) {
143 u32 timings;
144
145 pci_read_config_dword(pdev, reg + 4, &timings);
146 timings &= 0x80000000UL;
147 timings |= format;
148 pci_write_config_dword(pdev, reg + 4, timings);
149 } else
150 pci_write_config_dword(pdev, reg + 12, format);
151}
152
153/**
154 * sc1200_qc_issue_prot - command issue
155 * @qc: command pending
156 *
157 * Called when the libata layer is about to issue a command. We wrap
158 * this interface so that we can load the correct ATA timings if
159 *	necessary. Specifically we have a problem that there is only
160 * one MWDMA/UDMA bit.
161 */
162
163static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
164{
165 struct ata_port *ap = qc->ap;
166 struct ata_device *adev = qc->dev;
167 struct ata_device *prev = ap->private_data;
168
169 /* See if the DMA settings could be wrong */
170 if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
171 /* Maybe, but do the channels match MWDMA/UDMA ? */
172 if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
173 (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
174 /* Switch the mode bits */
175 sc1200_set_dmamode(ap, adev);
176 }
177
178 return ata_qc_issue_prot(qc);
179}
180
181static struct scsi_host_template sc1200_sht = {
182 .module = THIS_MODULE,
183 .name = DRV_NAME,
184 .ioctl = ata_scsi_ioctl,
185 .queuecommand = ata_scsi_queuecmd,
186 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING,
193 .proc_name = DRV_NAME,
194 .dma_boundary = ATA_DMA_BOUNDARY,
195 .slave_configure = ata_scsi_slave_config,
196 .bios_param = ata_std_bios_param,
197};
198
199static struct ata_port_operations sc1200_port_ops = {
200 .port_disable = ata_port_disable,
201 .set_piomode = sc1200_set_piomode,
202 .set_dmamode = sc1200_set_dmamode,
203 .mode_filter = ata_pci_default_filter,
204
205 .tf_load = ata_tf_load,
206 .tf_read = ata_tf_read,
207 .check_status = ata_check_status,
208 .exec_command = ata_exec_command,
209 .dev_select = ata_std_dev_select,
210
211 .error_handler = ata_bmdma_error_handler,
212
213 .bmdma_setup = ata_bmdma_setup,
214 .bmdma_start = ata_bmdma_start,
215 .bmdma_stop = ata_bmdma_stop,
216 .bmdma_status = ata_bmdma_status,
217
218 .qc_prep = ata_qc_prep,
219 .qc_issue = sc1200_qc_issue_prot,
220 .eng_timeout = ata_eng_timeout,
221 .data_xfer = ata_pio_data_xfer,
222
223 .irq_handler = ata_interrupt,
224 .irq_clear = ata_bmdma_irq_clear,
225
226 .port_start = ata_port_start,
227 .port_stop = ata_port_stop,
228 .host_stop = ata_host_stop
229};
230
231/**
232 * sc1200_init_one - Initialise an SC1200
233 * @dev: PCI device
234 * @id: Entry in match table
235 *
236 * Just throw the needed data at the libata helper and it does all
237 * our work.
238 */
239
240static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
241{
242 static struct ata_port_info info = {
243 .sht = &sc1200_sht,
244 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
245 .pio_mask = 0x1f,
246 .mwdma_mask = 0x07,
247 .udma_mask = 0x07,
248 .port_ops = &sc1200_port_ops
249 };
250 static struct ata_port_info *port_info[2] = { &info, &info };
251
252 /* Can't enable port 2 yet, see top comments */
253 return ata_pci_init_one(dev, port_info, 1);
254}
255
256static struct pci_device_id sc1200[] = {
257 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
258 { 0, },
259};
260
261static struct pci_driver sc1200_pci_driver = {
262 .name = DRV_NAME,
263 .id_table = sc1200,
264 .probe = sc1200_init_one,
265 .remove = ata_pci_remove_one
266};
267
268static int __init sc1200_init(void)
269{
270 return pci_register_driver(&sc1200_pci_driver);
271}
272
273
274static void __exit sc1200_exit(void)
275{
276 pci_unregister_driver(&sc1200_pci_driver);
277}
278
279
280MODULE_AUTHOR("Alan Cox, Mark Lord");
281MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
282MODULE_LICENSE("GPL");
283MODULE_DEVICE_TABLE(pci, sc1200);
284MODULE_VERSION(DRV_VERSION);
285
286module_init(sc1200_init);
287module_exit(sc1200_exit);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
new file mode 100644
index 000000000000..af456113c55d
--- /dev/null
+++ b/drivers/ata/pata_serverworks.c
@@ -0,0 +1,587 @@
1/*
2 *	pata_serverworks.c	-	Serverworks PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * serverworks.c
9 *
10 * Copyright (C) 1998-2000 Michel Aubry
11 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
12 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
13 * Portions copyright (c) 2001 Sun Microsystems
14 *
15 *
16 * RCC/ServerWorks IDE driver for Linux
17 *
18 * OSB4: `Open South Bridge' IDE Interface (fn 1)
19 * supports UDMA mode 2 (33 MB/s)
20 *
21 * CSB5: `Champion South Bridge' IDE Interface (fn 1)
22 * all revisions support UDMA mode 4 (66 MB/s)
23 * revision A2.0 and up support UDMA mode 5 (100 MB/s)
24 *
25 * *** The CSB5 does not provide ANY register ***
26 * *** to detect 80-conductor cable presence. ***
27 *
28 * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
29 *
30 * Documentation:
31 * Available under NDA only. Errata info very hard to get.
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <scsi/scsi_host.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.3.6"
45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
48
49/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
50 * can overrun their FIFOs when used with the CSB5 */
51
52static const char *csb_bad_ata100[] = {
53 "ST320011A",
54 "ST340016A",
55 "ST360021A",
56 "ST380021A",
57 NULL
58};
59
60/**
61 * dell_cable - Dell serverworks cable detection
62 * @ap: ATA port to do cable detect
63 *
64 *	Dell hides the 40/80 pin select for their interfaces in the top two
65 * bits of the subsystem ID.
66 */
67
68static int dell_cable(struct ata_port *ap) {
69 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
70
71 if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
72 return ATA_CBL_PATA80;
73 return ATA_CBL_PATA40;
74}
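
/*
 * Illustrative decode: with subsystem_device = 0x8000, bit 15 is set,
 * so port 1 (bit 14 + 1) reports an 80-wire cable while port 0 (bit 14)
 * reports 40-wire.
 */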
75
76/**
77 * sun_cable - Sun Cobalt 'Alpine' cable detection
78 * @ap: ATA port to do cable select
79 *
80 *	Cobalt CSB5 IDE hides the 40/80 pin select in the top two bits of
81 *	the subsystem ID, the same as Dell. We could share one function but
82 *	we may need to extend the Dell one in future.
83 */
84
85static int sun_cable(struct ata_port *ap) {
86 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
87
88 if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
89 return ATA_CBL_PATA80;
90 return ATA_CBL_PATA40;
91}
92
93/**
94 * osb4_cable - OSB4 cable detect
95 * @ap: ATA port to check
96 *
97 * The OSB4 isn't UDMA66 capable so this is easy
98 */
99
100static int osb4_cable(struct ata_port *ap) {
101 return ATA_CBL_PATA40;
102}
103
104/**
105 *	csb_cable	-	CSB5/6 cable detect
106 * @ap: ATA port to check
107 *
108 * Serverworks default arrangement is to use the drive side detection
109 * only.
110 */
111
112static int csb_cable(struct ata_port *ap) {
113 return ATA_CBL_PATA80;
114}
115
116struct sv_cable_table {
117 int device;
118 int subvendor;
119 int (*cable_detect)(struct ata_port *ap);
120};
121
122/*
123 * Note that we don't copy the old serverworks code because the old
124 * code contains obvious mistakes
125 */
126
127static struct sv_cable_table cable_detect[] = {
128 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
129 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
130 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
131 { PCI_DEVICE_ID_SERVERWORKS_OSB4, PCI_ANY_ID, osb4_cable },
132 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
133 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
134 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
135 { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
136 { }
137};
138
139/**
140 * serverworks_pre_reset - cable detection
141 * @ap: ATA port
142 *
143 * Perform cable detection according to the device and subvendor
144 * identifications
145 */
146
147static int serverworks_pre_reset(struct ata_port *ap) {
148 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
149 struct sv_cable_table *cb = cable_detect;
150
151	while (cb->device) {
152 if (cb->device == pdev->device &&
153 (cb->subvendor == pdev->subsystem_vendor ||
154 cb->subvendor == PCI_ANY_ID)) {
155 ap->cbl = cb->cable_detect(ap);
156 return ata_std_prereset(ap);
157 }
158 cb++;
159 }
160
161 BUG();
162 return -1; /* kill compiler warning */
163}
164
165static void serverworks_error_handler(struct ata_port *ap)
166{
167 return ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
168}
169
170/**
171 * serverworks_is_csb - Check for CSB or OSB
172 * @pdev: PCI device to check
173 *
174 * Returns true if the device being checked is known to be a CSB
175 * series device.
176 */
177
178static u8 serverworks_is_csb(struct pci_dev *pdev)
179{
180 switch (pdev->device) {
181 case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
182 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
183 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
184 case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
185 return 1;
186 default:
187 break;
188 }
189 return 0;
190}
191
192/**
193 * serverworks_osb4_filter - mode selection filter
194 * @ap: ATA interface
195 * @adev: ATA device
196 *
197 * Filter the offered modes for the device to apply controller
198 * specific rules. OSB4 requires no UDMA for disks due to a FIFO
199 * bug we hit.
200 */
201
202static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
203{
204 if (adev->class == ATA_DEV_ATA)
205 mask &= ~ATA_MASK_UDMA;
206 return ata_pci_default_filter(ap, adev, mask);
207}
208
209
210/**
211 * serverworks_csb_filter - mode selection filter
212 * @ap: ATA interface
213 * @adev: ATA device
214 *	@mask: proposed transfer mode mask
215 * Check the blacklist and disable UDMA5 if matched
216 */
217
218static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
219{
220 const char *p;
221 char model_num[40];
222 int len, i;
223
224 /* Disk, UDMA */
225 if (adev->class != ATA_DEV_ATA)
226 return ata_pci_default_filter(ap, adev, mask);
227
228 /* Actually do need to check */
229 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
230	/* Precautionary - why not do this in the libata core? */
231
232 len = strlen(model_num);
233 while ((len > 0) && (model_num[len - 1] == ' ')) {
234 len--;
235 model_num[len] = 0;
236 }
237
238	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
239 if (!strncmp(p, model_num, len))
240			mask &= ~(0xE0 << ATA_SHIFT_UDMA);	/* cap at UDMA4 */
241 }
242 return ata_pci_default_filter(ap, adev, mask);
243}
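
/*
 * Illustrative example: an "ST320011A" reports class ATA_DEV_ATA and
 * matches the blacklist, so bits 5-7 of the UDMA field are cleared and
 * the drive is capped at UDMA4 (66MB/s) on the CSB5.
 */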
244
245
246/**
247 * serverworks_set_piomode - set initial PIO mode data
248 * @ap: ATA interface
249 * @adev: ATA device
250 *
251 * Program the OSB4/CSB5 timing registers for PIO. The PIO register
252 * load is done as a simple lookup.
253 */
254static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
255{
256 static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
257 int offset = 1 + (2 * ap->port_no) - adev->devno;
258 int devbits = (2 * ap->port_no + adev->devno) * 4;
259 u16 csb5_pio;
260 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
261 int pio = adev->pio_mode - XFER_PIO_0;
262
263 pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
264
265 /* The OSB4 just requires the timing but the CSB series want the
266 mode number as well */
267 if (serverworks_is_csb(pdev)) {
268 pci_read_config_word(pdev, 0x4A, &csb5_pio);
269 csb5_pio &= ~(0x0F << devbits);
270		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
271 }
272}
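
/*
 * Worked example: PIO4 on port 1, drive 0 gives offset = 1 + 2 - 0 = 3,
 * so pio_mode[4] = 0x20 lands in config byte 0x43; devbits = 8, so on
 * CSB parts the mode nibble at bits 8-11 of word 0x4A becomes 4.
 */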
273
274/**
275 * serverworks_set_dmamode - set initial DMA mode data
276 * @ap: ATA interface
277 * @adev: ATA device
278 *
279 * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
280 * chipset. The MWDMA mode values are pulled from a lookup table
281 * while the chipset uses mode number for UDMA.
282 */
283
284static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
285{
286 static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
287 int offset = 1 + 2 * ap->port_no - adev->devno;
288 int devbits = (2 * ap->port_no + adev->devno);
289 u8 ultra;
290 u8 ultra_cfg;
291 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
292
293 pci_read_config_byte(pdev, 0x54, &ultra_cfg);
294
295 if (adev->dma_mode >= XFER_UDMA_0) {
296 pci_write_config_byte(pdev, 0x44 + offset, 0x20);
297
298 pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
299		ultra &= ~(0x0F << (adev->devno * 4));
300		ultra |= (adev->dma_mode - XFER_UDMA_0)
301			<< (adev->devno * 4);
302 pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
303
304 ultra_cfg |= (1 << devbits);
305 } else {
306 pci_write_config_byte(pdev, 0x44 + offset,
307 dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
308 ultra_cfg &= ~(1 << devbits);
309 }
310 pci_write_config_byte(pdev, 0x54, ultra_cfg);
311}
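
/*
 * Illustrative example (using the per-drive nibble layout assumed in
 * the fix above): UDMA4 on port 0, drive 1 writes 0x20 to byte 0x44
 * (offset = 0), puts 4 in the high nibble of the port's mode register
 * 0x56, and sets bit 1 (devbits) in the UDMA enable register 0x54.
 */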
312
313static struct scsi_host_template serverworks_sht = {
314 .module = THIS_MODULE,
315 .name = DRV_NAME,
316 .ioctl = ata_scsi_ioctl,
317 .queuecommand = ata_scsi_queuecmd,
318 .can_queue = ATA_DEF_QUEUE,
319 .this_id = ATA_SHT_THIS_ID,
320 .sg_tablesize = LIBATA_MAX_PRD,
321 .max_sectors = ATA_MAX_SECTORS,
322 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
323 .emulated = ATA_SHT_EMULATED,
324 .use_clustering = ATA_SHT_USE_CLUSTERING,
325 .proc_name = DRV_NAME,
326 .dma_boundary = ATA_DMA_BOUNDARY,
327 .slave_configure = ata_scsi_slave_config,
328 .bios_param = ata_std_bios_param,
329};
330
331static struct ata_port_operations serverworks_osb4_port_ops = {
332 .port_disable = ata_port_disable,
333 .set_piomode = serverworks_set_piomode,
334 .set_dmamode = serverworks_set_dmamode,
335 .mode_filter = serverworks_osb4_filter,
336
337 .tf_load = ata_tf_load,
338 .tf_read = ata_tf_read,
339 .check_status = ata_check_status,
340 .exec_command = ata_exec_command,
341 .dev_select = ata_std_dev_select,
342
343 .freeze = ata_bmdma_freeze,
344 .thaw = ata_bmdma_thaw,
345 .error_handler = serverworks_error_handler,
346 .post_internal_cmd = ata_bmdma_post_internal_cmd,
347
348 .bmdma_setup = ata_bmdma_setup,
349 .bmdma_start = ata_bmdma_start,
350 .bmdma_stop = ata_bmdma_stop,
351 .bmdma_status = ata_bmdma_status,
352
353 .qc_prep = ata_qc_prep,
354 .qc_issue = ata_qc_issue_prot,
355 .eng_timeout = ata_eng_timeout,
356 .data_xfer = ata_pio_data_xfer,
357
358 .irq_handler = ata_interrupt,
359 .port_start = ata_port_start,
360 .port_stop = ata_port_stop,
361 .host_stop = ata_host_stop
362};
363
364static struct ata_port_operations serverworks_csb_port_ops = {
365 .port_disable = ata_port_disable,
366 .set_piomode = serverworks_set_piomode,
367 .set_dmamode = serverworks_set_dmamode,
368 .mode_filter = serverworks_csb_filter,
369
370 .tf_load = ata_tf_load,
371 .tf_read = ata_tf_read,
372 .check_status = ata_check_status,
373 .exec_command = ata_exec_command,
374 .dev_select = ata_std_dev_select,
375
376 .freeze = ata_bmdma_freeze,
377 .thaw = ata_bmdma_thaw,
378 .error_handler = serverworks_error_handler,
379 .post_internal_cmd = ata_bmdma_post_internal_cmd,
380
381 .bmdma_setup = ata_bmdma_setup,
382 .bmdma_start = ata_bmdma_start,
383 .bmdma_stop = ata_bmdma_stop,
384 .bmdma_status = ata_bmdma_status,
385
386 .qc_prep = ata_qc_prep,
387 .qc_issue = ata_qc_issue_prot,
388 .eng_timeout = ata_eng_timeout,
389 .data_xfer = ata_pio_data_xfer,
390
391 .irq_handler = ata_interrupt,
392 .port_start = ata_port_start,
393 .port_stop = ata_port_stop,
394 .host_stop = ata_host_stop
395};
396
397static int serverworks_fixup_osb4(struct pci_dev *pdev)
398{
399 u32 reg;
400 struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
401 PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
402 if (isa_dev) {
403 pci_read_config_dword(isa_dev, 0x64, &reg);
404 reg &= ~0x00002000; /* disable 600ns interrupt mask */
405 if (!(reg & 0x00004000))
406 printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
407 reg |= 0x00004000; /* enable UDMA/33 support */
408 pci_write_config_dword(isa_dev, 0x64, reg);
409 pci_dev_put(isa_dev);
410 return 0;
411 }
412 printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n");
413 return -ENODEV;
414}
415
416static int serverworks_fixup_csb(struct pci_dev *pdev)
417{
418 u8 rev;
419 u8 btr;
420
421 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
422
423 /* Third Channel Test */
424 if (!(PCI_FUNC(pdev->devfn) & 1)) {
425 struct pci_dev * findev = NULL;
426 u32 reg4c = 0;
427 findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
428 PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
429 if (findev) {
430 pci_read_config_dword(findev, 0x4C, &reg4c);
431 reg4c &= ~0x000007FF;
432 reg4c |= 0x00000040;
433 reg4c |= 0x00000020;
434 pci_write_config_dword(findev, 0x4C, reg4c);
435 pci_dev_put(findev);
436 }
437 } else {
438 struct pci_dev * findev = NULL;
439 u8 reg41 = 0;
440
441 findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
442 PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
443 if (findev) {
444 pci_read_config_byte(findev, 0x41, &reg41);
445 reg41 &= ~0x40;
446 pci_write_config_byte(findev, 0x41, reg41);
447 pci_dev_put(findev);
448 }
449 }
450 /* setup the UDMA Control register
451 *
452 * 1. clear bit 6 to enable DMA
453 * 2. enable DMA modes with bits 0-1
454 * 00 : legacy
455 * 01 : udma2
456 * 10 : udma2/udma4
457 * 11 : udma2/udma4/udma5
458 */
459 pci_read_config_byte(pdev, 0x5A, &btr);
460 btr &= ~0x40;
461 if (!(PCI_FUNC(pdev->devfn) & 1))
462 btr |= 0x2;
463 else
464 btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
465 pci_write_config_byte(pdev, 0x5A, btr);
466
467 return btr;
468}
469
470static void serverworks_fixup_ht1000(struct pci_dev *pdev)
471{
472 u8 btr;
473 /* Setup HT1000 SouthBridge Controller - Single Channel Only */
474 pci_read_config_byte(pdev, 0x5A, &btr);
475 btr &= ~0x40;
476 btr |= 0x3;
477 pci_write_config_byte(pdev, 0x5A, btr);
478}
479
480
481static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
482{
483 int ports = 2;
484 static struct ata_port_info info[4] = {
485 { /* OSB4 */
486 .sht = &serverworks_sht,
487 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
488 .pio_mask = 0x1f,
489 .mwdma_mask = 0x07,
490 .udma_mask = 0x07,
491 .port_ops = &serverworks_osb4_port_ops
492 }, { /* OSB4 no UDMA */
493 .sht = &serverworks_sht,
494 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
495 .pio_mask = 0x1f,
496 .mwdma_mask = 0x07,
497 .udma_mask = 0x00,
498 .port_ops = &serverworks_osb4_port_ops
499 }, { /* CSB5 */
500 .sht = &serverworks_sht,
501 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
502 .pio_mask = 0x1f,
503 .mwdma_mask = 0x07,
504 .udma_mask = 0x1f,
505 .port_ops = &serverworks_csb_port_ops
506 }, { /* CSB5 - later revisions*/
507 .sht = &serverworks_sht,
508 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
509 .pio_mask = 0x1f,
510 .mwdma_mask = 0x07,
511 .udma_mask = 0x3f,
512 .port_ops = &serverworks_csb_port_ops
513 }
514 };
515 static struct ata_port_info *port_info[2];
516 struct ata_port_info *devinfo = &info[id->driver_data];
517
518 /* Force master latency timer to 64 PCI clocks */
519 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
520
521 /* OSB4 : South Bridge and IDE */
522 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
523 /* Select non UDMA capable OSB4 if we can't do fixups */
524		if (serverworks_fixup_osb4(pdev) < 0)
525 devinfo = &info[1];
526 }
527 /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
528 else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
529 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
530 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
531
532		/* If the fixup programmed the newer-revision BTR value (3),
533		   select the UDMA100-capable info block */
534 if (serverworks_fixup_csb(pdev) == 3)
535 devinfo = &info[3];
536
537 /* Is this the 3rd channel CSB6 IDE ? */
538 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
539 ports = 1;
540 }
541 /* setup HT1000E */
542 else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
543 serverworks_fixup_ht1000(pdev);
544
545 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
546 ata_pci_clear_simplex(pdev);
547
548 port_info[0] = port_info[1] = devinfo;
549 return ata_pci_init_one(pdev, port_info, ports);
550}
551
552static struct pci_device_id serverworks[] = {
553 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
554 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
555 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
556 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
557 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
558 { 0, },
559};
560
561static struct pci_driver serverworks_pci_driver = {
562 .name = DRV_NAME,
563 .id_table = serverworks,
564 .probe = serverworks_init_one,
565 .remove = ata_pci_remove_one
566};
567
568static int __init serverworks_init(void)
569{
570 return pci_register_driver(&serverworks_pci_driver);
571}
572
573
574static void __exit serverworks_exit(void)
575{
576 pci_unregister_driver(&serverworks_pci_driver);
577}
578
579
580MODULE_AUTHOR("Alan Cox");
581MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
582MODULE_LICENSE("GPL");
583MODULE_DEVICE_TABLE(pci, serverworks);
584MODULE_VERSION(DRV_VERSION);
585
586module_init(serverworks_init);
587module_exit(serverworks_exit);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
new file mode 100644
index 000000000000..8f7db9638d0a
--- /dev/null
+++ b/drivers/ata/pata_sil680.c
@@ -0,0 +1,381 @@
1/*
2 * pata_sil680.c - SIL680 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
9 *
10 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
11 * Copyright (C) 2003 Red Hat <alan@redhat.com>
12 *
13 * May be copied or modified under the terms of the GNU General Public License
14 *
15 * Documentation publicly available.
16 *
17 * If you have strange problems with nVidia chipset systems please
18 * see the SI support documentation and update your system BIOS
19 * if necessary
20 *
21 * TODO
22 * If we know all our devices are LBA28 (or LBA28 sized) we could use
23 * the command fifo mode.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34
35#define DRV_NAME "pata_sil680"
36#define DRV_VERSION "0.3.2"
37
38/**
39 * sil680_selreg - return register base
40 * @ap: ATA port
41 * @r: config offset
42 *
43 * Turn a config register offset into the right address in either
44 * PCI space or MMIO space to access the control register in question.
45 * Thankfully this is a configuration operation, so it isn't performance
46 * critical.
47 */
48
49static unsigned long sil680_selreg(struct ata_port *ap, int r)
50{
51 unsigned long base = 0xA0 + r;
52 base += (ap->port_no << 4);
53 return base;
54}
55
56/**
57 * sil680_seldev - return register base
58 * @ap: ATA port
59 * @r: config offset
60 *
61 * Turn a config register offset into the right address in either
62 * PCI space or MMIO space to access the control register in question
63 * including accounting for the unit shift.
64 */
65
66static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
67{
68 unsigned long base = 0xA0 + r;
69 base += (ap->port_no << 4);
70 base |= adev->devno ? 2 : 0;
71 return base;
72}
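
/*
 * A quick worked example of the addressing above (hypothetical values,
 * added for illustration): the per-port register file starts at 0xA0,
 * each port occupies a 16-byte window, and the slave device's registers
 * sit 2 bytes above the master's. So for port_no = 1, devno = 1 and
 * config offset r = 0x04:
 *
 *	(0xA0 + 0x04 + (1 << 4)) | 2  ==  0xB6
 */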
73
74
75/**
76 * sil680_cable_detect - cable detection
77 * @ap: ATA port
78 *
79 * Perform cable detection. The SIL680 stores this in PCI config
80 * space for us.
81 */
82
83static int sil680_cable_detect(struct ata_port *ap) {
84 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
85 unsigned long addr = sil680_selreg(ap, 0);
86 u8 ata66;
87 pci_read_config_byte(pdev, addr, &ata66);
88 if (ata66 & 1)
89 return ATA_CBL_PATA80;
90 else
91 return ATA_CBL_PATA40;
92}
93
94static int sil680_pre_reset(struct ata_port *ap)
95{
96 ap->cbl = sil680_cable_detect(ap);
97 return ata_std_prereset(ap);
98}
99
100/**
101 * sil680_bus_reset - reset the SIL680 bus
102 * @ap: ATA port to reset
103 *
104 * Perform the SIL680 housekeeping when doing an ATA bus reset
105 */
106
107static int sil680_bus_reset(struct ata_port *ap, unsigned int *classes)
108{
109 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
110 unsigned long addr = sil680_selreg(ap, 0);
111 u8 reset;
112
113 pci_read_config_byte(pdev, addr, &reset);
114 pci_write_config_byte(pdev, addr, reset | 0x03);
115 udelay(25);
116 pci_write_config_byte(pdev, addr, reset);
117 return ata_std_softreset(ap, classes);
118}
119
120static void sil680_error_handler(struct ata_port *ap)
121{
122 ata_bmdma_drive_eh(ap, sil680_pre_reset, sil680_bus_reset, NULL, ata_std_postreset);
123}
124
125/**
126 * sil680_set_piomode - set initial PIO mode data
127 * @ap: ATA interface
128 * @adev: ATA device
129 *
130 * Program the SIL680 registers for PIO mode. Note that the task speed
131 * registers are shared between the devices so we must pick the lowest
132 * mode for command work.
133 */
134
135static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
136{
137 static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
138 static u16 speed_t[5] = { 0x328A, 0x1281, 0x1281, 0x10C3, 0x10C1 };
139
140 unsigned long tfaddr = sil680_selreg(ap, 0x02);
141 unsigned long addr = sil680_seldev(ap, adev, 0x04);
142 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
143 int pio = adev->pio_mode - XFER_PIO_0;
144 int lowest_pio = pio;
145 u16 reg;
146
147 struct ata_device *pair = ata_dev_pair(adev);
148
149 if (pair != NULL && adev->pio_mode > pair->pio_mode)
150 lowest_pio = pair->pio_mode - XFER_PIO_0;
151
152 pci_write_config_word(pdev, addr, speed_p[pio]);
153 pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
154
155 pci_read_config_word(pdev, tfaddr-2, &reg);
156 reg &= ~0x0200; /* Clear IORDY */
157 if (ata_pio_need_iordy(adev))
158 reg |= 0x0200; /* Enable IORDY */
159 pci_write_config_word(pdev, tfaddr-2, reg);
160}
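
/*
 * Example of the shared task-file timing (hypothetical device pair,
 * for illustration): if the master runs PIO4 and the slave PIO0, the
 * master's data timing register is loaded with speed_p[4] (0x10C1),
 * but the shared command timing register at tfaddr gets speed_t[0]
 * (0x328A), i.e. the slower of the two modes, since commands may
 * address either device.
 */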
161
162/**
163 * sil680_set_dmamode - set initial DMA mode data
164 * @ap: ATA interface
165 * @adev: ATA device
166 *
167 * Program the MWDMA/UDMA modes for the sil680
168 * chipset. The MWDMA mode values are pulled from a lookup table
169 * while the chipset uses mode number for UDMA.
170 */
171
172static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
173{
174 static u8 ultra_table[2][7] = {
175 { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
176		{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133MHz */
177 };
178 static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
179
180 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
181 unsigned long ma = sil680_seldev(ap, adev, 0x08);
182 unsigned long ua = sil680_seldev(ap, adev, 0x0C);
183 unsigned long addr_mask = 0x80 + 4 * ap->port_no;
184 int port_shift = adev->devno * 4;
185 u8 scsc, mode;
186 u16 multi, ultra;
187
188 pci_read_config_byte(pdev, 0x8A, &scsc);
189 pci_read_config_byte(pdev, addr_mask, &mode);
190 pci_read_config_word(pdev, ma, &multi);
191 pci_read_config_word(pdev, ua, &ultra);
192
193 /* Mask timing bits */
194 ultra &= ~0x3F;
195 mode &= ~(0x03 << port_shift);
196
197 /* Extract scsc */
198	scsc = (scsc & 0x30) ? 1 : 0;
199
200 if (adev->dma_mode >= XFER_UDMA_0) {
201 multi = 0x10C1;
202 ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
203 mode |= (0x03 << port_shift);
204 } else {
205 multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
206 mode |= (0x02 << port_shift);
207 }
208 pci_write_config_byte(pdev, addr_mask, mode);
209 pci_write_config_word(pdev, ma, multi);
210 pci_write_config_word(pdev, ua, ultra);
211}
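
/*
 * Worked example (hypothetical configuration, for illustration): for
 * UDMA5 on a 133MHz part (scsc != 0) the code selects
 * ultra_table[1][5] = 0x02 as the UDMA timing value, forces the MWDMA
 * register to 0x10C1, and sets both mode bits for the device:
 * 0x03 << (devno * 4), i.e. 0x30 for the slave.
 */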
212
213static struct scsi_host_template sil680_sht = {
214 .module = THIS_MODULE,
215 .name = DRV_NAME,
216 .ioctl = ata_scsi_ioctl,
217 .queuecommand = ata_scsi_queuecmd,
218 .can_queue = ATA_DEF_QUEUE,
219 .this_id = ATA_SHT_THIS_ID,
220 .sg_tablesize = LIBATA_MAX_PRD,
221 .max_sectors = ATA_MAX_SECTORS,
222 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
223 .emulated = ATA_SHT_EMULATED,
224 .use_clustering = ATA_SHT_USE_CLUSTERING,
225 .proc_name = DRV_NAME,
226 .dma_boundary = ATA_DMA_BOUNDARY,
227 .slave_configure = ata_scsi_slave_config,
228 .bios_param = ata_std_bios_param,
229};
230
231static struct ata_port_operations sil680_port_ops = {
232 .port_disable = ata_port_disable,
233 .set_piomode = sil680_set_piomode,
234 .set_dmamode = sil680_set_dmamode,
235 .mode_filter = ata_pci_default_filter,
236 .tf_load = ata_tf_load,
237 .tf_read = ata_tf_read,
238 .check_status = ata_check_status,
239 .exec_command = ata_exec_command,
240 .dev_select = ata_std_dev_select,
241
242 .freeze = ata_bmdma_freeze,
243 .thaw = ata_bmdma_thaw,
244 .error_handler = sil680_error_handler,
245 .post_internal_cmd = ata_bmdma_post_internal_cmd,
246
247 .bmdma_setup = ata_bmdma_setup,
248 .bmdma_start = ata_bmdma_start,
249 .bmdma_stop = ata_bmdma_stop,
250 .bmdma_status = ata_bmdma_status,
251
252 .qc_prep = ata_qc_prep,
253 .qc_issue = ata_qc_issue_prot,
254 .eng_timeout = ata_eng_timeout,
255 .data_xfer = ata_pio_data_xfer,
256
257 .irq_handler = ata_interrupt,
258 .irq_clear = ata_bmdma_irq_clear,
259
260 .port_start = ata_port_start,
261 .port_stop = ata_port_stop,
262 .host_stop = ata_host_stop
263};
264
265static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
266{
267 static struct ata_port_info info = {
268 .sht = &sil680_sht,
269 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
270 .pio_mask = 0x1f,
271 .mwdma_mask = 0x07,
272 .udma_mask = 0x7f,
273 .port_ops = &sil680_port_ops
274 };
275 static struct ata_port_info info_slow = {
276 .sht = &sil680_sht,
277 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
278 .pio_mask = 0x1f,
279 .mwdma_mask = 0x07,
280 .udma_mask = 0x3f,
281 .port_ops = &sil680_port_ops
282 };
283 static struct ata_port_info *port_info[2] = {&info, &info};
284 static int printed_version;
285 u32 class_rev = 0;
286 u8 tmpbyte = 0;
287
288 if (!printed_version++)
289 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
290
291 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
292 class_rev &= 0xff;
293 /* FIXME: double check */
294 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
295
296 pci_write_config_byte(pdev, 0x80, 0x00);
297 pci_write_config_byte(pdev, 0x84, 0x00);
298
299 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
300
301 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
302 tmpbyte & 1, tmpbyte & 0x30);
303
304 switch(tmpbyte & 0x30) {
305 case 0x00:
306			/* 133 clock: attempt to force it on */
307			pci_write_config_byte(pdev, 0x8A, tmpbyte | 0x10);
308 break;
309 case 0x30:
310			/* Clocking is disabled */
311			/* 133 clock: attempt to force it on */
312 pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
313 break;
314 case 0x10:
315 /* 133 already */
316 break;
317 case 0x20:
318 /* BIOS set PCI x2 clocking */
319 break;
320 }
321
322 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
323 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
324 tmpbyte & 1, tmpbyte & 0x30);
325 if ((tmpbyte & 0x30) == 0)
326 port_info[0] = port_info[1] = &info_slow;
327
328 pci_write_config_byte(pdev, 0xA1, 0x72);
329 pci_write_config_word(pdev, 0xA2, 0x328A);
330 pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
331 pci_write_config_dword(pdev, 0xA8, 0x43924392);
332 pci_write_config_dword(pdev, 0xAC, 0x40094009);
333 pci_write_config_byte(pdev, 0xB1, 0x72);
334 pci_write_config_word(pdev, 0xB2, 0x328A);
335 pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
336 pci_write_config_dword(pdev, 0xB8, 0x43924392);
337 pci_write_config_dword(pdev, 0xBC, 0x40094009);
338
339 switch(tmpbyte & 0x30) {
340		case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n"); break;
341		case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n"); break;
342		case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n"); break;
343		/* This last case is _NOT_ ok */
344		case 0x30: printk(KERN_ERR "sil680: Clock disabled?\n");
345 return -EIO;
346 }
347 return ata_pci_init_one(pdev, port_info, 2);
348}
349
350static const struct pci_device_id sil680[] = {
351 { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), },
352 { 0, },
353};
354
355static struct pci_driver sil680_pci_driver = {
356 .name = DRV_NAME,
357 .id_table = sil680,
358 .probe = sil680_init_one,
359 .remove = ata_pci_remove_one
360};
361
362static int __init sil680_init(void)
363{
364 return pci_register_driver(&sil680_pci_driver);
365}
366
367
368static void __exit sil680_exit(void)
369{
370 pci_unregister_driver(&sil680_pci_driver);
371}
372
373
374MODULE_AUTHOR("Alan Cox");
375MODULE_DESCRIPTION("low-level driver for SI680 PATA");
376MODULE_LICENSE("GPL");
377MODULE_DEVICE_TABLE(pci, sil680);
378MODULE_VERSION(DRV_VERSION);
379
380module_init(sil680_init);
381module_exit(sil680_exit);
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
new file mode 100644
index 000000000000..2e555168b431
--- /dev/null
+++ b/drivers/ata/pata_sis.c
@@ -0,0 +1,1034 @@
1/*
2 * pata_sis.c - SiS ATA driver
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Based upon linux/drivers/ide/pci/sis5513.c
7 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
9 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
10 * SiS Taiwan : for direct support and hardware.
11 * Daniela Engert : for initial ATA100 advices and numerous others.
12 * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
13 * for checking code correctness, providing patches.
14 * Original tests and design on the SiS620 chipset.
15 * ATA100 tests and design on the SiS735 chipset.
16 * ATA16/33 support from specs
17 * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
18 *
19 *
20 * TODO
21 * Check MWDMA on drives that don't support MWDMA speed pio cycles ?
22 * More Testing
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34#include <linux/ata.h>
35
36#define DRV_NAME "pata_sis"
37#define DRV_VERSION "0.4.3"
38
39struct sis_chipset {
40 u16 device; /* PCI host ID */
41 struct ata_port_info *info; /* Info block */
42 /* Probably add family, cable detect type etc here to clean
43 up code later */
44};
45
46/**
47 * sis_port_base - return PCI configuration base for dev
48 * @adev: device
49 *
50 * Returns the base of the PCI configuration registers for this port
51 * number.
52 */
53
54static int sis_port_base(struct ata_device *adev)
55{
56 return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno);
57}
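
/*
 * Example (for illustration): the timing registers are packed two
 * bytes per device, four per port, starting at 0x40. Port 1's slave
 * device therefore lives at 0x40 + 4 * 1 + 2 * 1 = 0x46.
 */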
58
59/**
60 * sis_133_pre_reset - check for 40/80 pin
61 * @ap: Port
62 *
63 * Perform cable detection for the later UDMA133 capable
64 * SiS chipset.
65 */
66
67static int sis_133_pre_reset(struct ata_port *ap)
68{
69 static const struct pci_bits sis_enable_bits[] = {
70 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
71 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
72 };
73
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 u16 tmp;
76
77 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
78 ata_port_disable(ap);
79 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
80 return 0;
81 }
82 /* The top bit of this register is the cable detect bit */
83 pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
84 if (tmp & 0x8000)
85 ap->cbl = ATA_CBL_PATA40;
86 else
87 ap->cbl = ATA_CBL_PATA80;
88
89 return ata_std_prereset(ap);
90}
91
92/**
93 * sis_133_error_handler - Probe specified port on PATA host controller
94 * @ap: Port to probe
95 *
96 * LOCKING:
97 * None (inherited from caller).
98 */
99
100static void sis_133_error_handler(struct ata_port *ap)
101{
102 ata_bmdma_drive_eh(ap, sis_133_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
103}
104
105
106/**
107 * sis_66_pre_reset - check for 40/80 pin
108 * @ap: Port
109 *
110 * Perform cable detection on the UDMA66, UDMA100 and early UDMA133
111 * SiS IDE controllers.
112 */
113
114static int sis_66_pre_reset(struct ata_port *ap)
115{
116 static const struct pci_bits sis_enable_bits[] = {
117 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
118 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
119 };
120
121 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
122 u8 tmp;
123
124 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
125 ata_port_disable(ap);
126 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
127 return 0;
128 }
129 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */
130 pci_read_config_byte(pdev, 0x48, &tmp);
131 tmp >>= ap->port_no;
132 if (tmp & 0x10)
133 ap->cbl = ATA_CBL_PATA40;
134 else
135 ap->cbl = ATA_CBL_PATA80;
136
137 return ata_std_prereset(ap);
138}
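
/*
 * Illustrative note: after the shift, port 0 tests bit 4 and port 1
 * tests bit 5 of register 0x48, so e.g. a raw value of 0x20 means
 * port 0 sees an 80-wire cable while port 1 sees a 40-wire one.
 */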
139
140/**
141 * sis_66_error_handler - Probe specified port on PATA host controller
142 * @ap: Port to probe
144 *
145 * LOCKING:
146 * None (inherited from caller).
147 */
148
149static void sis_66_error_handler(struct ata_port *ap)
150{
151 ata_bmdma_drive_eh(ap, sis_66_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
152}
153
154/**
155 * sis_old_pre_reset - probe begin
156 * @ap: ATA port
157 *
158 * Set up cable type and use generic probe init
159 */
160
161static int sis_old_pre_reset(struct ata_port *ap)
162{
163 static const struct pci_bits sis_enable_bits[] = {
164 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
165 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
166 };
167
168 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
169
170 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
171 ata_port_disable(ap);
172 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
173 return 0;
174 }
175 ap->cbl = ATA_CBL_PATA40;
176 return ata_std_prereset(ap);
177}
178
179
180/**
181 * sis_old_error_handler - Probe specified port on PATA host controller
182 * @ap: Port to probe
183 *
184 * LOCKING:
185 * None (inherited from caller).
186 */
187
188static void sis_old_error_handler(struct ata_port *ap)
189{
190 ata_bmdma_drive_eh(ap, sis_old_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
191}
192
193/**
194 * sis_set_fifo - Set RWP fifo bits for this device
195 * @ap: Port
196 * @adev: Device
197 *
198 * SIS chipsets implement prefetch/postwrite bits for each device
199 * on both channels. This functionality is not ATAPI compatible and
200 * must be configured according to the class of device present
201 */
202
203static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
204{
205 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
206 u8 fifoctrl;
207 u8 mask = 0x11;
208
209 mask <<= (2 * ap->port_no);
210 mask <<= adev->devno;
211
212 /* This holds various bits including the FIFO control */
213 pci_read_config_byte(pdev, 0x4B, &fifoctrl);
214 fifoctrl &= ~mask;
215
216 /* Enable for ATA (disk) only */
217 if (adev->class == ATA_DEV_ATA)
218 fifoctrl |= mask;
219 pci_write_config_byte(pdev, 0x4B, fifoctrl);
220}
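
/*
 * Worked mask example (for illustration): register 0x4B carries one
 * prefetch and one postwrite bit per device. For port 1, device 1 the
 * mask is 0x11 << 2 << 1 = 0x88, so only that device's pair of bits
 * is rewritten.
 */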
221
222/**
223 * sis_old_set_piomode - Initialize host controller PATA PIO timings
224 * @ap: Port whose timings we are configuring
225 * @adev: Device we are configuring for.
226 *
227 * Set PIO mode for device, in host controller PCI config space. This
228 * function handles PIO set up for all chips that are pre ATA100 and
229 * also early ATA100 devices.
230 *
231 * LOCKING:
232 * None (inherited from caller).
233 */
234
235static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
236{
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238 int port = sis_port_base(adev);
239 u8 t1, t2;
240 int speed = adev->pio_mode - XFER_PIO_0;
241
242 const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
243 const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
244
245 sis_set_fifo(ap, adev);
246
247 pci_read_config_byte(pdev, port, &t1);
248 pci_read_config_byte(pdev, port + 1, &t2);
249
250 t1 &= ~0x0F; /* Clear active/recovery timings */
251 t2 &= ~0x07;
252
253 t1 |= active[speed];
254 t2 |= recovery[speed];
255
256 pci_write_config_byte(pdev, port, t1);
257 pci_write_config_byte(pdev, port + 1, t2);
258}
259
260/**
261 * sis_100_set_piomode - Initialize host controller PATA PIO timings
262 * @ap: Port whose timings we are configuring
263 * @adev: Device we are configuring for.
264 *
265 * Set PIO mode for device, in host controller PCI config space. This
266 * function handles PIO set up for ATA100 devices and early ATA133.
267 *
268 * LOCKING:
269 * None (inherited from caller).
270 */
271
272static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
273{
274 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
275 int port = sis_port_base(adev);
276 int speed = adev->pio_mode - XFER_PIO_0;
277
278 const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
279
280 sis_set_fifo(ap, adev);
281
282 pci_write_config_byte(pdev, port, actrec[speed]);
283}
284
285/**
286 * sis_133_set_piomode - Initialize host controller PATA PIO timings
287 * @ap: Port whose timings we are configuring
288 * @adev: Device we are configuring for.
289 *
290 * Set PIO mode for device, in host controller PCI config space. This
291 * function handles PIO set up for the later ATA133 devices.
292 *
293 * LOCKING:
294 * None (inherited from caller).
295 */
296
297static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
298{
299 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
300 int port = 0x40;
301 u32 t1;
302 u32 reg54;
303 int speed = adev->pio_mode - XFER_PIO_0;
304
305 const u32 timing133[] = {
306 0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
307 0x0C266000,
308 0x04263000,
309 0x0C0A3000,
310 0x05093000
311 };
312 const u32 timing100[] = {
313 0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
314 0x091C4000,
315 0x031C2000,
316 0x09072000,
317 0x04062000
318 };
319
320 sis_set_fifo(ap, adev);
321
322 /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
323 pci_read_config_dword(pdev, 0x54, &reg54);
324 if (reg54 & 0x40000000)
325 port = 0x70;
326 port += 8 * ap->port_no + 4 * adev->devno;
327
328 pci_read_config_dword(pdev, port, &t1);
329 t1 &= 0xC0C00FFF; /* Mask out timing */
330
331 if (t1 & 0x08) /* 100 or 133 ? */
332 t1 |= timing133[speed];
333 else
334 t1 |= timing100[speed];
335	pci_write_config_dword(pdev, port, t1);
336}
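
/*
 * Addressing sketch (hypothetical values, for illustration): with the
 * remap bit set in register 0x54 the timing dwords start at 0x70, so
 * port 1 / device 0 is programmed at 0x70 + 8 * 1 + 4 * 0 = 0x78.
 * Bit 3 of that dword then selects the 133MHz table over the 100MHz one.
 */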
337
338/**
339 * sis_old_set_dmamode - Initialize host controller PATA DMA timings
340 * @ap: Port whose timings we are configuring
341 * @adev: Device to program
342 *
343 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
344 * Handles pre UDMA and UDMA33 devices. Supports MWDMA as well unlike
345 * the old ide/pci driver.
346 *
347 * LOCKING:
348 * None (inherited from caller).
349 */
350
351static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
352{
353 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
354 int speed = adev->dma_mode - XFER_MW_DMA_0;
355 int drive_pci = sis_port_base(adev);
356 u16 timing;
357
358 const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
359 const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
360
361 pci_read_config_word(pdev, drive_pci, &timing);
362
363	if (adev->dma_mode < XFER_UDMA_0) {
364		/* bits 3-0 hold recovery timing, bits 8-10 active timing and
365		   the higher bits are dependent on the device */
366		timing &= ~0x870F;
367		timing |= mwdma_bits[speed];
368	} else {
369		/* Bit 15 is UDMA on/off, bits 13-14 are cycle time */
370		speed = adev->dma_mode - XFER_UDMA_0;
371		timing &= ~0x6000;
372		timing |= udma_bits[speed];
373	}
374	pci_write_config_word(pdev, drive_pci, timing);
375}
376
377/**
378 * sis_66_set_dmamode - Initialize host controller PATA DMA timings
379 * @ap: Port whose timings we are configuring
380 * @adev: Device to program
381 *
382 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
383 * Handles UDMA66 and early UDMA100 devices. Supports MWDMA as well unlike
384 * the old ide/pci driver.
385 *
386 * LOCKING:
387 * None (inherited from caller).
388 */
389
390static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
391{
392 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
393 int speed = adev->dma_mode - XFER_MW_DMA_0;
394 int drive_pci = sis_port_base(adev);
395 u16 timing;
396
397 const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
398 const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
399
400 pci_read_config_word(pdev, drive_pci, &timing);
401
402 if (adev->dma_mode < XFER_UDMA_0) {
403		/* bits 3-0 hold recovery timing, bits 8-10 active timing and
404		   the higher bits are dependent on the device, bit 15 udma */
405		timing &= ~0x870F;
406 timing |= mwdma_bits[speed];
407 } else {
408 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
409 speed = adev->dma_mode - XFER_UDMA_0;
410 timing &= ~0x6000;
411 timing |= udma_bits[speed];
412 }
413 pci_write_config_word(pdev, drive_pci, timing);
414}
415
416/**
417 * sis_100_set_dmamode - Initialize host controller PATA DMA timings
418 * @ap: Port whose timings we are configuring
419 * @adev: Device to program
420 *
421 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
422 * Handles the UDMA100 capable chipsets. MWDMA is not yet implemented.
423 *
424 * LOCKING:
425 * None (inherited from caller).
426 */
427
428static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
429{
430 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
431 int speed = adev->dma_mode - XFER_MW_DMA_0;
432 int drive_pci = sis_port_base(adev);
433 u16 timing;
434
435 const u16 udma_bits[] = { 0x8B00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100};
436
437 pci_read_config_word(pdev, drive_pci, &timing);
438
439 if (adev->dma_mode < XFER_UDMA_0) {
440 /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
441 } else {
442 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
443 speed = adev->dma_mode - XFER_UDMA_0;
444 timing &= ~0x0F00;
445 timing |= udma_bits[speed];
446 }
447 pci_write_config_word(pdev, drive_pci, timing);
448}
449
450/**
451 * sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
452 * @ap: Port whose timings we are configuring
453 * @adev: Device to program
454 *
455 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
456 * Handles early SiS 961 bridges. MWDMA is not yet implemented
457 * (needs the data sheet, see the comment below).
458 *
459 * LOCKING:
460 * None (inherited from caller).
461 */
462
463static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
464{
465 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
466 int speed = adev->dma_mode - XFER_MW_DMA_0;
467 int drive_pci = sis_port_base(adev);
468 u16 timing;
469
470 const u16 udma_bits[] = { 0x8F00, 0x8A00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100};
471
472 pci_read_config_word(pdev, drive_pci, &timing);
473
474 if (adev->dma_mode < XFER_UDMA_0) {
475 /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
476 } else {
477 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
478 speed = adev->dma_mode - XFER_UDMA_0;
479 timing &= ~0x0F00;
480 timing |= udma_bits[speed];
481 }
482 pci_write_config_word(pdev, drive_pci, timing);
483}
484
485/**
486 * sis_133_set_dmamode - Initialize host controller PATA DMA timings
487 * @ap: Port whose timings we are configuring
488 * @adev: Device to program
489 *
490 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
491 * Handles the later ATA133 capable bridges. MWDMA is not yet
492 * implemented (see the FIXME below).
493 *
494 * LOCKING:
495 * None (inherited from caller).
496 */
497
498static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
499{
500 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
501 int speed = adev->dma_mode - XFER_MW_DMA_0;
502 int port = 0x40;
503 u32 t1;
504 u32 reg54;
505
506	/* bits 4-7 hold the cycle time, bits 8-11 the cvs time */
507 const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
508 const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
509
510 /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
511 pci_read_config_dword(pdev, 0x54, &reg54);
512 if (reg54 & 0x40000000)
513 port = 0x70;
514 port += (8 * ap->port_no) + (4 * adev->devno);
515
516 pci_read_config_dword(pdev, port, &t1);
517
518 if (adev->dma_mode < XFER_UDMA_0) {
519 t1 &= ~0x00000004;
520 /* FIXME: need data sheet to add MWDMA here. Also lacking on
521 ide/pci driver */
522 } else {
523 speed = adev->dma_mode - XFER_UDMA_0;
524 /* if & 8 no UDMA133 - need info for ... */
525 t1 &= ~0x00000FF0;
526 t1 |= 0x00000004;
527 if (t1 & 0x08)
528 t1 |= timing_u133[speed];
529 else
530 t1 |= timing_u100[speed];
531 }
532 pci_write_config_dword(pdev, port, t1);
533}
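
/*
 * Worked example (hypothetical, for illustration): for UDMA6 on a
 * chip whose timing dword has bit 3 set (133MHz capable), the code
 * clears bits 4-11, sets the UDMA enable bit 2 and ORs in
 * timing_u133[6] = 0x210.
 */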
534
535static struct scsi_host_template sis_sht = {
536 .module = THIS_MODULE,
537 .name = DRV_NAME,
538 .ioctl = ata_scsi_ioctl,
539 .queuecommand = ata_scsi_queuecmd,
540 .can_queue = ATA_DEF_QUEUE,
541 .this_id = ATA_SHT_THIS_ID,
542 .sg_tablesize = LIBATA_MAX_PRD,
543 .max_sectors = ATA_MAX_SECTORS,
544 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
545 .emulated = ATA_SHT_EMULATED,
546 .use_clustering = ATA_SHT_USE_CLUSTERING,
547 .proc_name = DRV_NAME,
548 .dma_boundary = ATA_DMA_BOUNDARY,
549 .slave_configure = ata_scsi_slave_config,
550 .bios_param = ata_std_bios_param,
551};
552
553static const struct ata_port_operations sis_133_ops = {
554 .port_disable = ata_port_disable,
555 .set_piomode = sis_133_set_piomode,
556 .set_dmamode = sis_133_set_dmamode,
557 .mode_filter = ata_pci_default_filter,
558
559 .tf_load = ata_tf_load,
560 .tf_read = ata_tf_read,
561 .check_status = ata_check_status,
562 .exec_command = ata_exec_command,
563 .dev_select = ata_std_dev_select,
564
565 .freeze = ata_bmdma_freeze,
566 .thaw = ata_bmdma_thaw,
567 .error_handler = sis_133_error_handler,
568 .post_internal_cmd = ata_bmdma_post_internal_cmd,
569
570 .bmdma_setup = ata_bmdma_setup,
571 .bmdma_start = ata_bmdma_start,
572 .bmdma_stop = ata_bmdma_stop,
573 .bmdma_status = ata_bmdma_status,
574 .qc_prep = ata_qc_prep,
575 .qc_issue = ata_qc_issue_prot,
576 .data_xfer = ata_pio_data_xfer,
577
578 .eng_timeout = ata_eng_timeout,
579
580 .irq_handler = ata_interrupt,
581 .irq_clear = ata_bmdma_irq_clear,
582
583 .port_start = ata_port_start,
584 .port_stop = ata_port_stop,
585 .host_stop = ata_host_stop,
586};
587
588static const struct ata_port_operations sis_133_early_ops = {
589 .port_disable = ata_port_disable,
590 .set_piomode = sis_100_set_piomode,
591 .set_dmamode = sis_133_early_set_dmamode,
592 .mode_filter = ata_pci_default_filter,
593
594 .tf_load = ata_tf_load,
595 .tf_read = ata_tf_read,
596 .check_status = ata_check_status,
597 .exec_command = ata_exec_command,
598 .dev_select = ata_std_dev_select,
599
600 .freeze = ata_bmdma_freeze,
601 .thaw = ata_bmdma_thaw,
602 .error_handler = sis_66_error_handler,
603 .post_internal_cmd = ata_bmdma_post_internal_cmd,
604
605 .bmdma_setup = ata_bmdma_setup,
606 .bmdma_start = ata_bmdma_start,
607 .bmdma_stop = ata_bmdma_stop,
608 .bmdma_status = ata_bmdma_status,
609 .qc_prep = ata_qc_prep,
610 .qc_issue = ata_qc_issue_prot,
611 .data_xfer = ata_pio_data_xfer,
612
613 .eng_timeout = ata_eng_timeout,
614
615 .irq_handler = ata_interrupt,
616 .irq_clear = ata_bmdma_irq_clear,
617
618 .port_start = ata_port_start,
619 .port_stop = ata_port_stop,
620 .host_stop = ata_host_stop,
621};
622
623static const struct ata_port_operations sis_100_ops = {
624 .port_disable = ata_port_disable,
625 .set_piomode = sis_100_set_piomode,
626 .set_dmamode = sis_100_set_dmamode,
627 .mode_filter = ata_pci_default_filter,
628
629 .tf_load = ata_tf_load,
630 .tf_read = ata_tf_read,
631 .check_status = ata_check_status,
632 .exec_command = ata_exec_command,
633 .dev_select = ata_std_dev_select,
634
635 .freeze = ata_bmdma_freeze,
636 .thaw = ata_bmdma_thaw,
637 .error_handler = sis_66_error_handler,
638 .post_internal_cmd = ata_bmdma_post_internal_cmd,
639
640
641 .bmdma_setup = ata_bmdma_setup,
642 .bmdma_start = ata_bmdma_start,
643 .bmdma_stop = ata_bmdma_stop,
644 .bmdma_status = ata_bmdma_status,
645 .qc_prep = ata_qc_prep,
646 .qc_issue = ata_qc_issue_prot,
647 .data_xfer = ata_pio_data_xfer,
648
649 .eng_timeout = ata_eng_timeout,
650
651 .irq_handler = ata_interrupt,
652 .irq_clear = ata_bmdma_irq_clear,
653
654 .port_start = ata_port_start,
655 .port_stop = ata_port_stop,
656 .host_stop = ata_host_stop,
657};
658
659static const struct ata_port_operations sis_66_ops = {
660 .port_disable = ata_port_disable,
661 .set_piomode = sis_old_set_piomode,
662 .set_dmamode = sis_66_set_dmamode,
663 .mode_filter = ata_pci_default_filter,
664
665 .tf_load = ata_tf_load,
666 .tf_read = ata_tf_read,
667 .check_status = ata_check_status,
668 .exec_command = ata_exec_command,
669 .dev_select = ata_std_dev_select,
670
671 .freeze = ata_bmdma_freeze,
672 .thaw = ata_bmdma_thaw,
673 .error_handler = sis_66_error_handler,
674 .post_internal_cmd = ata_bmdma_post_internal_cmd,
675
676 .bmdma_setup = ata_bmdma_setup,
677 .bmdma_start = ata_bmdma_start,
678 .bmdma_stop = ata_bmdma_stop,
679 .bmdma_status = ata_bmdma_status,
680 .qc_prep = ata_qc_prep,
681 .qc_issue = ata_qc_issue_prot,
682 .data_xfer = ata_pio_data_xfer,
683
684 .eng_timeout = ata_eng_timeout,
685
686 .irq_handler = ata_interrupt,
687 .irq_clear = ata_bmdma_irq_clear,
688
689 .port_start = ata_port_start,
690 .port_stop = ata_port_stop,
691 .host_stop = ata_host_stop,
692};
693
694static const struct ata_port_operations sis_old_ops = {
695 .port_disable = ata_port_disable,
696 .set_piomode = sis_old_set_piomode,
697 .set_dmamode = sis_old_set_dmamode,
698 .mode_filter = ata_pci_default_filter,
699
700 .tf_load = ata_tf_load,
701 .tf_read = ata_tf_read,
702 .check_status = ata_check_status,
703 .exec_command = ata_exec_command,
704 .dev_select = ata_std_dev_select,
705
706 .freeze = ata_bmdma_freeze,
707 .thaw = ata_bmdma_thaw,
708 .error_handler = sis_old_error_handler,
709 .post_internal_cmd = ata_bmdma_post_internal_cmd,
710
711 .bmdma_setup = ata_bmdma_setup,
712 .bmdma_start = ata_bmdma_start,
713 .bmdma_stop = ata_bmdma_stop,
714 .bmdma_status = ata_bmdma_status,
715 .qc_prep = ata_qc_prep,
716 .qc_issue = ata_qc_issue_prot,
717 .data_xfer = ata_pio_data_xfer,
718
719 .eng_timeout = ata_eng_timeout,
720
721 .irq_handler = ata_interrupt,
722 .irq_clear = ata_bmdma_irq_clear,
723
724 .port_start = ata_port_start,
725 .port_stop = ata_port_stop,
726 .host_stop = ata_host_stop,
727};
728
729static struct ata_port_info sis_info = {
730 .sht = &sis_sht,
731 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
732 .pio_mask = 0x1f, /* pio0-4 */
733 .mwdma_mask = 0x07,
734 .udma_mask = 0,
735 .port_ops = &sis_old_ops,
736};
737static struct ata_port_info sis_info33 = {
738 .sht = &sis_sht,
739 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
740 .pio_mask = 0x1f, /* pio0-4 */
741 .mwdma_mask = 0x07,
742 .udma_mask = ATA_UDMA2, /* UDMA 33 */
743 .port_ops = &sis_old_ops,
744};
745static struct ata_port_info sis_info66 = {
746 .sht = &sis_sht,
747 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
748 .pio_mask = 0x1f, /* pio0-4 */
749 .udma_mask = ATA_UDMA4, /* UDMA 66 */
750 .port_ops = &sis_66_ops,
751};
752static struct ata_port_info sis_info100 = {
753 .sht = &sis_sht,
754 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
755 .pio_mask = 0x1f, /* pio0-4 */
756 .udma_mask = ATA_UDMA5,
757 .port_ops = &sis_100_ops,
758};
759static struct ata_port_info sis_info100_early = {
760 .sht = &sis_sht,
761 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
762 .udma_mask = ATA_UDMA5,
763 .pio_mask = 0x1f, /* pio0-4 */
764 .port_ops = &sis_66_ops,
765};
766static struct ata_port_info sis_info133 = {
767 .sht = &sis_sht,
768 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
769 .pio_mask = 0x1f, /* pio0-4 */
770 .udma_mask = ATA_UDMA6,
771 .port_ops = &sis_133_ops,
772};
773static struct ata_port_info sis_info133_early = {
774 .sht = &sis_sht,
775 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
776 .pio_mask = 0x1f, /* pio0-4 */
777 .udma_mask = ATA_UDMA6,
778 .port_ops = &sis_133_early_ops,
779};
780
781
782static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
783{
784 u16 regw;
785 u8 reg;
786
787 if (sis->info == &sis_info133) {
788 pci_read_config_word(pdev, 0x50, &regw);
789 if (regw & 0x08)
790 pci_write_config_word(pdev, 0x50, regw & ~0x08);
791 pci_read_config_word(pdev, 0x52, &regw);
792 if (regw & 0x08)
793 pci_write_config_word(pdev, 0x52, regw & ~0x08);
794 return;
795 }
796
797 if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
798 /* Fix up latency */
799 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
800 /* Set compatibility bit */
801 pci_read_config_byte(pdev, 0x49, &reg);
802 if (!(reg & 0x01))
803 pci_write_config_byte(pdev, 0x49, reg | 0x01);
804 return;
805 }
806
807 if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
808 /* Fix up latency */
809 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
810 /* Set compatibility bit */
811 pci_read_config_byte(pdev, 0x52, &reg);
812 if (!(reg & 0x04))
813 pci_write_config_byte(pdev, 0x52, reg | 0x04);
814 return;
815 }
816
817 if (sis->info == &sis_info33) {
818 pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
819		if ((reg & 0x0F) != 0x00)
820 pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
821 /* Fall through to ATA16 fixup below */
822 }
823
824 if (sis->info == &sis_info || sis->info == &sis_info33) {
825 /* force per drive recovery and active timings
826 needed on ATA_33 and below chips */
827 pci_read_config_byte(pdev, 0x52, &reg);
828 if (!(reg & 0x08))
829			pci_write_config_byte(pdev, 0x52, reg | 0x08);
830 return;
831 }
832
833 BUG();
834}
835
836/**
837 * sis_init_one - Register SiS ATA PCI device with kernel services
838 * @pdev: PCI device to register
839 * @ent: Entry in sis_pci_tbl matching with @pdev
840 *
841 * Called from kernel PCI layer. We identify the chipset family,
842 * and then hand over control to libata, for it to do the rest.
843 *
844 * LOCKING:
845 * Inherited from PCI layer (may sleep).
846 *
847 * RETURNS:
848 * Zero on success, or -ERRNO value.
849 */
850
851static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
852{
853 static int printed_version;
854 static struct ata_port_info *port_info[2];
855 struct ata_port_info *port;
856 struct pci_dev *host = NULL;
857 struct sis_chipset *chipset = NULL;
858
859 static struct sis_chipset sis_chipsets[] = {
860
861 { 0x0968, &sis_info133 },
862 { 0x0966, &sis_info133 },
863 { 0x0965, &sis_info133 },
864 { 0x0745, &sis_info100 },
865 { 0x0735, &sis_info100 },
866 { 0x0733, &sis_info100 },
867 { 0x0635, &sis_info100 },
868 { 0x0633, &sis_info100 },
869
870 { 0x0730, &sis_info100_early }, /* 100 with ATA 66 layout */
871 { 0x0550, &sis_info100_early }, /* 100 with ATA 66 layout */
872
873 { 0x0640, &sis_info66 },
874 { 0x0630, &sis_info66 },
875 { 0x0620, &sis_info66 },
876 { 0x0540, &sis_info66 },
877 { 0x0530, &sis_info66 },
878
879 { 0x5600, &sis_info33 },
880 { 0x5598, &sis_info33 },
881 { 0x5597, &sis_info33 },
882 { 0x5591, &sis_info33 },
883 { 0x5582, &sis_info33 },
884 { 0x5581, &sis_info33 },
885
886 { 0x5596, &sis_info },
887 { 0x5571, &sis_info },
888 { 0x5517, &sis_info },
889 { 0x5511, &sis_info },
890
891 {0}
892 };
893 static struct sis_chipset sis133_early = {
894 0x0, &sis_info133_early
895 };
896 static struct sis_chipset sis133 = {
897 0x0, &sis_info133
898 };
899 static struct sis_chipset sis100_early = {
900 0x0, &sis_info100_early
901 };
902 static struct sis_chipset sis100 = {
903 0x0, &sis_info100
904 };
905
906 if (!printed_version++)
907 dev_printk(KERN_DEBUG, &pdev->dev,
908 "version " DRV_VERSION "\n");
909
910 /* We have to find the bridge first */
911
912 for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
913 host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
914 if (host != NULL) {
915 if (chipset->device == 0x630) { /* SIS630 */
916 u8 host_rev;
917 pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
918 if (host_rev >= 0x30) /* 630 ET */
919 chipset = &sis100_early;
920 }
921 break;
922 }
923 }
924
925 /* Look for concealed bridges */
926 if (host == NULL) {
927 /* Second check */
928 u32 idemisc;
929 u16 trueid;
930
931 /* Disable ID masking and register remapping then
932 see what the real ID is */
933
934 pci_read_config_dword(pdev, 0x54, &idemisc);
935 pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
936 pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
937 pci_write_config_dword(pdev, 0x54, idemisc);
938
939 switch(trueid) {
940 case 0x5518: /* SIS 962/963 */
941 chipset = &sis133;
942 if ((idemisc & 0x40000000) == 0) {
943 pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
944 printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
945 }
946 break;
947 case 0x0180: /* SIS 965/965L */
948 chipset = &sis133;
949 break;
950 case 0x1180: /* SIS 966/966L */
951 chipset = &sis133;
952 break;
953 }
954 }
955
956 /* Further check */
957 if (chipset == NULL) {
958 struct pci_dev *lpc_bridge;
959 u16 trueid;
960 u8 prefctl;
961 u8 idecfg;
962 u8 sbrev;
963
964 /* Try the second unmasking technique */
965 pci_read_config_byte(pdev, 0x4a, &idecfg);
966 pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
967 pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
968 pci_write_config_byte(pdev, 0x4a, idecfg);
969
970 switch(trueid) {
971 case 0x5517:
972 lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
973 if (lpc_bridge == NULL)
974 break;
975 pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
976 pci_read_config_byte(pdev, 0x49, &prefctl);
977 pci_dev_put(lpc_bridge);
978
979 if (sbrev == 0x10 && (prefctl & 0x80)) {
980 chipset = &sis133_early;
981 break;
982 }
983 chipset = &sis100;
984 break;
985 }
986 }
987 pci_dev_put(host);
988
989 /* No chipset info, no support */
990 if (chipset == NULL)
991 return -ENODEV;
992
993 port = chipset->info;
994 port->private_data = chipset;
995
996 sis_fixup(pdev, chipset);
997
998 port_info[0] = port_info[1] = port;
999 return ata_pci_init_one(pdev, port_info, 2);
1000}
1001
1002static const struct pci_device_id sis_pci_tbl[] = {
1003 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5513), }, /* SiS 5513 */
1004 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5518), }, /* SiS 5518 */
1005 { }
1006};
1007
1008static struct pci_driver sis_pci_driver = {
1009 .name = DRV_NAME,
1010 .id_table = sis_pci_tbl,
1011 .probe = sis_init_one,
1012 .remove = ata_pci_remove_one,
1013};
1014
1015static int __init sis_init(void)
1016{
1017 return pci_register_driver(&sis_pci_driver);
1018}
1019
1020static void __exit sis_exit(void)
1021{
1022 pci_unregister_driver(&sis_pci_driver);
1023}
1024
1025
1026module_init(sis_init);
1027module_exit(sis_exit);
1028
1029MODULE_AUTHOR("Alan Cox");
1030MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
1031MODULE_LICENSE("GPL");
1032MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
1033MODULE_VERSION(DRV_VERSION);
1034
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
new file mode 100644
index 000000000000..f8499786917a
--- /dev/null
+++ b/drivers/ata/pata_sl82c105.c
@@ -0,0 +1,388 @@
1/*
2 * pata_sl82c105.c - SL82C105 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based in part on linux/drivers/ide/pci/sl82c105.c
7 * SL82C105/Winbond 553 IDE driver
8 *
9 * and in part on the documentation and errata sheet
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16#include <linux/blkdev.h>
17#include <linux/delay.h>
18#include <scsi/scsi_host.h>
19#include <linux/libata.h>
20
21#define DRV_NAME "pata_sl82c105"
22#define DRV_VERSION "0.2.2"
23
24enum {
25 /*
26 * SL82C105 PCI config register 0x40 bits.
27 */
28 CTRL_IDE_IRQB = (1 << 30),
29 CTRL_IDE_IRQA = (1 << 28),
30 CTRL_LEGIRQ = (1 << 11),
31 CTRL_P1F16 = (1 << 5),
32 CTRL_P1EN = (1 << 4),
33 CTRL_P0F16 = (1 << 1),
34 CTRL_P0EN = (1 << 0)
35};
36
37/**
38 * sl82c105_pre_reset - probe begin
39 * @ap: ATA port
40 *
41 * Set up cable type and use generic probe init
42 */
43
44static int sl82c105_pre_reset(struct ata_port *ap)
45{
46 static const struct pci_bits sl82c105_enable_bits[] = {
47 { 0x40, 1, 0x01, 0x01 },
48 { 0x40, 1, 0x10, 0x10 }
49 };
50 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
51
52 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) {
53 ata_port_disable(ap);
54 dev_printk(KERN_INFO, &pdev->dev, "port disabled. ignoring.\n");
55 return 0;
56 }
57 ap->cbl = ATA_CBL_PATA40;
58 return ata_std_prereset(ap);
59}
60
61
62static void sl82c105_error_handler(struct ata_port *ap)
63{
64 ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
65}
66
67
68/**
69 * sl82c105_configure_piomode - set chip PIO timing
70 * @ap: ATA interface
71 * @adev: ATA device
72 * @pio: PIO mode
73 *
74 * Called to do the PIO mode setup. Our timing registers are shared
75 * so a configure_dmamode call will undo any work we do here and vice
76 * versa
77 */
78
79static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
80{
81 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
82 static u16 pio_timing[5] = {
83 0x50D, 0x407, 0x304, 0x242, 0x240
84 };
85 u16 dummy;
86 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
87
88 pci_write_config_word(pdev, timing, pio_timing[pio]);
89	/* Can we lose this oddity of the old driver? */
90 pci_read_config_word(pdev, timing, &dummy);
91}
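
/*
 * Register layout example (for illustration): timings are one word per
 * device at 0x44 + 8 * port + 4 * devno, so port 1 / device 1 is
 * written at 0x50; PIO4 loads pio_timing[4] = 0x240.
 */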
92
93/**
94 * sl82c105_set_piomode - set initial PIO mode data
95 * @ap: ATA interface
96 * @adev: ATA device
97 *
98 * Called to do the PIO mode setup. Our timing registers are shared
99 * but we want to set the PIO timing by default.
100 */
101
102static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
103{
104 sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
105}
106
107/**
108 * sl82c105_configure_dmamode - set DMA mode in chip
109 * @ap: ATA interface
110 * @adev: ATA device
111 *
112 * Load DMA cycle times into the chip ready for a DMA transfer
113 * to occur.
114 */
115
116static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
117{
118 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
119 static u16 dma_timing[3] = {
120 0x707, 0x201, 0x200
121 };
122 u16 dummy;
123 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
124 int dma = adev->dma_mode - XFER_MW_DMA_0;
125
126 pci_write_config_word(pdev, timing, dma_timing[dma]);
127	/* Can we lose this oddity of the old driver? */
128 pci_read_config_word(pdev, timing, &dummy);
129}
130
131/**
132 * sl82c105_set_dmamode - set initial DMA mode data
133 * @ap: ATA interface
134 * @adev: ATA device
135 *
136 * Called to do the DMA mode setup. This replaces the PIO timings
137 * for the device in question. Set appropriate PIO timings not DMA
138 * timings at this point.
139 */
140
141static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
142{
143	switch (adev->dma_mode) {
144 case XFER_MW_DMA_0:
145 sl82c105_configure_piomode(ap, adev, 1);
146 break;
147 case XFER_MW_DMA_1:
148 sl82c105_configure_piomode(ap, adev, 3);
149 break;
150 case XFER_MW_DMA_2:
151 sl82c105_configure_piomode(ap, adev, 3);
152 break;
153 default:
154 BUG();
155 }
156}
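
/*
 * Mapping sketch (for illustration): since the timing register is
 * shared, DMA modes are parked on compatible PIO command timings here
 * (MWDMA0 -> PIO1, MWDMA1/2 -> PIO3); the actual DMA timings are only
 * loaded in sl82c105_bmdma_start() below.
 */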
157
158/**
159 * sl82c105_reset_engine - Reset the DMA engine
160 * @ap: ATA interface
161 *
162 * The sl82c105 has some serious problems with the DMA engine
163 * when transfers don't run as expected or ATAPI is used. The
164 * recommended fix is to reset the engine each use using a chip
165 * test register.
166 */
167
168static void sl82c105_reset_engine(struct ata_port *ap)
169{
170 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
171 u16 val;
172
173 pci_read_config_word(pdev, 0x7E, &val);
174 pci_write_config_word(pdev, 0x7E, val | 4);
175 pci_write_config_word(pdev, 0x7E, val & ~4);
176}
177
178/**
179 * sl82c105_bmdma_start - DMA engine begin
180 * @qc: ATA command
181 *
182 * Reset the DMA engine each use as recommended by the errata
183 * document.
184 *
185 * FIXME: if we switch clock at BMDMA start/end we might get better
186 * PIO performance on DMA capable devices.
187 */
188
189static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
190{
191 struct ata_port *ap = qc->ap;
192
193 sl82c105_reset_engine(ap);
194
195 /* Set the clocks for DMA */
196 sl82c105_configure_dmamode(ap, qc->dev);
197 /* Activate DMA */
198 ata_bmdma_start(qc);
199}
200
201/**
202 * sl82c105_bmdma_end - DMA engine stop
203 * @qc: ATA command
204 *
205 * Reset the DMA engine each use as recommended by the errata
206 * document.
207 *
208 * This function is also called to turn off DMA when a timeout occurs
209 * during DMA operation. In both cases we need to reset the engine,
210 * so no actual eng_timeout handler is required.
211 *
212 * We assume bmdma_stop is always called if bmdma_start was called. If
213 * not then we may need to wrap qc_issue.
214 */
215
216static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
217{
218 struct ata_port *ap = qc->ap;
219
220 ata_bmdma_stop(qc);
221 sl82c105_reset_engine(ap);
222
223	/* This will redo the initial setup of the DMA device back to the
224	   matching PIO timings */
225 sl82c105_set_dmamode(ap, qc->dev);
226}
227
228static struct scsi_host_template sl82c105_sht = {
229 .module = THIS_MODULE,
230 .name = DRV_NAME,
231 .ioctl = ata_scsi_ioctl,
232 .queuecommand = ata_scsi_queuecmd,
233 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING,
240 .proc_name = DRV_NAME,
241 .dma_boundary = ATA_DMA_BOUNDARY,
242 .slave_configure = ata_scsi_slave_config,
243 .bios_param = ata_std_bios_param,
244};
245
246static struct ata_port_operations sl82c105_port_ops = {
247 .port_disable = ata_port_disable,
248 .set_piomode = sl82c105_set_piomode,
249 .set_dmamode = sl82c105_set_dmamode,
250 .mode_filter = ata_pci_default_filter,
251
252 .tf_load = ata_tf_load,
253 .tf_read = ata_tf_read,
254 .check_status = ata_check_status,
255 .exec_command = ata_exec_command,
256 .dev_select = ata_std_dev_select,
257
258 .error_handler = sl82c105_error_handler,
259
260 .bmdma_setup = ata_bmdma_setup,
261 .bmdma_start = sl82c105_bmdma_start,
262 .bmdma_stop = sl82c105_bmdma_stop,
263 .bmdma_status = ata_bmdma_status,
264
265 .qc_prep = ata_qc_prep,
266 .qc_issue = ata_qc_issue_prot,
267 .eng_timeout = ata_eng_timeout,
268 .data_xfer = ata_pio_data_xfer,
269
270 .irq_handler = ata_interrupt,
271 .irq_clear = ata_bmdma_irq_clear,
272
273 .port_start = ata_port_start,
274 .port_stop = ata_port_stop,
275 .host_stop = ata_host_stop
276};
277
278/**
279 * sl82c105_bridge_revision - find bridge version
280 * @pdev: PCI device for the ATA function
281 *
282 * Locates the PCI bridge associated with the ATA function and
283 * provided it is a Winbond 553, reports the revision. If it cannot
284 * find a revision or the right device it returns -1.
285 */
286
287static int sl82c105_bridge_revision(struct pci_dev *pdev)
288{
289 struct pci_dev *bridge;
290 u8 rev;
291
292 /*
293 * The bridge should be part of the same device, but function 0.
294 */
295 bridge = pci_get_slot(pdev->bus,
296 PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
297 if (!bridge)
298 return -1;
299
300 /*
301 * Make sure it is a Winbond 553 and is an ISA bridge.
302 */
303 if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
304 bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
305 bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
306 pci_dev_put(bridge);
307 return -1;
308 }
309 /*
310 * We need to find function 0's revision, not function 1
311 */
312 pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
313
314 pci_dev_put(bridge);
315 return rev;
316}
317
318
319static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
320{
321 static struct ata_port_info info_dma = {
322 .sht = &sl82c105_sht,
323 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
324 .pio_mask = 0x1f,
325 .mwdma_mask = 0x07,
326 .port_ops = &sl82c105_port_ops
327 };
328 static struct ata_port_info info_early = {
329 .sht = &sl82c105_sht,
330 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
331 .pio_mask = 0x1f,
332 .port_ops = &sl82c105_port_ops
333 };
334 static struct ata_port_info *port_info[2] = { &info_early, &info_early };
335 u32 val;
336 int rev;
337
338 rev = sl82c105_bridge_revision(dev);
339
340 if (rev == -1)
341 dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
342 else if (rev <= 5)
343 dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
344 else {
345 port_info[0] = &info_dma;
346 port_info[1] = &info_dma;
347 }
348
349 pci_read_config_dword(dev, 0x40, &val);
350 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
351 pci_write_config_dword(dev, 0x40, val);
352
353
354 return ata_pci_init_one(dev, port_info, 1); /* For now */
355}
356
357static struct pci_device_id sl82c105[] = {
358 { PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
359 { 0, },
360};
361
362static struct pci_driver sl82c105_pci_driver = {
363 .name = DRV_NAME,
364 .id_table = sl82c105,
365 .probe = sl82c105_init_one,
366 .remove = ata_pci_remove_one
367};
368
369static int __init sl82c105_init(void)
370{
371 return pci_register_driver(&sl82c105_pci_driver);
372}
373
374
375static void __exit sl82c105_exit(void)
376{
377 pci_unregister_driver(&sl82c105_pci_driver);
378}
379
380
381MODULE_AUTHOR("Alan Cox");
382MODULE_DESCRIPTION("low-level driver for SL82C105");
383MODULE_LICENSE("GPL");
384MODULE_DEVICE_TABLE(pci, sl82c105);
385MODULE_VERSION(DRV_VERSION);
386
387module_init(sl82c105_init);
388module_exit(sl82c105_exit);
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
new file mode 100644
index 000000000000..36f788728f3f
--- /dev/null
+++ b/drivers/ata/pata_triflex.c
@@ -0,0 +1,285 @@
1/*
2 * pata_triflex.c - Compaq PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * triflex.c
9 *
10 * IDE Chipset driver for the Compaq TriFlex IDE controller.
11 *
12 * Known to work with the Compaq Workstation 5x00 series.
13 *
14 * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
15 * Author: Torben Mathiasen <torben.mathiasen@hp.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 * Loosely based on the piix & svwks drivers.
31 *
32 * Documentation:
33 * Not publically available.
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "pata_triflex"
46#define DRV_VERSION "0.2.5"
47
48/**
49 * triflex_probe_init - probe begin
50 * @ap: ATA port
51 *
52 * Set up cable type and use generic probe init
53 */
54
55static int triflex_probe_init(struct ata_port *ap)
56{
57 static const struct pci_bits triflex_enable_bits[] = {
58 { 0x80, 1, 0x01, 0x01 },
59 { 0x80, 1, 0x02, 0x02 }
60 };
61
62 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
63
64 if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) {
65 ata_port_disable(ap);
66 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
67 return 0;
68 }
69 ap->cbl = ATA_CBL_PATA40;
70 return ata_std_prereset(ap);
71}
72
73
74
75static void triflex_error_handler(struct ata_port *ap)
76{
77 ata_bmdma_drive_eh(ap, triflex_probe_init, ata_std_softreset, NULL, ata_std_postreset);
78}
79
80/**
81 * triflex_load_timing - timing configuration
82 * @ap: ATA interface
83 * @adev: Device on the bus
84 * @speed: speed to configure
85 *
86 * The Triflex has one set of timings per device per channel. This
87 * means we must do some switching. As the PIO and DMA timings don't
88 * match we have to do some reloading unlike PIIX devices where tuning
89 * tricks can avoid it.
90 */
91
92static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
93{
94 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
95 u32 timing = 0;
96 u32 triflex_timing, old_triflex_timing;
97	int channel_offset = ap->port_no ? 0x74 : 0x70;
98 unsigned int is_slave = (adev->devno != 0);
99
100
101 pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
102 triflex_timing = old_triflex_timing;
103
104	switch (speed)
105	{
106		case XFER_MW_DMA_2:
107			timing = 0x0103; break;
108		case XFER_MW_DMA_1:
109			timing = 0x0203; break;
110		case XFER_MW_DMA_0:
111			timing = 0x0808; break;
112		case XFER_SW_DMA_2:
113		case XFER_SW_DMA_1:
114		case XFER_SW_DMA_0:
115			timing = 0x0F0F; break;
116		case XFER_PIO_4:
117			timing = 0x0202; break;
118		case XFER_PIO_3:
119			timing = 0x0204; break;
120		case XFER_PIO_2:
121			timing = 0x0404; break;
122		case XFER_PIO_1:
123			timing = 0x0508; break;
124		case XFER_PIO_0:
125			timing = 0x0808; break;
126		default:
127			BUG();
128	}
129	triflex_timing &= ~(0xFFFF << (16 * is_slave));
130	triflex_timing |= (timing << (16 * is_slave));
131
132 if (triflex_timing != old_triflex_timing)
133 pci_write_config_dword(pdev, channel_offset, triflex_timing);
134}
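/*
 * Editorial worked example of the packing above (values illustrative):
 * each channel has a single 32-bit timing register (0x70 primary,
 * 0x74 secondary) holding the master timing in bits 15:0 and the
 * slave timing in bits 31:16.  Loading MWDMA2 (0x0103) for the slave
 * on the secondary channel:
 *
 *	old dword 0x0808FFFF -> clear bits 31:16 -> 0x0000FFFF
 *	or in 0x0103 << 16   -> 0x0103FFFF, written back to offset 0x74
 *
 * The compare against the old value avoids redundant config writes.
 */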
135
136/**
137 * triflex_set_piomode - set initial PIO mode data
138 * @ap: ATA interface
139 * @adev: ATA device
140 *
141 * Use the timing loader to set up the PIO mode. We have to do this
142 * because DMA start/stop will only be called once DMA occurs. If there
143 * has been no DMA then the PIO timings are still needed.
144 */
145static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
146{
147 triflex_load_timing(ap, adev, adev->pio_mode);
148}
149
150/**
151 * triflex_dma_start - DMA start callback
152 * @qc: Command in progress
153 *
154 * Usually drivers set the DMA timing at the point the set_dmamode call
155 * is made. The Triflex, however, requires that we load new timings on the
156 * transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc).
157 * We load the DMA timings just before starting DMA and then restore
158 * the PIO timing when the DMA is finished.
159 */
160
161static void triflex_bmdma_start(struct ata_queued_cmd *qc)
162{
163 triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
164 ata_bmdma_start(qc);
165}
166
167/**
168 * triflex_dma_stop - DMA stop callback
169 * @ap: ATA interface
170 * @adev: ATA device
171 *
172 * We loaded new timings in dma_start; as a result we need to restore
173 * the PIO timings in dma_stop so that the next command issued gets the
174 * right clock values.
175 */
176
177static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
178{
179 ata_bmdma_stop(qc);
180 triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
181}
182
183static struct scsi_host_template triflex_sht = {
184 .module = THIS_MODULE,
185 .name = DRV_NAME,
186 .ioctl = ata_scsi_ioctl,
187 .queuecommand = ata_scsi_queuecmd,
188 .can_queue = ATA_DEF_QUEUE,
189 .this_id = ATA_SHT_THIS_ID,
190 .sg_tablesize = LIBATA_MAX_PRD,
191 .max_sectors = ATA_MAX_SECTORS,
192 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
193 .emulated = ATA_SHT_EMULATED,
194 .use_clustering = ATA_SHT_USE_CLUSTERING,
195 .proc_name = DRV_NAME,
196 .dma_boundary = ATA_DMA_BOUNDARY,
197 .slave_configure = ata_scsi_slave_config,
198 .bios_param = ata_std_bios_param,
199};
200
201static struct ata_port_operations triflex_port_ops = {
202 .port_disable = ata_port_disable,
203 .set_piomode = triflex_set_piomode,
204 .mode_filter = ata_pci_default_filter,
205
206 .tf_load = ata_tf_load,
207 .tf_read = ata_tf_read,
208 .check_status = ata_check_status,
209 .exec_command = ata_exec_command,
210 .dev_select = ata_std_dev_select,
211
212 .freeze = ata_bmdma_freeze,
213 .thaw = ata_bmdma_thaw,
214 .error_handler = triflex_error_handler,
215 .post_internal_cmd = ata_bmdma_post_internal_cmd,
216
217 .bmdma_setup = ata_bmdma_setup,
218 .bmdma_start = triflex_bmdma_start,
219 .bmdma_stop = triflex_bmdma_stop,
220 .bmdma_status = ata_bmdma_status,
221
222 .qc_prep = ata_qc_prep,
223 .qc_issue = ata_qc_issue_prot,
224 .eng_timeout = ata_eng_timeout,
225 .data_xfer = ata_pio_data_xfer,
226
227 .irq_handler = ata_interrupt,
228 .irq_clear = ata_bmdma_irq_clear,
229
230 .port_start = ata_port_start,
231 .port_stop = ata_port_stop,
232 .host_stop = ata_host_stop
233};
234
235static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
236{
237 static struct ata_port_info info = {
238 .sht = &triflex_sht,
239 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
240 .pio_mask = 0x1f,
241 .mwdma_mask = 0x07,
242 .port_ops = &triflex_port_ops
243 };
244 static struct ata_port_info *port_info[2] = { &info, &info };
245 static int printed_version;
246
247 if (!printed_version++)
248 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
249
250 return ata_pci_init_one(dev, port_info, 2);
251}
252
253static const struct pci_device_id triflex[] = {
254 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
256 { 0, },
257};
258
259static struct pci_driver triflex_pci_driver = {
260 .name = DRV_NAME,
261 .id_table = triflex,
262 .probe = triflex_init_one,
263 .remove = ata_pci_remove_one
264};
265
266static int __init triflex_init(void)
267{
268 return pci_register_driver(&triflex_pci_driver);
269}
270
271
272static void __exit triflex_exit(void)
273{
274 pci_unregister_driver(&triflex_pci_driver);
275}
276
277
278MODULE_AUTHOR("Alan Cox");
279MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
280MODULE_LICENSE("GPL");
281MODULE_DEVICE_TABLE(pci, triflex);
282MODULE_VERSION(DRV_VERSION);
283
284module_init(triflex_init);
285module_exit(triflex_exit);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
new file mode 100644
index 000000000000..1b2ff133b163
--- /dev/null
+++ b/drivers/ata/pata_via.c
@@ -0,0 +1,568 @@
1/*
2 * pata_via.c - VIA PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Documentation
7 * Most chipset documentation available under NDA only
8 *
9 * VIA version guide
10 * VIA VT82C561 - early design, uses ata_generic currently
11 * VIA VT82C576 - MWDMA, 33MHz
12 * VIA VT82C586 - MWDMA, 33MHz
13 * VIA VT82C586a - Added UDMA to 33MHz
14 * VIA VT82C586b - UDMA33
15 * VIA VT82C596a - Nonfunctional UDMA66
16 * VIA VT82C596b - Working UDMA66
17 * VIA VT82C686 - Nonfunctional UDMA66
18 * VIA VT82C686a - Working UDMA66
19 * VIA VT82C686b - Updated to UDMA100
20 * VIA VT8231 - UDMA100
21 * VIA VT8233 - UDMA100
22 * VIA VT8233a - UDMA133
23 * VIA VT8233c - UDMA100
24 * VIA VT8235 - UDMA133
25 * VIA VT8237 - UDMA133
26 *
27 * Most registers remain compatible across chips. Others start reserved
28 * and acquire sensible semantics if set to 1 (eg cable detect). A few
29 * exceptions exist, notably around the FIFO settings.
30 *
31 * One additional quirk of the VIA design is that, like ALi, they use only
32 * a few PCI IDs for a lot of different chips.
33 *
34 * Based heavily on:
35 *
36 * Version 3.38
37 *
38 * VIA IDE driver for Linux. Supported southbridges:
39 *
40 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
41 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
42 * vt8235, vt8237
43 *
44 * Copyright (c) 2000-2002 Vojtech Pavlik
45 *
46 * Based on the work of:
47 * Michel Aubry
48 * Jeff Garzik
49 * Andre Hedrick
50
51 */
52
53#include <linux/kernel.h>
54#include <linux/module.h>
55#include <linux/pci.h>
56#include <linux/init.h>
57#include <linux/blkdev.h>
58#include <linux/delay.h>
59#include <scsi/scsi_host.h>
60#include <linux/libata.h>
61
62#define DRV_NAME "pata_via"
63#define DRV_VERSION "0.1.13"
64
65/*
66 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
67 * driver.
68 */
69
70enum {
71 VIA_UDMA = 0x007,
72 VIA_UDMA_NONE = 0x000,
73 VIA_UDMA_33 = 0x001,
74 VIA_UDMA_66 = 0x002,
75 VIA_UDMA_100 = 0x003,
76 VIA_UDMA_133 = 0x004,
77 VIA_BAD_PREQ = 0x010, /* Crashes if the "PREQ# till DDACK#" bit is set */
78 VIA_BAD_CLK66 = 0x020, /* 66 MHz clock doesn't work correctly */
79 VIA_SET_FIFO = 0x040, /* Needs to have FIFO split set */
80 VIA_NO_UNMASK = 0x080, /* Doesn't work with IRQ unmasking on */
81 VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
82 VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
83 VIA_NO_ENABLES = 0x400, /* Has no enablebits */
84};
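/*
 * Editorial note: the low three bits (the VIA_UDMA mask) hold the
 * UDMA class as a small enum while the higher bits are independent
 * quirk flags, so a bridge-table entry such as
 *
 *	VIA_UDMA_133 | VIA_BAD_AST
 *
 * decodes as "UDMA133-class chip, do not touch address setup timing":
 *
 *	int udma_class = flags & VIA_UDMA;	// == VIA_UDMA_133
 *	int bad_ast = flags & VIA_BAD_AST;	// nonzero
 */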
85
86/*
87 * VIA SouthBridge chips.
88 */
89
90static const struct via_isa_bridge {
91 const char *name;
92 u16 id;
93 u8 rev_min;
94 u8 rev_max;
95 u16 flags;
96} via_isa_bridges[] = {
97 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
98 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
99 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
100 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
101 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
102 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
103 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
104 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 },
105 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 },
106 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 },
107 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 },
108 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
109 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 },
110 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
111 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
112 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
113 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
114 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
115 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
116 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
117 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
118 { NULL }
119};
120
121/**
122 * via_cable_detect - cable detection
123 * @ap: ATA port
124 *
125 * Perform cable detection. Actually for the VIA case the BIOS
126 * already did this for us. We read the values provided by the
127 * BIOS. If you are using an 8235 in a non-PC configuration you
128 * may need to update this code.
129 *
130 * Hotplug also affects this.
131 */
132
133static int via_cable_detect(struct ata_port *ap) {
134 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
135 u32 ata66;
136
137 pci_read_config_dword(pdev, 0x50, &ata66);
138 /* Check both of the drive cable reporting bits, as we might not
139 have two drives */
140 if (ata66 & (0x10100000 >> (16 * ap->port_no)))
141 return ATA_CBL_PATA80;
142 else
143 return ATA_CBL_PATA40;
144}
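/*
 * Editorial sketch of the mask arithmetic above: register 0x50 holds
 * one UDMA timing byte per drive (0x53 = port 0 master down to
 * 0x50 = port 1 slave, matching the "3 - 2*port - devno" offset used
 * in via_do_set_mode below), and bit 4 of each byte is the 80-wire
 * flag left behind by the BIOS.  Read as a little-endian dword:
 *
 *	port 0: 0x10100000       -> bits 28 and 20 (bytes 0x53 and 0x52)
 *	port 1: 0x10100000 >> 16 -> bits 12 and 4  (bytes 0x51 and 0x50)
 *
 * so the test reports an 80-wire cable if either drive on the
 * channel saw one.
 */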
145
146static int via_pre_reset(struct ata_port *ap)
147{
148 const struct via_isa_bridge *config = ap->host->private_data;
149
150 if (!(config->flags & VIA_NO_ENABLES)) {
151 static const struct pci_bits via_enable_bits[] = {
152 { 0x40, 1, 0x02, 0x02 },
153 { 0x40, 1, 0x01, 0x01 }
154 };
155
156 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
157
158 if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) {
159 ata_port_disable(ap);
160 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
161 return 0;
162 }
163 }
164
165 if ((config->flags & VIA_UDMA) >= VIA_UDMA_66)
166 ap->cbl = via_cable_detect(ap);
167 else
168 ap->cbl = ATA_CBL_PATA40;
169 return ata_std_prereset(ap);
170}
171
172
173/**
174 * via_error_handler - reset for VIA chips
175 * @ap: ATA port
176 *
177 * Handle the reset callback for the later chips with cable detect
178 */
179
180static void via_error_handler(struct ata_port *ap)
181{
182 ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
183}
184
185/**
186 * via_do_set_mode - set initial PIO mode data
187 * @ap: ATA interface
188 * @adev: ATA device
189 * @mode: ATA mode being programmed
190 * @tdiv: Clocks per PCI clock
191 * @set_ast: Set to program address setup
192 * @udma_type: UDMA mode/format of registers
193 *
194 * Program the VIA registers for DMA and PIO modes. Uses the ATA timing
195 * support to compute the timings.
196 *
197 * FIXME: Hotplug will require that we serialize multiple mode changes
198 * on the two channels.
199 */
200
201static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type)
202{
203 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
204 struct ata_device *peer = ata_dev_pair(adev);
205 struct ata_timing t, p;
206 static int via_clock = 33333; /* Bus clock in kHz - ought to be tunable one day */
207 unsigned long T = 1000000000 / via_clock;
208 unsigned long UT = T/tdiv;
209 int ut;
210 int offset = 3 - (2*ap->port_no) - adev->devno;
211
212
213 /* Calculate the timing values we require */
214 ata_timing_compute(adev, mode, &t, T, UT);
215
216 /* We share 8bit timing so we must merge the constraints */
217 if (peer) {
218 if (peer->pio_mode) {
219 ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
220 ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
221 }
222 }
223
224 /* Address setup is programmable but breaks on UDMA133 setups */
225 if (set_ast) {
226 u8 setup; /* 2 bits per drive */
227 int shift = 2 * offset;
228
229 pci_read_config_byte(pdev, 0x4C, &setup);
230 setup &= ~(3 << shift);
231 setup |= FIT(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */
232 pci_write_config_byte(pdev, 0x4C, setup);
233 }
234
235 /* Load the PIO mode bits */
236 pci_write_config_byte(pdev, 0x4F - ap->port_no,
237 ((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1));
238 pci_write_config_byte(pdev, 0x48 + offset,
239 ((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1));
240
241 /* Load the UDMA bits according to type */
242 switch(udma_type) {
243 default:
244 /* BUG() ? */
245 /* fall through */
246 case 33:
247 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03;
248 break;
249 case 66:
250 ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f;
251 break;
252 case 100:
253 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
254 break;
255 case 133:
256 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
257 break;
258 }
259 /* Set the UDMA timing unless the chip has no UDMA capability */
260 if (udma_type)
261 pci_write_config_byte(pdev, 0x50 + offset, ut);
262}
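/*
 * Editorial worked example of the clock arithmetic above, following
 * directly from the code: with via_clock = 33333 kHz,
 *
 *	T  = 1000000000 / 33333 ~= 30000   (one PCI clock, in ps)
 *	UT = T / tdiv
 *
 * so a UDMA133-class chip (tdiv = 4) quantises its UDMA timings
 * against UT ~= 7500 ps, while the 8-bit and 16-bit PIO/MWDMA
 * timings are still computed against the full ~30 ns PCI clock.
 */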
263
264static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
265{
266 const struct via_isa_bridge *config = ap->host->private_data;
267 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
268 int mode = config->flags & VIA_UDMA;
269 static u8 tclock[5] = { 1, 1, 2, 3, 4 };
270 static u8 udma[5] = { 0, 33, 66, 100, 133 };
271
272 via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]);
273}
274
275static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
276{
277 const struct via_isa_bridge *config = ap->host->private_data;
278 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
279 int mode = config->flags & VIA_UDMA;
280 static u8 tclock[5] = { 1, 1, 2, 3, 4 };
281 static u8 udma[5] = { 0, 33, 66, 100, 133 };
282
283 via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
284}
285
286static struct scsi_host_template via_sht = {
287 .module = THIS_MODULE,
288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd,
291 .can_queue = ATA_DEF_QUEUE,
292 .this_id = ATA_SHT_THIS_ID,
293 .sg_tablesize = LIBATA_MAX_PRD,
294 .max_sectors = ATA_MAX_SECTORS,
295 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
296 .emulated = ATA_SHT_EMULATED,
297 .use_clustering = ATA_SHT_USE_CLUSTERING,
298 .proc_name = DRV_NAME,
299 .dma_boundary = ATA_DMA_BOUNDARY,
300 .slave_configure = ata_scsi_slave_config,
301 .bios_param = ata_std_bios_param,
302};
303
304static struct ata_port_operations via_port_ops = {
305 .port_disable = ata_port_disable,
306 .set_piomode = via_set_piomode,
307 .set_dmamode = via_set_dmamode,
308 .mode_filter = ata_pci_default_filter,
309
310 .tf_load = ata_tf_load,
311 .tf_read = ata_tf_read,
312 .check_status = ata_check_status,
313 .exec_command = ata_exec_command,
314 .dev_select = ata_std_dev_select,
315
316 .freeze = ata_bmdma_freeze,
317 .thaw = ata_bmdma_thaw,
318 .error_handler = via_error_handler,
319 .post_internal_cmd = ata_bmdma_post_internal_cmd,
320
321 .bmdma_setup = ata_bmdma_setup,
322 .bmdma_start = ata_bmdma_start,
323 .bmdma_stop = ata_bmdma_stop,
324 .bmdma_status = ata_bmdma_status,
325
326 .qc_prep = ata_qc_prep,
327 .qc_issue = ata_qc_issue_prot,
328 .eng_timeout = ata_eng_timeout,
329 .data_xfer = ata_pio_data_xfer,
330
331 .irq_handler = ata_interrupt,
332 .irq_clear = ata_bmdma_irq_clear,
333
334 .port_start = ata_port_start,
335 .port_stop = ata_port_stop,
336 .host_stop = ata_host_stop
337};
338
339static struct ata_port_operations via_port_ops_noirq = {
340 .port_disable = ata_port_disable,
341 .set_piomode = via_set_piomode,
342 .set_dmamode = via_set_dmamode,
343 .mode_filter = ata_pci_default_filter,
344
345 .tf_load = ata_tf_load,
346 .tf_read = ata_tf_read,
347 .check_status = ata_check_status,
348 .exec_command = ata_exec_command,
349 .dev_select = ata_std_dev_select,
350
351 .freeze = ata_bmdma_freeze,
352 .thaw = ata_bmdma_thaw,
353 .error_handler = via_error_handler,
354 .post_internal_cmd = ata_bmdma_post_internal_cmd,
355
356 .bmdma_setup = ata_bmdma_setup,
357 .bmdma_start = ata_bmdma_start,
358 .bmdma_stop = ata_bmdma_stop,
359 .bmdma_status = ata_bmdma_status,
360
361 .qc_prep = ata_qc_prep,
362 .qc_issue = ata_qc_issue_prot,
363 .eng_timeout = ata_eng_timeout,
364 .data_xfer = ata_pio_data_xfer_noirq,
365
366 .irq_handler = ata_interrupt,
367 .irq_clear = ata_bmdma_irq_clear,
368
369 .port_start = ata_port_start,
370 .port_stop = ata_port_stop,
371 .host_stop = ata_host_stop
372};
373
374/**
375 * via_init_one - discovery callback
376 * @pdev: PCI device ID
377 * @id: PCI table info
378 *
379 * A VIA IDE interface has been discovered. Figure out what revision
380 * and perform configuration work before handing it to the ATA layer
381 */
382
383static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
384{
385 /* Early VIA without UDMA support */
386 static struct ata_port_info via_mwdma_info = {
387 .sht = &via_sht,
388 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
389 .pio_mask = 0x1f,
390 .mwdma_mask = 0x07,
391 .port_ops = &via_port_ops
392 };
393 /* Ditto with IRQ masking required */
394 static struct ata_port_info via_mwdma_info_borked = {
395 .sht = &via_sht,
396 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
397 .pio_mask = 0x1f,
398 .mwdma_mask = 0x07,
399 .port_ops = &via_port_ops_noirq,
400 };
401 /* VIA UDMA 33 devices (and borked 66) */
402 static struct ata_port_info via_udma33_info = {
403 .sht = &via_sht,
404 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
405 .pio_mask = 0x1f,
406 .mwdma_mask = 0x07,
407 .udma_mask = 0x7,
408 .port_ops = &via_port_ops
409 };
410 /* VIA UDMA 66 devices */
411 static struct ata_port_info via_udma66_info = {
412 .sht = &via_sht,
413 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
414 .pio_mask = 0x1f,
415 .mwdma_mask = 0x07,
416 .udma_mask = 0x1f,
417 .port_ops = &via_port_ops
418 };
419 /* VIA UDMA 100 devices */
420 static struct ata_port_info via_udma100_info = {
421 .sht = &via_sht,
422 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
423 .pio_mask = 0x1f,
424 .mwdma_mask = 0x07,
425 .udma_mask = 0x3f,
426 .port_ops = &via_port_ops
427 };
428 /* UDMA133 with bad AST (All current 133) */
429 static struct ata_port_info via_udma133_info = {
430 .sht = &via_sht,
431 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
432 .pio_mask = 0x1f,
433 .mwdma_mask = 0x07,
434 .udma_mask = 0x7f, /* FIXME: should check north bridge */
435 .port_ops = &via_port_ops
436 };
437 struct ata_port_info *port_info[2], *type;
438 struct pci_dev *isa = NULL;
439 const struct via_isa_bridge *config;
440 static int printed_version;
441 u8 t;
442 u8 enable;
443 u32 timing;
444
445 if (!printed_version++)
446 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
447
448 /* To find out how the IDE will behave, and what features we
449 actually have, we must look at the bridge, not the IDE controller */
450 for (config = via_isa_bridges; config->id; config++)
451 if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
452 !!(config->flags & VIA_BAD_ID),
453 config->id, NULL))) {
454
455 pci_read_config_byte(isa, PCI_REVISION_ID, &t);
456 if (t >= config->rev_min &&
457 t <= config->rev_max)
458 break;
459 pci_dev_put(isa);
460 }
461
462 if (!config->id) {
463 printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
464 return -ENODEV;
465 }
466 pci_dev_put(isa);
467
468 /* 0x40 low bits indicate enabled channels */
469 pci_read_config_byte(pdev, 0x40 , &enable);
470 enable &= 3;
471 if (enable == 0) {
472 return -ENODEV;
473 }
474
475 /* Initialise the FIFO for the enabled channels. */
476 if (config->flags & VIA_SET_FIFO) {
477 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
478 u8 fifo;
479
480 pci_read_config_byte(pdev, 0x43, &fifo);
481
482 /* Clear PREQ# until DDACK# for errata */
483 if (config->flags & VIA_BAD_PREQ)
484 fifo &= 0x7F;
485 else
486 fifo &= 0x9f;
487 /* Turn on FIFO for enabled channels */
488 fifo |= fifo_setting[enable];
489 pci_write_config_byte(pdev, 0x43, fifo);
490 }
491 /* Clock set up */
492 switch(config->flags & VIA_UDMA) {
493 case VIA_UDMA_NONE:
494 if (config->flags & VIA_NO_UNMASK)
495 type = &via_mwdma_info_borked;
496 else
497 type = &via_mwdma_info;
498 break;
499 case VIA_UDMA_33:
500 type = &via_udma33_info;
501 break;
502 case VIA_UDMA_66:
503 type = &via_udma66_info;
504 /* The 66 MHz devices require that we enable the clock */
505 pci_read_config_dword(pdev, 0x50, &timing);
506 timing |= 0x80008;
507 pci_write_config_dword(pdev, 0x50, timing);
508 break;
509 case VIA_UDMA_100:
510 type = &via_udma100_info;
511 break;
512 case VIA_UDMA_133:
513 type = &via_udma133_info;
514 break;
515 default:
516 WARN_ON(1);
517 return -ENODEV;
518 }
519
520 if (config->flags & VIA_BAD_CLK66) {
521 /* Disable the 66MHz clock on problem devices */
522 pci_read_config_dword(pdev, 0x50, &timing);
523 timing &= ~0x80008;
524 pci_write_config_dword(pdev, 0x50, timing);
525 }
526
527 /* We have established the device type, now fire it up */
528 type->private_data = (void *)config;
529
530 port_info[0] = port_info[1] = type;
531 return ata_pci_init_one(pdev, port_info, 2);
532}
533
534static const struct pci_device_id via[] = {
535 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1), },
536 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1), },
537 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410), },
538 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), },
539 { 0, },
540};
541
542static struct pci_driver via_pci_driver = {
543 .name = DRV_NAME,
544 .id_table = via,
545 .probe = via_init_one,
546 .remove = ata_pci_remove_one
547};
548
549static int __init via_init(void)
550{
551 return pci_register_driver(&via_pci_driver);
552}
553
554
555static void __exit via_exit(void)
556{
557 pci_unregister_driver(&via_pci_driver);
558}
559
560
561MODULE_AUTHOR("Alan Cox");
562MODULE_DESCRIPTION("low-level driver for VIA PATA");
563MODULE_LICENSE("GPL");
564MODULE_DEVICE_TABLE(pci, via);
565MODULE_VERSION(DRV_VERSION);
566
567module_init(via_init);
568module_exit(via_exit);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
new file mode 100644
index 000000000000..0e23ecb77bc2
--- /dev/null
+++ b/drivers/ata/pdc_adma.c
@@ -0,0 +1,740 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.04"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
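/*
 * Editorial worked example of the two macros: with the two ports of
 * this controller the register window decodes as
 *
 *	ADMA_ATA_REGS(base, 0) = base + 0x00	port 0 ATA taskfile regs
 *	ADMA_ATA_REGS(base, 1) = base + 0x40	port 1 ATA taskfile regs
 *	ADMA_REGS(base, 0)     = base + 0x80	port 0 ADMA engine regs
 *	ADMA_REGS(base, 1)     = base + 0xa0	port 1 ADMA engine regs
 */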
56
57enum {
58 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
61 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
62
63 ADMA_DMA_BOUNDARY = 0xffffffff,
64
65 /* global register offsets */
66 ADMA_MODE_LOCK = 0x00c7,
67
68 /* per-channel register offsets */
69 ADMA_CONTROL = 0x0000, /* ADMA control */
70 ADMA_STATUS = 0x0002, /* ADMA status */
71 ADMA_CPB_COUNT = 0x0004, /* CPB count */
72 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
73 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
74 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
75 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
76 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
77
78 /* ADMA_CONTROL register bits */
79 aNIEN = (1 << 8), /* irq mask: 1==masked */
80 aGO = (1 << 7), /* packet trigger ("Go!") */
81 aRSTADM = (1 << 5), /* ADMA logic reset */
82 aPIOMD4 = 0x0003, /* PIO mode 4 */
83
84 /* ADMA_STATUS register bits */
85 aPSD = (1 << 6),
86 aUIRQ = (1 << 4),
87 aPERR = (1 << 0),
88
89 /* CPB bits */
90 cDONE = (1 << 0),
91 cVLD = (1 << 0),
92 cDAT = (1 << 2),
93 cIEN = (1 << 3),
94
95 /* PRD bits */
96 pORD = (1 << 4),
97 pDIRO = (1 << 5),
98 pEND = (1 << 7),
99
100 /* ATA register flags */
101 rIGN = (1 << 5),
102 rEND = (1 << 7),
103
104 /* ATA register addresses */
105 ADMA_REGS_CONTROL = 0x0e,
106 ADMA_REGS_SECTOR_COUNT = 0x12,
107 ADMA_REGS_LBA_LOW = 0x13,
108 ADMA_REGS_LBA_MID = 0x14,
109 ADMA_REGS_LBA_HIGH = 0x15,
110 ADMA_REGS_DEVICE = 0x16,
111 ADMA_REGS_COMMAND = 0x17,
112
113 /* PCI device IDs */
114 board_1841_idx = 0, /* ADMA 2-port controller */
115};
116
117typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
118
119struct adma_port_priv {
120 u8 *pkt;
121 dma_addr_t pkt_dma;
122 adma_state_t state;
123};
124
125static int adma_ata_init_one (struct pci_dev *pdev,
126 const struct pci_device_id *ent);
127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host *host);
131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap);
138static void adma_irq_clear(struct ata_port *ap);
139static void adma_eng_timeout(struct ata_port *ap);
140
141static struct scsi_host_template adma_ata_sht = {
142 .module = THIS_MODULE,
143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd,
146 .can_queue = ATA_DEF_QUEUE,
147 .this_id = ATA_SHT_THIS_ID,
148 .sg_tablesize = LIBATA_MAX_PRD,
149 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
150 .emulated = ATA_SHT_EMULATED,
151 .use_clustering = ENABLE_CLUSTERING,
152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
156 .bios_param = ata_std_bios_param,
157};
158
159static const struct ata_port_operations adma_ata_ops = {
160 .port_disable = ata_port_disable,
161 .tf_load = ata_tf_load,
162 .tf_read = ata_tf_read,
163 .check_status = ata_check_status,
164 .check_atapi_dma = adma_check_atapi_dma,
165 .exec_command = ata_exec_command,
166 .dev_select = ata_std_dev_select,
167 .phy_reset = adma_phy_reset,
168 .qc_prep = adma_qc_prep,
169 .qc_issue = adma_qc_issue,
170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
172 .irq_handler = adma_intr,
173 .irq_clear = adma_irq_clear,
174 .port_start = adma_port_start,
175 .port_stop = adma_port_stop,
176 .host_stop = adma_host_stop,
177 .bmdma_stop = adma_bmdma_stop,
178 .bmdma_status = adma_bmdma_status,
179};
180
181static struct ata_port_info adma_port_info[] = {
182 /* board_1841_idx */
183 {
184 .sht = &adma_ata_sht,
185 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
188 .pio_mask = 0x10, /* pio4 */
189 .udma_mask = 0x1f, /* udma0-4 */
190 .port_ops = &adma_ata_ops,
191 },
192};
193
194static const struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
196 board_1841_idx },
197
198 { } /* terminate list */
199};
200
201static struct pci_driver adma_ata_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = adma_ata_pci_tbl,
204 .probe = adma_ata_init_one,
205 .remove = ata_pci_remove_one,
206};
207
208static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
209{
210 return 1; /* ATAPI DMA not yet supported */
211}
212
213static void adma_bmdma_stop(struct ata_queued_cmd *qc)
214{
215 /* nothing */
216}
217
218static u8 adma_bmdma_status(struct ata_port *ap)
219{
220 return 0;
221}
222
223static void adma_irq_clear(struct ata_port *ap)
224{
225 /* nothing */
226}
227
228static void adma_reset_engine(void __iomem *chan)
229{
230 /* reset ADMA to idle state */
231 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
232 udelay(2);
233 writew(aPIOMD4, chan + ADMA_CONTROL);
234 udelay(2);
235}
236
237static void adma_reinit_engine(struct ata_port *ap)
238{
239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242
243 /* mask/clear ATA interrupts */
244 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
245 ata_check_status(ap);
246
247 /* reset the ADMA engine */
248 adma_reset_engine(chan);
249
250 /* set in-FIFO threshold to 0x100 */
251 writew(0x100, chan + ADMA_FIFO_IN);
252
253 /* set CPB pointer */
254 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
255
256 /* set out-FIFO threshold to 0x100 */
257 writew(0x100, chan + ADMA_FIFO_OUT);
258
259 /* set CPB count */
260 writew(1, chan + ADMA_CPB_COUNT);
261
262 /* read/discard ADMA status */
263 readb(chan + ADMA_STATUS);
264}
265
266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{
268 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
269
270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */
272}
273
274static void adma_phy_reset(struct ata_port *ap)
275{
276 struct adma_port_priv *pp = ap->private_data;
277
278 pp->state = adma_state_idle;
279 adma_reinit_engine(ap);
280 ata_port_probe(ap);
281 ata_bus_reset(ap);
282}
283
284static void adma_eng_timeout(struct ata_port *ap)
285{
286 struct adma_port_priv *pp = ap->private_data;
287
288 if (pp->state != adma_state_idle) /* healthy paranoia */
289 pp->state = adma_state_mmio;
290 adma_reinit_engine(ap);
291 ata_eng_timeout(ap);
292}
293
294static int adma_fill_sg(struct ata_queued_cmd *qc)
295{
296 struct scatterlist *sg;
297 struct ata_port *ap = qc->ap;
298 struct adma_port_priv *pp = ap->private_data;
299 u8 *buf = pp->pkt;
300 int i = (2 + buf[3]) * 8;
301 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
302
303 ata_for_each_sg(sg, qc) {
304 u32 addr;
305 u32 len;
306
307 addr = (u32)sg_dma_address(sg);
308 *(__le32 *)(buf + i) = cpu_to_le32(addr);
309 i += 4;
310
311 len = sg_dma_len(sg) >> 3;
312 *(__le32 *)(buf + i) = cpu_to_le32(len);
313 i += 4;
314
315 if (ata_sg_is_last(sg, qc))
316 pFLAGS |= pEND;
317 buf[i++] = pFLAGS;
318 buf[i++] = qc->dev->dma_mode & 0xf;
319 buf[i++] = 0; /* pPKLW */
320 buf[i++] = 0; /* reserved */
321
322 *(__le32 *)(buf + i)
323 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
324 i += 4;
325
326 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
327 (unsigned long)addr, len);
328 }
329 return i;
330}
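/*
 * Editorial sketch of the 16-byte PRD entry that adma_fill_sg()
 * packs by hand above; the struct below is illustrative only and is
 * not used by the driver.
 */
struct adma_prd_layout {
	__le32	addr;		/* DMA address of this segment */
	__le32	len;		/* segment length in 8-byte units */
	u8	flags;		/* pORD, plus pDIRO on writes, pEND on last */
	u8	dma_mode;	/* low nibble of the device's DMA mode */
	u8	pklw;		/* packet length word, unused here */
	u8	reserved;
	__le32	next;		/* bus address of the next PRD, 0 on the last */
};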
331
332static void adma_qc_prep(struct ata_queued_cmd *qc)
333{
334 struct adma_port_priv *pp = qc->ap->private_data;
335 u8 *buf = pp->pkt;
336 u32 pkt_dma = (u32)pp->pkt_dma;
337 int i = 0;
338
339 VPRINTK("ENTER\n");
340
341 adma_enter_reg_mode(qc->ap);
342 if (qc->tf.protocol != ATA_PROT_DMA) {
343 ata_qc_prep(qc);
344 return;
345 }
346
347 buf[i++] = 0; /* Response flags */
348 buf[i++] = 0; /* reserved */
349 buf[i++] = cVLD | cDAT | cIEN;
350 i++; /* cLEN, gets filled in below */
351
352 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
353 i += 4; /* cNCPB */
354 i += 4; /* cPRD, gets filled in below */
355
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359 buf[i++] = 0; /* reserved */
360
361 /* ATA registers; must be a multiple of 4 */
362 buf[i++] = qc->tf.device;
363 buf[i++] = ADMA_REGS_DEVICE;
364 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
365 buf[i++] = qc->tf.hob_nsect;
366 buf[i++] = ADMA_REGS_SECTOR_COUNT;
367 buf[i++] = qc->tf.hob_lbal;
368 buf[i++] = ADMA_REGS_LBA_LOW;
369 buf[i++] = qc->tf.hob_lbam;
370 buf[i++] = ADMA_REGS_LBA_MID;
371 buf[i++] = qc->tf.hob_lbah;
372 buf[i++] = ADMA_REGS_LBA_HIGH;
373 }
374 buf[i++] = qc->tf.nsect;
375 buf[i++] = ADMA_REGS_SECTOR_COUNT;
376 buf[i++] = qc->tf.lbal;
377 buf[i++] = ADMA_REGS_LBA_LOW;
378 buf[i++] = qc->tf.lbam;
379 buf[i++] = ADMA_REGS_LBA_MID;
380 buf[i++] = qc->tf.lbah;
381 buf[i++] = ADMA_REGS_LBA_HIGH;
382 buf[i++] = 0;
383 buf[i++] = ADMA_REGS_CONTROL;
384 buf[i++] = rIGN;
385 buf[i++] = 0;
386 buf[i++] = qc->tf.command;
387 buf[i++] = ADMA_REGS_COMMAND | rEND;
388
389 buf[3] = (i >> 3) - 2; /* cLEN */
390 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
391
392 i = adma_fill_sg(qc);
393 wmb(); /* flush PRDs and pkt to memory */
394#if 0
395 /* dump out CPB + PRDs for debug */
396 {
397 int j, len = 0;
398 static char obuf[2048];
399 for (j = 0; j < i; ++j) {
400 len += sprintf(obuf+len, "%02x ", buf[j]);
401 if ((j & 7) == 7) {
402 printk("%s\n", obuf);
403 len = 0;
404 }
405 }
406 if (len)
407 printk("%s\n", obuf);
408 }
409#endif
410}
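/*
 * Editorial summary of the CPB assembled above: a 16-byte header
 * (response flags, cLEN, a cNCPB pointer that points back at this
 * same CPB, and a cPRD pointer to the first PRD), followed by
 * (value, register) byte pairs that the engine replays into the ATA
 * taskfile, the last pair being the command byte tagged rEND.
 * cLEN = (i >> 3) - 2 is the length of that register program in
 * 8-byte units, excluding the two header quadwords.
 */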
411
412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{
414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
416
417 VPRINTK("ENTER, ap %p\n", ap);
418
419 /* fire up the ADMA engine */
420 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
421}
422
423static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
445static inline unsigned int adma_intr_pkt(struct ata_host *host)
446{
447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host->mmio_base;
449
450 for (port_no = 0; port_no < host->n_ports; ++port_no) {
451 struct ata_port *ap = host->ports[port_no];
452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
455 u8 status = readb(chan + ADMA_STATUS);
456
457 if (status == 0)
458 continue;
459 handled = 1;
460 adma_enter_reg_mode(ap);
461 if (ap->flags & ATA_FLAG_DISABLED)
462 continue;
463 pp = ap->private_data;
464 if (!pp || pp->state != adma_state_pkt)
465 continue;
466 qc = ata_qc_from_tag(ap, ap->active_tag);
467 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
468 if ((status & (aPERR | aPSD | aUIRQ)))
469 qc->err_mask |= AC_ERR_OTHER;
470 else if (pp->pkt[0] != cDONE)
471 qc->err_mask |= AC_ERR_OTHER;
472
473 ata_qc_complete(qc);
474 }
475 }
476 return handled;
477}
478
479static inline unsigned int adma_intr_mmio(struct ata_host *host)
480{
481 unsigned int handled = 0, port_no;
482
483 for (port_no = 0; port_no < host->n_ports; ++port_no) {
484 struct ata_port *ap;
485 ap = host->ports[port_no];
486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
487 struct ata_queued_cmd *qc;
488 struct adma_port_priv *pp = ap->private_data;
489 if (!pp || pp->state != adma_state_mmio)
490 continue;
491 qc = ata_qc_from_tag(ap, ap->active_tag);
492 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
493
494 /* check main status, clearing INTRQ */
495 u8 status = ata_check_status(ap);
496 if ((status & ATA_BUSY))
497 continue;
498 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
499 ap->id, qc->tf.protocol, status);
500
501 /* complete taskfile transaction */
502 pp->state = adma_state_idle;
503 qc->err_mask |= ac_err_mask(status);
504 ata_qc_complete(qc);
505 handled = 1;
506 }
507 }
508 }
509 return handled;
510}
511
512static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
513{
514 struct ata_host *host = dev_instance;
515 unsigned int handled = 0;
516
517 VPRINTK("ENTER\n");
518
519 spin_lock(&host->lock);
520 handled = adma_intr_pkt(host) | adma_intr_mmio(host);
521 spin_unlock(&host->lock);
522
523 VPRINTK("EXIT\n");
524
525 return IRQ_RETVAL(handled);
526}
527
528static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
529{
530 port->cmd_addr =
531 port->data_addr = base + 0x000;
532 port->error_addr =
533 port->feature_addr = base + 0x004;
534 port->nsect_addr = base + 0x008;
535 port->lbal_addr = base + 0x00c;
536 port->lbam_addr = base + 0x010;
537 port->lbah_addr = base + 0x014;
538 port->device_addr = base + 0x018;
539 port->status_addr =
540 port->command_addr = base + 0x01c;
541 port->altstatus_addr =
542 port->ctl_addr = base + 0x038;
543}
544
545static int adma_port_start(struct ata_port *ap)
546{
547 struct device *dev = ap->host->dev;
548 struct adma_port_priv *pp;
549 int rc;
550
551 rc = ata_port_start(ap);
552 if (rc)
553 return rc;
554 adma_enter_reg_mode(ap);
555 rc = -ENOMEM;
556 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
557 if (!pp)
558 goto err_out;
559 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
560 GFP_KERNEL);
561 if (!pp->pkt)
562 goto err_out_kfree;
563 /* paranoia? */
564 if ((pp->pkt_dma & 7) != 0) {
565 printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
566 (u32)pp->pkt_dma);
567 dma_free_coherent(dev, ADMA_PKT_BYTES,
568 pp->pkt, pp->pkt_dma);
569 goto err_out_kfree;
570 }
571 memset(pp->pkt, 0, ADMA_PKT_BYTES);
572 ap->private_data = pp;
573 adma_reinit_engine(ap);
574 return 0;
575
576err_out_kfree:
577 kfree(pp);
578err_out:
579 ata_port_stop(ap);
580 return rc;
581}
582
583static void adma_port_stop(struct ata_port *ap)
584{
585 struct device *dev = ap->host->dev;
586 struct adma_port_priv *pp = ap->private_data;
587
588 adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
589 if (pp != NULL) {
590 ap->private_data = NULL;
591 if (pp->pkt != NULL)
592 dma_free_coherent(dev, ADMA_PKT_BYTES,
593 pp->pkt, pp->pkt_dma);
594 kfree(pp);
595 }
596 ata_port_stop(ap);
597}
598
599static void adma_host_stop(struct ata_host *host)
600{
601 unsigned int port_no;
602
603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
604 adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
605
606 ata_pci_host_stop(host);
607}
608
609static void adma_host_init(unsigned int chip_id,
610 struct ata_probe_ent *probe_ent)
611{
612 unsigned int port_no;
613 void __iomem *mmio_base = probe_ent->mmio_base;
614
615 /* enable/lock aGO operation */
616 writeb(7, mmio_base + ADMA_MODE_LOCK);
617
618 /* reset the ADMA logic */
619 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
620 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
621}
622
623static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
624{
625 int rc;
626
627 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
628 if (rc) {
629 dev_printk(KERN_ERR, &pdev->dev,
630 "32-bit DMA enable failed\n");
631 return rc;
632 }
633 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
634 if (rc) {
635 dev_printk(KERN_ERR, &pdev->dev,
636 "32-bit consistent DMA enable failed\n");
637 return rc;
638 }
639 return 0;
640}
641
642static int adma_ata_init_one(struct pci_dev *pdev,
643 const struct pci_device_id *ent)
644{
645 static int printed_version;
646 struct ata_probe_ent *probe_ent = NULL;
647 void __iomem *mmio_base;
648 unsigned int board_idx = (unsigned int) ent->driver_data;
649 int rc, port_no;
650
651 if (!printed_version++)
652 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
653
654 rc = pci_enable_device(pdev);
655 if (rc)
656 return rc;
657
658 rc = pci_request_regions(pdev, DRV_NAME);
659 if (rc)
660 goto err_out;
661
662 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
663 rc = -ENODEV;
664 goto err_out_regions;
665 }
666
667 mmio_base = pci_iomap(pdev, 4, 0);
668 if (mmio_base == NULL) {
669 rc = -ENOMEM;
670 goto err_out_regions;
671 }
672
673 rc = adma_set_dma_masks(pdev, mmio_base);
674 if (rc)
675 goto err_out_iounmap;
676
677 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
678 if (probe_ent == NULL) {
679 rc = -ENOMEM;
680 goto err_out_iounmap;
681 }
682
683 probe_ent->dev = pci_dev_to_dev(pdev);
684 INIT_LIST_HEAD(&probe_ent->node);
685
686 probe_ent->sht = adma_port_info[board_idx].sht;
687 probe_ent->port_flags = adma_port_info[board_idx].flags;
688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
691 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
692
693 probe_ent->irq = pdev->irq;
694 probe_ent->irq_flags = IRQF_SHARED;
695 probe_ent->mmio_base = mmio_base;
696 probe_ent->n_ports = ADMA_PORTS;
697
698 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
699 adma_ata_setup_port(&probe_ent->port[port_no],
700 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
701 }
702
703 pci_set_master(pdev);
704
705 /* initialize adapter */
706 adma_host_init(board_idx, probe_ent);
707
708 rc = ata_device_add(probe_ent);
709 kfree(probe_ent);
710 if (rc != ADMA_PORTS)
711 goto err_out_iounmap;
712 return 0;
713
714err_out_iounmap:
715 pci_iounmap(pdev, mmio_base);
716err_out_regions:
717 pci_release_regions(pdev);
718err_out:
719 pci_disable_device(pdev);
720 return rc;
721}
722
723static int __init adma_ata_init(void)
724{
725 return pci_register_driver(&adma_ata_pci_driver);
726}
727
728static void __exit adma_ata_exit(void)
729{
730 pci_unregister_driver(&adma_ata_pci_driver);
731}
732
733MODULE_AUTHOR("Mark Lord");
734MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
735MODULE_LICENSE("GPL");
736MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
737MODULE_VERSION(DRV_VERSION);
738
739module_init(adma_ata_init);
740module_exit(adma_ata_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
new file mode 100644
index 000000000000..fdce6e07ecd2
--- /dev/null
+++ b/drivers/ata/sata_mv.c
@@ -0,0 +1,2465 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/dma-mapping.h>
33#include <linux/device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h>
37#include <asm/io.h>
38
39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7"
41
42enum {
43 /* BARs are enumerated in pci_resource_start() terms */
44 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
45 MV_IO_BAR = 2, /* offset 0x18: IO space */
46 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
47
48 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
49 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
50
51 MV_PCI_REG_BASE = 0,
52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
53 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
54 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
55 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
56 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
57 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
58
59 MV_SATAHC0_REG_BASE = 0x20000,
60 MV_FLASH_CTL = 0x1046c,
61 MV_GPIO_PORT_CTL = 0x104f0,
62 MV_RESET_CFG = 0x180d8,
63
64 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
65 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
66 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
67 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
68
69 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
70
71 MV_MAX_Q_DEPTH = 32,
72 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
73
74 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
75 * CRPB needs alignment on a 256B boundary. Size == 256B
76 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
77 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
78 */
79 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
80 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
81 MV_MAX_SG_CT = 176,
82 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
83 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
84
85 MV_PORTS_PER_HC = 4,
86 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
87 MV_PORT_HC_SHIFT = 2,
88 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
89 MV_PORT_MASK = 3,
90
91 /* Host Flags */
92 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
109 /* PCI interface registers */
110
111 PCI_COMMAND_OFS = 0xc00,
112
113 PCI_MAIN_CMD_STS_OFS = 0xd30,
114 STOP_PCI_MASTER = (1 << 2),
115 PCI_MASTER_EMPTY = (1 << 3),
116 GLOB_SFT_RST = (1 << 4),
117
118 MV_PCI_MODE = 0xd00,
119 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
120 MV_PCI_DISC_TIMER = 0xd04,
121 MV_PCI_MSI_TRIGGER = 0xc38,
122 MV_PCI_SERR_MASK = 0xc28,
123 MV_PCI_XBAR_TMOUT = 0x1d04,
124 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
125 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
126 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
127 MV_PCI_ERR_COMMAND = 0x1d50,
128
129 PCI_IRQ_CAUSE_OFS = 0x1d58,
130 PCI_IRQ_MASK_OFS = 0x1d5c,
131 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
132
133 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
134 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
135 PORT0_ERR = (1 << 0), /* shift by port # */
136 PORT0_DONE = (1 << 1), /* shift by port # */
137 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
138 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
139 PCI_ERR = (1 << 18),
140 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
141 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
142 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
143 GPIO_INT = (1 << 22),
144 SELF_INT = (1 << 23),
145 TWSI_INT = (1 << 24),
146 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
147 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
148 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
149 HC_MAIN_RSVD),
150
151 /* SATAHC registers */
152 HC_CFG_OFS = 0,
153
154 HC_IRQ_CAUSE_OFS = 0x14,
155 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
156 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
157 DEV_IRQ = (1 << 8), /* shift by port # */
158
159 /* Shadow block registers */
160 SHD_BLK_OFS = 0x100,
161 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
162
163 /* SATA registers */
164 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
165 SATA_ACTIVE_OFS = 0x350,
166 PHY_MODE3 = 0x310,
167 PHY_MODE4 = 0x314,
168 PHY_MODE2 = 0x330,
169 MV5_PHY_MODE = 0x74,
170 MV5_LT_MODE = 0x30,
171 MV5_PHY_CTL = 0x0C,
172 SATA_INTERFACE_CTL = 0x050,
173
174 MV_M2_PREAMP_MASK = 0x7e0,
175
176 /* Port registers */
177 EDMA_CFG_OFS = 0,
178 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
179 EDMA_CFG_NCQ = (1 << 5),
180 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
181 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
182 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
183
184 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
185 EDMA_ERR_IRQ_MASK_OFS = 0xc,
186 EDMA_ERR_D_PAR = (1 << 0),
187 EDMA_ERR_PRD_PAR = (1 << 1),
188 EDMA_ERR_DEV = (1 << 2),
189 EDMA_ERR_DEV_DCON = (1 << 3),
190 EDMA_ERR_DEV_CON = (1 << 4),
191 EDMA_ERR_SERR = (1 << 5),
192 EDMA_ERR_SELF_DIS = (1 << 7),
193 EDMA_ERR_BIST_ASYNC = (1 << 8),
194 EDMA_ERR_CRBQ_PAR = (1 << 9),
195 EDMA_ERR_CRPB_PAR = (1 << 10),
196 EDMA_ERR_INTRL_PAR = (1 << 11),
197 EDMA_ERR_IORDY = (1 << 12),
198 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
199 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
200 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
201 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
202 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
203 EDMA_ERR_TRANS_PROTO = (1 << 31),
204 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
205 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
206 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
207 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
208 EDMA_ERR_LNK_DATA_RX |
209 EDMA_ERR_LNK_DATA_TX |
210 EDMA_ERR_TRANS_PROTO),
211
212 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
213 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
214
215 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
216 EDMA_REQ_Q_PTR_SHIFT = 5,
217
218 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
219 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
220 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
221 EDMA_RSP_Q_PTR_SHIFT = 3,
222
223 EDMA_CMD_OFS = 0x28,
224 EDMA_EN = (1 << 0),
225 EDMA_DS = (1 << 1),
226 ATA_RST = (1 << 2),
227
228 EDMA_IORDY_TMOUT = 0x34,
229 EDMA_ARB_CFG = 0x38,
230
231 /* Host private flags (hp_flags) */
232 MV_HP_FLAG_MSI = (1 << 0),
233 MV_HP_ERRATA_50XXB0 = (1 << 1),
234 MV_HP_ERRATA_50XXB2 = (1 << 2),
235 MV_HP_ERRATA_60X1B2 = (1 << 3),
236 MV_HP_ERRATA_60X1C0 = (1 << 4),
237 MV_HP_ERRATA_XX42A0 = (1 << 5),
238 MV_HP_50XX = (1 << 6),
239 MV_HP_GEN_IIE = (1 << 7),
240
241 /* Port private flags (pp_flags) */
242 MV_PP_FLAG_EDMA_EN = (1 << 0),
243 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
244};
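/*
 * Editorial sketch of the port-numbering math encoded by
 * MV_PORT_HC_SHIFT and MV_PORT_MASK above; the helper names are
 * illustrative, not part of the driver.
 */
static inline unsigned int mv_hc_of(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;	/* e.g. global port 6 -> HC 1 */
}

static inline unsigned int mv_hardport_of(unsigned int port)
{
	return port & MV_PORT_MASK;		/* e.g. global port 6 -> hard port 2 */
}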
245
246#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
247#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
248#define IS_GEN_I(hpriv) IS_50XX(hpriv)
249#define IS_GEN_II(hpriv) IS_60XX(hpriv)
250#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251
252enum {
253 /* Our DMA boundary is determined by an ePRD being unable to handle
254 * anything larger than 64KB
255 */
256 MV_DMA_BOUNDARY = 0xffffU,
257
258 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
259
260 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
261};
262
263enum chip_type {
264 chip_504x,
265 chip_508x,
266 chip_5080,
267 chip_604x,
268 chip_608x,
269 chip_6042,
270 chip_7042,
271};
272
273/* Command ReQuest Block: 32B */
274struct mv_crqb {
275 __le32 sg_addr;
276 __le32 sg_addr_hi;
277 __le16 ctrl_flags;
278 __le16 ata_cmd[11];
279};
280
281struct mv_crqb_iie {
282 __le32 addr;
283 __le32 addr_hi;
284 __le32 flags;
285 __le32 len;
286 __le32 ata_cmd[4];
287};
288
289/* Command ResPonse Block: 8B */
290struct mv_crpb {
291 __le16 id;
292 __le16 flags;
293 __le32 tmstmp;
294};
295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg {
298 __le32 addr;
299 __le32 flags_size;
300 __le32 addr_hi;
301 __le32 reserved;
302};
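/*
 * Editorial arithmetic check on the per-port DMA memory sized above:
 * 32 CRQBs * 32 bytes (1KB, 1KB aligned) + 32 CRPBs * 8 bytes (256B,
 * 256B aligned) + 176 ePRDs * 16 bytes (2816B) = 4096 bytes, which
 * is the 4KB MV_PORT_PRIV_DMA_SZ quoted in the comment further up.
 */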
303
304struct mv_port_priv {
305 struct mv_crqb *crqb;
306 dma_addr_t crqb_dma;
307 struct mv_crpb *crpb;
308 dma_addr_t crpb_dma;
309 struct mv_sg *sg_tbl;
310 dma_addr_t sg_tbl_dma;
311 u32 pp_flags;
312};
313
314struct mv_port_signal {
315 u32 amps;
316 u32 pre;
317};
318
319struct mv_host_priv;
320struct mv_hw_ops {
321 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
322 unsigned int port);
323 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
324 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
325 void __iomem *mmio);
326 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
327 unsigned int n_hc);
328 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
329 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
330};
331
332struct mv_host_priv {
333 u32 hp_flags;
334 struct mv_port_signal signal[8];
335 const struct mv_hw_ops *ops;
336};
337
338static void mv_irq_clear(struct ata_port *ap);
339static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
340static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
341static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host *host);
346static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc);
349static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
350static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
351static irqreturn_t mv_interrupt(int irq, void *dev_instance,
352 struct pt_regs *regs);
353static void mv_eng_timeout(struct ata_port *ap);
354static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
355
356static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
357 unsigned int port);
358static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
359static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
360 void __iomem *mmio);
361static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
362 unsigned int n_hc);
363static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
364static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
365
366static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
367 unsigned int port);
368static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
369static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
370 void __iomem *mmio);
371static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
372 unsigned int n_hc);
373static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
374static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
375static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
376 unsigned int port_no);
377static void mv_stop_and_reset(struct ata_port *ap);
378
379static struct scsi_host_template mv_sht = {
380 .module = THIS_MODULE,
381 .name = DRV_NAME,
382 .ioctl = ata_scsi_ioctl,
383 .queuecommand = ata_scsi_queuecmd,
384 .can_queue = MV_USE_Q_DEPTH,
385 .this_id = ATA_SHT_THIS_ID,
386 .sg_tablesize = MV_MAX_SG_CT / 2,
387 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
388 .emulated = ATA_SHT_EMULATED,
389 .use_clustering = ATA_SHT_USE_CLUSTERING,
390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
394 .bios_param = ata_std_bios_param,
395};
396
397static const struct ata_port_operations mv5_ops = {
398 .port_disable = ata_port_disable,
399
400 .tf_load = ata_tf_load,
401 .tf_read = ata_tf_read,
402 .check_status = ata_check_status,
403 .exec_command = ata_exec_command,
404 .dev_select = ata_std_dev_select,
405
406 .phy_reset = mv_phy_reset,
407
408 .qc_prep = mv_qc_prep,
409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
411
412 .eng_timeout = mv_eng_timeout,
413
414 .irq_handler = mv_interrupt,
415 .irq_clear = mv_irq_clear,
416
417 .scr_read = mv5_scr_read,
418 .scr_write = mv5_scr_write,
419
420 .port_start = mv_port_start,
421 .port_stop = mv_port_stop,
422 .host_stop = mv_host_stop,
423};
424
425static const struct ata_port_operations mv6_ops = {
426 .port_disable = ata_port_disable,
427
428 .tf_load = ata_tf_load,
429 .tf_read = ata_tf_read,
430 .check_status = ata_check_status,
431 .exec_command = ata_exec_command,
432 .dev_select = ata_std_dev_select,
433
434 .phy_reset = mv_phy_reset,
435
436 .qc_prep = mv_qc_prep,
437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
439
440 .eng_timeout = mv_eng_timeout,
441
442 .irq_handler = mv_interrupt,
443 .irq_clear = mv_irq_clear,
444
445 .scr_read = mv_scr_read,
446 .scr_write = mv_scr_write,
447
448 .port_start = mv_port_start,
449 .port_stop = mv_port_stop,
450 .host_stop = mv_host_stop,
451};
452
453static const struct ata_port_operations mv_iie_ops = {
454 .port_disable = ata_port_disable,
455
456 .tf_load = ata_tf_load,
457 .tf_read = ata_tf_read,
458 .check_status = ata_check_status,
459 .exec_command = ata_exec_command,
460 .dev_select = ata_std_dev_select,
461
462 .phy_reset = mv_phy_reset,
463
464 .qc_prep = mv_qc_prep_iie,
465 .qc_issue = mv_qc_issue,
466
467 .eng_timeout = mv_eng_timeout,
468
469 .irq_handler = mv_interrupt,
470 .irq_clear = mv_irq_clear,
471
472 .scr_read = mv_scr_read,
473 .scr_write = mv_scr_write,
474
475 .port_start = mv_port_start,
476 .port_stop = mv_port_stop,
477 .host_stop = mv_host_stop,
478};
479
480static const struct ata_port_info mv_port_info[] = {
481 { /* chip_504x */
482 .sht = &mv_sht,
483 .flags = MV_COMMON_FLAGS,
484 .pio_mask = 0x1f, /* pio0-4 */
485 .udma_mask = 0x7f, /* udma0-6 */
486 .port_ops = &mv5_ops,
487 },
488 { /* chip_508x */
489 .sht = &mv_sht,
490 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 .pio_mask = 0x1f, /* pio0-4 */
492 .udma_mask = 0x7f, /* udma0-6 */
493 .port_ops = &mv5_ops,
494 },
495 { /* chip_5080 */
496 .sht = &mv_sht,
497 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 .pio_mask = 0x1f, /* pio0-4 */
499 .udma_mask = 0x7f, /* udma0-6 */
500 .port_ops = &mv5_ops,
501 },
502 { /* chip_604x */
503 .sht = &mv_sht,
504 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 .pio_mask = 0x1f, /* pio0-4 */
506 .udma_mask = 0x7f, /* udma0-6 */
507 .port_ops = &mv6_ops,
508 },
509 { /* chip_608x */
510 .sht = &mv_sht,
511 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 MV_FLAG_DUAL_HC),
513 .pio_mask = 0x1f, /* pio0-4 */
514 .udma_mask = 0x7f, /* udma0-6 */
515 .port_ops = &mv6_ops,
516 },
517 { /* chip_6042 */
518 .sht = &mv_sht,
519 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 .pio_mask = 0x1f, /* pio0-4 */
521 .udma_mask = 0x7f, /* udma0-6 */
522 .port_ops = &mv_iie_ops,
523 },
524 { /* chip_7042 */
525 .sht = &mv_sht,
526 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
527 MV_FLAG_DUAL_HC),
528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */
530 .port_ops = &mv_iie_ops,
531 },
532};
533
534static const struct pci_device_id mv_pci_tbl[] = {
535 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
539
540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
541 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
542 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
543 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
544 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
545
546 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
547 {} /* terminate list */
548};
549
550static struct pci_driver mv_pci_driver = {
551 .name = DRV_NAME,
552 .id_table = mv_pci_tbl,
553 .probe = mv_init_one,
554 .remove = ata_pci_remove_one,
555};
556
557static const struct mv_hw_ops mv5xxx_ops = {
558 .phy_errata = mv5_phy_errata,
559 .enable_leds = mv5_enable_leds,
560 .read_preamp = mv5_read_preamp,
561 .reset_hc = mv5_reset_hc,
562 .reset_flash = mv5_reset_flash,
563 .reset_bus = mv5_reset_bus,
564};
565
566static const struct mv_hw_ops mv6xxx_ops = {
567 .phy_errata = mv6_phy_errata,
568 .enable_leds = mv6_enable_leds,
569 .read_preamp = mv6_read_preamp,
570 .reset_hc = mv6_reset_hc,
571 .reset_flash = mv6_reset_flash,
572 .reset_bus = mv_reset_pci_bus,
573};
574
575/*
576 * module options
577 */
578static int msi;	/* Use PCI MSI; either zero (off, default) or non-zero */
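/* Usage sketch (hypothetical invocation, not part of the driver itself):
 * loading with "modprobe sata_mv msi=1" makes mv_init_one() below attempt
 * pci_enable_msi(); failure there, or msi=0, falls back to legacy INTx.
 */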
579
580
581/*
582 * Functions
583 */
584
585static inline void writelfl(unsigned long data, void __iomem *addr)
586{
587 writel(data, addr);
588 (void) readl(addr); /* flush to avoid PCI posted write */
589}
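/* Why the dummy readl() above: MMIO writes may be posted (buffered) by PCI
 * bridges, so a later delay or dependent access could run before the chip
 * actually sees the write.  Reading any register back from the same device
 * forces posted writes to complete.  A sketch of the pattern this driver
 * relies on:
 *
 *	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 *	udelay(25);	(the disable has already reached the chip here)
 */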
590
591static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592{
593 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594}
595
596static inline unsigned int mv_hc_from_port(unsigned int port)
597{
598 return port >> MV_PORT_HC_SHIFT;
599}
600
601static inline unsigned int mv_hardport_from_port(unsigned int port)
602{
603 return port & MV_PORT_MASK;
604}
605
606static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607 unsigned int port)
608{
609 return mv_hc_base(base, mv_hc_from_port(port));
610}
611
612static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613{
614 return mv_hc_base_from_port(base, port) +
615 MV_SATAHC_ARBTR_REG_SZ +
616 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617}
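/* Worked example of the decode helpers above, assuming the usual values
 * MV_PORT_HC_SHIFT == 2 and MV_PORT_MASK == 3 defined earlier in this file:
 * for port 5, hc = 5 >> 2 = 1 and hardport = 5 & 3 = 1, so its registers
 * live at
 *
 *	mv_hc_base(base, 1) + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ
 */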
618
619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{
621 return mv_port_base(ap->host->mmio_base, ap->port_no);
622}
623
624static inline int mv_get_hc_count(unsigned long port_flags)
625{
626 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627}
628
629static void mv_irq_clear(struct ata_port *ap)
630{
631}
632
633/**
634 * mv_start_dma - Enable eDMA engine
635 * @base: port base address
636 * @pp: port private data
637 *
638 * Verify the local cache of the eDMA state is accurate with a
639 * WARN_ON.
640 *
641 * LOCKING:
642 * Inherited from caller.
643 */
644static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645{
646 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649 }
650 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651}
652
653/**
654 * mv_stop_dma - Disable eDMA engine
655 * @ap: ATA channel to manipulate
656 *
657 * Verify the local cache of the eDMA state is accurate with a
658 * WARN_ON.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663static void mv_stop_dma(struct ata_port *ap)
664{
665 void __iomem *port_mmio = mv_ap_base(ap);
666 struct mv_port_priv *pp = ap->private_data;
667 u32 reg;
668 int i;
669
670 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671 /* Disable EDMA if active. The disable bit auto clears.
672 */
673 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675 } else {
676 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677 }
678
679 /* now properly wait for the eDMA to stop */
680 for (i = 1000; i > 0; i--) {
681 reg = readl(port_mmio + EDMA_CMD_OFS);
682 if (!(EDMA_EN & reg)) {
683 break;
684 }
685 udelay(100);
686 }
687
688 if (EDMA_EN & reg) {
689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690 /* FIXME: Consider doing a reset here to recover */
691 }
692}
693
694#ifdef ATA_DEBUG
695static void mv_dump_mem(void __iomem *start, unsigned bytes)
696{
697 int b, w;
698 for (b = 0; b < bytes; ) {
699 DPRINTK("%p: ", start + b);
700 for (w = 0; b < bytes && w < 4; w++) {
701			printk("%08x ", readl(start + b));
702 b += sizeof(u32);
703 }
704 printk("\n");
705 }
706}
707#endif
708
709static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
710{
711#ifdef ATA_DEBUG
712 int b, w;
713 u32 dw;
714 for (b = 0; b < bytes; ) {
715 DPRINTK("%02x: ", b);
716 for (w = 0; b < bytes && w < 4; w++) {
717			(void) pci_read_config_dword(pdev, b, &dw);
718			printk("%08x ", dw);
719 b += sizeof(u32);
720 }
721 printk("\n");
722 }
723#endif
724}
725static void mv_dump_all_regs(void __iomem *mmio_base, int port,
726 struct pci_dev *pdev)
727{
728#ifdef ATA_DEBUG
729 void __iomem *hc_base = mv_hc_base(mmio_base,
730 port >> MV_PORT_HC_SHIFT);
731 void __iomem *port_base;
732 int start_port, num_ports, p, start_hc, num_hcs, hc;
733
734 if (0 > port) {
735 start_hc = start_port = 0;
736		num_ports = 8;	/* should be benign for 4-port devices */
737 num_hcs = 2;
738 } else {
739 start_hc = port >> MV_PORT_HC_SHIFT;
740 start_port = port;
741 num_ports = num_hcs = 1;
742 }
743 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
744 num_ports > 1 ? num_ports - 1 : start_port);
745
746 if (NULL != pdev) {
747 DPRINTK("PCI config space regs:\n");
748 mv_dump_pci_cfg(pdev, 0x68);
749 }
750 DPRINTK("PCI regs:\n");
751 mv_dump_mem(mmio_base+0xc00, 0x3c);
752 mv_dump_mem(mmio_base+0xd00, 0x34);
753 mv_dump_mem(mmio_base+0xf00, 0x4);
754 mv_dump_mem(mmio_base+0x1d00, 0x6c);
755 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
756 hc_base = mv_hc_base(mmio_base, hc);
757 DPRINTK("HC regs (HC %i):\n", hc);
758 mv_dump_mem(hc_base, 0x1c);
759 }
760 for (p = start_port; p < start_port + num_ports; p++) {
761 port_base = mv_port_base(mmio_base, p);
762		DPRINTK("EDMA regs (port %i):\n", p);
763		mv_dump_mem(port_base, 0x54);
764		DPRINTK("SATA regs (port %i):\n", p);
765 mv_dump_mem(port_base+0x300, 0x60);
766 }
767#endif
768}
769
770static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771{
772 unsigned int ofs;
773
774 switch (sc_reg_in) {
775 case SCR_STATUS:
776 case SCR_CONTROL:
777 case SCR_ERROR:
778 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779 break;
780 case SCR_ACTIVE:
781 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
782 break;
783 default:
784 ofs = 0xffffffffU;
785 break;
786 }
787 return ofs;
788}
789
790static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791{
792 unsigned int ofs = mv_scr_offset(sc_reg_in);
793
794 if (0xffffffffU != ofs) {
795 return readl(mv_ap_base(ap) + ofs);
796 } else {
797 return (u32) ofs;
798 }
799}
800
801static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802{
803 unsigned int ofs = mv_scr_offset(sc_reg_in);
804
805 if (0xffffffffU != ofs) {
806 writelfl(val, mv_ap_base(ap) + ofs);
807 }
808}
809
810/**
811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host: host data structure
813 *
814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop.
816 *
817 * LOCKING:
818 * Inherited from caller.
819 */
820static void mv_host_stop(struct ata_host *host)
821{
822 struct mv_host_priv *hpriv = host->private_data;
823 struct pci_dev *pdev = to_pci_dev(host->dev);
824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev);
827 } else {
828 pci_intx(pdev, 0);
829 }
830 kfree(hpriv);
831 ata_host_stop(host);
832}
833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835{
836 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837}
838
839static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840{
841 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842
843 /* set up non-NCQ EDMA configuration */
844 cfg &= ~0x1f; /* clear queue depth */
845 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
846	cfg &= ~(1 << 9);	/* disable eQue */
847
848 if (IS_GEN_I(hpriv))
849		cfg |= (1 << 8);	/* enable config burst size mask */
850
851 else if (IS_GEN_II(hpriv))
852 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853
854 else if (IS_GEN_IIE(hpriv)) {
855		cfg |= (1 << 23);	/* disable RX PM port mask */
856		cfg &= ~(1 << 16);	/* disable FIS-based switching (for now) */
857		cfg &= ~(1 << 19);	/* disable 128-entry queue (for now?) */
858		cfg |= (1 << 18);	/* enable early completion */
859		cfg |= (1 << 17);	/* enable host queue cache */
860		cfg |= (1 << 22);	/* enable cut-through */
861 }
862
863 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864}
865
866/**
867 * mv_port_start - Port specific init/start routine.
868 * @ap: ATA channel to manipulate
869 *
870 * Allocate and point to DMA memory, init port private memory,
871 * zero indices.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876static int mv_port_start(struct ata_port *ap)
877{
878 struct device *dev = ap->host->dev;
879 struct mv_host_priv *hpriv = ap->host->private_data;
880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem;
883 dma_addr_t mem_dma;
884 int rc = -ENOMEM;
885
886 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887 if (!pp)
888 goto err_out;
889 memset(pp, 0, sizeof(*pp));
890
891 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892 GFP_KERNEL);
893 if (!mem)
894 goto err_out_pp;
895 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896
897 rc = ata_pad_alloc(ap, dev);
898 if (rc)
899 goto err_out_priv;
900
901 /* First item in chunk of DMA memory:
902 * 32-slot command request table (CRQB), 32 bytes each in size
903 */
904 pp->crqb = mem;
905 pp->crqb_dma = mem_dma;
906 mem += MV_CRQB_Q_SZ;
907 mem_dma += MV_CRQB_Q_SZ;
908
909 /* Second item:
910 * 32-slot command response table (CRPB), 8 bytes each in size
911 */
912 pp->crpb = mem;
913 pp->crpb_dma = mem_dma;
914 mem += MV_CRPB_Q_SZ;
915 mem_dma += MV_CRPB_Q_SZ;
916
917 /* Third item:
918 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919 */
920 pp->sg_tbl = mem;
921 pp->sg_tbl_dma = mem_dma;
922
923 mv_edma_cfg(hpriv, port_mmio);
924
925 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928
929 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930 writelfl(pp->crqb_dma & 0xffffffff,
931 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932 else
933 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934
935 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936
937 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938 writelfl(pp->crpb_dma & 0xffffffff,
939 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940 else
941 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942
943 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945
946 /* Don't turn on EDMA here...do it before DMA commands only. Else
947 * we'll be unable to send non-data, PIO, etc due to restricted access
948 * to shadow regs.
949 */
950 ap->private_data = pp;
951 return 0;
952
953err_out_priv:
954 mv_priv_free(pp, dev);
955err_out_pp:
956 kfree(pp);
957err_out:
958 return rc;
959}
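/* Layout sketch of the MV_PORT_PRIV_DMA_SZ coherent chunk carved up in
 * mv_port_start() above (sizes follow from the comments there: 32 CRQBs of
 * 32 bytes, 32 CRPBs of 8 bytes; the ePRD count depends on MV_MAX_SG_CT):
 *
 *	mem + 0x000: CRQB ring   (32 * 32 = 1024 bytes)
 *	mem + 0x400: CRPB ring   (32 *  8 =  256 bytes)
 *	mem + 0x500: ePRD table  (16 bytes per scatter/gather entry)
 */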
960
961/**
962 * mv_port_stop - Port specific cleanup/stop routine.
963 * @ap: ATA channel to manipulate
964 *
965 * Stop DMA, cleanup port memory.
966 *
967 * LOCKING:
968 * This routine uses the host lock to protect the DMA stop.
969 */
970static void mv_port_stop(struct ata_port *ap)
971{
972 struct device *dev = ap->host->dev;
973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags;
975
976 spin_lock_irqsave(&ap->host->lock, flags);
977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host->lock, flags);
979
980 ap->private_data = NULL;
981 ata_pad_free(ap, dev);
982 mv_priv_free(pp, dev);
983 kfree(pp);
984}
985
986/**
987 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988 * @qc: queued command whose SG list to source from
989 *
990 * Populate the SG list and mark the last entry.
991 *
992 * LOCKING:
993 * Inherited from caller.
994 */
995static void mv_fill_sg(struct ata_queued_cmd *qc)
996{
997 struct mv_port_priv *pp = qc->ap->private_data;
998 unsigned int i = 0;
999 struct scatterlist *sg;
1000
1001 ata_for_each_sg(sg, qc) {
1002 dma_addr_t addr;
1003 u32 sg_len, len, offset;
1004
1005 addr = sg_dma_address(sg);
1006 sg_len = sg_dma_len(sg);
1007
1008 while (sg_len) {
1009 offset = addr & MV_DMA_BOUNDARY;
1010 len = sg_len;
1011 if ((offset + sg_len) > 0x10000)
1012 len = 0x10000 - offset;
1013
1014 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016 pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017
1018 sg_len -= len;
1019 addr += len;
1020
1021 if (!sg_len && ata_sg_is_last(sg, qc))
1022 pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023
1024 i++;
1025 }
1026 }
1027}
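/* Worked example of the 64 KB split in mv_fill_sg(): an SG segment with
 * addr = 0x1234f000 and sg_len = 0x3000 has offset 0xf000 within its 64 KB
 * window, so it is emitted as two ePRDs, len 0x1000 (up to the boundary at
 * 0x12350000) and then len 0x2000.  A full 64 KB piece is stored with
 * flags_size == 0, since len & 0xffff wraps 0x10000 to 0 -- the hardware
 * presumably interprets a zero byte count as 64 KB, as IDE PRDs do.
 */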
1028
1029static inline unsigned mv_inc_q_index(unsigned index)
1030{
1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032}
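/* e.g. with the usual MV_MAX_Q_DEPTH of 32 the mask is 31, so the index
 * walks 0, 1, ... 31 and then wraps back to 0.
 */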
1033
1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035{
1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037 (last ? CRQB_CMD_LAST : 0);
1038 *cmdw = cpu_to_le16(tmp);
1039}
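/* Command-word layout produced above (bit positions follow from the
 * CRQB_CMD_* constants defined earlier; this sketch assumes the usual
 * CRQB_CMD_ADDR_SHIFT of 8):
 *
 *	bit  15   : last-word flag
 *	bits 12-11: control/status select (CRQB_CMD_CS)
 *	bits 10-8 : shadow register address
 *	bits  7-0 : data byte written to that register
 */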
1040
1041/**
1042 * mv_qc_prep - Host specific command preparation.
1043 * @qc: queued command to prepare
1044 *
1045 * This routine simply redirects to the general purpose routine
1046 * if command is not DMA. Else, it handles prep of the CRQB
1047 * (command request block), does some sanity checking, and calls
1048 * the SG load routine.
1049 *
1050 * LOCKING:
1051 * Inherited from caller.
1052 */
1053static void mv_qc_prep(struct ata_queued_cmd *qc)
1054{
1055 struct ata_port *ap = qc->ap;
1056 struct mv_port_priv *pp = ap->private_data;
1057 __le16 *cw;
1058 struct ata_taskfile *tf;
1059 u16 flags = 0;
1060 unsigned in_index;
1061
1062 if (ATA_PROT_DMA != qc->tf.protocol)
1063 return;
1064
1065 /* Fill in command request block
1066 */
1067 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1068 flags |= CRQB_FLAG_READ;
1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070 flags |= qc->tag << CRQB_TAG_SHIFT;
1071
1072 /* get current queue index from hardware */
1073 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1074 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1075
1076 pp->crqb[in_index].sg_addr =
1077 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1078 pp->crqb[in_index].sg_addr_hi =
1079 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1080 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1081
1082 cw = &pp->crqb[in_index].ata_cmd[0];
1083 tf = &qc->tf;
1084
1085	/* Sadly, the CRQB cannot accommodate all registers--there are
1086 * only 11 bytes...so we must pick and choose required
1087 * registers based on the command. So, we drop feature and
1088 * hob_feature for [RW] DMA commands, but they are needed for
1089 * NCQ. NCQ will drop hob_nsect.
1090 */
1091 switch (tf->command) {
1092 case ATA_CMD_READ:
1093 case ATA_CMD_READ_EXT:
1094 case ATA_CMD_WRITE:
1095 case ATA_CMD_WRITE_EXT:
1096 case ATA_CMD_WRITE_FUA_EXT:
1097 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1098 break;
1099#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1100 case ATA_CMD_FPDMA_READ:
1101 case ATA_CMD_FPDMA_WRITE:
1102 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1103 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1104 break;
1105#endif /* FIXME: remove this line when NCQ added */
1106 default:
1107 /* The only other commands EDMA supports in non-queued and
1108 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1109 * of which are defined/used by Linux. If we get here, this
1110 * driver needs work.
1111 *
1112 * FIXME: modify libata to give qc_prep a return value and
1113 * return error here.
1114 */
1115 BUG_ON(tf->command);
1116 break;
1117 }
1118 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1119 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1120 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1121 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1122 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1123 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1124 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1125 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1126 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1127
1128 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1129 return;
1130 mv_fill_sg(qc);
1131}
1132
1133/**
1134 * mv_qc_prep_iie - Host specific command preparation.
1135 * @qc: queued command to prepare
1136 *
1137 * This routine simply redirects to the general purpose routine
1138 * if command is not DMA. Else, it handles prep of the CRQB
1139 * (command request block), does some sanity checking, and calls
1140 * the SG load routine.
1141 *
1142 * LOCKING:
1143 * Inherited from caller.
1144 */
1145static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 struct mv_port_priv *pp = ap->private_data;
1149 struct mv_crqb_iie *crqb;
1150 struct ata_taskfile *tf;
1151 unsigned in_index;
1152 u32 flags = 0;
1153
1154 if (ATA_PROT_DMA != qc->tf.protocol)
1155 return;
1156
1157 /* Fill in Gen IIE command request block
1158 */
1159 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1160 flags |= CRQB_FLAG_READ;
1161
1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163 flags |= qc->tag << CRQB_TAG_SHIFT;
1164
1165 /* get current queue index from hardware */
1166 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1167 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1168
1169 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1170 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1171 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1172 crqb->flags = cpu_to_le32(flags);
1173
1174 tf = &qc->tf;
1175 crqb->ata_cmd[0] = cpu_to_le32(
1176 (tf->command << 16) |
1177 (tf->feature << 24)
1178 );
1179 crqb->ata_cmd[1] = cpu_to_le32(
1180 (tf->lbal << 0) |
1181 (tf->lbam << 8) |
1182 (tf->lbah << 16) |
1183 (tf->device << 24)
1184 );
1185 crqb->ata_cmd[2] = cpu_to_le32(
1186 (tf->hob_lbal << 0) |
1187 (tf->hob_lbam << 8) |
1188 (tf->hob_lbah << 16) |
1189 (tf->hob_feature << 24)
1190 );
1191 crqb->ata_cmd[3] = cpu_to_le32(
1192 (tf->nsect << 0) |
1193 (tf->hob_nsect << 8)
1194 );
1195
1196 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1197 return;
1198 mv_fill_sg(qc);
1199}
1200
1201/**
1202 * mv_qc_issue - Initiate a command to the host
1203 * @qc: queued command to start
1204 *
1205 * This routine simply redirects to the general purpose routine
1206 * if command is not DMA. Else, it sanity checks our local
1207 * caches of the request producer/consumer indices then enables
1208 * DMA and bumps the request producer index.
1209 *
1210 * LOCKING:
1211 * Inherited from caller.
1212 */
1213static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1214{
1215 void __iomem *port_mmio = mv_ap_base(qc->ap);
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 unsigned in_index;
1218 u32 in_ptr;
1219
1220 if (ATA_PROT_DMA != qc->tf.protocol) {
1221 /* We're about to send a non-EDMA capable command to the
1222 * port. Turn off EDMA so there won't be problems accessing
1223		 * the shadow block and other registers.
1224 */
1225 mv_stop_dma(qc->ap);
1226 return ata_qc_issue_prot(qc);
1227 }
1228
1229 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1230 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1231
1232 /* until we do queuing, the queue should be empty at this point */
1233 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1234 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1235
1236 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1237
1238 mv_start_dma(port_mmio, pp);
1239
1240 /* and write the request in pointer to kick the EDMA to life */
1241 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1242 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1243 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1244
1245 return 0;
1246}
1247
1248/**
1249 * mv_get_crpb_status - get status from most recently completed cmd
1250 * @ap: ATA channel to manipulate
1251 *
1252 * This routine is for use when the port is in DMA mode, when it
1253 * will be using the CRPB (command response block) method of
1254 * returning command completion information. We check indices
1255 * are good, grab status, and bump the response consumer index so
1256 * the EDMA knows we have caught up.
1257 *
1258 * LOCKING:
1259 * Inherited from caller.
1260 */
1261static u8 mv_get_crpb_status(struct ata_port *ap)
1262{
1263 void __iomem *port_mmio = mv_ap_base(ap);
1264 struct mv_port_priv *pp = ap->private_data;
1265 unsigned out_index;
1266 u32 out_ptr;
1267 u8 ata_status;
1268
1269 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1270 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1271
1272 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1273 >> CRPB_FLAG_STATUS_SHIFT;
1274
1275 /* increment our consumer index... */
1276 out_index = mv_inc_q_index(out_index);
1277
1278 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1279 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1280 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1281
1282 /* write out our inc'd consumer index so EDMA knows we're caught up */
1283 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1284 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1285 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1286
1287 /* Return ATA status register for completed CRPB */
1288 return ata_status;
1289}
1290
1291/**
1292 * mv_err_intr - Handle error interrupts on the port
1293 * @ap: ATA channel to manipulate
1294 * @reset_allowed: bool: 0 == don't trigger from reset here
1295 *
1296 * In most cases, just clear the interrupt and move on. However,
1297 * some cases require an eDMA reset, which is done right before
1298 * the COMRESET in mv_phy_reset(). The SERR case requires a
1299 * clear of pending errors in the SATA SERROR register. Finally,
1300 * if the port disabled DMA, update our cached copy to match.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
1305static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1306{
1307 void __iomem *port_mmio = mv_ap_base(ap);
1308 u32 edma_err_cause, serr = 0;
1309
1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1311
1312 if (EDMA_ERR_SERR & edma_err_cause) {
1313 sata_scr_read(ap, SCR_ERROR, &serr);
1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1315 }
1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1317 struct mv_port_priv *pp = ap->private_data;
1318 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1319 }
1320 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1321 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
1322
1323 /* Clear EDMA now that SERR cleanup done */
1324 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1325
1326 /* check for fatal here and recover if needed */
1327 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1328 mv_stop_and_reset(ap);
1329}
1330
1331/**
1332 * mv_host_intr - Handle all interrupts on the given host controller
1333 * @host: host specific structure
1334 * @relevant: port error bits relevant to this host controller
1335 * @hc: which host controller we're to look at
1336 *
1337 * Read then write clear the HC interrupt status then walk each
1338 * port connected to the HC and see if it needs servicing. Port
1339 * success ints are reported in the HC interrupt status reg, the
1340 * port error ints are reported in the higher level main
1341 * interrupt status register and thus are passed in via the
1342 * 'relevant' argument.
1343 *
1344 * LOCKING:
1345 * Inherited from caller.
1346 */
1347static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1348{
1349 void __iomem *mmio = host->mmio_base;
1350 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1351 struct ata_queued_cmd *qc;
1352 u32 hc_irq_cause;
1353 int shift, port, port0, hard_port, handled;
1354 unsigned int err_mask;
1355
1356 if (hc == 0) {
1357 port0 = 0;
1358 } else {
1359 port0 = MV_PORTS_PER_HC;
1360 }
1361
1362 /* we'll need the HC success int register in most cases */
1363 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1364 if (hc_irq_cause) {
1365 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1366 }
1367
1368 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1369		hc, relevant, hc_irq_cause);
1370
1371 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1372 u8 ata_status = 0;
1373 struct ata_port *ap = host->ports[port];
1374 struct mv_port_priv *pp = ap->private_data;
1375
1376 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1377 handled = 0; /* ensure ata_status is set if handled++ */
1378
1379 /* Note that DEV_IRQ might happen spuriously during EDMA,
1380 * and should be ignored in such cases.
1381 * The cause of this is still under investigation.
1382 */
1383 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1384 /* EDMA: check for response queue interrupt */
1385 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1386 ata_status = mv_get_crpb_status(ap);
1387 handled = 1;
1388 }
1389 } else {
1390 /* PIO: check for device (drive) interrupt */
1391 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1392 ata_status = readb((void __iomem *)
1393 ap->ioaddr.status_addr);
1394 handled = 1;
1395 /* ignore spurious intr if drive still BUSY */
1396 if (ata_status & ATA_BUSY) {
1397 ata_status = 0;
1398 handled = 0;
1399 }
1400 }
1401 }
1402
1403 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1404 continue;
1405
1406 err_mask = ac_err_mask(ata_status);
1407
1408 shift = port << 1; /* (port * 2) */
1409 if (port >= MV_PORTS_PER_HC) {
1410 shift++; /* skip bit 8 in the HC Main IRQ reg */
1411 }
1412 if ((PORT0_ERR << shift) & relevant) {
1413 mv_err_intr(ap, 1);
1414 err_mask |= AC_ERR_OTHER;
1415 handled = 1;
1416 }
1417
1418 if (handled) {
1419 qc = ata_qc_from_tag(ap, ap->active_tag);
1420 if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1421 VPRINTK("port %u IRQ found for qc, "
1422					"ata_status 0x%x\n", port, ata_status);
1423 /* mark qc status appropriately */
1424 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1425 qc->err_mask |= err_mask;
1426 ata_qc_complete(qc);
1427 }
1428 }
1429 }
1430 }
1431 VPRINTK("EXIT\n");
1432}
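/* Worked example of the 'shift' math in mv_host_intr(): each port owns an
 * error/done bit pair in the main cause register, and bit 8 is skipped
 * between the two HCs.  With MV_PORTS_PER_HC == 4, port 2 maps to shift 4,
 * while port 5 (second HC) maps to 5 * 2 + 1 = 11, i.e. its error bit is
 * PORT0_ERR << 11.
 */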
1433
1434/**
1435 * mv_interrupt - Main interrupt event handler
1436 * @irq: unused
1437 * @dev_instance: private data; in this case the host structure
1438 * @regs: unused
1439 *
1440 * Read the read only register to determine if any host
1441 * controllers have pending interrupts. If so, call lower level
1442 * routine to handle. Also check for PCI errors which are only
1443 * reported here.
1444 *
1445 * LOCKING:
1446 * This routine holds the host lock while processing pending
1447 * interrupts.
1448 */
1449static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1450 struct pt_regs *regs)
1451{
1452 struct ata_host *host = dev_instance;
1453 unsigned int hc, handled = 0, n_hcs;
1454 void __iomem *mmio = host->mmio_base;
1455 struct mv_host_priv *hpriv;
1456 u32 irq_stat;
1457
1458 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1459
1460 /* check the cases where we either have nothing pending or have read
1461 * a bogus register value which can indicate HW removal or PCI fault
1462 */
1463 if (!irq_stat || (0xffffffffU == irq_stat)) {
1464 return IRQ_NONE;
1465 }
1466
1467 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1468 spin_lock(&host->lock);
1469
1470 for (hc = 0; hc < n_hcs; hc++) {
1471 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1472 if (relevant) {
1473 mv_host_intr(host, relevant, hc);
1474 handled++;
1475 }
1476 }
1477
1478 hpriv = host->private_data;
1479 if (IS_60XX(hpriv)) {
1480 /* deal with the interrupt coalescing bits */
1481 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1482 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1483 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1484 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1485 }
1486 }
1487
1488 if (PCI_ERR & irq_stat) {
1489 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1490 readl(mmio + PCI_IRQ_CAUSE_OFS));
1491
1492 DPRINTK("All regs @ PCI error\n");
1493 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1494
1495 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1496 handled++;
1497 }
1498 spin_unlock(&host->lock);
1499
1500 return IRQ_RETVAL(handled);
1501}
1502
1503static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1504{
1505 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1506 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1507
1508 return hc_mmio + ofs;
1509}
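/* e.g. port 5 is hardport 1 within its HC, so its 50xx PHY block sits at
 * hc_mmio + (1 + 1) * 0x100 = hc_mmio + 0x200.
 */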
1510
1511static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1512{
1513 unsigned int ofs;
1514
1515 switch (sc_reg_in) {
1516 case SCR_STATUS:
1517 case SCR_ERROR:
1518 case SCR_CONTROL:
1519 ofs = sc_reg_in * sizeof(u32);
1520 break;
1521 default:
1522 ofs = 0xffffffffU;
1523 break;
1524 }
1525 return ofs;
1526}
1527
1528static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1529{
1530 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1531 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1532
1533 if (ofs != 0xffffffffU)
1534 return readl(mmio + ofs);
1535 else
1536 return (u32) ofs;
1537}
1538
1539static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1540{
1541 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1542 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1543
1544 if (ofs != 0xffffffffU)
1545 writelfl(val, mmio + ofs);
1546}
1547
1548static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1549{
1550 u8 rev_id;
1551 int early_5080;
1552
1553 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1554
1555 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1556
1557 if (!early_5080) {
1558 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1559 tmp |= (1 << 0);
1560 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1561 }
1562
1563 mv_reset_pci_bus(pdev, mmio);
1564}
1565
1566static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1567{
1568 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1569}
1570
1571static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1572 void __iomem *mmio)
1573{
1574 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1575 u32 tmp;
1576
1577 tmp = readl(phy_mmio + MV5_PHY_MODE);
1578
1579 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1580 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1581}
1582
1583static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1584{
1585 u32 tmp;
1586
1587 writel(0, mmio + MV_GPIO_PORT_CTL);
1588
1589 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1590
1591 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1592 tmp |= ~(1 << 0);
1593 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1594}
1595
1596static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1597 unsigned int port)
1598{
1599 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1600 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1601 u32 tmp;
1602 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1603
1604 if (fix_apm_sq) {
1605 tmp = readl(phy_mmio + MV5_LT_MODE);
1606 tmp |= (1 << 19);
1607 writel(tmp, phy_mmio + MV5_LT_MODE);
1608
1609 tmp = readl(phy_mmio + MV5_PHY_CTL);
1610 tmp &= ~0x3;
1611 tmp |= 0x1;
1612 writel(tmp, phy_mmio + MV5_PHY_CTL);
1613 }
1614
1615 tmp = readl(phy_mmio + MV5_PHY_MODE);
1616 tmp &= ~mask;
1617 tmp |= hpriv->signal[port].pre;
1618 tmp |= hpriv->signal[port].amps;
1619 writel(tmp, phy_mmio + MV5_PHY_MODE);
1620}
1621
1622
1623#undef ZERO
1624#define ZERO(reg) writel(0, port_mmio + (reg))
1625static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1626 unsigned int port)
1627{
1628 void __iomem *port_mmio = mv_port_base(mmio, port);
1629
1630 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1631
1632 mv_channel_reset(hpriv, mmio, port);
1633
1634 ZERO(0x028); /* command */
1635 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1636 ZERO(0x004); /* timer */
1637 ZERO(0x008); /* irq err cause */
1638 ZERO(0x00c); /* irq err mask */
1639 ZERO(0x010); /* rq bah */
1640 ZERO(0x014); /* rq inp */
1641 ZERO(0x018); /* rq outp */
1642 ZERO(0x01c); /* respq bah */
1643 ZERO(0x024); /* respq outp */
1644 ZERO(0x020); /* respq inp */
1645 ZERO(0x02c); /* test control */
1646 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1647}
1648#undef ZERO
1649
1650#define ZERO(reg) writel(0, hc_mmio + (reg))
1651static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1652 unsigned int hc)
1653{
1654 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1655 u32 tmp;
1656
1657 ZERO(0x00c);
1658 ZERO(0x010);
1659 ZERO(0x014);
1660 ZERO(0x018);
1661
1662 tmp = readl(hc_mmio + 0x20);
1663 tmp &= 0x1c1c1c1c;
1664 tmp |= 0x03030303;
1665 writel(tmp, hc_mmio + 0x20);
1666}
1667#undef ZERO
1668
1669static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1670 unsigned int n_hc)
1671{
1672 unsigned int hc, port;
1673
1674 for (hc = 0; hc < n_hc; hc++) {
1675 for (port = 0; port < MV_PORTS_PER_HC; port++)
1676 mv5_reset_hc_port(hpriv, mmio,
1677 (hc * MV_PORTS_PER_HC) + port);
1678
1679 mv5_reset_one_hc(hpriv, mmio, hc);
1680 }
1681
1682 return 0;
1683}
1684
1685#undef ZERO
1686#define ZERO(reg) writel(0, mmio + (reg))
1687static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1688{
1689 u32 tmp;
1690
1691 tmp = readl(mmio + MV_PCI_MODE);
1692 tmp &= 0xff00ffff;
1693 writel(tmp, mmio + MV_PCI_MODE);
1694
1695 ZERO(MV_PCI_DISC_TIMER);
1696 ZERO(MV_PCI_MSI_TRIGGER);
1697 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1698 ZERO(HC_MAIN_IRQ_MASK_OFS);
1699 ZERO(MV_PCI_SERR_MASK);
1700 ZERO(PCI_IRQ_CAUSE_OFS);
1701 ZERO(PCI_IRQ_MASK_OFS);
1702 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1703 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1704 ZERO(MV_PCI_ERR_ATTRIBUTE);
1705 ZERO(MV_PCI_ERR_COMMAND);
1706}
1707#undef ZERO
1708
1709static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1710{
1711 u32 tmp;
1712
1713 mv5_reset_flash(hpriv, mmio);
1714
1715 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1716 tmp &= 0x3;
1717 tmp |= (1 << 5) | (1 << 6);
1718 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1719}
1720
1721/**
1722 * mv6_reset_hc - Perform the 6xxx global soft reset
1723 * @mmio: base address of the HBA
1724 *
1725 * This routine only applies to 6xxx parts.
1726 *
1727 * LOCKING:
1728 * Inherited from caller.
1729 */
1730static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1731 unsigned int n_hc)
1732{
1733 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1734 int i, rc = 0;
1735 u32 t;
1736
1737 /* Following procedure defined in PCI "main command and status
1738 * register" table.
1739 */
1740 t = readl(reg);
1741 writel(t | STOP_PCI_MASTER, reg);
1742
1743 for (i = 0; i < 1000; i++) {
1744 udelay(1);
1745 t = readl(reg);
1746 if (PCI_MASTER_EMPTY & t) {
1747 break;
1748 }
1749 }
1750 if (!(PCI_MASTER_EMPTY & t)) {
1751 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1752 rc = 1;
1753 goto done;
1754 }
1755
1756 /* set reset */
1757 i = 5;
1758 do {
1759 writel(t | GLOB_SFT_RST, reg);
1760 t = readl(reg);
1761 udelay(1);
1762 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1763
1764 if (!(GLOB_SFT_RST & t)) {
1765 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1766 rc = 1;
1767 goto done;
1768 }
1769
1770 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1771 i = 5;
1772 do {
1773 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1774 t = readl(reg);
1775 udelay(1);
1776 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1777
1778 if (GLOB_SFT_RST & t) {
1779 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1780 rc = 1;
1781 }
1782done:
1783 return rc;
1784}
1785
1786static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1787 void __iomem *mmio)
1788{
1789 void __iomem *port_mmio;
1790 u32 tmp;
1791
1792 tmp = readl(mmio + MV_RESET_CFG);
1793 if ((tmp & (1 << 0)) == 0) {
1794 hpriv->signal[idx].amps = 0x7 << 8;
1795 hpriv->signal[idx].pre = 0x1 << 5;
1796 return;
1797 }
1798
1799 port_mmio = mv_port_base(mmio, idx);
1800 tmp = readl(port_mmio + PHY_MODE2);
1801
1802 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1803 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1804}
1805
1806static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1807{
1808 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1809}
1810
1811static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1812 unsigned int port)
1813{
1814 void __iomem *port_mmio = mv_port_base(mmio, port);
1815
1816 u32 hp_flags = hpriv->hp_flags;
1817 int fix_phy_mode2 =
1818 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1819 int fix_phy_mode4 =
1820 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1821 u32 m2, tmp;
1822
1823 if (fix_phy_mode2) {
1824 m2 = readl(port_mmio + PHY_MODE2);
1825 m2 &= ~(1 << 16);
1826 m2 |= (1 << 31);
1827 writel(m2, port_mmio + PHY_MODE2);
1828
1829 udelay(200);
1830
1831 m2 = readl(port_mmio + PHY_MODE2);
1832 m2 &= ~((1 << 16) | (1 << 31));
1833 writel(m2, port_mmio + PHY_MODE2);
1834
1835 udelay(200);
1836 }
1837
1838 /* who knows what this magic does */
1839 tmp = readl(port_mmio + PHY_MODE3);
1840 tmp &= ~0x7F800000;
1841 tmp |= 0x2A800000;
1842 writel(tmp, port_mmio + PHY_MODE3);
1843
1844 if (fix_phy_mode4) {
1845 u32 m4;
1846
1847 m4 = readl(port_mmio + PHY_MODE4);
1848
1849 if (hp_flags & MV_HP_ERRATA_60X1B2)
1850 tmp = readl(port_mmio + 0x310);
1851
1852 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1853
1854 writel(m4, port_mmio + PHY_MODE4);
1855
1856 if (hp_flags & MV_HP_ERRATA_60X1B2)
1857 writel(tmp, port_mmio + 0x310);
1858 }
1859
1860 /* Revert values of pre-emphasis and signal amps to the saved ones */
1861 m2 = readl(port_mmio + PHY_MODE2);
1862
1863 m2 &= ~MV_M2_PREAMP_MASK;
1864 m2 |= hpriv->signal[port].amps;
1865 m2 |= hpriv->signal[port].pre;
1866 m2 &= ~(1 << 16);
1867
1868 /* according to mvSata 3.6.1, some IIE values are fixed */
1869 if (IS_GEN_IIE(hpriv)) {
1870 m2 &= ~0xC30FF01F;
1871 m2 |= 0x0000900F;
1872 }
1873
1874 writel(m2, port_mmio + PHY_MODE2);
1875}
1876
1877static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1878 unsigned int port_no)
1879{
1880 void __iomem *port_mmio = mv_port_base(mmio, port_no);
1881
1882 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1883
1884 if (IS_60XX(hpriv)) {
1885 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1886 ifctl |= (1 << 7); /* enable gen2i speed */
1887 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1888 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1889 }
1890
1891 udelay(25); /* allow reset propagation */
1892
1893 /* Spec never mentions clearing the bit. Marvell's driver does
1894 * clear the bit, however.
1895 */
1896 writelfl(0, port_mmio + EDMA_CMD_OFS);
1897
1898 hpriv->ops->phy_errata(hpriv, mmio, port_no);
1899
1900 if (IS_50XX(hpriv))
1901 mdelay(1);
1902}
1903
1904static void mv_stop_and_reset(struct ata_port *ap)
1905{
1906 struct mv_host_priv *hpriv = ap->host->private_data;
1907 void __iomem *mmio = ap->host->mmio_base;
1908
1909 mv_stop_dma(ap);
1910
1911 mv_channel_reset(hpriv, mmio, ap->port_no);
1912
1913 __mv_phy_reset(ap, 0);
1914}
1915
1916static inline void __msleep(unsigned int msec, int can_sleep)
1917{
1918 if (can_sleep)
1919 msleep(msec);
1920 else
1921 mdelay(msec);
1922}
1923
1924/**
1925 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1926 * @ap: ATA channel to manipulate
1927 *
1928 * Part of this is taken from __sata_phy_reset and modified to
1929 * not sleep since this routine gets called from interrupt level.
1930 *
1931 * LOCKING:
1932 * Inherited from caller.  This is coded to be safe to call at
1933 * interrupt level, i.e. it does not sleep.
1934 */
1935static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1936{
1937 struct mv_port_priv *pp = ap->private_data;
1938 struct mv_host_priv *hpriv = ap->host->private_data;
1939 void __iomem *port_mmio = mv_ap_base(ap);
1940 struct ata_taskfile tf;
1941 struct ata_device *dev = &ap->device[0];
1942 unsigned long timeout;
1943 int retry = 5;
1944 u32 sstatus;
1945
1946 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1947
1948 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1949 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1950 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1951
1952 /* Issue COMRESET via SControl */
1953comreset_retry:
1954 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1955 __msleep(1, can_sleep);
1956
1957 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1958 __msleep(20, can_sleep);
1959
1960 timeout = jiffies + msecs_to_jiffies(200);
1961 do {
1962 sata_scr_read(ap, SCR_STATUS, &sstatus);
1963 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1964 break;
1965
1966 __msleep(1, can_sleep);
1967 } while (time_before(jiffies, timeout));
1968
1969 /* work around errata */
1970 if (IS_60XX(hpriv) &&
1971 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1972 (retry-- > 0))
1973 goto comreset_retry;
1974
1975 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1976 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1977 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1978
1979 if (ata_port_online(ap)) {
1980 ata_port_probe(ap);
1981 } else {
1982 sata_scr_read(ap, SCR_STATUS, &sstatus);
1983 ata_port_printk(ap, KERN_INFO,
1984 "no device found (phy stat %08x)\n", sstatus);
1985 ata_port_disable(ap);
1986 return;
1987 }
1988 ap->cbl = ATA_CBL_SATA;
1989
1990	/* Even after SStatus reflects that the device is ready,
1991	 * the link seems to take a while to become fully
1992	 * established (until then Status still reads 0x80/0x7F),
1993	 * so we poll for that here.
1994 */
1995 retry = 20;
1996 while (1) {
1997 u8 drv_stat = ata_check_status(ap);
1998 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
1999 break;
2000 __msleep(500, can_sleep);
2001 if (retry-- <= 0)
2002 break;
2003 }
2004
2005 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
2006 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
2007 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
2008 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2009
2010 dev->class = ata_dev_classify(&tf);
2011 if (!ata_dev_enabled(dev)) {
2012 VPRINTK("Port disabled post-sig: No device present.\n");
2013 ata_port_disable(ap);
2014 }
2015
2016 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2017
2018 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2019
2020 VPRINTK("EXIT\n");
2021}
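/* The SControl writes in __mv_phy_reset() follow the standard SATA
 * COMRESET recipe: 0x301 sets DET (bits 3:0) to 1, asserting COMRESET,
 * and 0x300 releases it, both keeping IPM (bits 11:8) at 3 so partial
 * and slumber power states stay disabled.  An SStatus DET field of 3
 * then means a device is present with PHY communication established.
 */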
2022
2023static void mv_phy_reset(struct ata_port *ap)
2024{
2025 __mv_phy_reset(ap, 1);
2026}
2027
2028/**
2029 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2030 * @ap: ATA channel to manipulate
2031 *
2032 * Intent is to clear all pending error conditions, reset the
2033 * chip/bus, fail the command, and move on.
2034 *
2035 * LOCKING:
2036 * This routine holds the host lock while failing the command.
2037 */
2038static void mv_eng_timeout(struct ata_port *ap)
2039{
2040 struct ata_queued_cmd *qc;
2041 unsigned long flags;
2042
2043 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2044 DPRINTK("All regs @ start of eng_timeout\n");
2045 mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
2046 to_pci_dev(ap->host->dev));
2047
2048 qc = ata_qc_from_tag(ap, ap->active_tag);
2049	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2050	       ap->host->mmio_base, ap, qc, qc ? qc->scsicmd : NULL, qc ? &qc->scsicmd->cmnd : NULL);
2051
2052 spin_lock_irqsave(&ap->host->lock, flags);
2053 mv_err_intr(ap, 0);
2054 mv_stop_and_reset(ap);
2055 spin_unlock_irqrestore(&ap->host->lock, flags);
2056
2057	WARN_ON(!qc || !(qc->flags & ATA_QCFLAG_ACTIVE));
2058	if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
2059 qc->err_mask |= AC_ERR_TIMEOUT;
2060 ata_eh_qc_complete(qc);
2061 }
2062}
2063
2064/**
2065 * mv_port_init - Perform some early initialization on a single port.
2066 * @port: libata data structure storing shadow register addresses
2067 * @port_mmio: base address of the port
2068 *
2069 * Initialize shadow register mmio addresses, clear outstanding
2070 * interrupts on the port, and unmask interrupts for the future
2071 * start of the port.
2072 *
2073 * LOCKING:
2074 * Inherited from caller.
2075 */
2076static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2077{
2078 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
2079 unsigned serr_ofs;
2080
2081 /* PIO related setup
2082 */
2083 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2084 port->error_addr =
2085 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2086 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2087 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2088 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2089 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2090 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2091 port->status_addr =
2092 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2093 /* special case: control/altstatus doesn't have ATA_REG_ address */
2094 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2095
2096 /* unused: */
2097 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2098
2099 /* Clear any currently outstanding port interrupt conditions */
2100 serr_ofs = mv_scr_offset(SCR_ERROR);
2101 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2102 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2103
2104 /* unmask all EDMA error interrupts */
2105 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2106
2107 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2108 readl(port_mmio + EDMA_CFG_OFS),
2109 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2110 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2111}
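/* Worked example of the shadow-register math in mv_port_init(): with the
 * standard libata register indices (ATA_REG_DATA == 0 ... ATA_REG_STATUS
 * == 7) and 32-bit spacing, data lands at shd_base + 0x00, error/feature
 * at shd_base + 0x04, and status/command at shd_base + 0x1c;
 * ctl/altstatus is the special case at shd_base + SHD_CTL_AST_OFS.
 */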
2112
2113static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2114 unsigned int board_idx)
2115{
2116 u8 rev_id;
2117 u32 hp_flags = hpriv->hp_flags;
2118
2119 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2120
2121	switch (board_idx) {
2122 case chip_5080:
2123 hpriv->ops = &mv5xxx_ops;
2124 hp_flags |= MV_HP_50XX;
2125
2126 switch (rev_id) {
2127 case 0x1:
2128 hp_flags |= MV_HP_ERRATA_50XXB0;
2129 break;
2130 case 0x3:
2131 hp_flags |= MV_HP_ERRATA_50XXB2;
2132 break;
2133 default:
2134 dev_printk(KERN_WARNING, &pdev->dev,
2135 "Applying 50XXB2 workarounds to unknown rev\n");
2136 hp_flags |= MV_HP_ERRATA_50XXB2;
2137 break;
2138 }
2139 break;
2140
2141 case chip_504x:
2142 case chip_508x:
2143 hpriv->ops = &mv5xxx_ops;
2144 hp_flags |= MV_HP_50XX;
2145
2146 switch (rev_id) {
2147 case 0x0:
2148 hp_flags |= MV_HP_ERRATA_50XXB0;
2149 break;
2150 case 0x3:
2151 hp_flags |= MV_HP_ERRATA_50XXB2;
2152 break;
2153 default:
2154 dev_printk(KERN_WARNING, &pdev->dev,
2155 "Applying B2 workarounds to unknown rev\n");
2156 hp_flags |= MV_HP_ERRATA_50XXB2;
2157 break;
2158 }
2159 break;
2160
2161 case chip_604x:
2162 case chip_608x:
2163 hpriv->ops = &mv6xxx_ops;
2164
2165 switch (rev_id) {
2166 case 0x7:
2167 hp_flags |= MV_HP_ERRATA_60X1B2;
2168 break;
2169 case 0x9:
2170 hp_flags |= MV_HP_ERRATA_60X1C0;
2171 break;
2172 default:
2173 dev_printk(KERN_WARNING, &pdev->dev,
2174 "Applying B2 workarounds to unknown rev\n");
2175 hp_flags |= MV_HP_ERRATA_60X1B2;
2176 break;
2177 }
2178 break;
2179
2180 case chip_7042:
2181 case chip_6042:
2182 hpriv->ops = &mv6xxx_ops;
2183
2184 hp_flags |= MV_HP_GEN_IIE;
2185
2186 switch (rev_id) {
2187 case 0x0:
2188 hp_flags |= MV_HP_ERRATA_XX42A0;
2189 break;
2190 case 0x1:
2191 hp_flags |= MV_HP_ERRATA_60X1C0;
2192 break;
2193 default:
2194 dev_printk(KERN_WARNING, &pdev->dev,
2195 "Applying 60X1C0 workarounds to unknown rev\n");
2196 hp_flags |= MV_HP_ERRATA_60X1C0;
2197 break;
2198 }
2199 break;
2200
2201 default:
2202 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2203 return 1;
2204 }
2205
2206 hpriv->hp_flags = hp_flags;
2207
2208 return 0;
2209}
2210
2211/**
2212 * mv_init_host - Perform some early initialization of the host.
2213 * @pdev: host PCI device
2214 * @probe_ent: early data struct representing the host
2215 *
2216 * If possible, do an early global reset of the host. Then do
2217 * our port init and clear/unmask all/relevant host interrupts.
2218 *
2219 * LOCKING:
2220 * Inherited from caller.
2221 */
2222static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2223 unsigned int board_idx)
2224{
2225 int rc = 0, n_hc, port, hc;
2226 void __iomem *mmio = probe_ent->mmio_base;
2227 struct mv_host_priv *hpriv = probe_ent->private_data;
2228
2229 /* global interrupt mask */
2230 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2231
2232 rc = mv_chip_id(pdev, hpriv, board_idx);
2233 if (rc)
2234 goto done;
2235
2236 n_hc = mv_get_hc_count(probe_ent->port_flags);
2237 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2238
2239 for (port = 0; port < probe_ent->n_ports; port++)
2240 hpriv->ops->read_preamp(hpriv, port, mmio);
2241
2242 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2243 if (rc)
2244 goto done;
2245
2246 hpriv->ops->reset_flash(hpriv, mmio);
2247 hpriv->ops->reset_bus(pdev, mmio);
2248 hpriv->ops->enable_leds(hpriv, mmio);
2249
2250 for (port = 0; port < probe_ent->n_ports; port++) {
2251 if (IS_60XX(hpriv)) {
2252 void __iomem *port_mmio = mv_port_base(mmio, port);
2253
2254 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2255 ifctl |= (1 << 7); /* enable gen2i speed */
2256 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2257 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2258 }
2259
2260 hpriv->ops->phy_errata(hpriv, mmio, port);
2261 }
2262
2263 for (port = 0; port < probe_ent->n_ports; port++) {
2264 void __iomem *port_mmio = mv_port_base(mmio, port);
2265 mv_port_init(&probe_ent->port[port], port_mmio);
2266 }
2267
2268 for (hc = 0; hc < n_hc; hc++) {
2269 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2270
2271 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2272 "(before clear)=0x%08x\n", hc,
2273 readl(hc_mmio + HC_CFG_OFS),
2274 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2275
2276 /* Clear any currently outstanding hc interrupt conditions */
2277 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2278 }
2279
2280 /* Clear any currently outstanding host interrupt conditions */
2281 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2282
2283 /* and unmask interrupt generation for host regs */
2284 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2285 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2286
2287 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2288 "PCI int cause/mask=0x%08x/0x%08x\n",
2289 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2290 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2291 readl(mmio + PCI_IRQ_CAUSE_OFS),
2292 readl(mmio + PCI_IRQ_MASK_OFS));
2293
2294done:
2295 return rc;
2296}
2297
2298/**
2299 * mv_print_info - Dump key info to kernel log for perusal.
2300 * @probe_ent: early data struct representing the host
2301 *
2302 * FIXME: complete this.
2303 *
2304 * LOCKING:
2305 * Inherited from caller.
2306 */
2307static void mv_print_info(struct ata_probe_ent *probe_ent)
2308{
2309 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2310 struct mv_host_priv *hpriv = probe_ent->private_data;
2311 u8 rev_id, scc;
2312 const char *scc_s;
2313
2314 /* Use this to determine the HW stepping of the chip so we know
2315	 * which errata to work around.
2316 */
2317 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2318
2319 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2320 if (scc == 0)
2321 scc_s = "SCSI";
2322 else if (scc == 0x01)
2323 scc_s = "RAID";
2324 else
2325 scc_s = "unknown";
2326
2327 dev_printk(KERN_INFO, &pdev->dev,
2328 "%u slots %u ports %s mode IRQ via %s\n",
2329 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2330 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2331}
2332
2333/**
2334 * mv_init_one - handle a positive probe of a Marvell host
2335 * @pdev: PCI device found
2336 * @ent: PCI device ID entry for the matched host
2337 *
2338 * LOCKING:
2339 * Inherited from caller.
2340 */
2341static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2342{
2343 static int printed_version = 0;
2344 struct ata_probe_ent *probe_ent = NULL;
2345 struct mv_host_priv *hpriv;
2346 unsigned int board_idx = (unsigned int)ent->driver_data;
2347 void __iomem *mmio_base;
2348 int pci_dev_busy = 0, rc;
2349
2350 if (!printed_version++)
2351 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2352
2353 rc = pci_enable_device(pdev);
2354 if (rc) {
2355 return rc;
2356 }
2357 pci_set_master(pdev);
2358
2359 rc = pci_request_regions(pdev, DRV_NAME);
2360 if (rc) {
2361 pci_dev_busy = 1;
2362 goto err_out;
2363 }
2364
2365 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2366 if (probe_ent == NULL) {
2367 rc = -ENOMEM;
2368 goto err_out_regions;
2369 }
2370
2371 memset(probe_ent, 0, sizeof(*probe_ent));
2372 probe_ent->dev = pci_dev_to_dev(pdev);
2373 INIT_LIST_HEAD(&probe_ent->node);
2374
2375 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2376 if (mmio_base == NULL) {
2377 rc = -ENOMEM;
2378 goto err_out_free_ent;
2379 }
2380
2381 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2382 if (!hpriv) {
2383 rc = -ENOMEM;
2384 goto err_out_iounmap;
2385 }
2386 memset(hpriv, 0, sizeof(*hpriv));
2387
2388 probe_ent->sht = mv_port_info[board_idx].sht;
2389 probe_ent->port_flags = mv_port_info[board_idx].flags;
2390 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2391 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2392 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2393
2394 probe_ent->irq = pdev->irq;
2395 probe_ent->irq_flags = IRQF_SHARED;
2396 probe_ent->mmio_base = mmio_base;
2397 probe_ent->private_data = hpriv;
2398
2399 /* initialize adapter */
2400 rc = mv_init_host(pdev, probe_ent, board_idx);
2401 if (rc) {
2402 goto err_out_hpriv;
2403 }
2404
2405 /* Enable interrupts */
2406 if (msi && pci_enable_msi(pdev) == 0) {
2407 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2408 } else {
2409 pci_intx(pdev, 1);
2410 }
2411
2412 mv_dump_pci_cfg(pdev, 0x68);
2413 mv_print_info(probe_ent);
2414
2415 if (ata_device_add(probe_ent) == 0) {
2416 rc = -ENODEV; /* No devices discovered */
2417 goto err_out_dev_add;
2418 }
2419
2420 kfree(probe_ent);
2421 return 0;
2422
2423err_out_dev_add:
2424 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2425 pci_disable_msi(pdev);
2426 } else {
2427 pci_intx(pdev, 0);
2428 }
2429err_out_hpriv:
2430 kfree(hpriv);
2431err_out_iounmap:
2432 pci_iounmap(pdev, mmio_base);
2433err_out_free_ent:
2434 kfree(probe_ent);
2435err_out_regions:
2436 pci_release_regions(pdev);
2437err_out:
2438 if (!pci_dev_busy) {
2439 pci_disable_device(pdev);
2440 }
2441
2442 return rc;
2443}
2444
2445static int __init mv_init(void)
2446{
2447 return pci_register_driver(&mv_pci_driver);
2448}
2449
2450static void __exit mv_exit(void)
2451{
2452 pci_unregister_driver(&mv_pci_driver);
2453}
2454
2455MODULE_AUTHOR("Brett Russ");
2456MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2457MODULE_LICENSE("GPL");
2458MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2459MODULE_VERSION(DRV_VERSION);
2460
2461module_param(msi, int, 0444);
2462MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
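/* Usage note: loading with "modprobe sata_mv msi=1" requests MSI;
 * mv_init_one() above falls back to legacy INTx when MSI setup fails
 * or when msi is left unset.
 */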
2463
2464module_init(mv_init);
2465module_exit(mv_exit);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
new file mode 100644
index 000000000000..27c22feebf30
--- /dev/null
+++ b/drivers/ata/sata_nv.c
@@ -0,0 +1,595 @@
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0"
47
48enum {
49 NV_PORTS = 2,
50 NV_PIO_MASK = 0x1f,
51 NV_MWDMA_MASK = 0x07,
52 NV_UDMA_MASK = 0x7f,
53 NV_PORT0_SCR_REG_OFFSET = 0x00,
54 NV_PORT1_SCR_REG_OFFSET = 0x40,
55
56 /* INT_STATUS/ENABLE */
57 NV_INT_STATUS = 0x10,
58 NV_INT_ENABLE = 0x11,
59 NV_INT_STATUS_CK804 = 0x440,
60 NV_INT_ENABLE_CK804 = 0x441,
61
62 /* INT_STATUS/ENABLE bits */
63 NV_INT_DEV = 0x01,
64 NV_INT_PM = 0x02,
65 NV_INT_ADDED = 0x04,
66 NV_INT_REMOVED = 0x08,
67
68 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
69
70 NV_INT_ALL = 0x0f,
71 NV_INT_MASK = NV_INT_DEV |
72 NV_INT_ADDED | NV_INT_REMOVED,
73
74 /* INT_CONFIG */
75 NV_INT_CONFIG = 0x12,
76 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
77
78 // For PCI config register 20
79 NV_MCP_SATA_CFG_20 = 0x50,
80 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
81};
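/* Illustrative sketch (not part of the driver): with the 4-bits-per-port
 * layout above, the status nibble for a given port can be isolated as
 *
 *	u8 port_stat = (irq_stat >> (port_no * NV_INT_PORT_SHIFT)) & NV_INT_ALL;
 *
 * nv_do_interrupt() below achieves the same effect by shifting irq_stat
 * right by NV_INT_PORT_SHIFT after handling each port.
 */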
82
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host *host);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs);
87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
88 struct pt_regs *regs);
89static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
90 struct pt_regs *regs);
91static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
92static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
93
94static void nv_nf2_freeze(struct ata_port *ap);
95static void nv_nf2_thaw(struct ata_port *ap);
96static void nv_ck804_freeze(struct ata_port *ap);
97static void nv_ck804_thaw(struct ata_port *ap);
98static void nv_error_handler(struct ata_port *ap);
99
100enum nv_host_type
101{
102 GENERIC,
103 NFORCE2,
104 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
105 CK804
106};
107
108static const struct pci_device_id nv_pci_tbl[] = {
109 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
111 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
113 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
115 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
117 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
119 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
121 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
123 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
125 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
127 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
129 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
131 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
133 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
137 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
142 PCI_ANY_ID, PCI_ANY_ID,
143 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
144 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
145 PCI_ANY_ID, PCI_ANY_ID,
146 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
147 { 0, } /* terminate list */
148};
149
150static struct pci_driver nv_pci_driver = {
151 .name = DRV_NAME,
152 .id_table = nv_pci_tbl,
153 .probe = nv_init_one,
154 .remove = ata_pci_remove_one,
155};
156
157static struct scsi_host_template nv_sht = {
158 .module = THIS_MODULE,
159 .name = DRV_NAME,
160 .ioctl = ata_scsi_ioctl,
161 .queuecommand = ata_scsi_queuecmd,
162 .can_queue = ATA_DEF_QUEUE,
163 .this_id = ATA_SHT_THIS_ID,
164 .sg_tablesize = LIBATA_MAX_PRD,
165 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
166 .emulated = ATA_SHT_EMULATED,
167 .use_clustering = ATA_SHT_USE_CLUSTERING,
168 .proc_name = DRV_NAME,
169 .dma_boundary = ATA_DMA_BOUNDARY,
170 .slave_configure = ata_scsi_slave_config,
171 .slave_destroy = ata_scsi_slave_destroy,
172 .bios_param = ata_std_bios_param,
173};
174
175static const struct ata_port_operations nv_generic_ops = {
176 .port_disable = ata_port_disable,
177 .tf_load = ata_tf_load,
178 .tf_read = ata_tf_read,
179 .exec_command = ata_exec_command,
180 .check_status = ata_check_status,
181 .dev_select = ata_std_dev_select,
182 .bmdma_setup = ata_bmdma_setup,
183 .bmdma_start = ata_bmdma_start,
184 .bmdma_stop = ata_bmdma_stop,
185 .bmdma_status = ata_bmdma_status,
186 .qc_prep = ata_qc_prep,
187 .qc_issue = ata_qc_issue_prot,
188 .freeze = ata_bmdma_freeze,
189 .thaw = ata_bmdma_thaw,
190 .error_handler = nv_error_handler,
191 .post_internal_cmd = ata_bmdma_post_internal_cmd,
192 .data_xfer = ata_pio_data_xfer,
193 .irq_handler = nv_generic_interrupt,
194 .irq_clear = ata_bmdma_irq_clear,
195 .scr_read = nv_scr_read,
196 .scr_write = nv_scr_write,
197 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_pci_host_stop,
200};
201
202static const struct ata_port_operations nv_nf2_ops = {
203 .port_disable = ata_port_disable,
204 .tf_load = ata_tf_load,
205 .tf_read = ata_tf_read,
206 .exec_command = ata_exec_command,
207 .check_status = ata_check_status,
208 .dev_select = ata_std_dev_select,
209 .bmdma_setup = ata_bmdma_setup,
210 .bmdma_start = ata_bmdma_start,
211 .bmdma_stop = ata_bmdma_stop,
212 .bmdma_status = ata_bmdma_status,
213 .qc_prep = ata_qc_prep,
214 .qc_issue = ata_qc_issue_prot,
215 .freeze = nv_nf2_freeze,
216 .thaw = nv_nf2_thaw,
217 .error_handler = nv_error_handler,
218 .post_internal_cmd = ata_bmdma_post_internal_cmd,
219 .data_xfer = ata_pio_data_xfer,
220 .irq_handler = nv_nf2_interrupt,
221 .irq_clear = ata_bmdma_irq_clear,
222 .scr_read = nv_scr_read,
223 .scr_write = nv_scr_write,
224 .port_start = ata_port_start,
225 .port_stop = ata_port_stop,
226 .host_stop = ata_pci_host_stop,
227};
228
229static const struct ata_port_operations nv_ck804_ops = {
230 .port_disable = ata_port_disable,
231 .tf_load = ata_tf_load,
232 .tf_read = ata_tf_read,
233 .exec_command = ata_exec_command,
234 .check_status = ata_check_status,
235 .dev_select = ata_std_dev_select,
236 .bmdma_setup = ata_bmdma_setup,
237 .bmdma_start = ata_bmdma_start,
238 .bmdma_stop = ata_bmdma_stop,
239 .bmdma_status = ata_bmdma_status,
240 .qc_prep = ata_qc_prep,
241 .qc_issue = ata_qc_issue_prot,
242 .freeze = nv_ck804_freeze,
243 .thaw = nv_ck804_thaw,
244 .error_handler = nv_error_handler,
245 .post_internal_cmd = ata_bmdma_post_internal_cmd,
246 .data_xfer = ata_pio_data_xfer,
247 .irq_handler = nv_ck804_interrupt,
248 .irq_clear = ata_bmdma_irq_clear,
249 .scr_read = nv_scr_read,
250 .scr_write = nv_scr_write,
251 .port_start = ata_port_start,
252 .port_stop = ata_port_stop,
253 .host_stop = nv_ck804_host_stop,
254};
255
256static struct ata_port_info nv_port_info[] = {
257 /* generic */
258 {
259 .sht = &nv_sht,
260 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
261 .pio_mask = NV_PIO_MASK,
262 .mwdma_mask = NV_MWDMA_MASK,
263 .udma_mask = NV_UDMA_MASK,
264 .port_ops = &nv_generic_ops,
265 },
266 /* nforce2/3 */
267 {
268 .sht = &nv_sht,
269 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
270 .pio_mask = NV_PIO_MASK,
271 .mwdma_mask = NV_MWDMA_MASK,
272 .udma_mask = NV_UDMA_MASK,
273 .port_ops = &nv_nf2_ops,
274 },
275 /* ck804 */
276 {
277 .sht = &nv_sht,
278 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
279 .pio_mask = NV_PIO_MASK,
280 .mwdma_mask = NV_MWDMA_MASK,
281 .udma_mask = NV_UDMA_MASK,
282 .port_ops = &nv_ck804_ops,
283 },
284};
285
286MODULE_AUTHOR("NVIDIA");
287MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
288MODULE_LICENSE("GPL");
289MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
290MODULE_VERSION(DRV_VERSION);
291
292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
293 struct pt_regs *regs)
294{
295 struct ata_host *host = dev_instance;
296 unsigned int i;
297 unsigned int handled = 0;
298 unsigned long flags;
299
300 spin_lock_irqsave(&host->lock, flags);
301
302 for (i = 0; i < host->n_ports; i++) {
303 struct ata_port *ap;
304
305 ap = host->ports[i];
306 if (ap &&
307 !(ap->flags & ATA_FLAG_DISABLED)) {
308 struct ata_queued_cmd *qc;
309
310 qc = ata_qc_from_tag(ap, ap->active_tag);
311 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
312 handled += ata_host_intr(ap, qc);
313 else
314 				// No request pending?  Read the status register
315 				// anyway, to clear any pending interrupt.
316 ap->ops->check_status(ap);
317 }
318
319 }
320
321 spin_unlock_irqrestore(&host->lock, flags);
322
323 return IRQ_RETVAL(handled);
324}
325
326static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
327{
328 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
329 int handled;
330
331 /* freeze if hotplugged */
332 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
333 ata_port_freeze(ap);
334 return 1;
335 }
336
337 /* bail out if not our interrupt */
338 if (!(irq_stat & NV_INT_DEV))
339 return 0;
340
341 /* DEV interrupt w/ no active qc? */
342 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
343 ata_check_status(ap);
344 return 1;
345 }
346
347 /* handle interrupt */
348 handled = ata_host_intr(ap, qc);
349 if (unlikely(!handled)) {
350 /* spurious, clear it */
351 ata_check_status(ap);
352 }
353
354 return 1;
355}
356
357static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
358{
359 int i, handled = 0;
360
361 for (i = 0; i < host->n_ports; i++) {
362 struct ata_port *ap = host->ports[i];
363
364 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
365 handled += nv_host_intr(ap, irq_stat);
366
367 irq_stat >>= NV_INT_PORT_SHIFT;
368 }
369
370 return IRQ_RETVAL(handled);
371}
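/* Worked example: on a two-port controller, irq_stat == 0x41 decodes as
 * NV_INT_DEV (0x01) for port 0 plus NV_INT_ADDED (0x04 << 4) for port 1,
 * so port 0 gets normal command-completion handling while port 1 is
 * frozen for hotplug processing by nv_host_intr().
 */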
372
373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
374 struct pt_regs *regs)
375{
376 struct ata_host *host = dev_instance;
377 u8 irq_stat;
378 irqreturn_t ret;
379
380 spin_lock(&host->lock);
381 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
382 ret = nv_do_interrupt(host, irq_stat);
383 spin_unlock(&host->lock);
384
385 return ret;
386}
387
388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
389 struct pt_regs *regs)
390{
391 struct ata_host *host = dev_instance;
392 u8 irq_stat;
393 irqreturn_t ret;
394
395 spin_lock(&host->lock);
396 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
397 ret = nv_do_interrupt(host, irq_stat);
398 spin_unlock(&host->lock);
399
400 return ret;
401}
402
403static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
404{
405 if (sc_reg > SCR_CONTROL)
406 return 0xffffffffU;
407
408 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
409}
410
411static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
412{
413 if (sc_reg > SCR_CONTROL)
414 return;
415
416 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
417}
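/* The SCR block is laid out at a 4-byte stride, so with port 0 based at
 * NV_PORT0_SCR_REG_OFFSET: SCR_STATUS sits at +0x00, SCR_ERROR at +0x04
 * and SCR_CONTROL at +0x08 (assuming the usual libata SCR_* numbering
 * of 0, 1, 2); port 1's block starts 0x40 higher.
 */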
418
419static void nv_nf2_freeze(struct ata_port *ap)
420{
421 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
422 int shift = ap->port_no * NV_INT_PORT_SHIFT;
423 u8 mask;
424
425 mask = inb(scr_addr + NV_INT_ENABLE);
426 mask &= ~(NV_INT_ALL << shift);
427 outb(mask, scr_addr + NV_INT_ENABLE);
428}
429
430static void nv_nf2_thaw(struct ata_port *ap)
431{
432 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
433 int shift = ap->port_no * NV_INT_PORT_SHIFT;
434 u8 mask;
435
436 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
437
438 mask = inb(scr_addr + NV_INT_ENABLE);
439 mask |= (NV_INT_MASK << shift);
440 outb(mask, scr_addr + NV_INT_ENABLE);
441}
442
443static void nv_ck804_freeze(struct ata_port *ap)
444{
445 void __iomem *mmio_base = ap->host->mmio_base;
446 int shift = ap->port_no * NV_INT_PORT_SHIFT;
447 u8 mask;
448
449 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
450 mask &= ~(NV_INT_ALL << shift);
451 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
452}
453
454static void nv_ck804_thaw(struct ata_port *ap)
455{
456 void __iomem *mmio_base = ap->host->mmio_base;
457 int shift = ap->port_no * NV_INT_PORT_SHIFT;
458 u8 mask;
459
460 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
461
462 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
463 mask |= (NV_INT_MASK << shift);
464 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
465}
466
467static int nv_hardreset(struct ata_port *ap, unsigned int *class)
468{
469 unsigned int dummy;
470
471 /* SATA hardreset fails to retrieve proper device signature on
472 * some controllers. Don't classify on hardreset. For more
473 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
474 */
475 return sata_std_hardreset(ap, &dummy);
476}
477
478static void nv_error_handler(struct ata_port *ap)
479{
480 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
481 nv_hardreset, ata_std_postreset);
482}
483
484static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
485{
486 static int printed_version = 0;
487 struct ata_port_info *ppi;
488 struct ata_probe_ent *probe_ent;
489 int pci_dev_busy = 0;
490 int rc;
491 u32 bar;
492 unsigned long base;
493
494 	// Make sure this is a SATA controller by checking that all six
495 	// BARs are populated (NVIDIA SATA controllers always have six
496 	// BARs). Otherwise it's an IDE controller and we ignore it.
497 for (bar=0; bar<6; bar++)
498 if (pci_resource_start(pdev, bar) == 0)
499 return -ENODEV;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
503
504 rc = pci_enable_device(pdev);
505 if (rc)
506 goto err_out;
507
508 rc = pci_request_regions(pdev, DRV_NAME);
509 if (rc) {
510 pci_dev_busy = 1;
511 goto err_out_disable;
512 }
513
514 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
515 if (rc)
516 goto err_out_regions;
517 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
518 if (rc)
519 goto err_out_regions;
520
521 rc = -ENOMEM;
522
523 ppi = &nv_port_info[ent->driver_data];
524 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
525 if (!probe_ent)
526 goto err_out_regions;
527
528 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
529 if (!probe_ent->mmio_base) {
530 rc = -EIO;
531 goto err_out_free_ent;
532 }
533
534 base = (unsigned long)probe_ent->mmio_base;
535
536 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
537 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
538
539 /* enable SATA space for CK804 */
540 if (ent->driver_data == CK804) {
541 u8 regval;
542
543 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
544 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
545 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
546 }
547
548 pci_set_master(pdev);
549
550 rc = ata_device_add(probe_ent);
551 if (rc != NV_PORTS)
552 goto err_out_iounmap;
553
554 kfree(probe_ent);
555
556 return 0;
557
558err_out_iounmap:
559 pci_iounmap(pdev, probe_ent->mmio_base);
560err_out_free_ent:
561 kfree(probe_ent);
562err_out_regions:
563 pci_release_regions(pdev);
564err_out_disable:
565 if (!pci_dev_busy)
566 pci_disable_device(pdev);
567err_out:
568 return rc;
569}
570
571static void nv_ck804_host_stop(struct ata_host *host)
572{
573 struct pci_dev *pdev = to_pci_dev(host->dev);
574 u8 regval;
575
576 /* disable SATA space for CK804 */
577 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
580
581 ata_pci_host_stop(host);
582}
583
584static int __init nv_init(void)
585{
586 return pci_register_driver(&nv_pci_driver);
587}
588
589static void __exit nv_exit(void)
590{
591 pci_unregister_driver(&nv_pci_driver);
592}
593
594module_init(nv_init);
595module_exit(nv_exit);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
new file mode 100644
index 000000000000..d627812ea73d
--- /dev/null
+++ b/drivers/ata/sata_promise.c
@@ -0,0 +1,844 @@
1/*
2 * sata_promise.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04"
50
51
52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
63
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10),
66
67 board_2037x = 0, /* FastTrak S150 TX2plus */
68 board_20319 = 1, /* FastTrak S150 TX4 */
69 board_20619 = 2, /* FastTrak TX4000 */
70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
73
74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
75
76 PDC_RESET = (1 << 11), /* HDMA reset */
77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
81};
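/* For reference, PDC_ERR_MASK as written above evaluates to 0x00780700:
 * bits 19-22 contribute 0x780000 and bits 8-10 contribute 0x700.
 */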
82
83
84struct pdc_port_priv {
85 u8 *pkt;
86 dma_addr_t pkt_dma;
87};
88
89struct pdc_host_priv {
90 int hotplug_offset;
91};
92
93static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
94static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
95static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
96static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
97static void pdc_eng_timeout(struct ata_port *ap);
98static int pdc_port_start(struct ata_port *ap);
99static void pdc_port_stop(struct ata_port *ap);
100static void pdc_pata_phy_reset(struct ata_port *ap);
101static void pdc_sata_phy_reset(struct ata_port *ap);
102static void pdc_qc_prep(struct ata_queued_cmd *qc);
103static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host *host);
108
109
110static struct scsi_host_template pdc_ata_sht = {
111 .module = THIS_MODULE,
112 .name = DRV_NAME,
113 .ioctl = ata_scsi_ioctl,
114 .queuecommand = ata_scsi_queuecmd,
115 .can_queue = ATA_DEF_QUEUE,
116 .this_id = ATA_SHT_THIS_ID,
117 .sg_tablesize = LIBATA_MAX_PRD,
118 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
119 .emulated = ATA_SHT_EMULATED,
120 .use_clustering = ATA_SHT_USE_CLUSTERING,
121 .proc_name = DRV_NAME,
122 .dma_boundary = ATA_DMA_BOUNDARY,
123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
125 .bios_param = ata_std_bios_param,
126};
127
128static const struct ata_port_operations pdc_sata_ops = {
129 .port_disable = ata_port_disable,
130 .tf_load = pdc_tf_load_mmio,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = pdc_exec_command_mmio,
134 .dev_select = ata_std_dev_select,
135
136 .phy_reset = pdc_sata_phy_reset,
137
138 .qc_prep = pdc_qc_prep,
139 .qc_issue = pdc_qc_issue_prot,
140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
142 .irq_handler = pdc_interrupt,
143 .irq_clear = pdc_irq_clear,
144
145 .scr_read = pdc_sata_scr_read,
146 .scr_write = pdc_sata_scr_write,
147 .port_start = pdc_port_start,
148 .port_stop = pdc_port_stop,
149 .host_stop = pdc_host_stop,
150};
151
152static const struct ata_port_operations pdc_pata_ops = {
153 .port_disable = ata_port_disable,
154 .tf_load = pdc_tf_load_mmio,
155 .tf_read = ata_tf_read,
156 .check_status = ata_check_status,
157 .exec_command = pdc_exec_command_mmio,
158 .dev_select = ata_std_dev_select,
159
160 .phy_reset = pdc_pata_phy_reset,
161
162 .qc_prep = pdc_qc_prep,
163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
165 .eng_timeout = pdc_eng_timeout,
166 .irq_handler = pdc_interrupt,
167 .irq_clear = pdc_irq_clear,
168
169 .port_start = pdc_port_start,
170 .port_stop = pdc_port_stop,
171 .host_stop = pdc_host_stop,
172};
173
174static const struct ata_port_info pdc_port_info[] = {
175 /* board_2037x */
176 {
177 .sht = &pdc_ata_sht,
178 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
179 .pio_mask = 0x1f, /* pio0-4 */
180 .mwdma_mask = 0x07, /* mwdma0-2 */
181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
182 .port_ops = &pdc_sata_ops,
183 },
184
185 /* board_20319 */
186 {
187 .sht = &pdc_ata_sht,
188 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
189 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
192 .port_ops = &pdc_sata_ops,
193 },
194
195 /* board_20619 */
196 {
197 .sht = &pdc_ata_sht,
198 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_pata_ops,
203 },
204
205 /* board_20771 */
206 {
207 .sht = &pdc_ata_sht,
208 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
209 .pio_mask = 0x1f, /* pio0-4 */
210 .mwdma_mask = 0x07, /* mwdma0-2 */
211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
212 .port_ops = &pdc_sata_ops,
213 },
214
215 /* board_2057x */
216 {
217 .sht = &pdc_ata_sht,
218 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
222 .port_ops = &pdc_sata_ops,
223 },
224
225 /* board_40518 */
226 {
227 .sht = &pdc_ata_sht,
228 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
229 .pio_mask = 0x1f, /* pio0-4 */
230 .mwdma_mask = 0x07, /* mwdma0-2 */
231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
232 .port_ops = &pdc_sata_ops,
233 },
234};
235
236static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_2037x },
239 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
240 board_2037x },
241 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
242 board_2037x },
243 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 board_2037x },
245 { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
246 board_2037x },
247 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
248 board_2037x },
249 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
250 board_2057x },
251 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
252 board_2057x },
253 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
254 board_2037x },
255
256 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
260 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
261 board_20319 },
262 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
263 board_20319 },
264 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
265 board_20319 },
266 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
267 board_40518 },
268
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 },
271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be
274 * divined.
275 */
276#if 0
277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
278 board_20771 },
279#endif
280
281 { } /* terminate list */
282};
283
284
285static struct pci_driver pdc_ata_pci_driver = {
286 .name = DRV_NAME,
287 .id_table = pdc_ata_pci_tbl,
288 .probe = pdc_ata_init_one,
289 .remove = ata_pci_remove_one,
290};
291
292
293static int pdc_port_start(struct ata_port *ap)
294{
295 struct device *dev = ap->host->dev;
296 struct pdc_port_priv *pp;
297 int rc;
298
299 rc = ata_port_start(ap);
300 if (rc)
301 return rc;
302
303 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
304 if (!pp) {
305 rc = -ENOMEM;
306 goto err_out;
307 }
308
309 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt) {
311 rc = -ENOMEM;
312 goto err_out_kfree;
313 }
314
315 ap->private_data = pp;
316
317 return 0;
318
319err_out_kfree:
320 kfree(pp);
321err_out:
322 ata_port_stop(ap);
323 return rc;
324}
325
326
327static void pdc_port_stop(struct ata_port *ap)
328{
329 struct device *dev = ap->host->dev;
330 struct pdc_port_priv *pp = ap->private_data;
331
332 ap->private_data = NULL;
333 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
334 kfree(pp);
335 ata_port_stop(ap);
336}
337
338
339static void pdc_host_stop(struct ata_host *host)
340{
341 struct pdc_host_priv *hp = host->private_data;
342
343 ata_pci_host_stop(host);
344
345 kfree(hp);
346}
347
348
349static void pdc_reset_port(struct ata_port *ap)
350{
351 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
352 unsigned int i;
353 u32 tmp;
354
355 for (i = 11; i > 0; i--) {
356 tmp = readl(mmio);
357 if (tmp & PDC_RESET)
358 break;
359
360 udelay(100);
361
362 tmp |= PDC_RESET;
363 writel(tmp, mmio);
364 }
365
366 tmp &= ~PDC_RESET;
367 writel(tmp, mmio);
368 readl(mmio); /* flush */
369}
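/* The loop above keeps asserting PDC_RESET and re-reading until the bit
 * reads back set (up to ~1 ms in 100 us steps); the writes after the
 * loop then deassert it and flush, completing the HDMA reset pulse.
 */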
370
371static void pdc_sata_phy_reset(struct ata_port *ap)
372{
373 pdc_reset_port(ap);
374 sata_phy_reset(ap);
375}
376
377static void pdc_pata_cbl_detect(struct ata_port *ap)
378{
379 u8 tmp;
380 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
381
382 tmp = readb(mmio);
383
384 if (tmp & 0x01) {
385 ap->cbl = ATA_CBL_PATA40;
386 ap->udma_mask &= ATA_UDMA_MASK_40C;
387 } else
388 ap->cbl = ATA_CBL_PATA80;
389}
390
391static void pdc_pata_phy_reset(struct ata_port *ap)
392{
393 pdc_pata_cbl_detect(ap);
394 pdc_reset_port(ap);
395 ata_port_probe(ap);
396 ata_bus_reset(ap);
397}
398
399static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
400{
401 if (sc_reg > SCR_CONTROL)
402 return 0xffffffffU;
403 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
404}
405
406
407static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
408 u32 val)
409{
410 if (sc_reg > SCR_CONTROL)
411 return;
412 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
413}
414
415static void pdc_qc_prep(struct ata_queued_cmd *qc)
416{
417 struct pdc_port_priv *pp = qc->ap->private_data;
418 unsigned int i;
419
420 VPRINTK("ENTER\n");
421
422 switch (qc->tf.protocol) {
423 case ATA_PROT_DMA:
424 ata_qc_prep(qc);
425 /* fall through */
426
427 case ATA_PROT_NODATA:
428 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
429 qc->dev->devno, pp->pkt);
430
431 if (qc->tf.flags & ATA_TFLAG_LBA48)
432 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
433 else
434 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
435
436 pdc_pkt_footer(&qc->tf, pp->pkt, i);
437 break;
438
439 default:
440 break;
441 }
442}
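/* Packet assembly for DMA/NODATA commands thus proceeds in three steps:
 * pdc_pkt_header() emits the 16-byte control header, pdc_prep_lba28()
 * or pdc_prep_lba48() appends the taskfile register writes, and
 * pdc_pkt_footer() terminates the packet with the command byte (see
 * sata_promise.h for the encodings).
 */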
443
444static void pdc_eng_timeout(struct ata_port *ap)
445{
446 struct ata_host *host = ap->host;
447 u8 drv_stat;
448 struct ata_queued_cmd *qc;
449 unsigned long flags;
450
451 DPRINTK("ENTER\n");
452
453 spin_lock_irqsave(&host->lock, flags);
454
455 qc = ata_qc_from_tag(ap, ap->active_tag);
456
457 switch (qc->tf.protocol) {
458 case ATA_PROT_DMA:
459 case ATA_PROT_NODATA:
460 ata_port_printk(ap, KERN_ERR, "command timeout\n");
461 drv_stat = ata_wait_idle(ap);
462 qc->err_mask |= __ac_err_mask(drv_stat);
463 break;
464
465 default:
466 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
467
468 ata_port_printk(ap, KERN_ERR,
469 "unknown timeout, cmd 0x%x stat 0x%x\n",
470 qc->tf.command, drv_stat);
471
472 qc->err_mask |= ac_err_mask(drv_stat);
473 break;
474 }
475
476 spin_unlock_irqrestore(&host->lock, flags);
477 ata_eh_qc_complete(qc);
478 DPRINTK("EXIT\n");
479}
480
481static inline unsigned int pdc_host_intr( struct ata_port *ap,
482 struct ata_queued_cmd *qc)
483{
484 unsigned int handled = 0;
485 u32 tmp;
486 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
487
488 tmp = readl(mmio);
489 if (tmp & PDC_ERR_MASK) {
490 qc->err_mask |= AC_ERR_DEV;
491 pdc_reset_port(ap);
492 }
493
494 switch (qc->tf.protocol) {
495 case ATA_PROT_DMA:
496 case ATA_PROT_NODATA:
497 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
498 ata_qc_complete(qc);
499 handled = 1;
500 break;
501
502 default:
503 ap->stats.idle_irq++;
504 break;
505 }
506
507 return handled;
508}
509
510static void pdc_irq_clear(struct ata_port *ap)
511{
512 struct ata_host *host = ap->host;
513 void __iomem *mmio = host->mmio_base;
514
515 readl(mmio + PDC_INT_SEQMASK);
516}
517
518static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
519{
520 struct ata_host *host = dev_instance;
521 struct ata_port *ap;
522 u32 mask = 0;
523 unsigned int i, tmp;
524 unsigned int handled = 0;
525 void __iomem *mmio_base;
526
527 VPRINTK("ENTER\n");
528
529 if (!host || !host->mmio_base) {
530 VPRINTK("QUICK EXIT\n");
531 return IRQ_NONE;
532 }
533
534 mmio_base = host->mmio_base;
535
536 /* reading should also clear interrupts */
537 mask = readl(mmio_base + PDC_INT_SEQMASK);
538
539 if (mask == 0xffffffff) {
540 VPRINTK("QUICK EXIT 2\n");
541 return IRQ_NONE;
542 }
543
544 spin_lock(&host->lock);
545
546 mask &= 0xffff; /* only 16 tags possible */
547 if (!mask) {
548 VPRINTK("QUICK EXIT 3\n");
549 goto done_irq;
550 }
551
552 writel(mask, mmio_base + PDC_INT_SEQMASK);
553
554 for (i = 0; i < host->n_ports; i++) {
555 VPRINTK("port %u\n", i);
556 ap = host->ports[i];
557 tmp = mask & (1 << (i + 1));
558 if (tmp && ap &&
559 !(ap->flags & ATA_FLAG_DISABLED)) {
560 struct ata_queued_cmd *qc;
561
562 qc = ata_qc_from_tag(ap, ap->active_tag);
563 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
564 handled += pdc_host_intr(ap, qc);
565 }
566 }
567
568 VPRINTK("EXIT\n");
569
570done_irq:
571 spin_unlock(&host->lock);
572 return IRQ_RETVAL(handled);
573}
574
575static inline void pdc_packet_start(struct ata_queued_cmd *qc)
576{
577 struct ata_port *ap = qc->ap;
578 struct pdc_port_priv *pp = ap->private_data;
579 unsigned int port_no = ap->port_no;
580 u8 seq = (u8) (port_no + 1);
581
582 VPRINTK("ENTER, ap %p\n", ap);
583
584 writel(0x00000001, ap->host->mmio_base + (seq * 4));
585 readl(ap->host->mmio_base + (seq * 4)); /* flush */
586
587 pp->pkt[2] = seq;
588 wmb(); /* flush PRD, pkt writes */
589 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
590 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
591}
592
593static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
594{
595 switch (qc->tf.protocol) {
596 case ATA_PROT_DMA:
597 case ATA_PROT_NODATA:
598 pdc_packet_start(qc);
599 return 0;
600
601 case ATA_PROT_ATAPI_DMA:
602 BUG();
603 break;
604
605 default:
606 break;
607 }
608
609 return ata_qc_issue_prot(qc);
610}
611
612static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
613{
614 WARN_ON (tf->protocol == ATA_PROT_DMA ||
615 tf->protocol == ATA_PROT_NODATA);
616 ata_tf_load(ap, tf);
617}
618
619
620static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
621{
622 WARN_ON (tf->protocol == ATA_PROT_DMA ||
623 tf->protocol == ATA_PROT_NODATA);
624 ata_exec_command(ap, tf);
625}
626
627
628static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
629{
630 port->cmd_addr = base;
631 port->data_addr = base;
632 port->feature_addr =
633 port->error_addr = base + 0x4;
634 port->nsect_addr = base + 0x8;
635 port->lbal_addr = base + 0xc;
636 port->lbam_addr = base + 0x10;
637 port->lbah_addr = base + 0x14;
638 port->device_addr = base + 0x18;
639 port->command_addr =
640 port->status_addr = base + 0x1c;
641 port->altstatus_addr =
642 port->ctl_addr = base + 0x38;
643}
644
645
646static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
647{
648 void __iomem *mmio = pe->mmio_base;
649 struct pdc_host_priv *hp = pe->private_data;
650 int hotplug_offset = hp->hotplug_offset;
651 u32 tmp;
652
653 /*
654 * Except for the hotplug stuff, this is voodoo from the
655 * Promise driver. Label this entire section
656 * "TODO: figure out why we do this"
657 */
658
659 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */
660 tmp = readl(mmio + PDC_FLASH_CTL);
661 tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
662 writel(tmp, mmio + PDC_FLASH_CTL);
663
664 /* clear plug/unplug flags for all ports */
665 tmp = readl(mmio + hotplug_offset);
666 writel(tmp | 0xff, mmio + hotplug_offset);
667
668 /* mask plug/unplug ints */
669 tmp = readl(mmio + hotplug_offset);
670 writel(tmp | 0xff0000, mmio + hotplug_offset);
671
672 	/* reduce TBG clock to 133 MHz. */
673 	tmp = readl(mmio + PDC_TBG_MODE);
674 	tmp &= ~0x30000; /* clear bits 17:16 */
675 	tmp |= 0x10000; /* set bits 17:16 = 01b */
676 writel(tmp, mmio + PDC_TBG_MODE);
677
678 readl(mmio + PDC_TBG_MODE); /* flush */
679 msleep(10);
680
681 /* adjust slew rate control register. */
682 tmp = readl(mmio + PDC_SLEW_CTL);
683 	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
684 	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
685 writel(tmp, mmio + PDC_SLEW_CTL);
686}
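/* The hotplug register is treated here as: low byte = per-port
 * plug/unplug status flags (written back as 1s to clear), bits 16-23 =
 * the corresponding interrupt mask bits (set to mask). That reading is
 * inferred from the read-modify-write sequences above, not from
 * documentation.
 */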
687
688static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689{
690 static int printed_version;
691 struct ata_probe_ent *probe_ent = NULL;
692 struct pdc_host_priv *hp;
693 unsigned long base;
694 void __iomem *mmio_base;
695 unsigned int board_idx = (unsigned int) ent->driver_data;
696 int pci_dev_busy = 0;
697 int rc;
698
699 if (!printed_version++)
700 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
701
702 rc = pci_enable_device(pdev);
703 if (rc)
704 return rc;
705
706 rc = pci_request_regions(pdev, DRV_NAME);
707 if (rc) {
708 pci_dev_busy = 1;
709 goto err_out;
710 }
711
712 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
713 if (rc)
714 goto err_out_regions;
715 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
716 if (rc)
717 goto err_out_regions;
718
719 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
720 if (probe_ent == NULL) {
721 rc = -ENOMEM;
722 goto err_out_regions;
723 }
724
725 probe_ent->dev = pci_dev_to_dev(pdev);
726 INIT_LIST_HEAD(&probe_ent->node);
727
728 mmio_base = pci_iomap(pdev, 3, 0);
729 if (mmio_base == NULL) {
730 rc = -ENOMEM;
731 goto err_out_free_ent;
732 }
733 base = (unsigned long) mmio_base;
734
735 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
736 if (hp == NULL) {
737 rc = -ENOMEM;
738 goto err_out_free_ent;
739 }
740
741 /* Set default hotplug offset */
742 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
743 probe_ent->private_data = hp;
744
745 probe_ent->sht = pdc_port_info[board_idx].sht;
746 probe_ent->port_flags = pdc_port_info[board_idx].flags;
747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
750 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
751
752 probe_ent->irq = pdev->irq;
753 probe_ent->irq_flags = IRQF_SHARED;
754 probe_ent->mmio_base = mmio_base;
755
756 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
757 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
758
759 probe_ent->port[0].scr_addr = base + 0x400;
760 probe_ent->port[1].scr_addr = base + 0x500;
761
762 /* notice 4-port boards */
763 switch (board_idx) {
764 case board_40518:
765 /* Override hotplug offset for SATAII150 */
766 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
767 /* Fall through */
768 case board_20319:
769 probe_ent->n_ports = 4;
770
771 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
772 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
773
774 probe_ent->port[2].scr_addr = base + 0x600;
775 probe_ent->port[3].scr_addr = base + 0x700;
776 break;
777 case board_2057x:
778 /* Override hotplug offset for SATAII150 */
779 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
780 /* Fall through */
781 case board_2037x:
782 probe_ent->n_ports = 2;
783 break;
784 case board_20771:
785 probe_ent->n_ports = 2;
786 break;
787 case board_20619:
788 probe_ent->n_ports = 4;
789
790 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
791 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
792
793 probe_ent->port[2].scr_addr = base + 0x600;
794 probe_ent->port[3].scr_addr = base + 0x700;
795 break;
796 default:
797 BUG();
798 break;
799 }
800
801 pci_set_master(pdev);
802
803 /* initialize adapter */
804 pdc_host_init(board_idx, probe_ent);
805
806 /* FIXME: Need any other frees than hp? */
807 if (!ata_device_add(probe_ent))
808 kfree(hp);
809
810 kfree(probe_ent);
811
812 return 0;
813
814err_out_free_ent:
815 kfree(probe_ent);
816err_out_regions:
817 pci_release_regions(pdev);
818err_out:
819 if (!pci_dev_busy)
820 pci_disable_device(pdev);
821 return rc;
822}
823
824
825static int __init pdc_ata_init(void)
826{
827 return pci_register_driver(&pdc_ata_pci_driver);
828}
829
830
831static void __exit pdc_ata_exit(void)
832{
833 pci_unregister_driver(&pdc_ata_pci_driver);
834}
835
836
837MODULE_AUTHOR("Jeff Garzik");
838MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
839MODULE_LICENSE("GPL");
840MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
841MODULE_VERSION(DRV_VERSION);
842
843module_init(pdc_ata_init);
844module_exit(pdc_ata_exit);
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
new file mode 100644
index 000000000000..6ee5e190262d
--- /dev/null
+++ b/drivers/ata/sata_promise.h
@@ -0,0 +1,157 @@
1/*
2 * sata_promise.h - Promise SATA common definitions and inline funcs
3 *
4 * Copyright 2003-2004 Red Hat, Inc.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 */
26
27#ifndef __SATA_PROMISE_H__
28#define __SATA_PROMISE_H__
29
30#include <linux/ata.h>
31
32enum pdc_packet_bits {
33 PDC_PKT_READ = (1 << 2),
34 PDC_PKT_NODATA = (1 << 3),
35
36 PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
37 PDC_PKT_CLEAR_BSY = (1 << 4),
38 PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
39 PDC_LAST_REG = (1 << 3),
40
41 PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
42};
43
44static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
45 dma_addr_t sg_table,
46 unsigned int devno, u8 *buf)
47{
48 u8 dev_reg;
49 u32 *buf32 = (u32 *) buf;
50
51 /* set control bits (byte 0), zero delay seq id (byte 3),
52 * and seq id (byte 2)
53 */
54 switch (tf->protocol) {
55 case ATA_PROT_DMA:
56 if (!(tf->flags & ATA_TFLAG_WRITE))
57 buf32[0] = cpu_to_le32(PDC_PKT_READ);
58 else
59 buf32[0] = 0;
60 break;
61
62 case ATA_PROT_NODATA:
63 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
64 break;
65
66 default:
67 BUG();
68 break;
69 }
70
71 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
72 buf32[2] = 0; /* no next-packet */
73
74 if (devno == 0)
75 dev_reg = ATA_DEVICE_OBS;
76 else
77 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
78
79 /* select device */
80 buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
81 buf[13] = dev_reg;
82
83 /* device control register */
84 buf[14] = (1 << 5) | PDC_REG_DEVCTL;
85 buf[15] = tf->ctl;
86
87 return 16; /* offset of next byte */
88}
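/* Resulting header layout (little-endian), as built above:
 *   bytes  0-3   control bits / seq ids
 *   bytes  4-7   S/G table DMA address
 *   bytes  8-11  next-packet pointer (always 0 here)
 *   bytes 12-13  device-select register write
 *   bytes 14-15  device-control register write
 * Register/value pairs for the taskfile follow from offset 16.
 */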
89
90static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
91 unsigned int i)
92{
93 if (tf->flags & ATA_TFLAG_DEVICE) {
94 buf[i++] = (1 << 5) | ATA_REG_DEVICE;
95 buf[i++] = tf->device;
96 }
97
98 /* and finally the command itself; also includes end-of-pkt marker */
99 buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
100 buf[i++] = tf->command;
101
102 return i;
103}
104
105static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
106{
107 /* the "(1 << 5)" should be read "(count << 5)" */
108
109 /* ATA command block registers */
110 buf[i++] = (1 << 5) | ATA_REG_FEATURE;
111 buf[i++] = tf->feature;
112
113 buf[i++] = (1 << 5) | ATA_REG_NSECT;
114 buf[i++] = tf->nsect;
115
116 buf[i++] = (1 << 5) | ATA_REG_LBAL;
117 buf[i++] = tf->lbal;
118
119 buf[i++] = (1 << 5) | ATA_REG_LBAM;
120 buf[i++] = tf->lbam;
121
122 buf[i++] = (1 << 5) | ATA_REG_LBAH;
123 buf[i++] = tf->lbah;
124
125 return i;
126}
127
128static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
129{
130 /* the "(2 << 5)" should be read "(count << 5)" */
131
132 /* ATA command block registers */
133 buf[i++] = (2 << 5) | ATA_REG_FEATURE;
134 buf[i++] = tf->hob_feature;
135 buf[i++] = tf->feature;
136
137 buf[i++] = (2 << 5) | ATA_REG_NSECT;
138 buf[i++] = tf->hob_nsect;
139 buf[i++] = tf->nsect;
140
141 buf[i++] = (2 << 5) | ATA_REG_LBAL;
142 buf[i++] = tf->hob_lbal;
143 buf[i++] = tf->lbal;
144
145 buf[i++] = (2 << 5) | ATA_REG_LBAM;
146 buf[i++] = tf->hob_lbam;
147 buf[i++] = tf->lbam;
148
149 buf[i++] = (2 << 5) | ATA_REG_LBAH;
150 buf[i++] = tf->hob_lbah;
151 buf[i++] = tf->lbah;
152
153 return i;
154}
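/* Encoding sketch: each entry is "(count << 5) | register", followed by
 * count value bytes; e.g. for an LBA48 command the sector count is
 * emitted as { (2 << 5) | ATA_REG_NSECT, hob_nsect, nsect }, while the
 * LBA28 variant above uses a count of 1.
 */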
155
156
157#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
new file mode 100644
index 000000000000..fa29dfe2a7b5
--- /dev/null
+++ b/drivers/ata/sata_qstor.c
@@ -0,0 +1,730 @@
1/*
2 * sata_qstor.c - Pacific Digital Corporation QStor SATA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06"
45
46enum {
47 QS_PORTS = 4,
48 QS_MAX_PRD = LIBATA_MAX_PRD,
49 QS_CPB_ORDER = 6,
50 QS_CPB_BYTES = (1 << QS_CPB_ORDER),
51 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
53
54 /* global register offsets */
55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
56 QS_HID_HPHY = 0x0004, /* host physical interface info */
57 QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
58 QS_HST_SFF = 0x0100, /* host status fifo offset */
59 QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
60
61 /* global control bits */
62 QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
63 QS_CNFG3_GSRST = 0x01, /* global chip reset */
64 	QS_SERD3_PHY_ENA	= 0xf0, /* PHY detection ENAble */
65
66 /* per-channel register offsets */
67 QS_CCF_CPBA = 0x0710, /* chan CPB base address */
68 QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
69 QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
70 QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
71 QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
72 QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
73 QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
74 QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
75 QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
76
77 /* channel control bits */
78 QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
79 QS_CTR0_CLER = (1 << 2), /* clear channel errors */
80 QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
81 QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
82 QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
83
84 /* pkt sub-field headers */
85 QS_HCB_HDR = 0x01, /* Host Control Block header */
86 QS_DCB_HDR = 0x02, /* Device Control Block header */
87
88 /* pkt HCB flag bits */
89 QS_HF_DIRO = (1 << 0), /* data DIRection Out */
90 QS_HF_DAT = (1 << 3), /* DATa pkt */
91 QS_HF_IEN = (1 << 4), /* Interrupt ENable */
92 QS_HF_VLD = (1 << 5), /* VaLiD pkt */
93
94 /* pkt DCB flag bits */
95 QS_DF_PORD = (1 << 2), /* Pio OR Dma */
96 QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
97
98 /* PCI device IDs */
99 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
100};
101
102enum {
103 QS_DMA_BOUNDARY = ~0UL
104};
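/* Each per-port packet buffer is a 64-byte CPB (QS_CPB_BYTES, from
 * QS_CPB_ORDER = 6) followed immediately by the PRD table at 16 bytes
 * per entry (QS_PRD_BYTES = QS_MAX_PRD * 16); qs_fill_sg() below fills
 * the PRD area starting at pkt + QS_CPB_BYTES.
 */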
105
106typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
107
108struct qs_port_priv {
109 u8 *pkt;
110 dma_addr_t pkt_dma;
111 qs_state_t state;
112};
113
114static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host *host);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap);
127static void qs_irq_clear(struct ata_port *ap);
128static void qs_eng_timeout(struct ata_port *ap);
129
130static struct scsi_host_template qs_ata_sht = {
131 .module = THIS_MODULE,
132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd,
135 .can_queue = ATA_DEF_QUEUE,
136 .this_id = ATA_SHT_THIS_ID,
137 .sg_tablesize = QS_MAX_PRD,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED,
140 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
141 .use_clustering = ENABLE_CLUSTERING,
142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
146 .bios_param = ata_std_bios_param,
147};
148
149static const struct ata_port_operations qs_ata_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .check_atapi_dma = qs_check_atapi_dma,
155 .exec_command = ata_exec_command,
156 .dev_select = ata_std_dev_select,
157 .phy_reset = qs_phy_reset,
158 .qc_prep = qs_qc_prep,
159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = qs_eng_timeout,
162 .irq_handler = qs_intr,
163 .irq_clear = qs_irq_clear,
164 .scr_read = qs_scr_read,
165 .scr_write = qs_scr_write,
166 .port_start = qs_port_start,
167 .port_stop = qs_port_stop,
168 .host_stop = qs_host_stop,
169 .bmdma_stop = qs_bmdma_stop,
170 .bmdma_status = qs_bmdma_status,
171};
172
173static const struct ata_port_info qs_port_info[] = {
174 /* board_2068_idx */
175 {
176 .sht = &qs_ata_sht,
177 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
181 .pio_mask = 0x10, /* pio4 */
182 .udma_mask = 0x7f, /* udma0-6 */
183 .port_ops = &qs_ata_ops,
184 },
185};
186
187static const struct pci_device_id qs_ata_pci_tbl[] = {
188 { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
189 board_2068_idx },
190
191 { } /* terminate list */
192};
193
194static struct pci_driver qs_ata_pci_driver = {
195 .name = DRV_NAME,
196 .id_table = qs_ata_pci_tbl,
197 .probe = qs_ata_init_one,
198 .remove = ata_pci_remove_one,
199};
200
201static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
202{
203 return 1; /* ATAPI DMA not supported */
204}
205
206static void qs_bmdma_stop(struct ata_queued_cmd *qc)
207{
208 /* nothing */
209}
210
211static u8 qs_bmdma_status(struct ata_port *ap)
212{
213 return 0;
214}
215
216static void qs_irq_clear(struct ata_port *ap)
217{
218 /* nothing */
219}
220
221static inline void qs_enter_reg_mode(struct ata_port *ap)
222{
223 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
224
225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
226 readb(chan + QS_CCT_CTR0); /* flush */
227}
228
229static inline void qs_reset_channel_logic(struct ata_port *ap)
230{
231 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
232
233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
234 readb(chan + QS_CCT_CTR0); /* flush */
235 qs_enter_reg_mode(ap);
236}
237
238static void qs_phy_reset(struct ata_port *ap)
239{
240 struct qs_port_priv *pp = ap->private_data;
241
242 pp->state = qs_state_idle;
243 qs_reset_channel_logic(ap);
244 sata_phy_reset(ap);
245}
246
247static void qs_eng_timeout(struct ata_port *ap)
248{
249 struct qs_port_priv *pp = ap->private_data;
250
251 if (pp->state != qs_state_idle) /* healthy paranoia */
252 pp->state = qs_state_mmio;
253 qs_reset_channel_logic(ap);
254 ata_eng_timeout(ap);
255}
256
257static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
258{
259 if (sc_reg > SCR_CONTROL)
260 return ~0U;
261 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
262}
263
264static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
265{
266 if (sc_reg > SCR_CONTROL)
267 return;
268 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
269}
270
271static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
272{
273 struct scatterlist *sg;
274 struct ata_port *ap = qc->ap;
275 struct qs_port_priv *pp = ap->private_data;
276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278
279 WARN_ON(qc->__sg == NULL);
280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281
282 nelem = 0;
283 ata_for_each_sg(sg, qc) {
284 u64 addr;
285 u32 len;
286
287 addr = sg_dma_address(sg);
288 *(__le64 *)prd = cpu_to_le64(addr);
289 prd += sizeof(u64);
290
291 len = sg_dma_len(sg);
292 *(__le32 *)prd = cpu_to_le32(len);
293 prd += sizeof(u64);
294
295 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
296 (unsigned long long)addr, len);
297 nelem++;
298 }
299
300 return nelem;
301}
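/*
 * Layout sketch (editor's illustration, derived from qs_fill_sg()
 * above): each hardware PRD entry occupies 16 bytes --
 *
 *	offset  0: __le64 DMA address
 *	offset  8: __le32 byte count
 *	offset 12: 4 pad bytes (the second "prd += sizeof(u64)" skips them)
 */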
302
303static void qs_qc_prep(struct ata_queued_cmd *qc)
304{
305 struct qs_port_priv *pp = qc->ap->private_data;
306 u8 dflags = QS_DF_PORD, *buf = pp->pkt;
307 u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
308 u64 addr;
309 unsigned int nelem;
310
311 VPRINTK("ENTER\n");
312
313 qs_enter_reg_mode(qc->ap);
314 if (qc->tf.protocol != ATA_PROT_DMA) {
315 ata_qc_prep(qc);
316 return;
317 }
318
319 nelem = qs_fill_sg(qc);
320
321 if ((qc->tf.flags & ATA_TFLAG_WRITE))
322 hflags |= QS_HF_DIRO;
323 if ((qc->tf.flags & ATA_TFLAG_LBA48))
324 dflags |= QS_DF_ELBA;
325
326 /* host control block (HCB) */
327 buf[ 0] = QS_HCB_HDR;
328 buf[ 1] = hflags;
329 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
330 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
331 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
332 *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
333
334 /* device control block (DCB) */
335 buf[24] = QS_DCB_HDR;
336 buf[28] = dflags;
337
338 /* frame information structure (FIS) */
339 ata_tf_to_fis(&qc->tf, &buf[32], 0);
340}
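/*
 * Sketch of the packet assembled above (editor's illustration; the
 * offsets come straight from qs_qc_prep(), the field descriptions are
 * assumptions):
 *
 *	buf[ 0]       HCB header (QS_HCB_HDR)
 *	buf[ 1]       host flags (QS_HF_*)
 *	buf[ 4.. 7]   __le32 transfer length in bytes (nsect * ATA_SECT_SIZE)
 *	buf[ 8..11]   __le32 number of PRD entries
 *	buf[16..23]   __le64 DMA address of the PRD list (pkt_dma + QS_CPB_BYTES)
 *	buf[24]       DCB header (QS_DCB_HDR)
 *	buf[28]       device flags (QS_DF_*)
 *	buf[32..]     command FIS built by ata_tf_to_fis()
 */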
341
342static inline void qs_packet_start(struct ata_queued_cmd *qc)
343{
344 struct ata_port *ap = qc->ap;
345 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
346
347 VPRINTK("ENTER, ap %p\n", ap);
348
349 writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
350 wmb(); /* flush PRDs and pkt to memory */
351 writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
352 readl(chan + QS_CCT_CFF); /* flush */
353}
354
355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{
357 struct qs_port_priv *pp = qc->ap->private_data;
358
359 switch (qc->tf.protocol) {
360 case ATA_PROT_DMA:
361
362 pp->state = qs_state_pkt;
363 qs_packet_start(qc);
364 return 0;
365
366 case ATA_PROT_ATAPI_DMA:
367 BUG();
368 break;
369
370 default:
371 break;
372 }
373
374 pp->state = qs_state_mmio;
375 return ata_qc_issue_prot(qc);
376}
377
378static inline unsigned int qs_intr_pkt(struct ata_host *host)
379{
380 unsigned int handled = 0;
381 u8 sFFE;
382 u8 __iomem *mmio_base = host->mmio_base;
383
384 do {
385 u32 sff0 = readl(mmio_base + QS_HST_SFF);
386 u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
387 u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
388 sFFE = sff1 >> 31; /* empty flag */
389
390 if (sEVLD) {
391 u8 sDST = sff0 >> 16; /* dev status */
392 u8 sHST = sff1 & 0x3f; /* host status */
393 unsigned int port_no = (sff1 >> 8) & 0x03;
394 struct ata_port *ap = host->ports[port_no];
395
396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
397 sff1, sff0, port_no, sHST, sDST);
398 handled = 1;
399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
400 struct ata_queued_cmd *qc;
401 struct qs_port_priv *pp = ap->private_data;
402 if (!pp || pp->state != qs_state_pkt)
403 continue;
404 qc = ata_qc_from_tag(ap, ap->active_tag);
405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
406 switch (sHST) {
407 case 0: /* successful CPB */
408 case 3: /* device error */
409 pp->state = qs_state_idle;
410 qs_enter_reg_mode(qc->ap);
411 qc->err_mask |= ac_err_mask(sDST);
412 ata_qc_complete(qc);
413 break;
414 default:
415 break;
416 }
417 }
418 }
419 }
420 } while (!sFFE);
421 return handled;
422}
423
424static inline unsigned int qs_intr_mmio(struct ata_host *host)
425{
426 unsigned int handled = 0, port_no;
427
428 for (port_no = 0; port_no < host->n_ports; ++port_no) {
429 struct ata_port *ap;
430 ap = host->ports[port_no];
431 if (ap &&
432 !(ap->flags & ATA_FLAG_DISABLED)) {
433 struct ata_queued_cmd *qc;
434 struct qs_port_priv *pp = ap->private_data;
435 if (!pp || pp->state != qs_state_mmio)
436 continue;
437 qc = ata_qc_from_tag(ap, ap->active_tag);
438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
439
440 /* check main status, clearing INTRQ */
441 u8 status = ata_check_status(ap);
442 if ((status & ATA_BUSY))
443 continue;
444 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
445 ap->id, qc->tf.protocol, status);
446
447 /* complete taskfile transaction */
448 pp->state = qs_state_idle;
449 qc->err_mask |= ac_err_mask(status);
450 ata_qc_complete(qc);
451 handled = 1;
452 }
453 }
454 }
455 return handled;
456}
457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{
460 struct ata_host *host = dev_instance;
461 unsigned int handled = 0;
462
463 VPRINTK("ENTER\n");
464
465 spin_lock(&host->lock);
466 handled = qs_intr_pkt(host) | qs_intr_mmio(host);
467 spin_unlock(&host->lock);
468
469 VPRINTK("EXIT\n");
470
471 return IRQ_RETVAL(handled);
472}
473
474static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
475{
476 port->cmd_addr =
477 port->data_addr = base + 0x400;
478 port->error_addr =
479 port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
480 port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
481 port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
482 port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
483 port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
484 port->device_addr = base + 0x430;
485 port->status_addr =
486 port->command_addr = base + 0x438;
487 port->altstatus_addr =
488 port->ctl_addr = base + 0x440;
489 port->scr_addr = base + 0xc00;
490}
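/*
 * The taskfile block above is strictly regular: register N sits at
 * base + 0x400 + (N * 8), with each HOB shadow one byte above its
 * register.  A minimal sketch of exploiting that regularity (the
 * helper name is the editor's and is not used by the driver):
 */
static inline unsigned long qs_tf_reg_addr(unsigned long base, unsigned int n)
{
	return base + 0x400 + (n * 8);	/* n = 0 (data) ... 8 (ctl) */
}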
491
492static int qs_port_start(struct ata_port *ap)
493{
494 struct device *dev = ap->host->dev;
495 struct qs_port_priv *pp;
496 void __iomem *mmio_base = ap->host->mmio_base;
497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
498 u64 addr;
499 int rc;
500
501 rc = ata_port_start(ap);
502 if (rc)
503 return rc;
504 qs_enter_reg_mode(ap);
505 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
506 if (!pp) {
507 rc = -ENOMEM;
508 goto err_out;
509 }
510 pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
511 GFP_KERNEL);
512 if (!pp->pkt) {
513 rc = -ENOMEM;
514 goto err_out_kfree;
515 }
516 memset(pp->pkt, 0, QS_PKT_BYTES);
517 ap->private_data = pp;
518
519 addr = (u64)pp->pkt_dma;
520 writel((u32) addr, chan + QS_CCF_CPBA);
521 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
522 return 0;
523
524err_out_kfree:
525 kfree(pp);
526err_out:
527 ata_port_stop(ap);
528 return rc;
529}
530
531static void qs_port_stop(struct ata_port *ap)
532{
533 struct device *dev = ap->host->dev;
534 struct qs_port_priv *pp = ap->private_data;
535
536 if (pp != NULL) {
537 ap->private_data = NULL;
538 if (pp->pkt != NULL)
539 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
540 pp->pkt_dma);
541 kfree(pp);
542 }
543 ata_port_stop(ap);
544}
545
546static void qs_host_stop(struct ata_host *host)
547{
548 void __iomem *mmio_base = host->mmio_base;
549 struct pci_dev *pdev = to_pci_dev(host->dev);
550
551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
553
554 pci_iounmap(pdev, mmio_base);
555}
556
557static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
558{
559 void __iomem *mmio_base = pe->mmio_base;
560 unsigned int port_no;
561
562 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
563 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
564
565 /* reset each channel in turn */
566 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
567 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
568 writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
569 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
570 readb(chan + QS_CCT_CTR0); /* flush */
571 }
572 writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
573
574 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
575 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
576 /* set FIFO depths to same settings as Windows driver */
577 writew(32, chan + QS_CFC_HUFT);
578 writew(32, chan + QS_CFC_HDFT);
579 writew(10, chan + QS_CFC_DUFT);
580 writew( 8, chan + QS_CFC_DDFT);
581 /* set CPB size in bytes, as a power of two */
582 writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
583 }
584 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
585}
586
587/*
588 * The QStor understands 64-bit buses, and uses 64-bit fields
589 * for DMA pointers regardless of bus width. We just have to
590 * make sure our DMA masks are set appropriately for whatever
591 * bridge lies between us and the QStor, and then the DMA mapping
592 * code will ensure we only ever "see" appropriate buffer addresses.
593 * If we're 32-bit limited somewhere, then our 64-bit fields will
594 * just end up with zeros in the upper 32-bits, without any special
595 * logic required outside of this routine (below).
596 */
597static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
598{
599 u32 bus_info = readl(mmio_base + QS_HID_HPHY);
600 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
601
602 if (have_64bit_bus &&
603 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
604 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
605 if (rc) {
606 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
607 if (rc) {
608 dev_printk(KERN_ERR, &pdev->dev,
609 "64-bit DMA enable failed\n");
610 return rc;
611 }
612 }
613 } else {
614 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
615 if (rc) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "32-bit DMA enable failed\n");
618 return rc;
619 }
620 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
621 if (rc) {
622 dev_printk(KERN_ERR, &pdev->dev,
623 "32-bit consistent DMA enable failed\n");
624 return rc;
625 }
626 }
627 return 0;
628}
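/*
 * Usage note (editor's illustration): qs_port_start() below relies on
 * the mask negotiated here when it programs the 64-bit CPB address as
 * two 32-bit writes -- under a 32-bit mask the high dword is simply
 * zero, with no special-casing needed:
 *
 *	writel((u32) addr,        chan + QS_CCF_CPBA);
 *	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
 */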
629
630static int qs_ata_init_one(struct pci_dev *pdev,
631 const struct pci_device_id *ent)
632{
633 static int printed_version;
634 struct ata_probe_ent *probe_ent = NULL;
635 void __iomem *mmio_base;
636 unsigned int board_idx = (unsigned int) ent->driver_data;
637 int rc, port_no;
638
639 if (!printed_version++)
640 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
641
642 rc = pci_enable_device(pdev);
643 if (rc)
644 return rc;
645
646 rc = pci_request_regions(pdev, DRV_NAME);
647 if (rc)
648 goto err_out;
649
650 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
651 rc = -ENODEV;
652 goto err_out_regions;
653 }
654
655 mmio_base = pci_iomap(pdev, 4, 0);
656 if (mmio_base == NULL) {
657 rc = -ENOMEM;
658 goto err_out_regions;
659 }
660
661 rc = qs_set_dma_masks(pdev, mmio_base);
662 if (rc)
663 goto err_out_iounmap;
664
665 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
666 if (probe_ent == NULL) {
667 rc = -ENOMEM;
668 goto err_out_iounmap;
669 }
670
671 memset(probe_ent, 0, sizeof(*probe_ent));
672 probe_ent->dev = pci_dev_to_dev(pdev);
673 INIT_LIST_HEAD(&probe_ent->node);
674
675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->port_flags = qs_port_info[board_idx].flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
680 probe_ent->port_ops = qs_port_info[board_idx].port_ops;
681
682 probe_ent->irq = pdev->irq;
683 probe_ent->irq_flags = IRQF_SHARED;
684 probe_ent->mmio_base = mmio_base;
685 probe_ent->n_ports = QS_PORTS;
686
687 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
688 unsigned long chan = (unsigned long)mmio_base +
689 (port_no * 0x4000);
690 qs_ata_setup_port(&probe_ent->port[port_no], chan);
691 }
692
693 pci_set_master(pdev);
694
695 /* initialize adapter */
696 qs_host_init(board_idx, probe_ent);
697
698 rc = ata_device_add(probe_ent);
699 kfree(probe_ent);
700 if (rc != QS_PORTS)
701 goto err_out_iounmap;
702 return 0;
703
704err_out_iounmap:
705 pci_iounmap(pdev, mmio_base);
706err_out_regions:
707 pci_release_regions(pdev);
708err_out:
709 pci_disable_device(pdev);
710 return rc;
711}
712
713static int __init qs_ata_init(void)
714{
715 return pci_register_driver(&qs_ata_pci_driver);
716}
717
718static void __exit qs_ata_exit(void)
719{
720 pci_unregister_driver(&qs_ata_pci_driver);
721}
722
723MODULE_AUTHOR("Mark Lord");
724MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
725MODULE_LICENSE("GPL");
726MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
727MODULE_VERSION(DRV_VERSION);
728
729module_init(qs_ata_init);
730module_exit(qs_ata_exit);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
new file mode 100644
index 000000000000..c63dbabc0cd9
--- /dev/null
+++ b/drivers/ata/sata_sil.c
@@ -0,0 +1,728 @@
1/*
2 * sata_sil.c - Silicon Image SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.0"
50
51enum {
52 /*
53 * host flags
54 */
55 SIL_FLAG_NO_SATA_IRQ = (1 << 28),
56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
57 SIL_FLAG_MOD15WRITE = (1 << 30),
58
59 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
61
62 /*
63 * Controller IDs
64 */
65 sil_3112 = 0,
66 sil_3112_no_sata_irq = 1,
67 sil_3512 = 2,
68 sil_3114 = 3,
69
70 /*
71 * Register offsets
72 */
73 SIL_SYSCFG = 0x48,
74
75 /*
76 * Register bits
77 */
78 /* SYSCFG */
79 SIL_MASK_IDE0_INT = (1 << 22),
80 SIL_MASK_IDE1_INT = (1 << 23),
81 SIL_MASK_IDE2_INT = (1 << 24),
82 SIL_MASK_IDE3_INT = (1 << 25),
83 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
84 SIL_MASK_4PORT = SIL_MASK_2PORT |
85 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
86
87 /* BMDMA/BMDMA2 */
88 SIL_INTR_STEERING = (1 << 1),
89
90 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
91 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
92 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
93 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
94 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
95 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
96 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
97 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
98 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
99 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
100
101 /* SIEN */
102 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
103
104 /*
105 * Others
106 */
107 SIL_QUIRK_MOD15WRITE = (1 << 0),
108 SIL_QUIRK_UDMA5MAX = (1 << 1),
109};
110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112#ifdef CONFIG_PM
113static int sil_pci_device_resume(struct pci_dev *pdev);
114#endif
115static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
116static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
117static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
118static void sil_post_set_mode (struct ata_port *ap);
119static irqreturn_t sil_interrupt(int irq, void *dev_instance,
120 struct pt_regs *regs);
121static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap);
123
124
125static const struct pci_device_id sil_pci_tbl[] = {
126 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
127 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
128 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
129 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
130 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
131 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
132 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
133 { } /* terminate list */
134};
135
136
137/* TODO firmware versions should be added - eric */
138static const struct sil_drivelist {
139 const char * product;
140 unsigned int quirk;
141} sil_blacklist [] = {
142 { "ST320012AS", SIL_QUIRK_MOD15WRITE },
143 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
144 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
145 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
146 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
147 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
148 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
149 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
150 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
151 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
152 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
153 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
154 { }
155};
156
157static struct pci_driver sil_pci_driver = {
158 .name = DRV_NAME,
159 .id_table = sil_pci_tbl,
160 .probe = sil_init_one,
161 .remove = ata_pci_remove_one,
162#ifdef CONFIG_PM
163 .suspend = ata_pci_device_suspend,
164 .resume = sil_pci_device_resume,
165#endif
166};
167
168static struct scsi_host_template sil_sht = {
169 .module = THIS_MODULE,
170 .name = DRV_NAME,
171 .ioctl = ata_scsi_ioctl,
172 .queuecommand = ata_scsi_queuecmd,
173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD,
176 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
177 .emulated = ATA_SHT_EMULATED,
178 .use_clustering = ATA_SHT_USE_CLUSTERING,
179 .proc_name = DRV_NAME,
180 .dma_boundary = ATA_DMA_BOUNDARY,
181 .slave_configure = ata_scsi_slave_config,
182 .slave_destroy = ata_scsi_slave_destroy,
183 .bios_param = ata_std_bios_param,
184 .suspend = ata_scsi_device_suspend,
185 .resume = ata_scsi_device_resume,
186};
187
188static const struct ata_port_operations sil_ops = {
189 .port_disable = ata_port_disable,
190 .dev_config = sil_dev_config,
191 .tf_load = ata_tf_load,
192 .tf_read = ata_tf_read,
193 .check_status = ata_check_status,
194 .exec_command = ata_exec_command,
195 .dev_select = ata_std_dev_select,
196 .post_set_mode = sil_post_set_mode,
197 .bmdma_setup = ata_bmdma_setup,
198 .bmdma_start = ata_bmdma_start,
199 .bmdma_stop = ata_bmdma_stop,
200 .bmdma_status = ata_bmdma_status,
201 .qc_prep = ata_qc_prep,
202 .qc_issue = ata_qc_issue_prot,
203 .data_xfer = ata_mmio_data_xfer,
204 .freeze = sil_freeze,
205 .thaw = sil_thaw,
206 .error_handler = ata_bmdma_error_handler,
207 .post_internal_cmd = ata_bmdma_post_internal_cmd,
208 .irq_handler = sil_interrupt,
209 .irq_clear = ata_bmdma_irq_clear,
210 .scr_read = sil_scr_read,
211 .scr_write = sil_scr_write,
212 .port_start = ata_port_start,
213 .port_stop = ata_port_stop,
214 .host_stop = ata_pci_host_stop,
215};
216
217static const struct ata_port_info sil_port_info[] = {
218 /* sil_3112 */
219 {
220 .sht = &sil_sht,
221 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
222 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x3f, /* udma0-5 */
225 .port_ops = &sil_ops,
226 },
227 /* sil_3112_no_sata_irq */
228 {
229 .sht = &sil_sht,
230 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
231 SIL_FLAG_NO_SATA_IRQ,
232 .pio_mask = 0x1f, /* pio0-4 */
233 .mwdma_mask = 0x07, /* mwdma0-2 */
234 .udma_mask = 0x3f, /* udma0-5 */
235 .port_ops = &sil_ops,
236 },
237 /* sil_3512 */
238 {
239 .sht = &sil_sht,
240 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
241 .pio_mask = 0x1f, /* pio0-4 */
242 .mwdma_mask = 0x07, /* mwdma0-2 */
243 .udma_mask = 0x3f, /* udma0-5 */
244 .port_ops = &sil_ops,
245 },
246 /* sil_3114 */
247 {
248 .sht = &sil_sht,
249 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
250 .pio_mask = 0x1f, /* pio0-4 */
251 .mwdma_mask = 0x07, /* mwdma0-2 */
252 .udma_mask = 0x3f, /* udma0-5 */
253 .port_ops = &sil_ops,
254 },
255};
256
257/* per-port register offsets */
258/* TODO: we can probably calculate rather than use a table; see the sketch below */
259static const struct {
260 unsigned long tf; /* ATA taskfile register block */
261 unsigned long ctl; /* ATA control/altstatus register block */
262 unsigned long bmdma; /* DMA register block */
263 unsigned long bmdma2; /* DMA register block #2 */
264 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
265 unsigned long scr; /* SATA control register block */
266 unsigned long sien; /* SATA Interrupt Enable register */
267 unsigned long xfer_mode;/* data transfer mode register */
268 unsigned long sfis_cfg; /* SATA FIS reception config register */
269} sil_port[] = {
270 /* port 0 ... */
271 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
272 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
273 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
274 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
275 /* ... port 3 */
276};
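/*
 * Sketch for the TODO above (editor's illustration, checked against
 * the table but not used by the driver): for port i, each offset is
 * base + (i & 1) * stride + (i >> 1) * 0x200, where base and stride
 * differ per register block; e.g. for the taskfile block:
 */
static inline unsigned long sil_tf_offset(unsigned int i)
{
	return 0x80 + (i & 1) * 0x40 + (i >> 1) * 0x200;	/* 0x80, 0xC0, 0x280, 0x2C0 */
}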
277
278MODULE_AUTHOR("Jeff Garzik");
279MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
280MODULE_LICENSE("GPL");
281MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
282MODULE_VERSION(DRV_VERSION);
283
284static int slow_down = 0;
285module_param(slow_down, int, 0444);
286MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
287
288
289static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
290{
291 u8 cache_line = 0;
292 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
293 return cache_line;
294}
295
296static void sil_post_set_mode (struct ata_port *ap)
297{
298 struct ata_host *host = ap->host;
299 struct ata_device *dev;
300 void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
301 u32 tmp, dev_mode[2];
302 unsigned int i;
303
304 for (i = 0; i < 2; i++) {
305 dev = &ap->device[i];
306 if (!ata_dev_enabled(dev))
307 dev_mode[i] = 0; /* PIO0/1/2 */
308 else if (dev->flags & ATA_DFLAG_PIO)
309 dev_mode[i] = 1; /* PIO3/4 */
310 else
311 dev_mode[i] = 3; /* UDMA */
312 /* value 2 indicates MDMA */
313 }
314
315 tmp = readl(addr);
316 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
317 tmp |= dev_mode[0];
318 tmp |= (dev_mode[1] << 4);
319 writel(tmp, addr);
320 readl(addr); /* flush */
321}
322
323static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
324{
325 unsigned long offset = ap->ioaddr.scr_addr;
326
327 switch (sc_reg) {
328 case SCR_STATUS:
329 return offset + 4;
330 case SCR_ERROR:
331 return offset + 8;
332 case SCR_CONTROL:
333 return offset;
334 default:
335 /* do nothing */
336 break;
337 }
338
339 return 0;
340}
341
342static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
343{
344 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
345 if (mmio)
346 return readl(mmio);
347 return 0xffffffffU;
348}
349
350static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
351{
352	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
353 if (mmio)
354 writel(val, mmio);
355}
356
357static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
358{
359 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
360 u8 status;
361
362 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
363 u32 serror;
364
365 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
366 * controllers continue to assert IRQ as long as
367 * SError bits are pending. Clear SError immediately.
368 */
369 serror = sil_scr_read(ap, SCR_ERROR);
370 sil_scr_write(ap, SCR_ERROR, serror);
371
372 /* Trigger hotplug and accumulate SError only if the
373 * port isn't already frozen. Otherwise, PHY events
374		 * during hardreset make controllers with broken SIEN
375 * repeat probing needlessly.
376 */
377 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
378 ata_ehi_hotplugged(&ap->eh_info);
379 ap->eh_info.serror |= serror;
380 }
381
382 goto freeze;
383 }
384
385 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
386 goto freeze;
387
388 /* Check whether we are expecting interrupt in this state */
389 switch (ap->hsm_task_state) {
390 case HSM_ST_FIRST:
391 /* Some pre-ATAPI-4 devices assert INTRQ
392 * at this state when ready to receive CDB.
393 */
394
395		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
396 * The flag was turned on only for atapi devices.
397 * No need to check is_atapi_taskfile(&qc->tf) again.
398 */
399 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
400 goto err_hsm;
401 break;
402 case HSM_ST_LAST:
403 if (qc->tf.protocol == ATA_PROT_DMA ||
404 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
405 /* clear DMA-Start bit */
406 ap->ops->bmdma_stop(qc);
407
408 if (bmdma2 & SIL_DMA_ERROR) {
409 qc->err_mask |= AC_ERR_HOST_BUS;
410 ap->hsm_task_state = HSM_ST_ERR;
411 }
412 }
413 break;
414 case HSM_ST:
415 break;
416 default:
417 goto err_hsm;
418 }
419
420 /* check main status, clearing INTRQ */
421 status = ata_chk_status(ap);
422 if (unlikely(status & ATA_BUSY))
423 goto err_hsm;
424
425 /* ack bmdma irq events */
426 ata_bmdma_irq_clear(ap);
427
428 /* kick HSM in the ass */
429 ata_hsm_move(ap, qc, status, 0);
430
431 return;
432
433 err_hsm:
434 qc->err_mask |= AC_ERR_HSM;
435 freeze:
436 ata_port_freeze(ap);
437}
438
439static irqreturn_t sil_interrupt(int irq, void *dev_instance,
440 struct pt_regs *regs)
441{
442 struct ata_host *host = dev_instance;
443 void __iomem *mmio_base = host->mmio_base;
444 int handled = 0;
445 int i;
446
447 spin_lock(&host->lock);
448
449 for (i = 0; i < host->n_ports; i++) {
450 struct ata_port *ap = host->ports[i];
451 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
452
453 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
454 continue;
455
456 /* turn off SATA_IRQ if not supported */
457 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
458 bmdma2 &= ~SIL_DMA_SATA_IRQ;
459
460 if (bmdma2 == 0xffffffff ||
461 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
462 continue;
463
464 sil_host_intr(ap, bmdma2);
465 handled = 1;
466 }
467
468 spin_unlock(&host->lock);
469
470 return IRQ_RETVAL(handled);
471}
472
473static void sil_freeze(struct ata_port *ap)
474{
475 void __iomem *mmio_base = ap->host->mmio_base;
476 u32 tmp;
477
478 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
479 writel(0, mmio_base + sil_port[ap->port_no].sien);
480
481 /* plug IRQ */
482 tmp = readl(mmio_base + SIL_SYSCFG);
483 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
484 writel(tmp, mmio_base + SIL_SYSCFG);
485 readl(mmio_base + SIL_SYSCFG); /* flush */
486}
487
488static void sil_thaw(struct ata_port *ap)
489{
490 void __iomem *mmio_base = ap->host->mmio_base;
491 u32 tmp;
492
493 /* clear IRQ */
494 ata_chk_status(ap);
495 ata_bmdma_irq_clear(ap);
496
497 /* turn on SATA IRQ if supported */
498 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
499 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
500
501 /* turn on IRQ */
502 tmp = readl(mmio_base + SIL_SYSCFG);
503 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
504 writel(tmp, mmio_base + SIL_SYSCFG);
505}
506
507/**
508 * sil_dev_config - Apply device/host-specific errata fixups
509 * @ap: Port containing device to be examined
510 * @dev: Device to be examined
511 *
512 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
513 * device is known to be present, this function is called.
514 * We apply two errata fixups which are specific to Silicon Image,
515 * a Seagate and a Maxtor fixup.
516 *
517 * For certain Seagate devices, we must limit the maximum sectors
518 * to under 8K.
519 *
520 * For certain Maxtor devices, we must not program the drive
521 * beyond udma5.
522 *
523 * Both fixups are unfairly pessimistic. As soon as I get more
524 * information on these errata, I will create a more exhaustive
525 * list, and apply the fixups to only the specific
526 * devices/hosts/firmwares that need it.
527 *
528 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
529 * The Maxtor quirk is in the blacklist, but I'm keeping the original
530 * pessimistic fix for the following reasons...
531 * - There seems to be less info on it, only one device gleaned off the
532 * Windows driver, maybe only one is affected. More info would be greatly
533 * appreciated.
534 * - But then again UDMA5 is hardly anything to complain about
535 */
536static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
537{
538 unsigned int n, quirks = 0;
539 unsigned char model_num[41];
540
541 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
542
543 for (n = 0; sil_blacklist[n].product; n++)
544 if (!strcmp(sil_blacklist[n].product, model_num)) {
545 quirks = sil_blacklist[n].quirk;
546 break;
547 }
548
549 /* limit requests to 15 sectors */
550 if (slow_down ||
551 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
552 (quirks & SIL_QUIRK_MOD15WRITE))) {
553 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
554 "(mod15write workaround)\n");
555 dev->max_sectors = 15;
556 return;
557 }
558
559 /* limit to udma5 */
560 if (quirks & SIL_QUIRK_UDMA5MAX) {
561 ata_dev_printk(dev, KERN_INFO,
562 "applying Maxtor errata fix %s\n", model_num);
563 dev->udma_mask &= ATA_UDMA5;
564 return;
565 }
566}
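/*
 * Extending the quirk list (editor's illustration, using a
 * hypothetical model string): one more entry in sil_blacklist[] above,
 * e.g.
 *
 *	{ "ST9999999AS", SIL_QUIRK_MOD15WRITE },	(hypothetical)
 *
 * is all that is needed -- sil_dev_config() matches on the IDENTIFY
 * product string and applies the fix with no further changes.
 */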
567
568static void sil_init_controller(struct pci_dev *pdev,
569 int n_ports, unsigned long port_flags,
570 void __iomem *mmio_base)
571{
572 u8 cls;
573 u32 tmp;
574 int i;
575
576 /* Initialize FIFO PCI bus arbitration */
577 cls = sil_get_device_cache_line(pdev);
578 if (cls) {
579 cls >>= 3;
580 cls++; /* cls = (line_size/8)+1 */
581 for (i = 0; i < n_ports; i++)
582 writew(cls << 8 | cls,
583 mmio_base + sil_port[i].fifo_cfg);
584 } else
585 dev_printk(KERN_WARNING, &pdev->dev,
586 "cache line size not set. Driver may not function\n");
587
588 /* Apply R_ERR on DMA activate FIS errata workaround */
589 if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
590 int cnt;
591
592 for (i = 0, cnt = 0; i < n_ports; i++) {
593 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
594 if ((tmp & 0x3) != 0x01)
595 continue;
596 if (!cnt)
597 dev_printk(KERN_INFO, &pdev->dev,
598 "Applying R_ERR on DMA activate "
599 "FIS errata fix\n");
600 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
601 cnt++;
602 }
603 }
604
605 if (n_ports == 4) {
606 /* flip the magic "make 4 ports work" bit */
607 tmp = readl(mmio_base + sil_port[2].bmdma);
608 if ((tmp & SIL_INTR_STEERING) == 0)
609 writel(tmp | SIL_INTR_STEERING,
610 mmio_base + sil_port[2].bmdma);
611 }
612}
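/*
 * Worked example for the FIFO arbitration setup above (editor's
 * illustration): with a 64-byte cache line, PCI_CACHE_LINE_SIZE reads
 * 0x10 (units of dwords), so cls = (0x10 >> 3) + 1 = 3 and 0x0303 is
 * written to each port's fifo_cfg register.
 */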
613
614static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
615{
616 static int printed_version;
617 struct ata_probe_ent *probe_ent = NULL;
618 unsigned long base;
619 void __iomem *mmio_base;
620 int rc;
621 unsigned int i;
622 int pci_dev_busy = 0;
623
624 if (!printed_version++)
625 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
626
627 rc = pci_enable_device(pdev);
628 if (rc)
629 return rc;
630
631 rc = pci_request_regions(pdev, DRV_NAME);
632 if (rc) {
633 pci_dev_busy = 1;
634 goto err_out;
635 }
636
637 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
638 if (rc)
639 goto err_out_regions;
640 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
641 if (rc)
642 goto err_out_regions;
643
644 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
645 if (probe_ent == NULL) {
646 rc = -ENOMEM;
647 goto err_out_regions;
648 }
649
650 INIT_LIST_HEAD(&probe_ent->node);
651 probe_ent->dev = pci_dev_to_dev(pdev);
652 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
653 probe_ent->sht = sil_port_info[ent->driver_data].sht;
654 probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
655 probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
656 probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
657 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
658 probe_ent->irq = pdev->irq;
659 probe_ent->irq_flags = IRQF_SHARED;
660 probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
661
662 mmio_base = pci_iomap(pdev, 5, 0);
663 if (mmio_base == NULL) {
664 rc = -ENOMEM;
665 goto err_out_free_ent;
666 }
667
668 probe_ent->mmio_base = mmio_base;
669
670 base = (unsigned long) mmio_base;
671
672 for (i = 0; i < probe_ent->n_ports; i++) {
673 probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
674 probe_ent->port[i].altstatus_addr =
675 probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
676 probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
677 probe_ent->port[i].scr_addr = base + sil_port[i].scr;
678 ata_std_ports(&probe_ent->port[i]);
679 }
680
681 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
682 mmio_base);
683
684 pci_set_master(pdev);
685
686 /* FIXME: check ata_device_add return value */
687 ata_device_add(probe_ent);
688 kfree(probe_ent);
689
690 return 0;
691
692err_out_free_ent:
693 kfree(probe_ent);
694err_out_regions:
695 pci_release_regions(pdev);
696err_out:
697 if (!pci_dev_busy)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#ifdef CONFIG_PM
703static int sil_pci_device_resume(struct pci_dev *pdev)
704{
705 struct ata_host *host = dev_get_drvdata(&pdev->dev);
706
707 ata_pci_device_do_resume(pdev);
708 sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
709 host->mmio_base);
710 ata_host_resume(host);
711
712 return 0;
713}
714#endif
715
716static int __init sil_init(void)
717{
718 return pci_register_driver(&sil_pci_driver);
719}
720
721static void __exit sil_exit(void)
722{
723 pci_unregister_driver(&sil_pci_driver);
724}
725
726
727module_init(sil_init);
728module_exit(sil_exit);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
new file mode 100644
index 000000000000..39cb07baebae
--- /dev/null
+++ b/drivers/ata/sata_sil24.c
@@ -0,0 +1,1227 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <asm/io.h>
32
33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.3"
35
36/*
37 * Port request block (PRB) 32 bytes
38 */
39struct sil24_prb {
40 __le16 ctrl;
41 __le16 prot;
42 __le32 rx_cnt;
43 u8 fis[6 * 4];
44};
45
46/*
47 * Scatter gather entry (SGE) 16 bytes
48 */
49struct sil24_sge {
50 __le64 addr;
51 __le32 cnt;
52 __le32 flags;
53};
54
55/*
56 * Port multiplier
57 */
58struct sil24_port_multiplier {
59 __le32 diag;
60 __le32 sactive;
61};
62
63enum {
64 /*
65 * Global controller registers (128 bytes @ BAR0)
66 */
67 /* 32 bit regs */
68 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
69 HOST_CTRL = 0x40,
70 HOST_IRQ_STAT = 0x44,
71 HOST_PHY_CFG = 0x48,
72 HOST_BIST_CTRL = 0x50,
73 HOST_BIST_PTRN = 0x54,
74 HOST_BIST_STAT = 0x58,
75 HOST_MEM_BIST_STAT = 0x5c,
76 HOST_FLASH_CMD = 0x70,
77 /* 8 bit regs */
78 HOST_FLASH_DATA = 0x74,
79 HOST_TRANSITION_DETECT = 0x75,
80 HOST_GPIO_CTRL = 0x76,
81 HOST_I2C_ADDR = 0x78, /* 32 bit */
82 HOST_I2C_DATA = 0x7c,
83 HOST_I2C_XFER_CNT = 0x7e,
84 HOST_I2C_CTRL = 0x7f,
85
86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31),
88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
96
97 /*
98 * Port registers
99 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
100 */
101 PORT_REGS_SIZE = 0x2000,
102
103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
107 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
110 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
111 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
112 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
113 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
114 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
115 PORT_CMD_ERR = 0x1024, /* command error number */
116 PORT_FIS_CFG = 0x1028,
117 PORT_FIFO_THRES = 0x102c,
118 /* 16 bit regs */
119 PORT_DECODE_ERR_CNT = 0x1040,
120 PORT_DECODE_ERR_THRESH = 0x1042,
121 PORT_CRC_ERR_CNT = 0x1044,
122 PORT_CRC_ERR_THRESH = 0x1046,
123 PORT_HSHK_ERR_CNT = 0x1048,
124 PORT_HSHK_ERR_THRESH = 0x104a,
125 /* 32 bit regs */
126 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00,
132 PORT_SSTATUS = 0x1f04,
133 PORT_SERROR = 0x1f08,
134 PORT_SACTIVE = 0x1f0c,
135
136 /* PORT_CTRL_STAT bits */
137 PORT_CS_PORT_RST = (1 << 0), /* port reset */
138 PORT_CS_DEV_RST = (1 << 1), /* device reset */
139 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
142 PORT_CS_RESUME = (1 << 6), /* port resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
148 /* bits[11:0] are masked */
149 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
150 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
151 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
152 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
153 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
154 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
155 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
156 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
157 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
158 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
159 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
160 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
161
162 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
163 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
164 PORT_IRQ_UNK_FIS,
165
166 /* bits[27:16] are unmasked (raw) */
167 PORT_IRQ_RAW_SHIFT = 16,
168 PORT_IRQ_MASKED_MASK = 0x7ff,
169 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
170
171 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
172 PORT_IRQ_STEER_SHIFT = 30,
173 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
174
175 /* PORT_CMD_ERR constants */
176 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
177 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
178 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
179 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
180 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
181 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
182 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
183 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
184 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
185 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
186 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
187 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
188 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
189 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
190 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
191 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
192 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
193 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
194 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
195 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
196	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
197 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
198
199 /* bits of PRB control field */
200 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
201 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
202 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
203 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
204 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
205
206 /* PRB protocol field */
207 PRB_PROT_PACKET = (1 << 0),
208 PRB_PROT_TCQ = (1 << 1),
209 PRB_PROT_NCQ = (1 << 2),
210 PRB_PROT_READ = (1 << 3),
211 PRB_PROT_WRITE = (1 << 4),
212 PRB_PROT_TRANSPARENT = (1 << 5),
213
214 /*
215 * Other constants
216 */
217 SGE_TRM = (1 << 31), /* Last SGE in chain */
218 SGE_LNK = (1 << 30), /* linked list
219 Points to SGT, not SGE */
220 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
221 data address ignored */
222
223 SIL24_MAX_CMDS = 31,
224
225 /* board id */
226 BID_SIL3124 = 0,
227 BID_SIL3132 = 1,
228 BID_SIL3131 = 2,
229
230 /* host flags */
231 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
232 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
233 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
234 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
235
236 IRQ_STAT_4PORTS = 0xf,
237};
238
239struct sil24_ata_block {
240 struct sil24_prb prb;
241 struct sil24_sge sge[LIBATA_MAX_PRD];
242};
243
244struct sil24_atapi_block {
245 struct sil24_prb prb;
246 u8 cdb[16];
247 struct sil24_sge sge[LIBATA_MAX_PRD - 1];
248};
249
250union sil24_cmd_block {
251 struct sil24_ata_block ata;
252 struct sil24_atapi_block atapi;
253};
254
255static struct sil24_cerr_info {
256 unsigned int err_mask, action;
257 const char *desc;
258} sil24_cerr_db[] = {
259 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error" },
261 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
262 "device error via D2H FIS" },
263 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
264 "device error via SDB FIS" },
265 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
266 "error in data FIS" },
267 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
268 "failed to transmit command FIS" },
269 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "protocol mismatch" },
271 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
272 "data directon mismatch" },
273 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "ran out of SGEs while writing" },
275 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
276 "ran out of SGEs while reading" },
277 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
278 "invalid data directon for ATAPI CDB" },
279 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
280 "SGT no on qword boundary" },
281 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI target abort while fetching SGT" },
283 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
284 "PCI master abort while fetching SGT" },
285 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI parity error while fetching SGT" },
287 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
288 "PRB not on qword boundary" },
289 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI target abort while fetching PRB" },
291 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "PCI master abort while fetching PRB" },
293 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI parity error while fetching PRB" },
295 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "undefined error while transferring data" },
297 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI target abort while transferring data" },
299 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
300 "PCI master abort while transferring data" },
301 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
302 "PCI parity error while transferring data" },
303 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
304 "FIS received while sending service FIS" },
305};
306
307/*
308 * ap->private_data
309 *
310 * The preview driver always returned 0 for status. We emulate it
311 * here from the previous interrupt.
312 */
313struct sil24_port_priv {
314 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
315 dma_addr_t cmd_block_dma; /* DMA base addr for them */
316 struct ata_taskfile tf; /* Cached taskfile registers */
317};
318
319/* ap->host->private_data */
320struct sil24_host_priv {
321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
323};
324
325static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
326static u8 sil24_check_status(struct ata_port *ap);
327static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
328static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
329static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
330static void sil24_qc_prep(struct ata_queued_cmd *qc);
331static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
332static void sil24_irq_clear(struct ata_port *ap);
333static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
334static void sil24_freeze(struct ata_port *ap);
335static void sil24_thaw(struct ata_port *ap);
336static void sil24_error_handler(struct ata_port *ap);
337static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
338static int sil24_port_start(struct ata_port *ap);
339static void sil24_port_stop(struct ata_port *ap);
340static void sil24_host_stop(struct ata_host *host);
341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
342#ifdef CONFIG_PM
343static int sil24_pci_device_resume(struct pci_dev *pdev);
344#endif
345
346static const struct pci_device_id sil24_pci_tbl[] = {
347 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
348 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
349 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
350 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
351 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
352 { } /* terminate list */
353};
354
355static struct pci_driver sil24_pci_driver = {
356 .name = DRV_NAME,
357 .id_table = sil24_pci_tbl,
358 .probe = sil24_init_one,
359 .remove = ata_pci_remove_one, /* safe? */
360#ifdef CONFIG_PM
361 .suspend = ata_pci_device_suspend,
362 .resume = sil24_pci_device_resume,
363#endif
364};
365
366static struct scsi_host_template sil24_sht = {
367 .module = THIS_MODULE,
368 .name = DRV_NAME,
369 .ioctl = ata_scsi_ioctl,
370 .queuecommand = ata_scsi_queuecmd,
371 .change_queue_depth = ata_scsi_change_queue_depth,
372 .can_queue = SIL24_MAX_CMDS,
373 .this_id = ATA_SHT_THIS_ID,
374 .sg_tablesize = LIBATA_MAX_PRD,
375 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
376 .emulated = ATA_SHT_EMULATED,
377 .use_clustering = ATA_SHT_USE_CLUSTERING,
378 .proc_name = DRV_NAME,
379 .dma_boundary = ATA_DMA_BOUNDARY,
380 .slave_configure = ata_scsi_slave_config,
381 .slave_destroy = ata_scsi_slave_destroy,
382 .bios_param = ata_std_bios_param,
383 .suspend = ata_scsi_device_suspend,
384 .resume = ata_scsi_device_resume,
385};
386
387static const struct ata_port_operations sil24_ops = {
388 .port_disable = ata_port_disable,
389
390 .dev_config = sil24_dev_config,
391
392 .check_status = sil24_check_status,
393 .check_altstatus = sil24_check_status,
394 .dev_select = ata_noop_dev_select,
395
396 .tf_read = sil24_tf_read,
397
398 .qc_prep = sil24_qc_prep,
399 .qc_issue = sil24_qc_issue,
400
401 .irq_handler = sil24_interrupt,
402 .irq_clear = sil24_irq_clear,
403
404 .scr_read = sil24_scr_read,
405 .scr_write = sil24_scr_write,
406
407 .freeze = sil24_freeze,
408 .thaw = sil24_thaw,
409 .error_handler = sil24_error_handler,
410 .post_internal_cmd = sil24_post_internal_cmd,
411
412 .port_start = sil24_port_start,
413 .port_stop = sil24_port_stop,
414 .host_stop = sil24_host_stop,
415};
416
417/*
418 * Use bits 30-31 of port_flags to encode available port numbers.
419 * Current maximum is 4.
420 */
421#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
422#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
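/*
 * Round-trip example (editor's illustration): SIL24_NPORTS2FLAG(4)
 * stores (4 - 1) & 0x3 = 3 in bits 30-31, and SIL24_FLAG2NPORTS()
 * recovers ((3) & 0x3) + 1 = 4.
 */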
423
424static struct ata_port_info sil24_port_info[] = {
425 /* sil_3124 */
426 {
427 .sht = &sil24_sht,
428 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
429 SIL24_FLAG_PCIX_IRQ_WOC,
430 .pio_mask = 0x1f, /* pio0-4 */
431 .mwdma_mask = 0x07, /* mwdma0-2 */
432 .udma_mask = 0x3f, /* udma0-5 */
433 .port_ops = &sil24_ops,
434 },
435 /* sil_3132 */
436 {
437 .sht = &sil24_sht,
438 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
439 .pio_mask = 0x1f, /* pio0-4 */
440 .mwdma_mask = 0x07, /* mwdma0-2 */
441 .udma_mask = 0x3f, /* udma0-5 */
442 .port_ops = &sil24_ops,
443 },
444 /* sil_3131/sil_3531 */
445 {
446 .sht = &sil24_sht,
447 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
448 .pio_mask = 0x1f, /* pio0-4 */
449 .mwdma_mask = 0x07, /* mwdma0-2 */
450 .udma_mask = 0x3f, /* udma0-5 */
451 .port_ops = &sil24_ops,
452 },
453};
454
455static int sil24_tag(int tag)
456{
457 if (unlikely(ata_tag_internal(tag)))
458 return 0;
459 return tag;
460}
461
462static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
463{
464 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
465
466 if (dev->cdb_len == 16)
467 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
468 else
469 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
470}
471
472static inline void sil24_update_tf(struct ata_port *ap)
473{
474 struct sil24_port_priv *pp = ap->private_data;
475 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
476 struct sil24_prb __iomem *prb = port;
477 u8 fis[6 * 4];
478
479 memcpy_fromio(fis, prb->fis, 6 * 4);
480 ata_tf_from_fis(fis, &pp->tf);
481}
482
483static u8 sil24_check_status(struct ata_port *ap)
484{
485 struct sil24_port_priv *pp = ap->private_data;
486 return pp->tf.command;
487}
488
489static int sil24_scr_map[] = {
490 [SCR_CONTROL] = 0,
491 [SCR_STATUS] = 1,
492 [SCR_ERROR] = 2,
493 [SCR_ACTIVE] = 3,
494};
495
496static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
497{
498 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
499 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
500 void __iomem *addr;
501 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
502		return readl(addr);
503 }
504 return 0xffffffffU;
505}
506
507static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
508{
509 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
510 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
511 void __iomem *addr;
512 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
513		writel(val, addr);
514 }
515}
516
517static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
518{
519 struct sil24_port_priv *pp = ap->private_data;
520 *tf = pp->tf;
521}
522
523static int sil24_init_port(struct ata_port *ap)
524{
525 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
526 u32 tmp;
527
528 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
529 ata_wait_register(port + PORT_CTRL_STAT,
530 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
531 tmp = ata_wait_register(port + PORT_CTRL_STAT,
532 PORT_CS_RDY, 0, 10, 100);
533
534 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
535 return -EIO;
536 return 0;
537}
538
539static int sil24_softreset(struct ata_port *ap, unsigned int *class)
540{
541 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
542 struct sil24_port_priv *pp = ap->private_data;
543 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
544 dma_addr_t paddr = pp->cmd_block_dma;
545 u32 mask, irq_stat;
546 const char *reason;
547
548 DPRINTK("ENTER\n");
549
550 if (ata_port_offline(ap)) {
551 DPRINTK("PHY reports no device\n");
552 *class = ATA_DEV_NONE;
553 goto out;
554 }
555
556 /* put the port into known state */
557 if (sil24_init_port(ap)) {
558 reason ="port not ready";
559 goto err;
560 }
561
562 /* do SRST */
563 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
564 prb->fis[1] = 0; /* no PM yet */
565
566 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
567 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
568
569 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
570 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
571 100, ATA_TMOUT_BOOT / HZ * 1000);
572
573 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
574 irq_stat >>= PORT_IRQ_RAW_SHIFT;
575
576 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
577 if (irq_stat & PORT_IRQ_ERROR)
578 reason = "SRST command error";
579 else
580 reason = "timeout";
581 goto err;
582 }
583
584 sil24_update_tf(ap);
585 *class = ata_dev_classify(&pp->tf);
586
587 if (*class == ATA_DEV_UNKNOWN)
588 *class = ATA_DEV_NONE;
589
590 out:
591 DPRINTK("EXIT, class=%u\n", *class);
592 return 0;
593
594 err:
595 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
596 return -EIO;
597}
598
599static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
600{
601 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
602 const char *reason;
603 int tout_msec, rc;
604 u32 tmp;
605
606 /* sil24 does the right thing(tm) without any protection */
607 sata_set_spd(ap);
608
609 tout_msec = 100;
610 if (ata_port_online(ap))
611 tout_msec = 5000;
612
613 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
614 tmp = ata_wait_register(port + PORT_CTRL_STAT,
615 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
616
617 /* SStatus oscillates between zero and valid status after
618	 * DEV_RST; debounce it.
619 */
620 rc = sata_phy_debounce(ap, sata_deb_timing_long);
621 if (rc) {
622 reason = "PHY debouncing failed";
623 goto err;
624 }
625
626 if (tmp & PORT_CS_DEV_RST) {
627 if (ata_port_offline(ap))
628 return 0;
629 reason = "link not ready";
630 goto err;
631 }
632
633 /* Sil24 doesn't store signature FIS after hardreset, so we
634 * can't wait for BSY to clear. Some devices take a long time
635 * to get ready and those devices will choke if we don't wait
636 * for BSY clearance here. Tell libata to perform follow-up
637 * softreset.
638 */
639 return -EAGAIN;
640
641 err:
642 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
643 return -EIO;
644}
645
646static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
647 struct sil24_sge *sge)
648{
649 struct scatterlist *sg;
650 unsigned int idx = 0;
651
652 ata_for_each_sg(sg, qc) {
653 sge->addr = cpu_to_le64(sg_dma_address(sg));
654 sge->cnt = cpu_to_le32(sg_dma_len(sg));
655 if (ata_sg_is_last(sg, qc))
656 sge->flags = cpu_to_le32(SGE_TRM);
657 else
658 sge->flags = 0;
659
660 sge++;
661 idx++;
662 }
663}
664
665static void sil24_qc_prep(struct ata_queued_cmd *qc)
666{
667 struct ata_port *ap = qc->ap;
668 struct sil24_port_priv *pp = ap->private_data;
669 union sil24_cmd_block *cb;
670 struct sil24_prb *prb;
671 struct sil24_sge *sge;
672 u16 ctrl = 0;
673
674 cb = &pp->cmd_block[sil24_tag(qc->tag)];
675
676 switch (qc->tf.protocol) {
677 case ATA_PROT_PIO:
678 case ATA_PROT_DMA:
679 case ATA_PROT_NCQ:
680 case ATA_PROT_NODATA:
681 prb = &cb->ata.prb;
682 sge = cb->ata.sge;
683 break;
684
685 case ATA_PROT_ATAPI:
686 case ATA_PROT_ATAPI_DMA:
687 case ATA_PROT_ATAPI_NODATA:
688 prb = &cb->atapi.prb;
689 sge = cb->atapi.sge;
690 memset(cb->atapi.cdb, 0, 32);
691 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
692
693 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
694 if (qc->tf.flags & ATA_TFLAG_WRITE)
695 ctrl = PRB_CTRL_PACKET_WRITE;
696 else
697 ctrl = PRB_CTRL_PACKET_READ;
698 }
699 break;
700
701 default:
702 prb = NULL; /* shut up, gcc */
703 sge = NULL;
704 BUG();
705 }
706
707 prb->ctrl = cpu_to_le16(ctrl);
708 ata_tf_to_fis(&qc->tf, prb->fis, 0);
709
710 if (qc->flags & ATA_QCFLAG_DMAMAP)
711 sil24_fill_sg(qc, sge);
712}
713
714static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
715{
716 struct ata_port *ap = qc->ap;
717 struct sil24_port_priv *pp = ap->private_data;
718 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
719 unsigned int tag = sil24_tag(qc->tag);
720 dma_addr_t paddr;
721 void __iomem *activate;
722
723 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
724 activate = port + PORT_CMD_ACTIVATE + tag * 8;
725
726 writel((u32)paddr, activate);
727 writel((u64)paddr >> 32, activate + 4);
728
729 return 0;
730}
731
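Command activation writes the command block's bus address as two 32-bit halves, low word first, into the per-tag activation register pair. A minimal sketch of that address split (plain variables standing in for the MMIO registers):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-ins for PORT_CMD_ACTIVATE and PORT_CMD_ACTIVATE + 4 */
    static uint32_t activate_lo, activate_hi;

    static void activate_cmd(uint64_t paddr)
    {
        activate_lo = (uint32_t)paddr;         /* low 32 bits first */
        activate_hi = (uint32_t)(paddr >> 32); /* then the high 32 bits */
    }

    int main(void)
    {
        activate_cmd(0x123456789abcull);
        printf("lo=0x%08x hi=0x%08x\n", (unsigned)activate_lo, (unsigned)activate_hi);
        return 0;
    }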
732static void sil24_irq_clear(struct ata_port *ap)
733{
734 /* unused */
735}
736
737static void sil24_freeze(struct ata_port *ap)
738{
739 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
740
741 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
742 * PORT_IRQ_ENABLE instead.
743 */
744 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
745}
746
747static void sil24_thaw(struct ata_port *ap)
748{
749 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
750 u32 tmp;
751
752 /* clear IRQ */
753 tmp = readl(port + PORT_IRQ_STAT);
754 writel(tmp, port + PORT_IRQ_STAT);
755
756 /* turn IRQ back on */
757 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
758}
759
760static void sil24_error_intr(struct ata_port *ap)
761{
762 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
763 struct ata_eh_info *ehi = &ap->eh_info;
764 int freeze = 0;
765 u32 irq_stat;
766
767 /* on error, we need to clear IRQ explicitly */
768 irq_stat = readl(port + PORT_IRQ_STAT);
769 writel(irq_stat, port + PORT_IRQ_STAT);
770
771 /* first, analyze and record host port events */
772 ata_ehi_clear_desc(ehi);
773
774 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
775
776 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
777 ata_ehi_hotplugged(ehi);
778 ata_ehi_push_desc(ehi, ", %s",
779 irq_stat & PORT_IRQ_PHYRDY_CHG ?
780 "PHY RDY changed" : "device exchanged");
781 freeze = 1;
782 }
783
784 if (irq_stat & PORT_IRQ_UNK_FIS) {
785 ehi->err_mask |= AC_ERR_HSM;
786 ehi->action |= ATA_EH_SOFTRESET;
787 ata_ehi_push_desc(ehi, ", unknown FIS");
788 freeze = 1;
789 }
790
791 /* deal with command error */
792 if (irq_stat & PORT_IRQ_ERROR) {
793 struct sil24_cerr_info *ci = NULL;
794 unsigned int err_mask = 0, action = 0;
795 struct ata_queued_cmd *qc;
796 u32 cerr;
797
798 /* analyze CMD_ERR */
799 cerr = readl(port + PORT_CMD_ERR);
800 if (cerr < ARRAY_SIZE(sil24_cerr_db))
801 ci = &sil24_cerr_db[cerr];
802
803 if (ci && ci->desc) {
804 err_mask |= ci->err_mask;
805 action |= ci->action;
806 ata_ehi_push_desc(ehi, ", %s", ci->desc);
807 } else {
808 err_mask |= AC_ERR_OTHER;
809 action |= ATA_EH_SOFTRESET;
810 ata_ehi_push_desc(ehi, ", unknown command error %d",
811 cerr);
812 }
813
814 /* record error info */
815 qc = ata_qc_from_tag(ap, ap->active_tag);
816 if (qc) {
817 sil24_update_tf(ap);
818 qc->err_mask |= err_mask;
819 } else
820 ehi->err_mask |= err_mask;
821
822 ehi->action |= action;
823 }
824
825 /* freeze or abort */
826 if (freeze)
827 ata_port_freeze(ap);
828 else
829 ata_port_abort(ap);
830}
831
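The command-error path is table driven: the PORT_CMD_ERR value indexes sil24_cerr_db[] (defined earlier in this file) to obtain a description, an error mask, and an EH action, with a catch-all for codes outside the table. The shape of that lookup, modeled here with made-up entries:

    #include <stdio.h>

    struct cerr_info { unsigned int err_mask, action; const char *desc; };

    /* illustrative entries only; the real table is indexed by hardware error codes */
    static const struct cerr_info cerr_db[] = {
        [1] = { 0x01, 0x1, "device error" },
        [2] = { 0x02, 0x2, "protocol mismatch" },
    };

    static void decode_cerr(unsigned int cerr)
    {
        const struct cerr_info *ci = NULL;

        if (cerr < sizeof(cerr_db) / sizeof(cerr_db[0]))
            ci = &cerr_db[cerr];

        if (ci && ci->desc) /* gaps in the table have a NULL desc */
            printf("error: %s (mask 0x%x, action 0x%x)\n",
                   ci->desc, ci->err_mask, ci->action);
        else
            printf("unknown command error %u\n", cerr);
    }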
832static void sil24_finish_qc(struct ata_queued_cmd *qc)
833{
834 if (qc->flags & ATA_QCFLAG_RESULT_TF)
835 sil24_update_tf(qc->ap);
836}
837
838static inline void sil24_host_intr(struct ata_port *ap)
839{
840 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
841 u32 slot_stat, qc_active;
842 int rc;
843
844 slot_stat = readl(port + PORT_SLOT_STAT);
845
846 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
847 sil24_error_intr(ap);
848 return;
849 }
850
851 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
852 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
853
854 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
855 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
856 if (rc > 0)
857 return;
858 if (rc < 0) {
859 struct ata_eh_info *ehi = &ap->eh_info;
860 ehi->err_mask |= AC_ERR_HSM;
861 ehi->action |= ATA_EH_SOFTRESET;
862 ata_port_freeze(ap);
863 return;
864 }
865
866 if (ata_ratelimit())
867 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
868 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
869 slot_stat, ap->active_tag, ap->sactive);
870}
871
872static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
873{
874 struct ata_host *host = dev_instance;
875 struct sil24_host_priv *hpriv = host->private_data;
876 unsigned handled = 0;
877 u32 status;
878 int i;
879
880 status = readl(hpriv->host_base + HOST_IRQ_STAT);
881
882 if (status == 0xffffffff) {
883 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
884 "PCI fault or device removal?\n");
885 goto out;
886 }
887
888 if (!(status & IRQ_STAT_4PORTS))
889 goto out;
890
891 spin_lock(&host->lock);
892
893 for (i = 0; i < host->n_ports; i++)
894 if (status & (1 << i)) {
895 struct ata_port *ap = host->ports[i];
896 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
897 sil24_host_intr(ap);
898 handled++;
899 } else
900 printk(KERN_ERR DRV_NAME
901 ": interrupt from disabled port %d\n", i);
902 }
903
904 spin_unlock(&host->lock);
905 out:
906 return IRQ_RETVAL(handled);
907}
908
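The top-level handler reads one global status word and services every port whose bit is set, returning nonzero only if at least one port was handled so the IRQ core knows the interrupt was ours. The dispatch loop in isolation, with a stub in place of sil24_host_intr():

    #include <stdint.h>
    #include <stdio.h>

    #define N_PORTS 4

    static void port_intr(int port) /* stand-in for the per-port service routine */
    {
        printf("servicing port %d\n", port);
    }

    static unsigned int dispatch(uint32_t status)
    {
        unsigned int handled = 0;

        for (int i = 0; i < N_PORTS; i++)
            if (status & (1u << i)) {
                port_intr(i);
                handled++;
            }
        return handled; /* zero means the IRQ belonged to someone else */
    }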
909static void sil24_error_handler(struct ata_port *ap)
910{
911 struct ata_eh_context *ehc = &ap->eh_context;
912
913 if (sil24_init_port(ap)) {
914 ata_eh_freeze_port(ap);
915 ehc->i.action |= ATA_EH_HARDRESET;
916 }
917
918 /* perform recovery */
919 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
920 ata_std_postreset);
921}
922
923static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
924{
925 struct ata_port *ap = qc->ap;
926
927 if (qc->flags & ATA_QCFLAG_FAILED)
928 qc->err_mask |= AC_ERR_OTHER;
929
930 /* make DMA engine forget about the failed command */
931 if (qc->err_mask)
932 sil24_init_port(ap);
933}
934
935static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
936{
937 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
938
939 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
940}
941
942static int sil24_port_start(struct ata_port *ap)
943{
944 struct device *dev = ap->host->dev;
945 struct sil24_port_priv *pp;
946 union sil24_cmd_block *cb;
947 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
948 dma_addr_t cb_dma;
949 int rc = -ENOMEM;
950
951 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
952 if (!pp)
953 goto err_out;
954
955 pp->tf.command = ATA_DRDY;
956
957 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
958 if (!cb)
959 goto err_out_pp;
960 memset(cb, 0, cb_size);
961
962 rc = ata_pad_alloc(ap, dev);
963 if (rc)
964 goto err_out_pad;
965
966 pp->cmd_block = cb;
967 pp->cmd_block_dma = cb_dma;
968
969 ap->private_data = pp;
970
971 return 0;
972
973err_out_pad:
974 sil24_cblk_free(pp, dev);
975err_out_pp:
976 kfree(pp);
977err_out:
978 return rc;
979}
980
981static void sil24_port_stop(struct ata_port *ap)
982{
983 struct device *dev = ap->host->dev;
984 struct sil24_port_priv *pp = ap->private_data;
985
986 sil24_cblk_free(pp, dev);
987 ata_pad_free(ap, dev);
988 kfree(pp);
989}
990
991static void sil24_host_stop(struct ata_host *host)
992{
993 struct sil24_host_priv *hpriv = host->private_data;
994 struct pci_dev *pdev = to_pci_dev(host->dev);
995
996 pci_iounmap(pdev, hpriv->host_base);
997 pci_iounmap(pdev, hpriv->port_base);
998 kfree(hpriv);
999}
1000
1001static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1002 unsigned long port_flags,
1003 void __iomem *host_base,
1004 void __iomem *port_base)
1005{
1006 u32 tmp;
1007 int i;
1008
1009 /* GPIO off */
1010 writel(0, host_base + HOST_FLASH_CMD);
1011
1012 /* clear global reset & mask interrupts during initialization */
1013 writel(0, host_base + HOST_CTRL);
1014
1015 /* init ports */
1016 for (i = 0; i < n_ports; i++) {
1017 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1018
1019 /* Initial PHY setting */
1020 writel(0x20c, port + PORT_PHY_CFG);
1021
1022 /* Clear port RST */
1023 tmp = readl(port + PORT_CTRL_STAT);
1024 if (tmp & PORT_CS_PORT_RST) {
1025 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1026 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1027 PORT_CS_PORT_RST,
1028 PORT_CS_PORT_RST, 10, 100);
1029 if (tmp & PORT_CS_PORT_RST)
1030 dev_printk(KERN_ERR, &pdev->dev,
1031 "failed to clear port RST\n");
1032 }
1033
1034 /* Configure IRQ WoC */
1035 if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1036 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1037 else
1038 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1039
1040 /* Zero error counters. */
1041 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1042 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1043 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1044 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1045 writel(0x0000, port + PORT_CRC_ERR_CNT);
1046 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1047
1048 /* Always use 64bit activation */
1049 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1050
1051 /* Clear port multiplier enable and resume bits */
1052 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1053 }
1054
1055 /* Turn on interrupts */
1056 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1057}
1058
1059static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1060{
1061 static int printed_version = 0;
1062 unsigned int board_id = (unsigned int)ent->driver_data;
1063 struct ata_port_info *pinfo = &sil24_port_info[board_id];
1064 struct ata_probe_ent *probe_ent = NULL;
1065 struct sil24_host_priv *hpriv = NULL;
1066 void __iomem *host_base = NULL;
1067 void __iomem *port_base = NULL;
1068 int i, rc;
1069 u32 tmp;
1070
1071 if (!printed_version++)
1072 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1073
1074 rc = pci_enable_device(pdev);
1075 if (rc)
1076 return rc;
1077
1078 rc = pci_request_regions(pdev, DRV_NAME);
1079 if (rc)
1080 goto out_disable;
1081
1082 rc = -ENOMEM;
1083 /* map mmio registers */
1084 host_base = pci_iomap(pdev, 0, 0);
1085 if (!host_base)
1086 goto out_free;
1087 port_base = pci_iomap(pdev, 2, 0);
1088 if (!port_base)
1089 goto out_free;
1090
1091 /* allocate & init probe_ent and hpriv */
1092 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1093 if (!probe_ent)
1094 goto out_free;
1095
1096 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
1097 if (!hpriv)
1098 goto out_free;
1099
1100 probe_ent->dev = pci_dev_to_dev(pdev);
1101 INIT_LIST_HEAD(&probe_ent->node);
1102
1103 probe_ent->sht = pinfo->sht;
1104 probe_ent->port_flags = pinfo->flags;
1105 probe_ent->pio_mask = pinfo->pio_mask;
1106 probe_ent->mwdma_mask = pinfo->mwdma_mask;
1107 probe_ent->udma_mask = pinfo->udma_mask;
1108 probe_ent->port_ops = pinfo->port_ops;
1109 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags);
1110
1111 probe_ent->irq = pdev->irq;
1112 probe_ent->irq_flags = IRQF_SHARED;
1113 probe_ent->private_data = hpriv;
1114
1115 hpriv->host_base = host_base;
1116 hpriv->port_base = port_base;
1117
1118 /*
1119 * Configure the device
1120 */
1121 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1122 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1123 if (rc) {
1124 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1125 if (rc) {
1126 dev_printk(KERN_ERR, &pdev->dev,
1127 "64-bit DMA enable failed\n");
1128 goto out_free;
1129 }
1130 }
1131 } else {
1132 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1133 if (rc) {
1134 dev_printk(KERN_ERR, &pdev->dev,
1135 "32-bit DMA enable failed\n");
1136 goto out_free;
1137 }
1138 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1139 if (rc) {
1140 dev_printk(KERN_ERR, &pdev->dev,
1141 "32-bit consistent DMA enable failed\n");
1142 goto out_free;
1143 }
1144 }
1145
1146 /* Apply workaround for completion IRQ loss on PCI-X errata */
1147 if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1148 tmp = readl(host_base + HOST_CTRL);
1149 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1150 dev_printk(KERN_INFO, &pdev->dev,
1151 "Applying fix for completion IRQ loss "
1152 "on PCI-X errata\n");
1153 else
1154 probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1155 }
1156
1157 for (i = 0; i < probe_ent->n_ports; i++) {
1158 unsigned long portu =
1159 (unsigned long)port_base + i * PORT_REGS_SIZE;
1160
1161 probe_ent->port[i].cmd_addr = portu;
1162 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
1163
1164 ata_std_ports(&probe_ent->port[i]);
1165 }
1166
1167 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
1168 host_base, port_base);
1169
1170 pci_set_master(pdev);
1171
1172 /* FIXME: check ata_device_add return value */
1173 ata_device_add(probe_ent);
1174
1175 kfree(probe_ent);
1176 return 0;
1177
1178 out_free:
1179 if (host_base)
1180 pci_iounmap(pdev, host_base);
1181 if (port_base)
1182 pci_iounmap(pdev, port_base);
1183 kfree(probe_ent);
1184 kfree(hpriv);
1185 pci_release_regions(pdev);
1186 out_disable:
1187 pci_disable_device(pdev);
1188 return rc;
1189}
1190
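The probe's DMA setup tries full 64-bit masks and degrades gracefully: if the streaming mask can be 64-bit but the consistent one cannot, only the consistent mask drops to 32-bit; if 64-bit fails outright, both become 32-bit. A compact model of that decision ladder (the two predicates are hypothetical stand-ins for pci_set_dma_mask() and pci_set_consistent_dma_mask()):

    #include <stdbool.h>
    #include <stdint.h>

    /* pretend this platform only honors 32-bit masks */
    static bool set_stream_mask(uint64_t mask)   { return mask <= 0xffffffffull; }
    static bool set_coherent_mask(uint64_t mask) { return mask <= 0xffffffffull; }

    static int setup_dma(void)
    {
        if (set_stream_mask(~0ull)) {
            /* 64-bit streaming DMA is fine; coherent buffers may still
             * have to fall back below 4 GB */
            if (set_coherent_mask(~0ull) || set_coherent_mask(0xffffffffull))
                return 0;
            return -1;
        }
        /* no 64-bit support at all: use 32-bit for both mask types */
        if (set_stream_mask(0xffffffffull) && set_coherent_mask(0xffffffffull))
            return 0;
        return -1;
    }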
1191#ifdef CONFIG_PM
1192static int sil24_pci_device_resume(struct pci_dev *pdev)
1193{
1194 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1195 struct sil24_host_priv *hpriv = host->private_data;
1196
1197 ata_pci_device_do_resume(pdev);
1198
1199 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1200 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
1201
1202 sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
1203 hpriv->host_base, hpriv->port_base);
1204
1205 ata_host_resume(host);
1206
1207 return 0;
1208}
1209#endif
1210
1211static int __init sil24_init(void)
1212{
1213 return pci_register_driver(&sil24_pci_driver);
1214}
1215
1216static void __exit sil24_exit(void)
1217{
1218 pci_unregister_driver(&sil24_pci_driver);
1219}
1220
1221MODULE_AUTHOR("Tejun Heo");
1222MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1223MODULE_LICENSE("GPL");
1224MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1225
1226module_init(sil24_init);
1227module_exit(sil24_exit);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
new file mode 100644
index 000000000000..9b17375d8056
--- /dev/null
+++ b/drivers/ata/sata_sis.c
@@ -0,0 +1,347 @@
1/*
2 * sata_sis.c - Silicon Integrated Systems SATA
3 *
4 * Maintained by: Uwe Koziolek
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 Uwe Koziolek
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/device.h>
41#include <scsi/scsi_host.h>
42#include <linux/libata.h>
43
44#define DRV_NAME "sata_sis"
45#define DRV_VERSION "0.6"
46
47enum {
48 sis_180 = 0,
49 SIS_SCR_PCI_BAR = 5,
50
51 /* PCI configuration registers */
52 SIS_GENCTL = 0x54, /* IDE General Control register */
53 SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
54 SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
55 SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
56 SIS_PMR = 0x90, /* port mapping register */
57 SIS_PMR_COMBINED = 0x30,
58
59 /* random bits */
60 SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
61
62 GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
63};
64
65static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
66static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68
69static const struct pci_device_id sis_pci_tbl[] = {
70 { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
71 { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
72 { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
73 { } /* terminate list */
74};
75
76
77static struct pci_driver sis_pci_driver = {
78 .name = DRV_NAME,
79 .id_table = sis_pci_tbl,
80 .probe = sis_init_one,
81 .remove = ata_pci_remove_one,
82};
83
84static struct scsi_host_template sis_sht = {
85 .module = THIS_MODULE,
86 .name = DRV_NAME,
87 .ioctl = ata_scsi_ioctl,
88 .queuecommand = ata_scsi_queuecmd,
89 .can_queue = ATA_DEF_QUEUE,
90 .this_id = ATA_SHT_THIS_ID,
91 .sg_tablesize = ATA_MAX_PRD,
92 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
93 .emulated = ATA_SHT_EMULATED,
94 .use_clustering = ATA_SHT_USE_CLUSTERING,
95 .proc_name = DRV_NAME,
96 .dma_boundary = ATA_DMA_BOUNDARY,
97 .slave_configure = ata_scsi_slave_config,
98 .slave_destroy = ata_scsi_slave_destroy,
99 .bios_param = ata_std_bios_param,
100};
101
102static const struct ata_port_operations sis_ops = {
103 .port_disable = ata_port_disable,
104 .tf_load = ata_tf_load,
105 .tf_read = ata_tf_read,
106 .check_status = ata_check_status,
107 .exec_command = ata_exec_command,
108 .dev_select = ata_std_dev_select,
109 .bmdma_setup = ata_bmdma_setup,
110 .bmdma_start = ata_bmdma_start,
111 .bmdma_stop = ata_bmdma_stop,
112 .bmdma_status = ata_bmdma_status,
113 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot,
115 .data_xfer = ata_pio_data_xfer,
116 .freeze = ata_bmdma_freeze,
117 .thaw = ata_bmdma_thaw,
118 .error_handler = ata_bmdma_error_handler,
119 .post_internal_cmd = ata_bmdma_post_internal_cmd,
120 .irq_handler = ata_interrupt,
121 .irq_clear = ata_bmdma_irq_clear,
122 .scr_read = sis_scr_read,
123 .scr_write = sis_scr_write,
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info sis_port_info = {
130 .sht = &sis_sht,
131 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f,
133 .mwdma_mask = 0x7,
134 .udma_mask = 0x7f,
135 .port_ops = &sis_ops,
136};
137
138
139MODULE_AUTHOR("Uwe Koziolek");
140MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
141MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
143MODULE_VERSION(DRV_VERSION);
144
145static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
146{
147 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
148
149 if (port_no) {
150 if (device == 0x182)
151 addr += SIS182_SATA1_OFS;
152 else
153 addr += SIS180_SATA1_OFS;
154 }
155
156 return addr;
157}
158
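Assuming libata's usual SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2), the config-space addresses this helper produces work out as follows:

    port 0, SCR_STATUS  -> 0xc0
    port 0, SCR_CONTROL -> 0xc8
    port 1, SCR_STATUS  -> 0xd0 on a 180/181 (+0x10), 0xe0 on a 182 (+0x20)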
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
163 u32 val, val2 = 0;
164 u8 pmr;
165
166 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
167 return 0xffffffff;
168
169 pci_read_config_byte(pdev, SIS_PMR, &pmr);
170
171 pci_read_config_dword(pdev, cfg_addr, &val);
172
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr + 0x10, &val2);
175
176 return val | val2;
177}
178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
180{
181 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
183 u8 pmr;
184
185 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
186 return;
187
188 pci_read_config_byte(pdev, SIS_PMR, &pmr);
189
190 pci_write_config_dword(pdev, cfg_addr, val);
191
192 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
193 pci_write_config_dword(pdev, cfg_addr + 0x10, val);
194}
195
196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
197{
198 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
199 u32 val, val2 = 0;
200 u8 pmr;
201
202 if (sc_reg > SCR_CONTROL)
203 return 0xffffffffU;
204
205 if (ap->flags & SIS_FLAG_CFGSCR)
206 return sis_scr_cfg_read(ap, sc_reg);
207
208 pci_read_config_byte(pdev, SIS_PMR, &pmr);
209
210 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
211
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214
215 return val | val2;
216}
217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
221 u8 pmr;
222
223 if (sc_reg > SCR_CONTROL)
224 return;
225
226 pci_read_config_byte(pdev, SIS_PMR, &pmr);
227
228 if (ap->flags & SIS_FLAG_CFGSCR)
229 sis_scr_cfg_write(ap, sc_reg, val);
230 else {
231 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
232 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
233 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
234 }
235}
236
237static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
238{
239 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL;
241 int rc;
242 u32 genctl;
243 struct ata_port_info *ppi;
244 int pci_dev_busy = 0;
245 u8 pmr;
246 u8 port2_start;
247
248 if (!printed_version++)
249 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
250
251 rc = pci_enable_device(pdev);
252 if (rc)
253 return rc;
254
255 rc = pci_request_regions(pdev, DRV_NAME);
256 if (rc) {
257 pci_dev_busy = 1;
258 goto err_out;
259 }
260
261 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
262 if (rc)
263 goto err_out_regions;
264 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
265 if (rc)
266 goto err_out_regions;
267
268 ppi = &sis_port_info;
269 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
270 if (!probe_ent) {
271 rc = -ENOMEM;
272 goto err_out_regions;
273 }
274
275 /* check and see if the SCRs are in IO space or PCI cfg space */
276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
278 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
279
280 /* if hardware thinks SCRs are in IO space, but there are
281 * no IO resources assigned, change to PCI cfg space.
282 */
283 if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) &&
284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
286 genctl &= ~GENCTL_IOMAPPED_SCR;
287 pci_write_config_dword(pdev, SIS_GENCTL, genctl);
288 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
289 }
290
291 pci_read_config_byte(pdev, SIS_PMR, &pmr);
292 if (ent->device != 0x182) {
293 if ((pmr & SIS_PMR_COMBINED) == 0) {
294 dev_printk(KERN_INFO, &pdev->dev,
295 "Detected SiS 180/181 chipset in SATA mode\n");
296 port2_start = 64;
297 }
298 else {
299 dev_printk(KERN_INFO, &pdev->dev,
300 "Detected SiS 180/181 chipset in combined mode\n");
301 port2_start = 0;
302 }
303 }
304 else {
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
306 port2_start = 0x20;
307 }
308
309 if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
310 probe_ent->port[0].scr_addr =
311 pci_resource_start(pdev, SIS_SCR_PCI_BAR);
312 probe_ent->port[1].scr_addr =
313 pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
314 }
315
316 pci_set_master(pdev);
317 pci_intx(pdev, 1);
318
319 /* FIXME: check ata_device_add return value */
320 ata_device_add(probe_ent);
321 kfree(probe_ent);
322
323 return 0;
324
325err_out_regions:
326 pci_release_regions(pdev);
327
328err_out:
329 if (!pci_dev_busy)
330 pci_disable_device(pdev);
331 return rc;
332
333}
334
335static int __init sis_init(void)
336{
337 return pci_register_driver(&sis_pci_driver);
338}
339
340static void __exit sis_exit(void)
341{
342 pci_unregister_driver(&sis_pci_driver);
343}
344
345module_init(sis_init);
346module_exit(sis_exit);
347
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
new file mode 100644
index 000000000000..d6d6658d8328
--- /dev/null
+++ b/drivers/ata/sata_svw.c
@@ -0,0 +1,508 @@
1/*
2 * sata_svw.c - ServerWorks / Apple K2 SATA
3 *
4 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 *
11 * Bits from Jeff Garzik, Copyright RedHat, Inc.
12 *
13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset...
15 *
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/device.h>
47#include <scsi/scsi_host.h>
48#include <linux/libata.h>
49
50#ifdef CONFIG_PPC_OF
51#include <asm/prom.h>
52#include <asm/pci-bridge.h>
53#endif /* CONFIG_PPC_OF */
54
55#define DRV_NAME "sata_svw"
56#define DRV_VERSION "2.0"
57
58enum {
59 /* Taskfile registers offsets */
60 K2_SATA_TF_CMD_OFFSET = 0x00,
61 K2_SATA_TF_DATA_OFFSET = 0x00,
62 K2_SATA_TF_ERROR_OFFSET = 0x04,
63 K2_SATA_TF_NSECT_OFFSET = 0x08,
64 K2_SATA_TF_LBAL_OFFSET = 0x0c,
65 K2_SATA_TF_LBAM_OFFSET = 0x10,
66 K2_SATA_TF_LBAH_OFFSET = 0x14,
67 K2_SATA_TF_DEVICE_OFFSET = 0x18,
68 K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
69 K2_SATA_TF_CTL_OFFSET = 0x20,
70
71 /* DMA base */
72 K2_SATA_DMA_CMD_OFFSET = 0x30,
73
74 /* SCRs base */
75 K2_SATA_SCR_STATUS_OFFSET = 0x40,
76 K2_SATA_SCR_ERROR_OFFSET = 0x44,
77 K2_SATA_SCR_CONTROL_OFFSET = 0x48,
78
79 /* Others */
80 K2_SATA_SICR1_OFFSET = 0x80,
81 K2_SATA_SICR2_OFFSET = 0x84,
82 K2_SATA_SIM_OFFSET = 0x88,
83
84 /* Port stride */
85 K2_SATA_PORT_OFFSET = 0x100,
86};
87
88static u8 k2_stat_check_status(struct ata_port *ap);
89
90
91static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
92{
93 if (sc_reg > SCR_CONTROL)
94 return 0xffffffffU;
95 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
96}
97
98
99static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
100 u32 val)
101{
102 if (sc_reg > SCR_CONTROL)
103 return;
104 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
105}
106
107
108static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
109{
110 struct ata_ioports *ioaddr = &ap->ioaddr;
111 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
112
113 if (tf->ctl != ap->last_ctl) {
114 writeb(tf->ctl, ioaddr->ctl_addr);
115 ap->last_ctl = tf->ctl;
116 ata_wait_idle(ap);
117 }
118 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
119 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
120 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
121 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
122 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
123 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
124 } else if (is_addr) {
125 writew(tf->feature, ioaddr->feature_addr);
126 writew(tf->nsect, ioaddr->nsect_addr);
127 writew(tf->lbal, ioaddr->lbal_addr);
128 writew(tf->lbam, ioaddr->lbam_addr);
129 writew(tf->lbah, ioaddr->lbah_addr);
130 }
131
132 if (tf->flags & ATA_TFLAG_DEVICE)
133 writeb(tf->device, ioaddr->device_addr);
134
135 ata_wait_idle(ap);
136}
137
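The K2's taskfile registers are 16 bits wide, so each LBA48 register pair (the current byte plus the "hob" high-order byte) goes out in a single writew(). A stand-alone sketch of how a 48-bit sector number decomposes into those register words:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t lba = 0x123456789abcull; /* any 48-bit sector number */

        uint8_t lbal = lba & 0xff, lbam = (lba >> 8) & 0xff, lbah = (lba >> 16) & 0xff;
        uint8_t hob_lbal = (lba >> 24) & 0xff, hob_lbam = (lba >> 32) & 0xff,
                hob_lbah = (lba >> 40) & 0xff;

        /* each 16-bit write carries the current byte (low) + hob byte (high) */
        uint16_t lbal_w = lbal | ((uint16_t)hob_lbal << 8);
        uint16_t lbam_w = lbam | ((uint16_t)hob_lbam << 8);
        uint16_t lbah_w = lbah | ((uint16_t)hob_lbah << 8);

        printf("lbal=%04x lbam=%04x lbah=%04x\n",
               (unsigned)lbal_w, (unsigned)lbam_w, (unsigned)lbah_w);
        return 0;
    }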
138
139static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
140{
141 struct ata_ioports *ioaddr = &ap->ioaddr;
142 u16 nsect, lbal, lbam, lbah, feature;
143
144 tf->command = k2_stat_check_status(ap);
145 tf->device = readw(ioaddr->device_addr);
146 feature = readw(ioaddr->error_addr);
147 nsect = readw(ioaddr->nsect_addr);
148 lbal = readw(ioaddr->lbal_addr);
149 lbam = readw(ioaddr->lbam_addr);
150 lbah = readw(ioaddr->lbah_addr);
151
152 tf->feature = feature;
153 tf->nsect = nsect;
154 tf->lbal = lbal;
155 tf->lbam = lbam;
156 tf->lbah = lbah;
157
158 if (tf->flags & ATA_TFLAG_LBA48) {
159 tf->hob_feature = feature >> 8;
160 tf->hob_nsect = nsect >> 8;
161 tf->hob_lbal = lbal >> 8;
162 tf->hob_lbam = lbam >> 8;
163 tf->hob_lbah = lbah >> 8;
164 }
165}
166
167/**
168 * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
169 * @qc: Info associated with this ATA transaction.
170 *
171 * LOCKING:
172 * spin_lock_irqsave(host lock)
173 */
174
175static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
176{
177 struct ata_port *ap = qc->ap;
178 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
179 u8 dmactl;
180 void *mmio = (void *) ap->ioaddr.bmdma_addr;
181 /* load PRD table addr. */
182 mb(); /* make sure PRD table writes are visible to controller */
183 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
184
185 /* specify data direction, triple-check start bit is clear */
186 dmactl = readb(mmio + ATA_DMA_CMD);
187 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
188 if (!rw)
189 dmactl |= ATA_DMA_WR;
190 writeb(dmactl, mmio + ATA_DMA_CMD);
191
192 /* issue r/w command if this is not an ATA DMA command */
193 if (qc->tf.protocol != ATA_PROT_DMA)
194 ap->ops->exec_command(ap, &qc->tf);
195}
196
197/**
198 * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
199 * @qc: Info associated with this ATA transaction.
200 *
201 * LOCKING:
202 * spin_lock_irqsave(host lock)
203 */
204
205static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
206{
207 struct ata_port *ap = qc->ap;
208 void *mmio = (void *) ap->ioaddr.bmdma_addr;
209 u8 dmactl;
210
211 /* start host DMA transaction */
212 dmactl = readb(mmio + ATA_DMA_CMD);
213 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
214 /* There is a race condition in certain SATA controllers that can
215 be seen when the r/w command is given to the controller before the
216 host DMA is started. On a Read command, the controller would initiate
217 the command to the drive even before it sees the DMA start. When there
218 are very fast drives connected to the controller, or when the data request
219 hits in the drive cache, there is the possibility that the drive returns a part
220 or all of the requested data to the controller before the DMA start is issued.
221 In this case, the controller would become confused as to what to do with the data.
222 In the worst case when all the data is returned back to the controller, the
223 controller could hang. In other cases it could return partial data, resulting
224 in data corruption. This problem has been seen in PPC systems and can also appear
225 on a system with very fast disks, where the SATA controller is sitting behind a
226 number of bridges, and hence there is significant latency between the r/w command
227 and the start command. */
228 /* issue r/w command if the access is ATA DMA */
229 if (qc->tf.protocol == ATA_PROT_DMA)
230 ap->ops->exec_command(ap, &qc->tf);
231}
232
233
234static u8 k2_stat_check_status(struct ata_port *ap)
235{
236 return readl((void *) ap->ioaddr.status_addr);
237}
238
239#ifdef CONFIG_PPC_OF
240/*
241 * k2_sata_proc_info
242 * inout : decides on the direction of the dataflow and the meaning of the
243 * variables
244 * buffer: If inout==FALSE data is being written to it else read from it
245 * *start: If inout==FALSE start of the valid data in the buffer
246 * offset: If inout==FALSE offset from the beginning of the imaginary file
247 * from which we start writing into the buffer
248 * length: If inout==FALSE max number of bytes to be written into the buffer
249 * else number of bytes in the buffer
250 */
251static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
252 off_t offset, int count, int inout)
253{
254 struct ata_port *ap;
255 struct device_node *np;
256 int len, index;
257
258 /* Find the ata_port */
259 ap = ata_shost_to_port(shost);
260 if (ap == NULL)
261 return 0;
262
263 /* Find the OF node for the PCI device proper */
264 np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
265 if (np == NULL)
266 return 0;
267
268 /* Match it to a port node */
269 index = (ap == ap->host->ports[0]) ? 0 : 1;
270 for (np = np->child; np != NULL; np = np->sibling) {
271 const u32 *reg = get_property(np, "reg", NULL);
272 if (!reg)
273 continue;
274 if (index == *reg)
275 break;
276 }
277 if (np == NULL)
278 return 0;
279
280 len = sprintf(page, "devspec: %s\n", np->full_name);
281
282 return len;
283}
284#endif /* CONFIG_PPC_OF */
285
286
287static struct scsi_host_template k2_sata_sht = {
288 .module = THIS_MODULE,
289 .name = DRV_NAME,
290 .ioctl = ata_scsi_ioctl,
291 .queuecommand = ata_scsi_queuecmd,
292 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD,
295 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
296 .emulated = ATA_SHT_EMULATED,
297 .use_clustering = ATA_SHT_USE_CLUSTERING,
298 .proc_name = DRV_NAME,
299 .dma_boundary = ATA_DMA_BOUNDARY,
300 .slave_configure = ata_scsi_slave_config,
301 .slave_destroy = ata_scsi_slave_destroy,
302#ifdef CONFIG_PPC_OF
303 .proc_info = k2_sata_proc_info,
304#endif
305 .bios_param = ata_std_bios_param,
306};
307
308
309static const struct ata_port_operations k2_sata_ops = {
310 .port_disable = ata_port_disable,
311 .tf_load = k2_sata_tf_load,
312 .tf_read = k2_sata_tf_read,
313 .check_status = k2_stat_check_status,
314 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select,
316 .bmdma_setup = k2_bmdma_setup_mmio,
317 .bmdma_start = k2_bmdma_start_mmio,
318 .bmdma_stop = ata_bmdma_stop,
319 .bmdma_status = ata_bmdma_status,
320 .qc_prep = ata_qc_prep,
321 .qc_issue = ata_qc_issue_prot,
322 .data_xfer = ata_mmio_data_xfer,
323 .freeze = ata_bmdma_freeze,
324 .thaw = ata_bmdma_thaw,
325 .error_handler = ata_bmdma_error_handler,
326 .post_internal_cmd = ata_bmdma_post_internal_cmd,
327 .irq_handler = ata_interrupt,
328 .irq_clear = ata_bmdma_irq_clear,
329 .scr_read = k2_sata_scr_read,
330 .scr_write = k2_sata_scr_write,
331 .port_start = ata_port_start,
332 .port_stop = ata_port_stop,
333 .host_stop = ata_pci_host_stop,
334};
335
336static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
337{
338 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
339 port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
340 port->feature_addr =
341 port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
342 port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
343 port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
344 port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
345 port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
346 port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
347 port->command_addr =
348 port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
349 port->altstatus_addr =
350 port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
351 port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
352 port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
353}
354
355
356static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
357{
358 static int printed_version;
359 struct ata_probe_ent *probe_ent = NULL;
360 unsigned long base;
361 void __iomem *mmio_base;
362 int pci_dev_busy = 0;
363 int rc;
364 int i;
365
366 if (!printed_version++)
367 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
368
369 /*
370 * If this driver happens to only be useful on Apple's K2, then
371 * we should check that here as it has a normal Serverworks ID
372 */
373 rc = pci_enable_device(pdev);
374 if (rc)
375 return rc;
376 /*
377 * Check if we have resources mapped at all (second function may
378 * have been disabled by firmware)
379 */
380 if (pci_resource_len(pdev, 5) == 0)
381 return -ENODEV;
382
383 /* Request PCI regions */
384 rc = pci_request_regions(pdev, DRV_NAME);
385 if (rc) {
386 pci_dev_busy = 1;
387 goto err_out;
388 }
389
390 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
391 if (rc)
392 goto err_out_regions;
393 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
394 if (rc)
395 goto err_out_regions;
396
397 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
398 if (probe_ent == NULL) {
399 rc = -ENOMEM;
400 goto err_out_regions;
401 }
402
403 memset(probe_ent, 0, sizeof(*probe_ent));
404 probe_ent->dev = pci_dev_to_dev(pdev);
405 INIT_LIST_HEAD(&probe_ent->node);
406
407 mmio_base = pci_iomap(pdev, 5, 0);
408 if (mmio_base == NULL) {
409 rc = -ENOMEM;
410 goto err_out_free_ent;
411 }
412 base = (unsigned long) mmio_base;
413
414 /* Clear a magic bit in SCR1 according to Darwin; this helps
415 * some funky Seagate drives (though so far, the firmware had
416 * already configured it on the machines I had access to)
417 */
418 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
419 mmio_base + K2_SATA_SICR1_OFFSET);
420
421 /* Clear SATA error & interrupts we don't use */
422 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
423 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
424
425 probe_ent->sht = &k2_sata_sht;
426 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
427 ATA_FLAG_MMIO;
428 probe_ent->port_ops = &k2_sata_ops;
429 probe_ent->n_ports = 4;
430 probe_ent->irq = pdev->irq;
431 probe_ent->irq_flags = IRQF_SHARED;
432 probe_ent->mmio_base = mmio_base;
433
434 /* We don't care much about the PIO/UDMA masks, but the core won't like us
435 * if we don't fill these
436 */
437 probe_ent->pio_mask = 0x1f;
438 probe_ent->mwdma_mask = 0x7;
439 probe_ent->udma_mask = 0x7f;
440
441 /* different controllers have different numbers of ports - currently 4 or 8 */
442 /* All ports are on the same PCI function; the multi-function variant is no
443 * longer available, so this should not be seen in any system. */
444 for (i = 0; i < ent->driver_data; i++)
445 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
446
447 pci_set_master(pdev);
448
449 /* FIXME: check ata_device_add return value */
450 ata_device_add(probe_ent);
451 kfree(probe_ent);
452
453 return 0;
454
455err_out_free_ent:
456 kfree(probe_ent);
457err_out_regions:
458 pci_release_regions(pdev);
459err_out:
460 if (!pci_dev_busy)
461 pci_disable_device(pdev);
462 return rc;
463}
464
465/* 0x240 is device ID for Apple K2 device
466 * 0x241 is device ID for Serverworks Frodo4
467 * 0x242 is device ID for Serverworks Frodo8
468 * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
469 * controller
470 */
471static const struct pci_device_id k2_sata_pci_tbl[] = {
472 { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
473 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
474 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
475 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
476 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
477 { }
478};
479
480
481static struct pci_driver k2_sata_pci_driver = {
482 .name = DRV_NAME,
483 .id_table = k2_sata_pci_tbl,
484 .probe = k2_sata_init_one,
485 .remove = ata_pci_remove_one,
486};
487
488
489static int __init k2_sata_init(void)
490{
491 return pci_register_driver(&k2_sata_pci_driver);
492}
493
494
495static void __exit k2_sata_exit(void)
496{
497 pci_unregister_driver(&k2_sata_pci_driver);
498}
499
500
501MODULE_AUTHOR("Benjamin Herrenschmidt");
502MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
503MODULE_LICENSE("GPL");
504MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
505MODULE_VERSION(DRV_VERSION);
506
507module_init(k2_sata_init);
508module_exit(k2_sata_exit);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
new file mode 100644
index 000000000000..091867e10ea3
--- /dev/null
+++ b/drivers/ata/sata_sx4.c
@@ -0,0 +1,1502 @@
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.9"
50
51
52enum {
53 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
54
55 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
56 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
59
60 PDC_20621_SEQCTL = 0x400,
61 PDC_20621_SEQMASK = 0x480,
62 PDC_20621_GENERAL_CTL = 0x484,
63 PDC_20621_PAGE_SIZE = (32 * 1024),
64
65 /* chosen, not constant, values; we design our own DIMM mem map */
66 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
67 PDC_20621_DIMM_BASE = 0x00200000,
68 PDC_20621_DIMM_DATA = (64 * 1024),
69 PDC_DIMM_DATA_STEP = (256 * 1024),
70 PDC_DIMM_WINDOW_STEP = (8 * 1024),
71 PDC_DIMM_HOST_PRD = (6 * 1024),
72 PDC_DIMM_HOST_PKT = (128 * 0),
73 PDC_DIMM_HPKT_PRD = (128 * 1),
74 PDC_DIMM_ATA_PKT = (128 * 2),
75 PDC_DIMM_APKT_PRD = (128 * 3),
76 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
77 PDC_PAGE_WINDOW = 0x40,
78 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
79 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
80 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
81
82 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
83
84 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
85 (1<<23),
86
87 board_20621 = 0, /* FastTrak S150 SX4 */
88
89 PDC_RESET = (1 << 11), /* HDMA reset */
90
91 PDC_MAX_HDMA = 32,
92 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
93
94 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
95 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
96 PDC_MAX_DIMM_MODULE = 0x02,
97 PDC_I2C_CONTROL_OFFSET = 0x48,
98 PDC_I2C_ADDR_DATA_OFFSET = 0x4C,
99 PDC_DIMM0_CONTROL_OFFSET = 0x80,
100 PDC_DIMM1_CONTROL_OFFSET = 0x84,
101 PDC_SDRAM_CONTROL_OFFSET = 0x88,
102 PDC_I2C_WRITE = 0x00000000,
103 PDC_I2C_READ = 0x00000040,
104 PDC_I2C_START = 0x00000080,
105 PDC_I2C_MASK_INT = 0x00000020,
106 PDC_I2C_COMPLETE = 0x00010000,
107 PDC_I2C_NO_ACK = 0x00100000,
108 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
109 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
110 PDC_DIMM_SPD_ROW_NUM = 3,
111 PDC_DIMM_SPD_COLUMN_NUM = 4,
112 PDC_DIMM_SPD_MODULE_ROW = 5,
113 PDC_DIMM_SPD_TYPE = 11,
114 PDC_DIMM_SPD_FRESH_RATE = 12,
115 PDC_DIMM_SPD_BANK_NUM = 17,
116 PDC_DIMM_SPD_CAS_LATENCY = 18,
117 PDC_DIMM_SPD_ATTRIBUTE = 21,
118 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
119 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
120 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
121 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
122 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
123 PDC_CTL_STATUS = 0x08,
124 PDC_DIMM_WINDOW_CTLR = 0x0C,
125 PDC_TIME_CONTROL = 0x3C,
126 PDC_TIME_PERIOD = 0x40,
127 PDC_TIME_COUNTER = 0x44,
128 PDC_GENERAL_CTLR = 0x484,
129 PCI_PLL_INIT = 0x8A531824,
130 PCI_X_TCOUNT = 0xEE1E5CFF
131};
132
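These constants define a self-designed DIMM memory map: each port owns an 8 KB window (PDC_DIMM_WINDOW_STEP) that holds, at 128-byte steps, the host packet, host-packet PRD, ATA packet, and ATA-packet PRD, with bulk data starting at PDC_20621_DIMM_DATA and stepping 256 KB per port. As a worked example, port 1's ATA packet sits at PDC_20621_DIMM_BASE + 1 * PDC_DIMM_WINDOW_STEP + PDC_DIMM_ATA_PKT = 0x00200000 + 0x2000 + 0x100 = 0x00202100 in DIMM address space.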
133
134struct pdc_port_priv {
135 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
136 u8 *pkt;
137 dma_addr_t pkt_dma;
138};
139
140struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma;
144 unsigned int hdma_prod;
145 unsigned int hdma_cons;
146 struct {
147 struct ata_queued_cmd *qc;
148 unsigned int seq;
149 unsigned long pkt_ofs;
150 } hdma[32];
151};
152
153
154static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
155static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host *host);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
167 u32 device, u32 subaddr, u32 *pdata);
168static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
169static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
170#ifdef ATA_VERBOSE_DEBUG
171static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
172 void *psource, u32 offset, u32 size);
173#endif
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap);
177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178
179
180static struct scsi_host_template pdc_sata_sht = {
181 .module = THIS_MODULE,
182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd,
185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD,
188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
189 .emulated = ATA_SHT_EMULATED,
190 .use_clustering = ATA_SHT_USE_CLUSTERING,
191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
195 .bios_param = ata_std_bios_param,
196};
197
198static const struct ata_port_operations pdc_20621_ops = {
199 .port_disable = ata_port_disable,
200 .tf_load = pdc_tf_load_mmio,
201 .tf_read = ata_tf_read,
202 .check_status = ata_check_status,
203 .exec_command = pdc_exec_command_mmio,
204 .dev_select = ata_std_dev_select,
205 .phy_reset = pdc_20621_phy_reset,
206 .qc_prep = pdc20621_qc_prep,
207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
209 .eng_timeout = pdc_eng_timeout,
210 .irq_handler = pdc20621_interrupt,
211 .irq_clear = pdc20621_irq_clear,
212 .port_start = pdc_port_start,
213 .port_stop = pdc_port_stop,
214 .host_stop = pdc20621_host_stop,
215};
216
217static const struct ata_port_info pdc_port_info[] = {
218 /* board_20621 */
219 {
220 .sht = &pdc_sata_sht,
221 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
224 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
227 .port_ops = &pdc_20621_ops,
228 },
229
230};
231
232static const struct pci_device_id pdc_sata_pci_tbl[] = {
233 { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
234 board_20621 },
235 { } /* terminate list */
236};
237
238
239static struct pci_driver pdc_sata_pci_driver = {
240 .name = DRV_NAME,
241 .id_table = pdc_sata_pci_tbl,
242 .probe = pdc_sata_init_one,
243 .remove = ata_pci_remove_one,
244};
245
246
247static void pdc20621_host_stop(struct ata_host *host)
248{
249 struct pci_dev *pdev = to_pci_dev(host->dev);
250 struct pdc_host_priv *hpriv = host->private_data;
251 void __iomem *dimm_mmio = hpriv->dimm_mmio;
252
253 pci_iounmap(pdev, dimm_mmio);
254 kfree(hpriv);
255
256 pci_iounmap(pdev, host->mmio_base);
257}
258
259static int pdc_port_start(struct ata_port *ap)
260{
261 struct device *dev = ap->host->dev;
262 struct pdc_port_priv *pp;
263 int rc;
264
265 rc = ata_port_start(ap);
266 if (rc)
267 return rc;
268
269 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
270 if (!pp) {
271 rc = -ENOMEM;
272 goto err_out;
273 }
274 memset(pp, 0, sizeof(*pp));
275
276 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
277 if (!pp->pkt) {
278 rc = -ENOMEM;
279 goto err_out_kfree;
280 }
281
282 ap->private_data = pp;
283
284 return 0;
285
286err_out_kfree:
287 kfree(pp);
288err_out:
289 ata_port_stop(ap);
290 return rc;
291}
292
293
294static void pdc_port_stop(struct ata_port *ap)
295{
296 struct device *dev = ap->host->dev;
297 struct pdc_port_priv *pp = ap->private_data;
298
299 ap->private_data = NULL;
300 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
301 kfree(pp);
302 ata_port_stop(ap);
303}
304
305
306static void pdc_20621_phy_reset (struct ata_port *ap)
307{
308 VPRINTK("ENTER\n");
309 ap->cbl = ATA_CBL_SATA;
310 ata_port_probe(ap);
311 ata_bus_reset(ap);
312}
313
314static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
315 unsigned int portno,
316 unsigned int total_len)
317{
318 u32 addr;
319 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
320 u32 *buf32 = (u32 *) buf;
321
322 /* output ATA packet S/G table */
323 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
324 (PDC_DIMM_DATA_STEP * portno);
325 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
326 buf32[dw] = cpu_to_le32(addr);
327 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
328
329 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
330 PDC_20621_DIMM_BASE +
331 (PDC_DIMM_WINDOW_STEP * portno) +
332 PDC_DIMM_APKT_PRD,
333 buf32[dw], buf32[dw + 1]);
334}
335
336static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
337 unsigned int portno,
338 unsigned int total_len)
339{
340 u32 addr;
341 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
342 u32 *buf32 = (u32 *) buf;
343
344 /* output Host DMA packet S/G table */
345 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
346 (PDC_DIMM_DATA_STEP * portno);
347
348 buf32[dw] = cpu_to_le32(addr);
349 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
350
351 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
352 PDC_20621_DIMM_BASE +
353 (PDC_DIMM_WINDOW_STEP * portno) +
354 PDC_DIMM_HPKT_PRD,
355 buf32[dw], buf32[dw + 1]);
356}
357
358static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
359 unsigned int devno, u8 *buf,
360 unsigned int portno)
361{
362 unsigned int i, dw;
363 u32 *buf32 = (u32 *) buf;
364 u8 dev_reg;
365
366 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
367 (PDC_DIMM_WINDOW_STEP * portno) +
368 PDC_DIMM_APKT_PRD;
369 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
370
371 i = PDC_DIMM_ATA_PKT;
372
373 /*
374 * Set up ATA packet
375 */
376 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
377 buf[i++] = PDC_PKT_READ;
378 else if (tf->protocol == ATA_PROT_NODATA)
379 buf[i++] = PDC_PKT_NODATA;
380 else
381 buf[i++] = 0;
382 buf[i++] = 0; /* reserved */
383 buf[i++] = portno + 1; /* seq. id */
384 buf[i++] = 0xff; /* delay seq. id */
385
386 /* dimm dma S/G, and next-pkt */
387 dw = i >> 2;
388 if (tf->protocol == ATA_PROT_NODATA)
389 buf32[dw] = 0;
390 else
391 buf32[dw] = cpu_to_le32(dimm_sg);
392 buf32[dw + 1] = 0;
393 i += 8;
394
395 if (devno == 0)
396 dev_reg = ATA_DEVICE_OBS;
397 else
398 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
399
400 /* select device */
401 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
402 buf[i++] = dev_reg;
403
404 /* device control register */
405 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
406 buf[i++] = tf->ctl;
407
408 return i;
409}
410
411static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
412 unsigned int portno)
413{
414 unsigned int dw;
415 u32 tmp, *buf32 = (u32 *) buf;
416
417 unsigned int host_sg = PDC_20621_DIMM_BASE +
418 (PDC_DIMM_WINDOW_STEP * portno) +
419 PDC_DIMM_HOST_PRD;
420 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
421 (PDC_DIMM_WINDOW_STEP * portno) +
422 PDC_DIMM_HPKT_PRD;
423 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
424 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
425
426 dw = PDC_DIMM_HOST_PKT >> 2;
427
428 /*
429 * Set up Host DMA packet
430 */
431 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
432 tmp = PDC_PKT_READ;
433 else
434 tmp = 0;
435 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
436 tmp |= (0xff << 24); /* delay seq. id */
437 buf32[dw + 0] = cpu_to_le32(tmp);
438 buf32[dw + 1] = cpu_to_le32(host_sg);
439 buf32[dw + 2] = cpu_to_le32(dimm_sg);
440 buf32[dw + 3] = 0;
441
442 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
443 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
444 PDC_DIMM_HOST_PKT,
445 buf32[dw + 0],
446 buf32[dw + 1],
447 buf32[dw + 2],
448 buf32[dw + 3]);
449}
450
451static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
452{
453 struct scatterlist *sg;
454 struct ata_port *ap = qc->ap;
455 struct pdc_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host->mmio_base;
457 struct pdc_host_priv *hpriv = ap->host->private_data;
458 void __iomem *dimm_mmio = hpriv->dimm_mmio;
459 unsigned int portno = ap->port_no;
460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462
463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464
465 VPRINTK("ata%u: ENTER\n", ap->id);
466
467 /* hard-code chip #0 */
468 mmio += PDC_CHIP0_OFS;
469
470 /*
471 * Build S/G table
472 */
473 idx = 0;
474 ata_for_each_sg(sg, qc) {
475 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
476 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
477 total_len += sg_dma_len(sg);
478 }
479 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
480 sgt_len = idx * 4;
481
482 /*
483 * Build ATA, host DMA packets
484 */
485 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
486 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
487
488 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
489 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
490
491 if (qc->tf.flags & ATA_TFLAG_LBA48)
492 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
493 else
494 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
495
496 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
497
498 /* copy three S/G tables and two packets to DIMM MMIO window */
499 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
500 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
501 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
502 PDC_DIMM_HOST_PRD,
503 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
504
505 /* force host FIFO dump */
506 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
507
508 readl(dimm_mmio); /* MMIO PCI posting flush */
509
510 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
511}
512
513static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
514{
515 struct ata_port *ap = qc->ap;
516 struct pdc_port_priv *pp = ap->private_data;
517 void __iomem *mmio = ap->host->mmio_base;
518 struct pdc_host_priv *hpriv = ap->host->private_data;
519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537 /* copy three S/G tables and two packets to DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
563static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566{
567 struct ata_port *ap = qc->ap;
568 struct ata_host *host = ap->host;
569 void __iomem *mmio = host->mmio_base;
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579}
580
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
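pdc20621_push_hdma() and pdc20621_pop_hdma() form a classic single-producer ring: free-running prod/cons counters are masked into a power-of-two array, and doing_hdma tracks whether the engine is busy. A self-contained sketch of that discipline, assuming a depth of 32 (the driver's real PDC_HDMA_Q_MASK may differ) and with a full-queue assert added that the driver itself relies on tag limits to guarantee:

	#include <assert.h>
	#include <stdio.h>

	#define Q_DEPTH 32			/* assumed; must be a power of two */
	#define Q_MASK  (Q_DEPTH - 1)

	struct hdma_entry { unsigned int seq, pkt_ofs; };

	static struct hdma_entry ring[Q_DEPTH];
	static unsigned int prod, cons;		/* free-running; wrap is harmless */
	static int doing_hdma;

	static void push(unsigned int seq, unsigned int pkt_ofs)
	{
		if (!doing_hdma) {		/* engine idle: dispatch at once */
			printf("dispatch seq %u ofs 0x%x\n", seq, pkt_ofs);
			doing_hdma = 1;
			return;
		}
		assert(prod - cons < Q_DEPTH);	/* full-check, added for the sketch */
		ring[prod & Q_MASK] = (struct hdma_entry){ seq, pkt_ofs };
		prod++;
	}

	static void pop(void)
	{
		if (prod == cons) {		/* queue drained: engine goes idle */
			doing_hdma = 0;
			return;
		}
		printf("dispatch seq %u ofs 0x%x\n",
		       ring[cons & Q_MASK].seq, ring[cons & Q_MASK].pkt_ofs);
		cons++;
	}

	int main(void)
	{
		push(5, 0x200);			/* dispatched immediately */
		push(6, 0x280);			/* queued behind it */
		pop();				/* completion: dispatch the queued entry */
		pop();				/* queue empty: engine idles */
		return 0;
	}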
617
618#ifdef ATA_VERBOSE_DEBUG
619static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620{
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 struct pdc_host_priv *hpriv = ap->host->private_data;
624 void *dimm_mmio = hpriv->dimm_mmio;
625
626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
627 dimm_mmio += PDC_DIMM_HOST_PKT;
628
629 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
630 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
631 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
632 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
633}
634#else
635static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
636#endif /* ATA_VERBOSE_DEBUG */
637
638static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639{
640 struct ata_port *ap = qc->ap;
641 struct ata_host *host = ap->host;
642 unsigned int port_no = ap->port_no;
643 void __iomem *mmio = host->mmio_base;
644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
645 u8 seq = (u8) (port_no + 1);
646 unsigned int port_ofs;
647
648 /* hard-code chip #0 */
649 mmio += PDC_CHIP0_OFS;
650
651 VPRINTK("ata%u: ENTER\n", ap->id);
652
653 wmb(); /* flush PRD, pkt writes */
654
655 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
656
657 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
658 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
659 seq += 4;
660
661 pdc20621_dump_hdma(qc);
662 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
663 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
664 port_ofs + PDC_DIMM_HOST_PKT,
665 port_ofs + PDC_DIMM_HOST_PKT,
666 seq);
667 } else {
668 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670
671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT,
677 seq);
678 }
679}
680
681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{
683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA:
685 case ATA_PROT_NODATA:
686 pdc20621_packet_start(qc);
687 return 0;
688
689 case ATA_PROT_ATAPI_DMA:
690 BUG();
691 break;
692
693 default:
694 break;
695 }
696
697 return ata_qc_issue_prot(qc);
698}
699
700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
701 struct ata_queued_cmd *qc,
702 unsigned int doing_hdma,
703 void __iomem *mmio)
704{
705 unsigned int port_no = ap->port_no;
706 unsigned int port_ofs =
707 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
708 u8 status;
709 unsigned int handled = 0;
710
711 VPRINTK("ENTER\n");
712
713 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
714 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
715
716 /* step two - DMA from DIMM to host */
717 if (doing_hdma) {
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */
721 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
722 ata_qc_complete(qc);
723 pdc20621_pop_hdma(qc);
724 }
725
726 /* step one - exec ATA command */
727 else {
728 u8 seq = (u8) (port_no + 1 + 4);
729 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
730 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
731
732 /* submit hdma pkt */
733 pdc20621_dump_hdma(qc);
734 pdc20621_push_hdma(qc, seq,
735 port_ofs + PDC_DIMM_HOST_PKT);
736 }
737 handled = 1;
738
739 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
740
741 /* step one - DMA from host to DIMM */
742 if (doing_hdma) {
743 u8 seq = (u8) (port_no + 1);
744 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
745 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
746
747 /* submit ata pkt */
748 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
749 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
750 writel(port_ofs + PDC_DIMM_ATA_PKT,
751 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
753 }
754
755 /* step two - execute ATA command */
756 else {
757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
759 /* get drive status; clear intr; complete txn */
760 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
761 ata_qc_complete(qc);
762 pdc20621_pop_hdma(qc);
763 }
764 handled = 1;
765
766 /* command completion, but no data xfer */
767 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
768
769 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
770 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
771 qc->err_mask |= ac_err_mask(status);
772 ata_qc_complete(qc);
773 handled = 1;
774
775 } else {
776 ap->stats.idle_irq++;
777 }
778
779 return handled;
780}
781
782static void pdc20621_irq_clear(struct ata_port *ap)
783{
784 struct ata_host *host = ap->host;
785 void __iomem *mmio = host->mmio_base;
786
787 mmio += PDC_CHIP0_OFS;
788
789 readl(mmio + PDC_20621_SEQMASK);
790}
791
792static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
793{
794 struct ata_host *host = dev_instance;
795 struct ata_port *ap;
796 u32 mask = 0;
797 unsigned int i, tmp, port_no;
798 unsigned int handled = 0;
799 void __iomem *mmio_base;
800
801 VPRINTK("ENTER\n");
802
803 if (!host || !host->mmio_base) {
804 VPRINTK("QUICK EXIT\n");
805 return IRQ_NONE;
806 }
807
808 mmio_base = host->mmio_base;
809
810 /* reading should also clear interrupts */
811 mmio_base += PDC_CHIP0_OFS;
812 mask = readl(mmio_base + PDC_20621_SEQMASK);
813 VPRINTK("mask == 0x%x\n", mask);
814
815 if (mask == 0xffffffff) {
816 VPRINTK("QUICK EXIT 2\n");
817 return IRQ_NONE;
818 }
819 mask &= 0xffff; /* only 16 tags possible */
820 if (!mask) {
821 VPRINTK("QUICK EXIT 3\n");
822 return IRQ_NONE;
823 }
824
825 spin_lock(&host->lock);
826
827 for (i = 1; i < 9; i++) {
828 port_no = i - 1;
829 if (port_no > 3)
830 port_no -= 4;
831 if (port_no >= host->n_ports)
832 ap = NULL;
833 else
834 ap = host->ports[port_no];
835 tmp = mask & (1 << i);
836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
837 if (tmp && ap &&
838 !(ap->flags & ATA_FLAG_DISABLED)) {
839 struct ata_queued_cmd *qc;
840
841 qc = ata_qc_from_tag(ap, ap->active_tag);
842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
843 handled += pdc20621_host_intr(ap, qc, (i > 4),
844 mmio_base);
845 }
846 }
847
848 spin_unlock(&host->lock);
849
850 VPRINTK("mask == 0x%x\n", mask);
851
852 VPRINTK("EXIT\n");
853
854 return IRQ_RETVAL(handled);
855}
856
857static void pdc_eng_timeout(struct ata_port *ap)
858{
859 u8 drv_stat;
860 struct ata_host *host = ap->host;
861 struct ata_queued_cmd *qc;
862 unsigned long flags;
863
864 DPRINTK("ENTER\n");
865
866 spin_lock_irqsave(&host->lock, flags);
867
868 qc = ata_qc_from_tag(ap, ap->active_tag);
869
870 switch (qc->tf.protocol) {
871 case ATA_PROT_DMA:
872 case ATA_PROT_NODATA:
873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
875 break;
876
877 default:
878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
879
880 ata_port_printk(ap, KERN_ERR,
881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
883
884 qc->err_mask |= ac_err_mask(drv_stat);
885 break;
886 }
887
888 spin_unlock_irqrestore(&host->lock, flags);
889 ata_eh_qc_complete(qc);
890 DPRINTK("EXIT\n");
891}
892
893static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
894{
895 WARN_ON (tf->protocol == ATA_PROT_DMA ||
896 tf->protocol == ATA_PROT_NODATA);
897 ata_tf_load(ap, tf);
898}
899
900
901static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
902{
903 WARN_ON (tf->protocol == ATA_PROT_DMA ||
904 tf->protocol == ATA_PROT_NODATA);
905 ata_exec_command(ap, tf);
906}
907
908
909static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
910{
911 port->cmd_addr = base;
912 port->data_addr = base;
913 port->feature_addr =
914 port->error_addr = base + 0x4;
915 port->nsect_addr = base + 0x8;
916 port->lbal_addr = base + 0xc;
917 port->lbam_addr = base + 0x10;
918 port->lbah_addr = base + 0x14;
919 port->device_addr = base + 0x18;
920 port->command_addr =
921 port->status_addr = base + 0x1c;
922 port->altstatus_addr =
923 port->ctl_addr = base + 0x38;
924}
925
926
927#ifdef ATA_VERBOSE_DEBUG
928static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
929 u32 offset, u32 size)
930{
931 u32 window_size;
932 u16 idx;
933 u8 page_mask;
934 long dist;
935 void __iomem *mmio = pe->mmio_base;
936 struct pdc_host_priv *hpriv = pe->private_data;
937 void __iomem *dimm_mmio = hpriv->dimm_mmio;
938
939 /* hard-code chip #0 */
940 mmio += PDC_CHIP0_OFS;
941
942 page_mask = 0x00;
943 window_size = 0x2000 * 4; /* 32K byte uchar size */
944 idx = (u16) (offset / window_size);
945
946 writel(0x01, mmio + PDC_GENERAL_CTLR);
947 readl(mmio + PDC_GENERAL_CTLR);
948 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
949 readl(mmio + PDC_DIMM_WINDOW_CTLR);
950
951 offset -= (idx * window_size);
952 idx++;
953 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
954 (long) (window_size - offset);
955 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
956 dist);
957
958 psource += dist;
959 size -= dist;
960 while ((long) size >= (long) window_size) {
961 writel(0x01, mmio + PDC_GENERAL_CTLR);
962 readl(mmio + PDC_GENERAL_CTLR);
963 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
964 readl(mmio + PDC_DIMM_WINDOW_CTLR);
965 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
966 window_size / 4);
967 psource += window_size;
968 size -= window_size;
969 idx++;
970 }
971
972 if (size) {
973 writel(0x01, mmio + PDC_GENERAL_CTLR);
974 readl(mmio + PDC_GENERAL_CTLR);
975 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
976 readl(mmio + PDC_DIMM_WINDOW_CTLR);
977 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
978 size / 4);
979 }
980}
981#endif
982
983
984static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
985 u32 offset, u32 size)
986{
987 u32 window_size;
988 u16 idx;
989 u8 page_mask;
990 long dist;
991 void __iomem *mmio = pe->mmio_base;
992 struct pdc_host_priv *hpriv = pe->private_data;
993 void __iomem *dimm_mmio = hpriv->dimm_mmio;
994
995 /* hard-code chip #0 */
996 mmio += PDC_CHIP0_OFS;
997
998 page_mask = 0x00;
999 window_size = 0x2000 * 4; /* 32K byte uchar size */
1000 idx = (u16) (offset / window_size);
1001
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004 offset -= (idx * window_size);
1005 idx++;
1006 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1007 (long) (window_size - offset);
1008 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1009 writel(0x01, mmio + PDC_GENERAL_CTLR);
1010 readl(mmio + PDC_GENERAL_CTLR);
1011
1012 psource += dist;
1013 size -= dist;
1014 while ((long) size >= (long) window_size) {
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 memcpy_toio(dimm_mmio, psource, window_size / 4);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR);
1020 psource += window_size;
1021 size -= window_size;
1022 idx++;
1023 }
1024
1025 if (size) {
1026 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028 memcpy_toio(dimm_mmio, psource, size / 4);
1029 writel(0x01, mmio + PDC_GENERAL_CTLR);
1030 readl(mmio + PDC_GENERAL_CTLR);
1031 }
1032}
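Both DIMM copy routines walk a fixed 32 KB window (0x2000 * 4 bytes): copy the head fragment up to the window boundary, then whole windows, then the tail, bumping the window index at each step. A user-space model of the same split, with the DIMM and the window-select register simulated by plain memory (names and sizes here are illustrative, not the driver's):

	#include <stdio.h>
	#include <string.h>

	#define WINDOW 0x8000u		/* 0x2000 * 4 bytes, as in the driver */

	static char dimm[4 * WINDOW];	/* simulated DIMM backing store */

	static void put_to_dimm(const char *src, unsigned int offset,
				unsigned int size)
	{
		unsigned int idx = offset / WINDOW;	/* starting window page */

		offset -= idx * WINDOW;
		while (size) {
			unsigned int chunk = WINDOW - offset;

			if (chunk > size)
				chunk = size;
			/* stands in for selecting window idx + memcpy_toio() */
			memcpy(dimm + idx * WINDOW + offset, src, chunk);
			src += chunk;
			size -= chunk;
			offset = 0;	/* later windows start at offset 0 */
			idx++;		/* advance the window page */
		}
	}

	int main(void)
	{
		char msg[] = "spans a window boundary";

		put_to_dimm(msg, WINDOW - 8, sizeof(msg));
		printf("%.8s|%s\n", dimm + WINDOW - 8, dimm + WINDOW);
		return 0;
	}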
1033
1034
1035static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1036 u32 subaddr, u32 *pdata)
1037{
1038 void __iomem *mmio = pe->mmio_base;
1039 u32 i2creg = 0;
1040 u32 status;
1041 u32 count = 0;
1042
1043 /* hard-code chip #0 */
1044 mmio += PDC_CHIP0_OFS;
1045
1046 i2creg |= device << 24;
1047 i2creg |= subaddr << 16;
1048
1049 /* Set the device and subaddress */
1050 writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
1051 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1052
1053 /* Write Control to perform read operation, mask int */
1054 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1055 mmio + PDC_I2C_CONTROL_OFFSET);
1056
1057 for (count = 0; count <= 1000; count ++) {
1058 status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
1059 if (status & PDC_I2C_COMPLETE) {
1060 status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1061 break;
1062 } else if (count == 1000)
1063 return 0;
1064 }
1065
1066 *pdata = (status >> 8) & 0x000000ff;
1067 return 1;
1068}
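pdc20621_i2c_read() uses a bounded completion poll: kick off the transfer, then spin a fixed number of times on a status bit before giving up. A self-contained model of that pattern, with the device simulated by a counter that raises the completion bit after a few polls (the bit names and the simulated device are assumptions, not the hardware's):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define I2C_START    (1u << 0)
	#define I2C_COMPLETE (1u << 16)

	static unsigned int polls_left;

	static void dev_write(uint32_t val)
	{
		if (val & I2C_START)
			polls_left = 3;		/* completes after three polls */
	}

	static uint32_t dev_read(void)
	{
		return polls_left && --polls_left == 0 ? I2C_COMPLETE : 0;
	}

	static bool i2c_xfer(void)
	{
		int count;

		dev_write(I2C_START);
		for (count = 0; count < 1000; count++)
			if (dev_read() & I2C_COMPLETE)
				return true;
		return false;			/* timed out, as the driver returns 0 */
	}

	int main(void)
	{
		printf("transfer %s\n", i2c_xfer() ? "complete" : "timed out");
		return 0;
	}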
1069
1070
1071static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1072{
1073 u32 data = 0;
1074 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1075 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076 if (data == 100)
1077 return 100;
1078 } else
1079 return 0;
1080
1081 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082 if (data <= 0x75)
1083 return 133;
1084 } else
1085 return 0;
1086
1087 return 0;
1088}
1089
1090
1091static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1092{
1093 u32 spd0[50];
1094 u32 data = 0;
1095 int size, i;
1096 u8 bdimmsize;
1097 void __iomem *mmio = pe->mmio_base;
1098 static const struct {
1099 unsigned int reg;
1100 unsigned int ofs;
1101 } pdc_i2c_read_data [] = {
1102 { PDC_DIMM_SPD_TYPE, 11 },
1103 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1104 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106 { PDC_DIMM_SPD_ROW_NUM, 3 },
1107 { PDC_DIMM_SPD_BANK_NUM, 17 },
1108 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1109 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114 };
1115
1116 /* hard-code chip #0 */
1117 mmio += PDC_CHIP0_OFS;
1118
1119 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1120 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1121 pdc_i2c_read_data[i].reg,
1122 &spd0[pdc_i2c_read_data[i].ofs]);
1123
1124 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1127 data |= (((((spd0[29] > spd0[28])
1128 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130
1131 if (spd0[18] & 0x08)
1132 data |= ((0x03) << 14);
1133 else if (spd0[18] & 0x04)
1134 data |= ((0x02) << 14);
1135 else if (spd0[18] & 0x01)
1136 data |= ((0x01) << 14);
1137 else
1138 data |= (0 << 14);
1139
1140 /*
1141 Calculate bDIMMSize (log2 of the module size in bytes) and
1142 merge the resulting size into the start/end address field.
1143 */
1144
1145 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1147 data |= (((size / 16) - 1) << 16);
1148 data |= (0 << 23);
1149 data |= 8;
1150 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1151 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1152 return size;
1153}
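The size arithmetic at the end of pdc20621_prog_dimm0() treats bdimmsize as the log2 of the module size in bytes: column bits + module rows / 2 + row bits + banks / 2 + 3. A runnable check of that formula with illustrative SPD values (12 row bits, 9 column bits, 4 banks, 2 module rows; a typical SDRAM module, not data from any specific DIMM):

	#include <stdio.h>

	int main(void)
	{
		unsigned int rows = 12, cols = 9, banks = 4, mod_rows = 2;

		/* bdimmsize = cols + mod_rows/2 + rows + banks/2 + 3 */
		unsigned int bdimmsize = cols + mod_rows / 2 + rows + banks / 2 + 3;
		unsigned int size_mb = (1u << bdimmsize) >> 20;

		/* prints: bdimmsize = 27 -> 128 MB */
		printf("bdimmsize = %u -> %u MB\n", bdimmsize, size_mb);
		return 0;
	}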
1154
1155
1156static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1157{
1158 u32 data, spd0;
1159 int error, i;
1160 void __iomem *mmio = pe->mmio_base;
1161
1162 /* hard-code chip #0 */
1163 mmio += PDC_CHIP0_OFS;
1164
1165 /*
1166 Set To Default : DIMM Module Global Control Register (0x022259F1)
1167 DIMM Arbitration Disable (bit 20)
1168 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1169 Refresh Enable (bit 17)
1170 */
1171
1172 data = 0x022259F1;
1173 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1174 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1175
1176 /* Turn on ECC if the DIMM is an ECC module (SPD type == 0x02) */
1177 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1178 PDC_DIMM_SPD_TYPE, &spd0);
1179 if (spd0 == 0x02) {
1180 data |= (0x01 << 16);
1181 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1182 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1183 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1184 }
1185
1186 /* DIMM Initialization Select/Enable (bit 18/19) */
1187 data &= (~(1<<18));
1188 data |= (1<<19);
1189 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1190
1191 error = 1;
1192 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1193 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1194 if (!(data & (1<<19))) {
1195 error = 0;
1196 break;
1197 }
1198 msleep(i*100);
1199 }
1200 return error;
1201}
1202
1203
1204static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1205{
1206 int speed, size, length;
1207 u32 addr, spd0, pci_status;
1208 u32 tmp = 0;
1209 u32 time_period = 0;
1210 u32 tcount = 0;
1211 u32 ticks = 0;
1212 u32 clock = 0;
1213 u32 fparam = 0;
1214 void __iomem *mmio = pe->mmio_base;
1215
1216 /* hard-code chip #0 */
1217 mmio += PDC_CHIP0_OFS;
1218
1219 /* Initialize PLL based upon PCI Bus Frequency */
1220
1221 /* Initialize Time Period Register */
1222 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1223 time_period = readl(mmio + PDC_TIME_PERIOD);
1224 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1225
1226 /* Enable timer */
1227 writel(0x00001a0, mmio + PDC_TIME_CONTROL);
1228 readl(mmio + PDC_TIME_CONTROL);
1229
1230 /* Wait 3 seconds */
1231 msleep(3000);
1232
1233 /*
1234 When the timer is enabled, the counter decreases by one on
1235 every internal clock cycle.
1236 */
1237
1238 tcount = readl(mmio + PDC_TIME_COUNTER);
1239 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1240
1241 /*
1242 If the SX4 is on a PCI-X bus, then after 3 seconds the timer
1243 counter register should be >= (0xffffffff - 3x10^8).
1244 */
1245 if (tcount >= PCI_X_TCOUNT) {
1246 ticks = (time_period - tcount);
1247 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1248
1249 clock = (ticks / 300000);
1250 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1251
1252 clock = (clock * 33);
1253 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1254
1255 /* PLL F Param (bit 22:16) */
1256 fparam = (1400000 / clock) - 2;
1257 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1258
1259 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1260 pci_status = (0x8a001824 | (fparam << 16));
1261 } else
1262 pci_status = PCI_PLL_INIT;
1263
1264 /* Initialize PLL. */
1265 VPRINTK("pci_status: 0x%x\n", pci_status);
1266 writel(pci_status, mmio + PDC_CTL_STATUS);
1267 readl(mmio + PDC_CTL_STATUS);
1268
1269 /*
1270 Read SPD of DIMM by I2C interface,
1271 and program the DIMM Module Controller.
1272 */
1273 if (!(speed = pdc20621_detect_dimm(pe))) {
1274 printk(KERN_ERR "Detect Local DIMM Fail\n");
1275 return 1; /* DIMM error */
1276 }
1277 VPRINTK("Local DIMM Speed = %d\n", speed);
1278
1279 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1280 size = pdc20621_prog_dimm0(pe);
1281 VPRINTK("Local DIMM Size = %dMB\n",size);
1282
1283 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1284 if (pdc20621_prog_dimm_global(pe)) {
1285 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1286 return 1;
1287 }
1288
1289#ifdef ATA_VERBOSE_DEBUG
1290 {
1291 u8 test_pattern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1292 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
1293 '1','.','1','0',
1294 '9','8','0','3','1','6','1','2',0,0};
1295 u8 test_pattern2[40] = {0};
1296 
1297 pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x10040, 40);
1298 pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x40, 40);
1299 
1300 pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x10040, 40);
1301 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1302 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1303 test_pattern2[1], &(test_pattern2[2]));
1304 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x10040,
1305 40);
1306 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1307 test_pattern2[1], &(test_pattern2[2]));
1308 
1309 pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x40, 40);
1310 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1311 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1312 test_pattern2[1], &(test_pattern2[2]));
1313 }
1314#endif
1315
1316 /* ECC initialization. */
1317
1318 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1319 PDC_DIMM_SPD_TYPE, &spd0);
1320 if (spd0 == 0x02) {
1321 VPRINTK("Start ECC initialization\n");
1322 addr = 0;
1323 length = size * 1024 * 1024;
1324 while (addr < length) {
1325 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1326 sizeof(u32));
1327 addr += sizeof(u32);
1328 }
1329 VPRINTK("Finish ECC initialization\n");
1330 }
1331 return 0;
1332}
1333
1334
1335static void pdc_20621_init(struct ata_probe_ent *pe)
1336{
1337 u32 tmp;
1338 void __iomem *mmio = pe->mmio_base;
1339
1340 /* hard-code chip #0 */
1341 mmio += PDC_CHIP0_OFS;
1342
1343 /*
1344 * Select page 0x40 for our 32k DIMM window
1345 */
1346 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1347 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1348 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1349
1350 /*
1351 * Reset Host DMA
1352 */
1353 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1354 tmp |= PDC_RESET;
1355 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1356 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1357
1358 udelay(10);
1359
1360 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1361 tmp &= ~PDC_RESET;
1362 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1363 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1364}
1365
1366static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1367{
1368 static int printed_version;
1369 struct ata_probe_ent *probe_ent = NULL;
1370 unsigned long base;
1371 void __iomem *mmio_base;
1372 void __iomem *dimm_mmio = NULL;
1373 struct pdc_host_priv *hpriv = NULL;
1374 unsigned int board_idx = (unsigned int) ent->driver_data;
1375 int pci_dev_busy = 0;
1376 int rc;
1377
1378 if (!printed_version++)
1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1380
1381 rc = pci_enable_device(pdev);
1382 if (rc)
1383 return rc;
1384
1385 rc = pci_request_regions(pdev, DRV_NAME);
1386 if (rc) {
1387 pci_dev_busy = 1;
1388 goto err_out;
1389 }
1390
1391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1392 if (rc)
1393 goto err_out_regions;
1394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1395 if (rc)
1396 goto err_out_regions;
1397
1398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1399 if (probe_ent == NULL) {
1400 rc = -ENOMEM;
1401 goto err_out_regions;
1402 }
1403
1404 memset(probe_ent, 0, sizeof(*probe_ent));
1405 probe_ent->dev = pci_dev_to_dev(pdev);
1406 INIT_LIST_HEAD(&probe_ent->node);
1407
1408 mmio_base = pci_iomap(pdev, 3, 0);
1409 if (mmio_base == NULL) {
1410 rc = -ENOMEM;
1411 goto err_out_free_ent;
1412 }
1413 base = (unsigned long) mmio_base;
1414
1415 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1416 if (!hpriv) {
1417 rc = -ENOMEM;
1418 goto err_out_iounmap;
1419 }
1420 memset(hpriv, 0, sizeof(*hpriv));
1421
1422 dimm_mmio = pci_iomap(pdev, 4, 0);
1423 if (!dimm_mmio) {
1424 kfree(hpriv);
1425 rc = -ENOMEM;
1426 goto err_out_iounmap;
1427 }
1428
1429 hpriv->dimm_mmio = dimm_mmio;
1430
1431 probe_ent->sht = pdc_port_info[board_idx].sht;
1432 probe_ent->port_flags = pdc_port_info[board_idx].flags;
1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
1436 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
1437
1438 probe_ent->irq = pdev->irq;
1439 probe_ent->irq_flags = IRQF_SHARED;
1440 probe_ent->mmio_base = mmio_base;
1441
1442 probe_ent->private_data = hpriv;
1443 base += PDC_CHIP0_OFS;
1444
1445 probe_ent->n_ports = 4;
1446 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
1447 pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
1448 pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
1449 pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
1450
1451 pci_set_master(pdev);
1452
1453 /* initialize adapter */
1454 /* initialize local dimm */
1455 if (pdc20621_dimm_init(probe_ent)) {
1456 rc = -ENOMEM;
1457 goto err_out_iounmap_dimm;
1458 }
1459 pdc_20621_init(probe_ent);
1460
1461 /* FIXME: check ata_device_add return value */
1462 ata_device_add(probe_ent);
1463 kfree(probe_ent);
1464
1465 return 0;
1466
1467err_out_iounmap_dimm: /* only get to this label if 20621 */
1468 kfree(hpriv);
1469 pci_iounmap(pdev, dimm_mmio);
1470err_out_iounmap:
1471 pci_iounmap(pdev, mmio_base);
1472err_out_free_ent:
1473 kfree(probe_ent);
1474err_out_regions:
1475 pci_release_regions(pdev);
1476err_out:
1477 if (!pci_dev_busy)
1478 pci_disable_device(pdev);
1479 return rc;
1480}
1481
1482
1483static int __init pdc_sata_init(void)
1484{
1485 return pci_register_driver(&pdc_sata_pci_driver);
1486}
1487
1488
1489static void __exit pdc_sata_exit(void)
1490{
1491 pci_unregister_driver(&pdc_sata_pci_driver);
1492}
1493
1494
1495MODULE_AUTHOR("Jeff Garzik");
1496MODULE_DESCRIPTION("Promise SATA low-level driver");
1497MODULE_LICENSE("GPL");
1498MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1499MODULE_VERSION(DRV_VERSION);
1500
1501module_init(pdc_sata_init);
1502module_exit(pdc_sata_exit);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
new file mode 100644
index 000000000000..8fc6e800011a
--- /dev/null
+++ b/drivers/ata/sata_uli.c
@@ -0,0 +1,300 @@
1/*
2 * sata_uli.c - ULi Electronics SATA
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 *
20 * libata documentation is available via 'make {ps|pdf}docs',
21 * as Documentation/DocBook/libata.*
22 *
23 * Hardware documentation available under NDA.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/device.h>
35#include <scsi/scsi_host.h>
36#include <linux/libata.h>
37
38#define DRV_NAME "sata_uli"
39#define DRV_VERSION "1.0"
40
41enum {
42 uli_5289 = 0,
43 uli_5287 = 1,
44 uli_5281 = 2,
45
46 uli_max_ports = 4,
47
48 /* PCI configuration registers */
49 ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
50 ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
51 ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
52 ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
53};
54
55struct uli_priv {
56 unsigned int scr_cfg_addr[uli_max_ports];
57};
58
59static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
60static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
62
63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
65 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
66 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
67 { } /* terminate list */
68};
69
70
71static struct pci_driver uli_pci_driver = {
72 .name = DRV_NAME,
73 .id_table = uli_pci_tbl,
74 .probe = uli_init_one,
75 .remove = ata_pci_remove_one,
76};
77
78static struct scsi_host_template uli_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
87 .emulated = ATA_SHT_EMULATED,
88 .use_clustering = ATA_SHT_USE_CLUSTERING,
89 .proc_name = DRV_NAME,
90 .dma_boundary = ATA_DMA_BOUNDARY,
91 .slave_configure = ata_scsi_slave_config,
92 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param,
94};
95
96static const struct ata_port_operations uli_ops = {
97 .port_disable = ata_port_disable,
98
99 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read,
101 .check_status = ata_check_status,
102 .exec_command = ata_exec_command,
103 .dev_select = ata_std_dev_select,
104
105 .bmdma_setup = ata_bmdma_setup,
106 .bmdma_start = ata_bmdma_start,
107 .bmdma_stop = ata_bmdma_stop,
108 .bmdma_status = ata_bmdma_status,
109 .qc_prep = ata_qc_prep,
110 .qc_issue = ata_qc_issue_prot,
111 .data_xfer = ata_pio_data_xfer,
112
113 .freeze = ata_bmdma_freeze,
114 .thaw = ata_bmdma_thaw,
115 .error_handler = ata_bmdma_error_handler,
116 .post_internal_cmd = ata_bmdma_post_internal_cmd,
117
118 .irq_handler = ata_interrupt,
119 .irq_clear = ata_bmdma_irq_clear,
120
121 .scr_read = uli_scr_read,
122 .scr_write = uli_scr_write,
123
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info uli_port_info = {
130 .sht = &uli_sht,
131 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f, /* pio0-4 */
133 .udma_mask = 0x7f, /* udma0-6 */
134 .port_ops = &uli_ops,
135};
136
137
138MODULE_AUTHOR("Peer Chen");
139MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
140MODULE_LICENSE("GPL");
141MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
142MODULE_VERSION(DRV_VERSION);
143
144static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
145{
146 struct uli_priv *hpriv = ap->host->private_data;
147 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
148}
149
150static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
151{
152 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
153 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
154 u32 val;
155
156 pci_read_config_dword(pdev, cfg_addr, &val);
157 return val;
158}
159
160static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
161{
162 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
164
165 pci_write_config_dword(pdev, cfg_addr, val);
166}
167
168static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
169{
170 if (sc_reg > SCR_CONTROL)
171 return 0xffffffffU;
172
173 return uli_scr_cfg_read(ap, sc_reg);
174}
175
176static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
177{
178 if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
179 return;
180
181 uli_scr_cfg_write(ap, sc_reg, val);
182}
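uli_scr_read() and uli_scr_write() reach the SCRs through PCI configuration space: each port owns a base offset in hpriv->scr_cfg_addr[], and the three SCRs (SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2) follow at 4-byte strides. A sketch of the resulting config addresses for a ULi 5287, with the port 2/3 bases taken from the assignments in uli_init_one() below:

	#include <stdio.h>

	#define ULI5287_BASE 0x90
	#define ULI5287_OFFS 0x10

	int main(void)
	{
		unsigned int base[4] = {
			ULI5287_BASE,				/* port 0 */
			ULI5287_BASE + ULI5287_OFFS,		/* port 1 */
			ULI5287_BASE + ULI5287_OFFS * 4,	/* port 2 */
			ULI5287_BASE + ULI5287_OFFS * 5,	/* port 3 */
		};
		unsigned int port, scr;

		for (port = 0; port < 4; port++)
			for (scr = 0; scr <= 2; scr++)
				printf("port %u SCR%u -> cfg 0x%02x\n",
				       port, scr, base[port] + 4 * scr);
		return 0;
	}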
183
184static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
185{
186 static int printed_version;
187 struct ata_probe_ent *probe_ent;
188 struct ata_port_info *ppi;
189 int rc;
190 unsigned int board_idx = (unsigned int) ent->driver_data;
191 int pci_dev_busy = 0;
192 struct uli_priv *hpriv;
193
194 if (!printed_version++)
195 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
196
197 rc = pci_enable_device(pdev);
198 if (rc)
199 return rc;
200
201 rc = pci_request_regions(pdev, DRV_NAME);
202 if (rc) {
203 pci_dev_busy = 1;
204 goto err_out;
205 }
206
207 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
208 if (rc)
209 goto err_out_regions;
210 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
211 if (rc)
212 goto err_out_regions;
213
214 ppi = &uli_port_info;
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) {
217 rc = -ENOMEM;
218 goto err_out_regions;
219 }
220
221 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
222 if (!hpriv) {
223 rc = -ENOMEM;
224 goto err_out_probe_ent;
225 }
226
227 probe_ent->private_data = hpriv;
228
229 switch (board_idx) {
230 case uli_5287:
231 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
232 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
233 probe_ent->n_ports = 4;
234
235 probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
236 probe_ent->port[2].altstatus_addr =
237 probe_ent->port[2].ctl_addr =
238 (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
239 probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
240 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
241
242 probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
243 probe_ent->port[3].altstatus_addr =
244 probe_ent->port[3].ctl_addr =
245 (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
246 probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
247 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
248
249 ata_std_ports(&probe_ent->port[2]);
250 ata_std_ports(&probe_ent->port[3]);
251 break;
252
253 case uli_5289:
254 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
255 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
256 break;
257
258 case uli_5281:
259 hpriv->scr_cfg_addr[0] = ULI5281_BASE;
260 hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
261 break;
262
263 default:
264 BUG();
265 break;
266 }
267
268 pci_set_master(pdev);
269 pci_intx(pdev, 1);
270
271 /* FIXME: check ata_device_add return value */
272 ata_device_add(probe_ent);
273 kfree(probe_ent);
274
275 return 0;
276
277err_out_probe_ent:
278 kfree(probe_ent);
279err_out_regions:
280 pci_release_regions(pdev);
281err_out:
282 if (!pci_dev_busy)
283 pci_disable_device(pdev);
284 return rc;
285
286}
287
288static int __init uli_init(void)
289{
290 return pci_register_driver(&uli_pci_driver);
291}
292
293static void __exit uli_exit(void)
294{
295 pci_unregister_driver(&uli_pci_driver);
296}
297
298
299module_init(uli_init);
300module_exit(uli_exit);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
new file mode 100644
index 000000000000..7f087aef99de
--- /dev/null
+++ b/drivers/ata/sata_via.c
@@ -0,0 +1,502 @@
1/*
2 * sata_via.c - VIA Serial ATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available under NDA.
31 *
32 *
33 * To-do list:
34 * - VT6421 PATA support
35 *
36 */
37
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/init.h>
42#include <linux/blkdev.h>
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47#include <asm/io.h>
48
49#define DRV_NAME "sata_via"
50#define DRV_VERSION "2.0"
51
52enum board_ids_enum {
53 vt6420,
54 vt6421,
55};
56
57enum {
58 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62
63 PORT0 = (1 << 1),
64 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2,
67
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72};
73
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap);
78
79static const struct pci_device_id svia_pci_tbl[] = {
80 { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
81 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
82 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
83
84 { } /* terminate list */
85};
86
87static struct pci_driver svia_pci_driver = {
88 .name = DRV_NAME,
89 .id_table = svia_pci_tbl,
90 .probe = svia_init_one,
91 .remove = ata_pci_remove_one,
92};
93
94static struct scsi_host_template svia_sht = {
95 .module = THIS_MODULE,
96 .name = DRV_NAME,
97 .ioctl = ata_scsi_ioctl,
98 .queuecommand = ata_scsi_queuecmd,
99 .can_queue = ATA_DEF_QUEUE,
100 .this_id = ATA_SHT_THIS_ID,
101 .sg_tablesize = LIBATA_MAX_PRD,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING,
105 .proc_name = DRV_NAME,
106 .dma_boundary = ATA_DMA_BOUNDARY,
107 .slave_configure = ata_scsi_slave_config,
108 .slave_destroy = ata_scsi_slave_destroy,
109 .bios_param = ata_std_bios_param,
110};
111
112static const struct ata_port_operations vt6420_sata_ops = {
113 .port_disable = ata_port_disable,
114
115 .tf_load = ata_tf_load,
116 .tf_read = ata_tf_read,
117 .check_status = ata_check_status,
118 .exec_command = ata_exec_command,
119 .dev_select = ata_std_dev_select,
120
121 .bmdma_setup = ata_bmdma_setup,
122 .bmdma_start = ata_bmdma_start,
123 .bmdma_stop = ata_bmdma_stop,
124 .bmdma_status = ata_bmdma_status,
125
126 .qc_prep = ata_qc_prep,
127 .qc_issue = ata_qc_issue_prot,
128 .data_xfer = ata_pio_data_xfer,
129
130 .freeze = ata_bmdma_freeze,
131 .thaw = ata_bmdma_thaw,
132 .error_handler = vt6420_error_handler,
133 .post_internal_cmd = ata_bmdma_post_internal_cmd,
134
135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear,
137
138 .port_start = ata_port_start,
139 .port_stop = ata_port_stop,
140 .host_stop = ata_host_stop,
141};
142
143static const struct ata_port_operations vt6421_sata_ops = {
144 .port_disable = ata_port_disable,
145
146 .tf_load = ata_tf_load,
147 .tf_read = ata_tf_read,
148 .check_status = ata_check_status,
149 .exec_command = ata_exec_command,
150 .dev_select = ata_std_dev_select,
151
152 .bmdma_setup = ata_bmdma_setup,
153 .bmdma_start = ata_bmdma_start,
154 .bmdma_stop = ata_bmdma_stop,
155 .bmdma_status = ata_bmdma_status,
156
157 .qc_prep = ata_qc_prep,
158 .qc_issue = ata_qc_issue_prot,
159 .data_xfer = ata_pio_data_xfer,
160
161 .freeze = ata_bmdma_freeze,
162 .thaw = ata_bmdma_thaw,
163 .error_handler = ata_bmdma_error_handler,
164 .post_internal_cmd = ata_bmdma_post_internal_cmd,
165
166 .irq_handler = ata_interrupt,
167 .irq_clear = ata_bmdma_irq_clear,
168
169 .scr_read = svia_scr_read,
170 .scr_write = svia_scr_write,
171
172 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_host_stop,
175};
176
177static struct ata_port_info vt6420_port_info = {
178 .sht = &svia_sht,
179 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
180 .pio_mask = 0x1f,
181 .mwdma_mask = 0x07,
182 .udma_mask = 0x7f,
183 .port_ops = &vt6420_sata_ops,
184};
185
186MODULE_AUTHOR("Jeff Garzik");
187MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
188MODULE_LICENSE("GPL");
189MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
190MODULE_VERSION(DRV_VERSION);
191
192static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
193{
194 if (sc_reg > SCR_CONTROL)
195 return 0xffffffffU;
196 return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
197}
198
199static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
200{
201 if (sc_reg > SCR_CONTROL)
202 return;
203 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
204}
205
206/**
207 * vt6420_prereset - prereset for vt6420
208 * @ap: target ATA port
209 *
210 * SCR registers on vt6420 are unreliable and may hang the
211 * whole machine completely if accessed with the wrong timing.
212 * To avoid such a catastrophe, vt6420 doesn't provide generic SCR
213 * access operations; it touches SStatus and SControl only during
214 * boot probing, in a controlled way.
215 *
216 * As the old (pre-EH-update) probing code has been proven to work,
217 * we strictly follow its access pattern.
218 *
219 * LOCKING:
220 * Kernel thread context (may sleep)
221 *
222 * RETURNS:
223 * 0 on success, -errno otherwise.
224 */
225static int vt6420_prereset(struct ata_port *ap)
226{
227 struct ata_eh_context *ehc = &ap->eh_context;
228 unsigned long timeout = jiffies + (HZ * 5);
229 u32 sstatus, scontrol;
230 int online;
231
232 /* don't do any SCR stuff if we're not loading */
233 if (!(ap->pflags & ATA_PFLAG_LOADING))
234 goto skip_scr;
235
236 /* Resume phy. This is the old resume sequence from
237 * __sata_phy_reset().
238 */
239 svia_scr_write(ap, SCR_CONTROL, 0x300);
240 svia_scr_read(ap, SCR_CONTROL); /* flush */
241
242 /* wait for phy to become ready, if necessary */
243 do {
244 msleep(200);
245 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
246 break;
247 } while (time_before(jiffies, timeout));
248
249 /* open code sata_print_link_status() */
250 sstatus = svia_scr_read(ap, SCR_STATUS);
251 scontrol = svia_scr_read(ap, SCR_CONTROL);
252
253 online = (sstatus & 0xf) == 0x3;
254
255 ata_port_printk(ap, KERN_INFO,
256 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
257 online ? "up" : "down", sstatus, scontrol);
258
259 /* SStatus is read one more time */
260 svia_scr_read(ap, SCR_STATUS);
261
262 if (!online) {
263 /* tell EH to bail */
264 ehc->i.action &= ~ATA_EH_RESET_MASK;
265 return 0;
266 }
267
268 skip_scr:
269 /* wait for !BSY */
270 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
271
272 return 0;
273}
274
275static void vt6420_error_handler(struct ata_port *ap)
276{
277 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
278 NULL, ata_std_postreset);
279}
280
281static const unsigned int svia_bar_sizes[] = {
282 8, 4, 8, 4, 16, 256
283};
284
285static const unsigned int vt6421_bar_sizes[] = {
286 16, 16, 16, 16, 32, 128
287};
288
289static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
290{
291 return addr + (port * 128);
292}
293
294static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
295{
296 return addr + (port * 64);
297}
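svia_scr_addr() and vt6421_scr_addr() encode the one real layout difference in BAR5: the vt6420 spaces per-port SCR blocks 128 bytes apart, the vt6421 only 64. A runnable contrast of the two strides (the BAR5 base address is an illustrative example, not a real resource value):

	#include <stdio.h>

	int main(void)
	{
		unsigned long bar5 = 0xd000;	/* illustrative BAR5 base */
		unsigned int port;

		for (port = 0; port < 2; port++)
			printf("port %u SCRs: vt6420 at 0x%lx, vt6421 at 0x%lx\n",
			       port, bar5 + port * 128, bar5 + port * 64);
		return 0;
	}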
298
299static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
300 struct pci_dev *pdev,
301 unsigned int port)
302{
303 unsigned long reg_addr = pci_resource_start(pdev, port);
304 unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
305 unsigned long scr_addr;
306
307 probe_ent->port[port].cmd_addr = reg_addr;
308 probe_ent->port[port].altstatus_addr =
309 probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
310 probe_ent->port[port].bmdma_addr = bmdma_addr;
311
312 scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
313 probe_ent->port[port].scr_addr = scr_addr;
314
315 ata_std_ports(&probe_ent->port[port]);
316}
317
318static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
319{
320 struct ata_probe_ent *probe_ent;
321 struct ata_port_info *ppi = &vt6420_port_info;
322
323 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
324 if (!probe_ent)
325 return NULL;
326
327 probe_ent->port[0].scr_addr =
328 svia_scr_addr(pci_resource_start(pdev, 5), 0);
329 probe_ent->port[1].scr_addr =
330 svia_scr_addr(pci_resource_start(pdev, 5), 1);
331
332 return probe_ent;
333}
334
335static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
336{
337 struct ata_probe_ent *probe_ent;
338 unsigned int i;
339
340 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
341 if (!probe_ent)
342 return NULL;
343
344 memset(probe_ent, 0, sizeof(*probe_ent));
345 probe_ent->dev = pci_dev_to_dev(pdev);
346 INIT_LIST_HEAD(&probe_ent->node);
347
348 probe_ent->sht = &svia_sht;
349 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
350 probe_ent->port_ops = &vt6421_sata_ops;
351 probe_ent->n_ports = N_PORTS;
352 probe_ent->irq = pdev->irq;
353 probe_ent->irq_flags = IRQF_SHARED;
354 probe_ent->pio_mask = 0x1f;
355 probe_ent->mwdma_mask = 0x07;
356 probe_ent->udma_mask = 0x7f;
357
358 for (i = 0; i < N_PORTS; i++)
359 vt6421_init_addrs(probe_ent, pdev, i);
360
361 return probe_ent;
362}
363
364static void svia_configure(struct pci_dev *pdev)
365{
366 u8 tmp8;
367
368 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
369 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
370 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
371
372 /* make sure SATA channels are enabled */
373 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
374 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
375 dev_printk(KERN_DEBUG, &pdev->dev,
376 "enabling SATA channels (0x%x)\n",
377 (int) tmp8);
378 tmp8 |= ALL_PORTS;
379 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
380 }
381
382 /* make sure interrupts for each channel sent to us */
383 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
384 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
385 dev_printk(KERN_DEBUG, &pdev->dev,
386 "enabling SATA channel interrupts (0x%x)\n",
387 (int) tmp8);
388 tmp8 |= ALL_PORTS;
389 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
390 }
391
392 /* make sure native mode is enabled */
393 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
394 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
395 dev_printk(KERN_DEBUG, &pdev->dev,
396 "enabling SATA channel native mode (0x%x)\n",
397 (int) tmp8);
398 tmp8 |= NATIVE_MODE_ALL;
399 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
400 }
401}
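svia_configure() applies the same read-modify-write idiom three times: read a config byte and write it back only if required bits are missing, so an already-configured controller is left untouched. A self-contained sketch of that pattern with PCI config space simulated by an array (the register offset and initial value are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t cfg[256] = { [0x40] = 0x01 };	/* channel 1 disabled */

	static void enable_bits(uint8_t reg, uint8_t bits, const char *what)
	{
		uint8_t tmp = cfg[reg];			/* pci_read_config_byte() */

		if ((tmp & bits) != bits) {
			printf("enabling %s (0x%x)\n", what, tmp);
			cfg[reg] = tmp | bits;		/* pci_write_config_byte() */
		}
	}

	int main(void)
	{
		enable_bits(0x40, 0x03, "SATA channels");	/* writes */
		enable_bits(0x40, 0x03, "SATA channels");	/* now a no-op */
		return 0;
	}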
402
403static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
404{
405 static int printed_version;
406 unsigned int i;
407 int rc;
408 struct ata_probe_ent *probe_ent;
409 int board_id = (int) ent->driver_data;
410 const unsigned int *bar_sizes;
411 int pci_dev_busy = 0;
412 u8 tmp8;
413
414 if (!printed_version++)
415 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
416
417 rc = pci_enable_device(pdev);
418 if (rc)
419 return rc;
420
421 rc = pci_request_regions(pdev, DRV_NAME);
422 if (rc) {
423 pci_dev_busy = 1;
424 goto err_out;
425 }
426
427 if (board_id == vt6420) {
428 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
429 if (tmp8 & SATA_2DEV) {
430 dev_printk(KERN_ERR, &pdev->dev,
431 "SATA master/slave not supported (0x%x)\n",
432 (int) tmp8);
433 rc = -EIO;
434 goto err_out_regions;
435 }
436
437 bar_sizes = &svia_bar_sizes[0];
438 } else {
439 bar_sizes = &vt6421_bar_sizes[0];
440 }
441
442 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
443 if ((pci_resource_start(pdev, i) == 0) ||
444 (pci_resource_len(pdev, i) < bar_sizes[i])) {
445 dev_printk(KERN_ERR, &pdev->dev,
446 "invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
447 i,
448 (unsigned long long)pci_resource_start(pdev, i),
449 (unsigned long long)pci_resource_len(pdev, i));
450 rc = -ENODEV;
451 goto err_out_regions;
452 }
453
454 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
455 if (rc)
456 goto err_out_regions;
457 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
458 if (rc)
459 goto err_out_regions;
460
461 if (board_id == vt6420)
462 probe_ent = vt6420_init_probe_ent(pdev);
463 else
464 probe_ent = vt6421_init_probe_ent(pdev);
465
466 if (!probe_ent) {
467 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
468 rc = -ENOMEM;
469 goto err_out_regions;
470 }
471
472 svia_configure(pdev);
473
474 pci_set_master(pdev);
475
476 /* FIXME: check ata_device_add return value */
477 ata_device_add(probe_ent);
478 kfree(probe_ent);
479
480 return 0;
481
482err_out_regions:
483 pci_release_regions(pdev);
484err_out:
485 if (!pci_dev_busy)
486 pci_disable_device(pdev);
487 return rc;
488}
489
490static int __init svia_init(void)
491{
492 return pci_register_driver(&svia_pci_driver);
493}
494
495static void __exit svia_exit(void)
496{
497 pci_unregister_driver(&svia_pci_driver);
498}
499
500module_init(svia_init);
501module_exit(svia_exit);
502
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
new file mode 100644
index 000000000000..d0d92f33de54
--- /dev/null
+++ b/drivers/ata/sata_vsc.c
@@ -0,0 +1,482 @@
1/*
2 * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
3 *
4 * Maintained by: Jeremy Higdon @ SGI
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 SGI
9 *
10 * Bits from Jeff Garzik, Copyright RedHat, Inc.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * libata documentation is available via 'make {ps|pdf}docs',
29 * as Documentation/DocBook/libata.*
30 *
31 * Vitesse hardware documentation presumably available under NDA.
32 * Intel 31244 (same hardware interface) documentation presumably
33 * available from http://developer.intel.com/
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/device.h>
46#include <scsi/scsi_host.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "2.0"
51
52enum {
53 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04,
56
57 /* Taskfile registers offsets */
58 VSC_SATA_TF_CMD_OFFSET = 0x00,
59 VSC_SATA_TF_DATA_OFFSET = 0x00,
60 VSC_SATA_TF_ERROR_OFFSET = 0x04,
61 VSC_SATA_TF_FEATURE_OFFSET = 0x06,
62 VSC_SATA_TF_NSECT_OFFSET = 0x08,
63 VSC_SATA_TF_LBAL_OFFSET = 0x0c,
64 VSC_SATA_TF_LBAM_OFFSET = 0x10,
65 VSC_SATA_TF_LBAH_OFFSET = 0x14,
66 VSC_SATA_TF_DEVICE_OFFSET = 0x18,
67 VSC_SATA_TF_STATUS_OFFSET = 0x1c,
68 VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
69 VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
70 VSC_SATA_TF_CTL_OFFSET = 0x29,
71
72 /* DMA base */
73 VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
74 VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
75 VSC_SATA_DMA_CMD_OFFSET = 0x70,
76
77 /* SCRs base */
78 VSC_SATA_SCR_STATUS_OFFSET = 0x100,
79 VSC_SATA_SCR_ERROR_OFFSET = 0x104,
80 VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
81
82 /* Port stride */
83 VSC_SATA_PORT_OFFSET = 0x200,
84
85 /* Error interrupt status bit offsets */
86 VSC_SATA_INT_ERROR_CRC = 0x40,
87 VSC_SATA_INT_ERROR_T = 0x20,
88 VSC_SATA_INT_ERROR_P = 0x10,
89 VSC_SATA_INT_ERROR_R = 0x8,
90 VSC_SATA_INT_ERROR_E = 0x4,
91 VSC_SATA_INT_ERROR_M = 0x2,
92 VSC_SATA_INT_PHY_CHANGE = 0x1,
93 VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
94 VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
95 VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
96 VSC_SATA_INT_PHY_CHANGE),
97};
98
99
100 #define is_vsc_sata_int_err(port_idx, int_status) \
101 ((int_status) & (VSC_SATA_INT_ERROR << (8 * (port_idx))))
102
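The interrupt status register gives each port one byte lane: bits 7:0 belong to port 0, 15:8 to port 1, and so on, with the error bits replicated into each lane by the shift in the macro above. A runnable illustration decoding an example status word (the status value is made up for the demonstration):

	#include <stdio.h>
	#include <stdint.h>

	#define INT_ERROR 0x7f	/* union of the error/phy-change bits above */

	int main(void)
	{
		/* CRC error on port 2, phy change on port 0 */
		uint32_t int_status = 0x00400001;
		unsigned int port;

		for (port = 0; port < 4; port++)
			printf("port %u: pending 0x%02x, err %s\n", port,
			       (int_status >> (8 * port)) & 0xff,
			       (int_status & (INT_ERROR << (8 * port)))
			       ? "yes" : "no");
		return 0;
	}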
103
104static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{
106 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
109}
110
111
112static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
113 u32 val)
114{
115 if (sc_reg > SCR_CONTROL)
116 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
118}
119
120
121static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
122{
123 void __iomem *mask_addr;
124 u8 mask;
125
126 mask_addr = ap->host->mmio_base +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN)
130 mask |= 0x80;
131 else
132 mask &= 0x7F;
133 writeb(mask, mask_addr);
134}
135
136
137static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
138{
139 struct ata_ioports *ioaddr = &ap->ioaddr;
140 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
141
142 /*
143 * The only thing the ctl register is used for is SRST.
144 * That is not enabled or disabled via tf_load.
145 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
146 */
147 if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
148 ap->last_ctl = tf->ctl;
149 vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
150 }
151 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
152 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
153 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
154 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
155 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
156 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
157 } else if (is_addr) {
158 writew(tf->feature, ioaddr->feature_addr);
159 writew(tf->nsect, ioaddr->nsect_addr);
160 writew(tf->lbal, ioaddr->lbal_addr);
161 writew(tf->lbam, ioaddr->lbam_addr);
162 writew(tf->lbah, ioaddr->lbah_addr);
163 }
164
165 if (tf->flags & ATA_TFLAG_DEVICE)
166 writeb(tf->device, ioaddr->device_addr);
167
168 ata_wait_idle(ap);
169}


static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u16 nsect, lbal, lbam, lbah, feature;

	tf->command = ata_check_status(ap);
	tf->device = readw(ioaddr->device_addr);
	feature = readw(ioaddr->error_addr);
	nsect = readw(ioaddr->nsect_addr);
	lbal = readw(ioaddr->lbal_addr);
	lbam = readw(ioaddr->lbam_addr);
	lbah = readw(ioaddr->lbah_addr);

	tf->feature = feature;
	tf->nsect = nsect;
	tf->lbal = lbal;
	tf->lbam = lbam;
	tf->lbah = lbah;

	if (tf->flags & ATA_TFLAG_LBA48) {
		tf->hob_feature = feature >> 8;
		tf->hob_nsect = nsect >> 8;
		tf->hob_lbal = lbal >> 8;
		tf->hob_lbam = lbam >> 8;
		tf->hob_lbah = lbah >> 8;
	}
}


/*
 * vsc_sata_interrupt
 *
 * Read the interrupt status register and service each port that has
 * an interrupt pending.
 */
static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
				       struct pt_regs *regs)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	u32 int_status;

	spin_lock(&host->lock);

	int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);

	for (i = 0; i < host->n_ports; i++) {
		if (int_status & ((u32) 0xFF << (8 * i))) {
			struct ata_port *ap;

			ap = host->ports[i];

			if (is_vsc_sata_int_err(i, int_status)) {
				u32 err_status;
				printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __func__);
				err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
				if (ap)
					vsc_sata_scr_write(ap, SCR_ERROR, err_status);
				handled++;
			}

			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
				struct ata_queued_cmd *qc;

				qc = ata_qc_from_tag(ap, ap->active_tag);
				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
					handled += ata_host_intr(ap, qc);
				else if (is_vsc_sata_int_err(i, int_status)) {
					/*
					 * On some chips (e.g. the Intel 31244), an
					 * error interrupt will sneak in at
					 * initialization time (phy state changes).
					 * Clearing the SCR error register is not
					 * required, but it prevents the phy state
					 * change interrupts from recurring later.
					 */
					u32 err_status;
					err_status = vsc_sata_scr_read(ap, SCR_ERROR);
					printk(KERN_DEBUG "%s: clearing interrupt, "
					       "status %x; sata err status %x\n",
					       __func__,
					       int_status, err_status);
					vsc_sata_scr_write(ap, SCR_ERROR, err_status);
					/* Clear interrupt status */
					ata_chk_status(ap);
					handled++;
				}
			}
		}
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}


static struct scsi_host_template vsc_sata_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};


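/*
 * Only taskfile load/read, SCR access, and the interrupt handler are
 * chip-specific here; everything else is the stock libata SFF/BMDMA
 * machinery.
 */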
static const struct ata_port_operations vsc_sata_ops = {
	.port_disable = ata_port_disable,
	.tf_load = vsc_sata_tf_load,
	.tf_read = vsc_sata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_mmio_data_xfer,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = ata_bmdma_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.irq_handler = vsc_sata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = vsc_sata_scr_read,
	.scr_write = vsc_sata_scr_write,
	.port_start = ata_port_start,
	.port_stop = ata_port_stop,
	.host_stop = ata_pci_host_stop,
};

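/*
 * Wire up one port's taskfile, BMDMA, and SCR addresses from its
 * register-bank base.  The two writel(0) calls clear what are
 * presumably the upper halves of 64-bit DMA descriptor/buffer
 * addresses ("UP" registers), consistent with the 32-bit DMA mask
 * set in vsc_sata_init_one().
 */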
static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
	port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
	port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
	port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
	port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
	port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
	port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
	port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
	port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
	port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
	port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
	port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
	port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
	port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
	port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
	writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
	writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}


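/*
 * Probe: enable the PCI device, map BAR 0, restrict DMA to 32 bits,
 * describe the four ports in an ata_probe_ent, and hand the result
 * to libata.
 */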
static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	unsigned long base;
	int pci_dev_busy = 0;
	void __iomem *mmio_base;
	int rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/*
	 * Check that the needed BAR 0 MMIO resource is present.
	 */
	if (pci_resource_len(pdev, 0) == 0) {
		rc = -ENODEV;
		goto err_out;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	/*
	 * Use a 32 bit DMA mask, because 64 bit address support is poor.
	 */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc)
		goto err_out_regions;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, 0, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}
	base = (unsigned long) mmio_base;

	/*
	 * Due to a bug in the chip, the default cache line size can't be
	 * used, so program 0x80 instead.
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);

	probe_ent->sht = &vsc_sata_sht;
	probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				ATA_FLAG_MMIO;
	probe_ent->port_ops = &vsc_sata_ops;
	probe_ent->n_ports = 4;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;

	/* The exact PIO/MWDMA/UDMA masks matter little on SATA, but the
	 * libata core insists that they be filled in.
	 */
	probe_ent->pio_mask = 0x1f;
	probe_ent->mwdma_mask = 0x07;
	probe_ent->udma_mask = 0x7f;

	/* We have 4 ports per PCI function; their register banks are
	 * 0x200 apart starting at base + 0x200, so the first stride
	 * evidently holds the chip-global registers.
	 */
	vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
	vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
	vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
	vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);

	pci_set_master(pdev);

	/*
	 * Config offset 0x98 is "Extended Control and Status Register 0".
	 * Default value is (1 << 28).  All bits except bit 28 are reserved in
	 * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
	 * If bit 28 is clear, each port has its own LED.
	 */
	pci_write_config_dword(pdev, 0x98, 0);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}


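/*
 * Besides vendor/device ID, these entries also match on PCI class:
 * 0x010600 is base class 01 (mass storage), subclass 06 (SATA),
 * programming interface 00 (vendor specific).
 */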
static const struct pci_device_id vsc_sata_pci_tbl[] = {
	{ PCI_VENDOR_ID_VITESSE, 0x7174,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	{ PCI_VENDOR_ID_INTEL, 0x3200,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	{ }	/* terminate list */
};


static struct pci_driver vsc_sata_pci_driver = {
	.name = DRV_NAME,
	.id_table = vsc_sata_pci_tbl,
	.probe = vsc_sata_init_one,
	.remove = ata_pci_remove_one,
};


static int __init vsc_sata_init(void)
{
	return pci_register_driver(&vsc_sata_pci_driver);
}


static void __exit vsc_sata_exit(void)
{
	pci_unregister_driver(&vsc_sata_pci_driver);
}


MODULE_AUTHOR("Jeremy Higdon");
MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(vsc_sata_init);
module_exit(vsc_sata_exit);