author    Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:19:56 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:19:56 -0400
commit    a68aa1cc6f3203b8a332683ebde67a00f39eec43 (patch)
tree      c05bd86850d8e0d0fb096a4b5f8d9f268b5561e8 /drivers
parent    a319a2773a13bab56a0d0b3744ba8703324313b5 (diff)
parent    23930fa1cebfea6f79881c588ccd1b0781e49e3f (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (50 commits)
  [libata] Delete pata_it8172 driver
  [PATCH] libata: improve handling of diagnostic fail (and hardware that misreports it)
  [PATCH] libata: fix non-uniform ports handling
  Fix libata resource conflict for legacy mode
  [libata] ata_piix: build fix
  [PATCH] pata_amd: Check enable bits on Nvidia
  [PATCH] Update SiS PATA
  [libata] Add pata_jmicron driver to Kconfig, Makefile
  [libata #pata-drivers] Trim trailing whitespace.
  [libata] Trim trailing whitespace.
  [libata] Add a bunch of PATA drivers.
  Rename libata-bmdma.c to libata-sff.c.
  libata: Grand renaming.
  Clean up drivers/ata/Kconfig a bit.
  [PATCH] CONFIG_PM=n slim: drivers/scsi/sata_sil*
  [PATCH] sata_via: Add SATA support for vt8237a
  [PATCH] libata: change path to libata in libata.tmpl
  [PATCH] libata: s/CONFIG_SCSI_SATA/CONFIG_[S]ATA/g in pci/quirks.c
  libata: Make sure drivers/ata is a separate Kconfig menu
  [libata] ata_piix: add missing kfree()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/ata/Kconfig | 484
-rw-r--r--  drivers/ata/Makefile | 62
-rw-r--r--  drivers/ata/ahci.c (renamed from drivers/scsi/ahci.c) | 721
-rw-r--r--  drivers/ata/ata_generic.c | 252
-rw-r--r--  drivers/ata/ata_piix.c (renamed from drivers/scsi/ata_piix.c) | 512
-rw-r--r--  drivers/ata/libata-core.c (renamed from drivers/scsi/libata-core.c) | 619
-rw-r--r--  drivers/ata/libata-eh.c (renamed from drivers/scsi/libata-eh.c) | 18
-rw-r--r--  drivers/ata/libata-scsi.c (renamed from drivers/scsi/libata-scsi.c) | 223
-rw-r--r--  drivers/ata/libata-sff.c (renamed from drivers/scsi/libata-bmdma.c) | 200
-rw-r--r--  drivers/ata/libata.h (renamed from drivers/scsi/libata.h) | 5
-rw-r--r--  drivers/ata/pata_ali.c | 679
-rw-r--r--  drivers/ata/pata_amd.c | 718
-rw-r--r--  drivers/ata/pata_artop.c | 518
-rw-r--r--  drivers/ata/pata_atiixp.c | 306
-rw-r--r--  drivers/ata/pata_cmd64x.c | 505
-rw-r--r--  drivers/ata/pata_cs5520.c | 336
-rw-r--r--  drivers/ata/pata_cs5530.c | 387
-rw-r--r--  drivers/ata/pata_cs5535.c | 291
-rw-r--r--  drivers/ata/pata_cypress.c | 227
-rw-r--r--  drivers/ata/pata_efar.c | 342
-rw-r--r--  drivers/ata/pata_hpt366.c | 478
-rw-r--r--  drivers/ata/pata_hpt37x.c | 1257
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 597
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 226
-rw-r--r--  drivers/ata/pata_isapnp.c | 156
-rw-r--r--  drivers/ata/pata_it821x.c | 847
-rw-r--r--  drivers/ata/pata_jmicron.c | 266
-rw-r--r--  drivers/ata/pata_legacy.c | 949
-rw-r--r--  drivers/ata/pata_mpiix.c | 313
-rw-r--r--  drivers/ata/pata_netcell.c | 175
-rw-r--r--  drivers/ata/pata_ns87410.c | 236
-rw-r--r--  drivers/ata/pata_oldpiix.c | 339
-rw-r--r--  drivers/ata/pata_opti.c | 292
-rw-r--r--  drivers/ata/pata_optidma.c | 547
-rw-r--r--  drivers/ata/pata_pcmcia.c | 393
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 869
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 423
-rw-r--r--  drivers/ata/pata_qdi.c | 403
-rw-r--r--  drivers/ata/pata_radisys.c | 335
-rw-r--r--  drivers/ata/pata_rz1000.c | 205
-rw-r--r--  drivers/ata/pata_sc1200.c | 287
-rw-r--r--  drivers/ata/pata_serverworks.c | 587
-rw-r--r--  drivers/ata/pata_sil680.c | 381
-rw-r--r--  drivers/ata/pata_sis.c | 1034
-rw-r--r--  drivers/ata/pata_sl82c105.c | 388
-rw-r--r--  drivers/ata/pata_triflex.c | 285
-rw-r--r--  drivers/ata/pata_via.c | 568
-rw-r--r--  drivers/ata/pdc_adma.c (renamed from drivers/scsi/pdc_adma.c) | 50
-rw-r--r--  drivers/ata/sata_mv.c (renamed from drivers/scsi/sata_mv.c) | 100
-rw-r--r--  drivers/ata/sata_nv.c (renamed from drivers/scsi/sata_nv.c) | 60
-rw-r--r--  drivers/ata/sata_promise.c (renamed from drivers/scsi/sata_promise.c) | 56
-rw-r--r--  drivers/ata/sata_promise.h (renamed from drivers/scsi/sata_promise.h) | 0
-rw-r--r--  drivers/ata/sata_qstor.c (renamed from drivers/scsi/sata_qstor.c) | 46
-rw-r--r--  drivers/ata/sata_sil.c (renamed from drivers/scsi/sata_sil.c) | 59
-rw-r--r--  drivers/ata/sata_sil24.c (renamed from drivers/scsi/sata_sil24.c) | 67
-rw-r--r--  drivers/ata/sata_sis.c (renamed from drivers/scsi/sata_sis.c) | 20
-rw-r--r--  drivers/ata/sata_svw.c (renamed from drivers/scsi/sata_svw.c) | 12
-rw-r--r--  drivers/ata/sata_sx4.c (renamed from drivers/scsi/sata_sx4.c) | 66
-rw-r--r--  drivers/ata/sata_uli.c (renamed from drivers/scsi/sata_uli.c) | 10
-rw-r--r--  drivers/ata/sata_via.c (renamed from drivers/scsi/sata_via.c) | 6
-rw-r--r--  drivers/ata/sata_vsc.c (renamed from drivers/scsi/sata_vsc.c) | 18
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/scsi/Kconfig | 138
-rw-r--r--  drivers/scsi/Makefile | 16
66 files changed, 19738 insertions, 1236 deletions
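
The "(renamed from ...)" annotations above come from git's rename detection; the moved drivers keep their history under the new drivers/ata/ path. As a hedged illustration (these are standard git commands, not part of the commit itself), roughly the same summary can be reproduced from a kernel clone:

    # diffstat of this merge against its first parent, with rename detection (-M)
    git diff -M --stat a68aa1cc6f3203b8a332683ebde67a00f39eec43^ a68aa1cc6f3203b8a332683ebde67a00f39eec43

    # follow one of the renamed files back across the move from drivers/scsi/
    git log --follow --oneline -- drivers/ata/ahci.c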
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8b11cebe65df..263e86ddc1a4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -18,6 +18,8 @@ source "drivers/ide/Kconfig"
 
 source "drivers/scsi/Kconfig"
 
+source "drivers/ata/Kconfig"
+
 source "drivers/cdrom/Kconfig"
 
 source "drivers/md/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index fc2d744a4e4a..4ac14dab3079 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_PPC_PMAC) += macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_FC4)		+= fc4/
 obj-$(CONFIG_SCSI)		+= scsi/
+obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_FUSION)		+= message/
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
 obj-y				+= cdrom/
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 000000000000..5a8bdac5f5e8
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,484 @@
1#
2# SATA/PATA driver configuration
3#
4
5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
6
7config ATA
8 tristate "ATA device support"
9 select SCSI
10 ---help---
11 If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
12 any other ATA device under Linux, say Y and make sure that you know
13 the name of your ATA host adapter (the card inside your computer
14 that "speaks" the ATA protocol, also called ATA controller),
15 because you will be asked for it.
16
17if ATA
18
19config SATA_AHCI
20 tristate "AHCI SATA support"
21 depends on PCI
22 help
23 This option enables support for AHCI Serial ATA.
24
25 If unsure, say N.
26
27config SATA_SVW
28 tristate "ServerWorks Frodo / Apple K2 SATA support"
29 depends on PCI
30 help
31 This option enables support for Broadcom/Serverworks/Apple K2
32 SATA controllers.
33
34 If unsure, say N.
35
36config ATA_PIIX
37 tristate "Intel PIIX/ICH SATA support"
38 depends on PCI
39 help
40 This option enables support for ICH5/6/7/8 Serial ATA.
41 If PATA support was enabled previously, this enables
42 support for select Intel PIIX/ICH PATA host controllers.
43
44 If unsure, say N.
45
46config SATA_MV
47 tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
48 depends on PCI && EXPERIMENTAL
49 help
50 This option enables support for the Marvell Serial ATA family.
51 Currently supports 88SX[56]0[48][01] chips.
52
53 If unsure, say N.
54
55config SATA_NV
56 tristate "NVIDIA SATA support"
57 depends on PCI
58 help
59 This option enables support for NVIDIA Serial ATA.
60
61 If unsure, say N.
62
63config PDC_ADMA
64 tristate "Pacific Digital ADMA support"
65 depends on PCI
66 help
67 This option enables support for Pacific Digital ADMA controllers
68
69 If unsure, say N.
70
71config SATA_QSTOR
72 tristate "Pacific Digital SATA QStor support"
73 depends on PCI
74 help
75 This option enables support for Pacific Digital Serial ATA QStor.
76
77 If unsure, say N.
78
79config SATA_PROMISE
80 tristate "Promise SATA TX2/TX4 support"
81 depends on PCI
82 help
83 This option enables support for Promise Serial ATA TX2/TX4.
84
85 If unsure, say N.
86
87config SATA_SX4
88 tristate "Promise SATA SX4 support"
89 depends on PCI && EXPERIMENTAL
90 help
91 This option enables support for Promise Serial ATA SX4.
92
93 If unsure, say N.
94
95config SATA_SIL
96 tristate "Silicon Image SATA support"
97 depends on PCI
98 help
99 This option enables support for Silicon Image Serial ATA.
100
101 If unsure, say N.
102
103config SATA_SIL24
104 tristate "Silicon Image 3124/3132 SATA support"
105 depends on PCI
106 help
107 This option enables support for Silicon Image 3124/3132 Serial ATA.
108
109 If unsure, say N.
110
111config SATA_SIS
112 tristate "SiS 964/180 SATA support"
113 depends on PCI
114 help
115 This option enables support for SiS Serial ATA 964/180.
116
117 If unsure, say N.
118
119config SATA_ULI
120 tristate "ULi Electronics SATA support"
121 depends on PCI
122 help
123 This option enables support for ULi Electronics SATA.
124
125 If unsure, say N.
126
127config SATA_VIA
128 tristate "VIA SATA support"
129 depends on PCI
130 help
131 This option enables support for VIA Serial ATA.
132
133 If unsure, say N.
134
135config SATA_VITESSE
136 tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
137 depends on PCI
138 help
139 This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
140
141 If unsure, say N.
142
143config SATA_INTEL_COMBINED
144 bool
145 depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
146 default y
147
148config PATA_ALI
149 tristate "ALi PATA support (Experimental)"
150 depends on PCI && EXPERIMENTAL
151 help
152 This option enables support for the ALi ATA interfaces
153 found on the many ALi chipsets.
154
155 If unsure, say N.
156
157config PATA_AMD
158 tristate "AMD/NVidia PATA support (Experimental)"
159 depends on PCI
160 help
161 This option enables support for the AMD and NVidia PATA
162 interfaces found on the chipsets for Athlon/Athlon64.
163
164 If unsure, say N.
165
166config PATA_ARTOP
167 tristate "ARTOP 6210/6260 PATA support (Experimental)"
168 depends on PCI && EXPERIMENTAL
169 help
170 This option enables support for ARTOP PATA controllers.
171
172 If unsure, say N.
173
174config PATA_ATIIXP
175 tristate "ATI PATA support (Experimental)"
176 depends on PCI && EXPERIMENTAL
177 help
178 This option enables support for the ATI ATA interfaces
179 found on the many ATI chipsets.
180
181 If unsure, say N.
182
183config PATA_CMD64X
184 tristate "CMD64x PATA support (Very Experimental)"
185 depends on PCI && EXPERIMENTAL
186 help
187 This option enables support for the CMD64x series chips
188 except for the CMD640.
189
190 If unsure, say N.
191
192config PATA_CS5520
193 tristate "CS5510/5520 PATA support"
194 depends on PCI
195 help
196 This option enables support for the Cyrix 5510/5520
197 companion chip used with the MediaGX/Geode processor family.
198
199 If unsure, say N.
200
201config PATA_CS5530
202 tristate "CS5530 PATA support (Experimental)"
203 depends on PCI && EXPERIMENTAL
204 help
205 This option enables support for the Cyrix/NatSemi/AMD CS5530
206 companion chip used with the MediaGX/Geode processor family.
207
208 If unsure, say N.
209
210config PATA_CS5535
211 tristate "CS5535 PATA support (Experimental)"
212 depends on PCI && X86 && !X86_64 && EXPERIMENTAL
213 help
214 This option enables support for the NatSemi/AMD CS5535
215 companion chip used with the Geode processor family.
216
217 If unsure, say N.
218
219config PATA_CYPRESS
220 tristate "Cypress CY82C693 PATA support (Very Experimental)"
221 depends on PCI && EXPERIMENTAL
222 help
223 This option enables support for the Cypress/Contaq CY82C693
224 chipset found in some Alpha systems
225
226 If unsure, say N.
227
228config PATA_EFAR
229 tristate "EFAR SLC90E66 support"
230 depends on PCI
231 help
232 This option enables support for the EFAR SLC90E66
233 IDE controller found on some older machines.
234
235 If unsure, say N.
236
237config ATA_GENERIC
238 tristate "Generic ATA support"
239 depends on PCI
240 help
241 This option enables support for generic BIOS configured
242 ATA controllers via the new ATA layer
243
244 If unsure, say N.
245
246config PATA_HPT366
247 tristate "HPT 366/368 PATA support (Very Experimental)"
248 depends on PCI && EXPERIMENTAL
249 help
250 This option enables support for the HPT 366 and 368
251 PATA controllers via the new ATA layer.
252
253 If unsure, say N.
254
255config PATA_HPT37X
256 tristate "HPT 370/370A/371/372/374/302 PATA support (Very Experimental)"
257 depends on PCI && EXPERIMENTAL
258 help
259 This option enables support for the majority of the later HPT
260 PATA controllers via the new ATA layer.
261
262 If unsure, say N.
263
264config PATA_HPT3X2N
265 tristate "HPT 372N/302N PATA support (Very Experimental)"
266 depends on PCI && EXPERIMENTAL
267 help
268 This option enables support for the N variant HPT PATA
269 controllers via the new ATA layer
270
271 If unsure, say N.
272
273config PATA_HPT3X3
274 tristate "HPT 343/363 PATA support (Experimental)"
275 depends on PCI
276 help
277 This option enables support for the HPT 343/363
278 PATA controllers via the new ATA layer
279
280 If unsure, say N.
281
282config PATA_ISAPNP
283 tristate "ISA Plug and Play PATA support (Very Experimental)"
284 depends on EXPERIMENTAL && ISAPNP
285 help
286 This option enables support for ISA plug & play ATA
287 controllers such as those found on old soundcards.
288
289 If unsure, say N.
290
291config PATA_IT821X
292 tristate "IT821x PATA support (Experimental)"
293 depends on PCI && EXPERIMENTAL
294 help
295 This option enables support for the ITE 8211 and 8212
296 PATA controllers via the new ATA layer, including RAID
297 mode.
298
299 If unsure, say N.
300
301config PATA_JMICRON
302 tristate "JMicron PATA support"
303 depends on PCI
304 help
305 Enable support for the JMicron IDE controller, via the new
306 ATA layer.
307
308 If unsure, say N.
309
310config PATA_LEGACY
311 tristate "Legacy ISA PATA support (Experimental)"
312 depends on PCI && EXPERIMENTAL
313 help
314 This option enables support for ISA/VLB bus legacy PATA
315 ports and allows them to be accessed via the new ATA layer.
316
317 If unsure, say N.
318
319config PATA_TRIFLEX
320 tristate "Compaq Triflex PATA support"
321 depends on PCI
322 help
323 Enable support for the Compaq 'Triflex' IDE controller as found
324 on many Compaq Pentium-Pro systems, via the new ATA layer.
325
326 If unsure, say N.
327
328config PATA_MPIIX
329 tristate "Intel PATA MPIIX support"
330 depends on PCI
331 help
332 This option enables support for the Intel MPIIX PATA interface.
333
334 If unsure, say N.
335
336config PATA_OLDPIIX
337 tristate "Intel PATA old PIIX support (Experimental)"
338 depends on PCI && EXPERIMENTAL
339 help
340 This option enables support for early PIIX PATA controllers.
341
342 If unsure, say N.
343
344config PATA_NETCELL
345 tristate "NETCELL Revolution RAID support"
346 depends on PCI
347 help
348 This option enables support for the Netcell Revolution RAID
349 PATA controller.
350
351 If unsure, say N.
352
353config PATA_NS87410
354 tristate "Nat Semi NS87410 PATA support (Experimental)"
355 depends on PCI && EXPERIMENTAL
356 help
357 This option enables support for the National Semiconductor
358 NS87410 PCI-IDE controller.
359
360 If unsure, say N.
361
362config PATA_OPTI
363 tristate "OPTI621/6215 PATA support (Very Experimental)"
364 depends on PCI && EXPERIMENTAL
365 help
366 This option enables full PIO support for the early Opti ATA
367 controllers found on some old motherboards.
368
369 If unsure, say N.
370
371config PATA_OPTIDMA
372 tristate "OPTI FireStar PATA support (Very Experimental)"
373 depends on PCI && EXPERIMENTAL
374 help
375 This option enables DMA/PIO support for the later OPTi
376 controllers found on some old motherboards and in some
377 laptops.
378
379 If unsure, say N.
380
381config PATA_PCMCIA
382 tristate "PCMCIA PATA support"
383 depends on PCMCIA
384 help
385 This option enables support for PCMCIA ATA interfaces, including
386 compact flash card adapters via the new ATA layer.
387
388 If unsure, say N.
389
390config PATA_PDC_OLD
391 tristate "Older Promise PATA controller support (Very Experimental)"
392 depends on PCI && EXPERIMENTAL
393 help
394 This option enables support for the Promise 20246, 20262, 20263,
395 20265 and 20267 adapters.
396
397 If unsure, say N.
398
399config PATA_QDI
400 tristate "QDI VLB PATA support"
401 help
402 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
403
404config PATA_RADISYS
405 tristate "RADISYS 82600 PATA support (Very Experimental)"
406 depends on PCI && EXPERIMENTAL
407 help
408 This option enables support for the RADISYS 82600
409 PATA controllers via the new ATA layer
410
411 If unsure, say N.
412
413config PATA_RZ1000
414 tristate "PC Tech RZ1000 PATA support"
415 depends on PCI
416 help
417 This option enables basic support for the PC Tech RZ1000/1
418 PATA controllers via the new ATA layer
419
420 If unsure, say N.
421
422config PATA_SC1200
423 tristate "SC1200 PATA support (Raving Lunatic)"
424 depends on PCI && EXPERIMENTAL
425 help
426 This option enables support for the NatSemi/AMD SC1200 SoC
427 companion chip used with the Geode processor family.
428
429 If unsure, say N.
430
431config PATA_SERVERWORKS
432 tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support (Experimental)"
433 depends on PCI && EXPERIMENTAL
434 help
435 This option enables support for the Serverworks OSB4/CSB5/CSB6 and
436 HT1000 PATA controllers, via the new ATA layer.
437
438 If unsure, say N.
439
440config PATA_PDC2027X
441 tristate "Promise PATA 2027x support"
442 depends on PCI
443 help
444 This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
445
446 If unsure, say N.
447
448config PATA_SIL680
449 tristate "CMD / Silicon Image 680 PATA support"
450 depends on PCI
451 help
452 This option enables support for CMD / Silicon Image 680 PATA.
453
454 If unsure, say N.
455
456config PATA_SIS
457 tristate "SiS PATA support (Experimental)"
458 depends on PCI && EXPERIMENTAL
459 help
460 This option enables support for SiS PATA controllers
461
462 If unsure, say N.
463
464config PATA_VIA
465 tristate "VIA PATA support"
466 depends on PCI
467 help
468 This option enables support for the VIA PATA interfaces
469 found on the many VIA chipsets.
470
471 If unsure, say N.
472
473config PATA_WINBOND
474 tristate "Winbond SL82C105 PATA support"
475 depends on PCI
476 help
477 This option enables support for SL82C105 PATA devices found in the
478 Netwinder and some other systems
479
480 If unsure, say N.
481
482endif
483endmenu
484
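
Every driver above is gated behind the new top-level CONFIG_ATA symbol, which selects SCSI so that libata can keep reusing the SCSI midlayer. As a hedged sketch (the option names are taken from the Kconfig file above; the particular selection is an arbitrary example, not a recommendation from this commit), a .config fragment for an ICH/AHCI machine of that era might look like:

    #
    # Serial ATA (prod) and Parallel ATA (experimental) drivers
    #
    CONFIG_ATA=y
    CONFIG_SATA_AHCI=y
    CONFIG_ATA_PIIX=y
    # CONFIG_SATA_SIL is not set
    # CONFIG_PATA_LEGACY is not set

Because CONFIG_ATA selects CONFIG_SCSI, the SCSI disk driver (CONFIG_BLK_DEV_SD) still has to be enabled for the attached disks to appear as /dev/sd* devices.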
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 000000000000..72243a677f9b
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,62 @@
1
2obj-$(CONFIG_ATA) += libata.o
3
4obj-$(CONFIG_SATA_AHCI) += ahci.o
5obj-$(CONFIG_SATA_SVW) += sata_svw.o
6obj-$(CONFIG_ATA_PIIX) += ata_piix.o
7obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
8obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
9obj-$(CONFIG_SATA_SIL) += sata_sil.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_VIA) += sata_via.o
12obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
13obj-$(CONFIG_SATA_SIS) += sata_sis.o
14obj-$(CONFIG_SATA_SX4) += sata_sx4.o
15obj-$(CONFIG_SATA_NV) += sata_nv.o
16obj-$(CONFIG_SATA_ULI) += sata_uli.o
17obj-$(CONFIG_SATA_MV) += sata_mv.o
18obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
19
20obj-$(CONFIG_PATA_ALI) += pata_ali.o
21obj-$(CONFIG_PATA_AMD) += pata_amd.o
22obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
23obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
24obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
25obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
26obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
27obj-$(CONFIG_PATA_CS5535) += pata_cs5535.o
28obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
29obj-$(CONFIG_PATA_EFAR) += pata_efar.o
30obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
31obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
32obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
33obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
34obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
35obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
36obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
37obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
38obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
39obj-$(CONFIG_PATA_OPTI) += pata_opti.o
40obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
41obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
42obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
43obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
44obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
45obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
46obj-$(CONFIG_PATA_QDI) += pata_qdi.o
47obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
48obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
49obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
50obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
51obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
52obj-$(CONFIG_PATA_VIA) += pata_via.o
53obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
54obj-$(CONFIG_PATA_SIS) += pata_sis.o
55obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
56# Should be last but one libata driver
57obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
58# Should be last libata driver
59obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
60
61libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o
62
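
The two trailing comments in the Makefile are load-bearing: for built-in drivers, kbuild link order determines initcall order, so ata_generic (which claims any BIOS-configured controller) and pata_legacy (which probes bare legacy ports) are kept after every chipset-specific driver to avoid grabbing hardware that a dedicated driver would otherwise bind. A hedged sketch of where a hypothetical new entry would go (pata_example.o and CONFIG_PATA_EXAMPLE are made-up names, not part of this commit):

    obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
    # hypothetical new chipset driver: keep it above the catch-all entries
    obj-$(CONFIG_PATA_EXAMPLE)	+= pata_example.o
    # Should be last but one libata driver
    obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
    # Should be last libata driver
    obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o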
diff --git a/drivers/scsi/ahci.c b/drivers/ata/ahci.c
index 904c25fb4ba4..1aabc81d82f1 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/ata/ahci.c
@@ -92,7 +92,9 @@ enum {
 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
 
 	/* HOST_CAP bits */
+	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
@@ -155,6 +157,7 @@ enum {
 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
 
+	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
@@ -212,6 +215,10 @@ static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int ahci_port_resume(struct ata_port *ap);
+static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int ahci_pci_device_resume(struct pci_dev *pdev);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -231,6 +238,8 @@ static struct scsi_host_template ahci_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.suspend		= ata_scsi_device_suspend,
+	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations ahci_ops = {
@@ -257,6 +266,9 @@ static const struct ata_port_operations ahci_ops = {
 	.error_handler		= ahci_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
 
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
@@ -265,7 +277,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
 		.sht		= &ahci_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				  ATA_FLAG_SKIP_D2H_BSY,
 		.pio_mask	= 0x1f, /* pio0-4 */
@@ -275,7 +287,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci_vt8251 */
 	{
 		.sht		= &ahci_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				  ATA_FLAG_SKIP_D2H_BSY |
 				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
@@ -350,6 +362,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* MCP65 */
 
+	/* SiS */
+	{ PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* SiS 966 */
+	{ PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* SiS 966 */
+	{ PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* SiS 968 */
+
 	{ }	/* terminate list */
 };
 
@@ -358,6 +378,8 @@ static struct pci_driver ahci_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= ahci_pci_tbl,
 	.probe			= ahci_init_one,
+	.suspend		= ahci_pci_device_suspend,
+	.resume			= ahci_pci_device_resume,
 	.remove			= ahci_remove_one,
 };
 
@@ -372,177 +394,288 @@ static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int por
372 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port); 394 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
373} 395}
374 396
375static int ahci_port_start(struct ata_port *ap) 397static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
376{ 398{
377 struct device *dev = ap->host_set->dev; 399 unsigned int sc_reg;
378 struct ahci_host_priv *hpriv = ap->host_set->private_data;
379 struct ahci_port_priv *pp;
380 void __iomem *mmio = ap->host_set->mmio_base;
381 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
382 void *mem;
383 dma_addr_t mem_dma;
384 int rc;
385
386 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
387 if (!pp)
388 return -ENOMEM;
389 memset(pp, 0, sizeof(*pp));
390 400
391 rc = ata_pad_alloc(ap, dev); 401 switch (sc_reg_in) {
392 if (rc) { 402 case SCR_STATUS: sc_reg = 0; break;
393 kfree(pp); 403 case SCR_CONTROL: sc_reg = 1; break;
394 return rc; 404 case SCR_ERROR: sc_reg = 2; break;
405 case SCR_ACTIVE: sc_reg = 3; break;
406 default:
407 return 0xffffffffU;
395 } 408 }
396 409
397 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); 410 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
398 if (!mem) { 411}
399 ata_pad_free(ap, dev);
400 kfree(pp);
401 return -ENOMEM;
402 }
403 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
404 412
405 /*
406 * First item in chunk of DMA memory: 32-slot command table,
407 * 32 bytes each in size
408 */
409 pp->cmd_slot = mem;
410 pp->cmd_slot_dma = mem_dma;
411 413
412 mem += AHCI_CMD_SLOT_SZ; 414static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
413 mem_dma += AHCI_CMD_SLOT_SZ; 415 u32 val)
416{
417 unsigned int sc_reg;
414 418
415 /* 419 switch (sc_reg_in) {
416 * Second item: Received-FIS area 420 case SCR_STATUS: sc_reg = 0; break;
417 */ 421 case SCR_CONTROL: sc_reg = 1; break;
418 pp->rx_fis = mem; 422 case SCR_ERROR: sc_reg = 2; break;
419 pp->rx_fis_dma = mem_dma; 423 case SCR_ACTIVE: sc_reg = 3; break;
424 default:
425 return;
426 }
420 427
421 mem += AHCI_RX_FIS_SZ; 428 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
422 mem_dma += AHCI_RX_FIS_SZ; 429}
423 430
424 /* 431static void ahci_start_engine(void __iomem *port_mmio)
425 * Third item: data area for storing a single command 432{
426 * and its scatter-gather table 433 u32 tmp;
427 */
428 pp->cmd_tbl = mem;
429 pp->cmd_tbl_dma = mem_dma;
430 434
431 ap->private_data = pp; 435 /* start DMA */
436 tmp = readl(port_mmio + PORT_CMD);
437 tmp |= PORT_CMD_START;
438 writel(tmp, port_mmio + PORT_CMD);
439 readl(port_mmio + PORT_CMD); /* flush */
440}
432 441
433 if (hpriv->cap & HOST_CAP_64) 442static int ahci_stop_engine(void __iomem *port_mmio)
434 writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI); 443{
435 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); 444 u32 tmp;
436 readl(port_mmio + PORT_LST_ADDR); /* flush */
437 445
438 if (hpriv->cap & HOST_CAP_64) 446 tmp = readl(port_mmio + PORT_CMD);
439 writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
440 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
441 readl(port_mmio + PORT_FIS_ADDR); /* flush */
442 447
443 writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX | 448 /* check if the HBA is idle */
444 PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP | 449 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
445 PORT_CMD_START, port_mmio + PORT_CMD); 450 return 0;
446 readl(port_mmio + PORT_CMD); /* flush */ 451
452 /* setting HBA to idle */
453 tmp &= ~PORT_CMD_START;
454 writel(tmp, port_mmio + PORT_CMD);
455
456 /* wait for engine to stop. This could be as long as 500 msec */
457 tmp = ata_wait_register(port_mmio + PORT_CMD,
458 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
459 if (tmp & PORT_CMD_LIST_ON)
460 return -EIO;
447 461
448 return 0; 462 return 0;
449} 463}
450 464
465static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
466 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
467{
468 u32 tmp;
451 469
452static void ahci_port_stop(struct ata_port *ap) 470 /* set FIS registers */
471 if (cap & HOST_CAP_64)
472 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
473 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
474
475 if (cap & HOST_CAP_64)
476 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
477 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
478
479 /* enable FIS reception */
480 tmp = readl(port_mmio + PORT_CMD);
481 tmp |= PORT_CMD_FIS_RX;
482 writel(tmp, port_mmio + PORT_CMD);
483
484 /* flush */
485 readl(port_mmio + PORT_CMD);
486}
487
488static int ahci_stop_fis_rx(void __iomem *port_mmio)
453{ 489{
454 struct device *dev = ap->host_set->dev;
455 struct ahci_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host_set->mmio_base;
457 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
458 u32 tmp; 490 u32 tmp;
459 491
492 /* disable FIS reception */
460 tmp = readl(port_mmio + PORT_CMD); 493 tmp = readl(port_mmio + PORT_CMD);
461 tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX); 494 tmp &= ~PORT_CMD_FIS_RX;
462 writel(tmp, port_mmio + PORT_CMD); 495 writel(tmp, port_mmio + PORT_CMD);
463 readl(port_mmio + PORT_CMD); /* flush */
464 496
465 /* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so 497 /* wait for completion, spec says 500ms, give it 1000 */
466 * this is slightly incorrect. 498 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
467 */ 499 PORT_CMD_FIS_ON, 10, 1000);
468 msleep(500); 500 if (tmp & PORT_CMD_FIS_ON)
501 return -EBUSY;
469 502
470 ap->private_data = NULL; 503 return 0;
471 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
472 pp->cmd_slot, pp->cmd_slot_dma);
473 ata_pad_free(ap, dev);
474 kfree(pp);
475} 504}
476 505
477static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) 506static void ahci_power_up(void __iomem *port_mmio, u32 cap)
478{ 507{
479 unsigned int sc_reg; 508 u32 cmd;
480 509
481 switch (sc_reg_in) { 510 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
482 case SCR_STATUS: sc_reg = 0; break; 511
483 case SCR_CONTROL: sc_reg = 1; break; 512 /* spin up device */
484 case SCR_ERROR: sc_reg = 2; break; 513 if (cap & HOST_CAP_SSS) {
485 case SCR_ACTIVE: sc_reg = 3; break; 514 cmd |= PORT_CMD_SPIN_UP;
486 default: 515 writel(cmd, port_mmio + PORT_CMD);
487 return 0xffffffffU;
488 } 516 }
489 517
490 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 518 /* wake up link */
519 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
491} 520}
492 521
522static void ahci_power_down(void __iomem *port_mmio, u32 cap)
523{
524 u32 cmd, scontrol;
493 525
494static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in, 526 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
495 u32 val) 527
528 if (cap & HOST_CAP_SSC) {
529 /* enable transitions to slumber mode */
530 scontrol = readl(port_mmio + PORT_SCR_CTL);
531 if ((scontrol & 0x0f00) > 0x100) {
532 scontrol &= ~0xf00;
533 writel(scontrol, port_mmio + PORT_SCR_CTL);
534 }
535
536 /* put device into slumber mode */
537 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
538
539 /* wait for the transition to complete */
540 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
541 PORT_CMD_ICC_SLUMBER, 1, 50);
542 }
543
544 /* put device into listen mode */
545 if (cap & HOST_CAP_SSS) {
546 /* first set PxSCTL.DET to 0 */
547 scontrol = readl(port_mmio + PORT_SCR_CTL);
548 scontrol &= ~0xf;
549 writel(scontrol, port_mmio + PORT_SCR_CTL);
550
551 /* then set PxCMD.SUD to 0 */
552 cmd &= ~PORT_CMD_SPIN_UP;
553 writel(cmd, port_mmio + PORT_CMD);
554 }
555}
556
557static void ahci_init_port(void __iomem *port_mmio, u32 cap,
558 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
496{ 559{
497 unsigned int sc_reg; 560 /* power up */
561 ahci_power_up(port_mmio, cap);
498 562
499 switch (sc_reg_in) { 563 /* enable FIS reception */
500 case SCR_STATUS: sc_reg = 0; break; 564 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
501 case SCR_CONTROL: sc_reg = 1; break; 565
502 case SCR_ERROR: sc_reg = 2; break; 566 /* enable DMA */
503 case SCR_ACTIVE: sc_reg = 3; break; 567 ahci_start_engine(port_mmio);
504 default: 568}
505 return; 569
570static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
571{
572 int rc;
573
574 /* disable DMA */
575 rc = ahci_stop_engine(port_mmio);
576 if (rc) {
577 *emsg = "failed to stop engine";
578 return rc;
506 } 579 }
507 580
508 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 581 /* disable FIS reception */
582 rc = ahci_stop_fis_rx(port_mmio);
583 if (rc) {
584 *emsg = "failed stop FIS RX";
585 return rc;
586 }
587
588 /* put device into slumber mode */
589 ahci_power_down(port_mmio, cap);
590
591 return 0;
509} 592}
510 593
511static int ahci_stop_engine(struct ata_port *ap) 594static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
512{ 595{
513 void __iomem *mmio = ap->host_set->mmio_base; 596 u32 cap_save, tmp;
514 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
515 int work;
516 u32 tmp;
517 597
518 tmp = readl(port_mmio + PORT_CMD); 598 cap_save = readl(mmio + HOST_CAP);
519 tmp &= ~PORT_CMD_START; 599 cap_save &= ( (1<<28) | (1<<17) );
520 writel(tmp, port_mmio + PORT_CMD); 600 cap_save |= (1 << 27);
601
602 /* global controller reset */
603 tmp = readl(mmio + HOST_CTL);
604 if ((tmp & HOST_RESET) == 0) {
605 writel(tmp | HOST_RESET, mmio + HOST_CTL);
606 readl(mmio + HOST_CTL); /* flush */
607 }
521 608
522 /* wait for engine to stop. TODO: this could be 609 /* reset must complete within 1 second, or
523 * as long as 500 msec 610 * the hardware should be considered fried.
524 */ 611 */
525 work = 1000; 612 ssleep(1);
526 while (work-- > 0) { 613
527 tmp = readl(port_mmio + PORT_CMD); 614 tmp = readl(mmio + HOST_CTL);
528 if ((tmp & PORT_CMD_LIST_ON) == 0) 615 if (tmp & HOST_RESET) {
529 return 0; 616 dev_printk(KERN_ERR, &pdev->dev,
530 udelay(10); 617 "controller reset failed (0x%x)\n", tmp);
618 return -EIO;
531 } 619 }
532 620
533 return -EIO; 621 writel(HOST_AHCI_EN, mmio + HOST_CTL);
622 (void) readl(mmio + HOST_CTL); /* flush */
623 writel(cap_save, mmio + HOST_CAP);
624 writel(0xf, mmio + HOST_PORTS_IMPL);
625 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
626
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 u16 tmp16;
629
630 /* configure PCS */
631 pci_read_config_word(pdev, 0x92, &tmp16);
632 tmp16 |= 0xf;
633 pci_write_config_word(pdev, 0x92, tmp16);
634 }
635
636 return 0;
534} 637}
535 638
536static void ahci_start_engine(struct ata_port *ap) 639static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
640 int n_ports, u32 cap)
537{ 641{
538 void __iomem *mmio = ap->host_set->mmio_base; 642 int i, rc;
539 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
540 u32 tmp; 643 u32 tmp;
541 644
542 tmp = readl(port_mmio + PORT_CMD); 645 for (i = 0; i < n_ports; i++) {
543 tmp |= PORT_CMD_START; 646 void __iomem *port_mmio = ahci_port_base(mmio, i);
544 writel(tmp, port_mmio + PORT_CMD); 647 const char *emsg = NULL;
545 readl(port_mmio + PORT_CMD); /* flush */ 648
649#if 0 /* BIOSen initialize this incorrectly */
650 if (!(hpriv->port_map & (1 << i)))
651 continue;
652#endif
653
654 /* make sure port is not active */
655 rc = ahci_deinit_port(port_mmio, cap, &emsg);
656 if (rc)
657 dev_printk(KERN_WARNING, &pdev->dev,
658 "%s (%d)\n", emsg, rc);
659
660 /* clear SError */
661 tmp = readl(port_mmio + PORT_SCR_ERR);
662 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
663 writel(tmp, port_mmio + PORT_SCR_ERR);
664
665 /* clear port IRQ */
666 tmp = readl(port_mmio + PORT_IRQ_STAT);
667 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
668 if (tmp)
669 writel(tmp, port_mmio + PORT_IRQ_STAT);
670
671 writel(1 << i, mmio + HOST_IRQ_STAT);
672 }
673
674 tmp = readl(mmio + HOST_CTL);
675 VPRINTK("HOST_CTL 0x%x\n", tmp);
676 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
677 tmp = readl(mmio + HOST_CTL);
678 VPRINTK("HOST_CTL 0x%x\n", tmp);
546} 679}
547 680
548static unsigned int ahci_dev_classify(struct ata_port *ap) 681static unsigned int ahci_dev_classify(struct ata_port *ap)
@@ -576,7 +709,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
576static int ahci_clo(struct ata_port *ap) 709static int ahci_clo(struct ata_port *ap)
577{ 710{
578 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
579 struct ahci_host_priv *hpriv = ap->host_set->private_data; 712 struct ahci_host_priv *hpriv = ap->host->private_data;
580 u32 tmp; 713 u32 tmp;
581 714
582 if (!(hpriv->cap & HOST_CAP_CLO)) 715 if (!(hpriv->cap & HOST_CAP_CLO))
@@ -608,7 +741,7 @@ static int ahci_prereset(struct ata_port *ap)
608static int ahci_softreset(struct ata_port *ap, unsigned int *class) 741static int ahci_softreset(struct ata_port *ap, unsigned int *class)
609{ 742{
610 struct ahci_port_priv *pp = ap->private_data; 743 struct ahci_port_priv *pp = ap->private_data;
611 void __iomem *mmio = ap->host_set->mmio_base; 744 void __iomem *mmio = ap->host->mmio_base;
612 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
613 const u32 cmd_fis_len = 5; /* five dwords */ 746 const u32 cmd_fis_len = 5; /* five dwords */
614 const char *reason = NULL; 747 const char *reason = NULL;
@@ -626,7 +759,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
626 } 759 }
627 760
628 /* prepare for SRST (AHCI-1.1 10.4.1) */ 761 /* prepare for SRST (AHCI-1.1 10.4.1) */
629 rc = ahci_stop_engine(ap); 762 rc = ahci_stop_engine(port_mmio);
630 if (rc) { 763 if (rc) {
631 reason = "failed to stop engine"; 764 reason = "failed to stop engine";
632 goto fail_restart; 765 goto fail_restart;
@@ -647,7 +780,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
647 } 780 }
648 781
649 /* restart engine */ 782 /* restart engine */
650 ahci_start_engine(ap); 783 ahci_start_engine(port_mmio);
651 784
652 ata_tf_init(ap->device, &tf); 785 ata_tf_init(ap->device, &tf);
653 fis = pp->cmd_tbl; 786 fis = pp->cmd_tbl;
@@ -706,7 +839,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
706 return 0; 839 return 0;
707 840
708 fail_restart: 841 fail_restart:
709 ahci_start_engine(ap); 842 ahci_start_engine(port_mmio);
710 fail: 843 fail:
711 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); 844 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
712 return rc; 845 return rc;
@@ -717,11 +850,13 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
717 struct ahci_port_priv *pp = ap->private_data; 850 struct ahci_port_priv *pp = ap->private_data;
718 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 851 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
719 struct ata_taskfile tf; 852 struct ata_taskfile tf;
853 void __iomem *mmio = ap->host->mmio_base;
854 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
720 int rc; 855 int rc;
721 856
722 DPRINTK("ENTER\n"); 857 DPRINTK("ENTER\n");
723 858
724 ahci_stop_engine(ap); 859 ahci_stop_engine(port_mmio);
725 860
726 /* clear D2H reception area to properly wait for D2H FIS */ 861 /* clear D2H reception area to properly wait for D2H FIS */
727 ata_tf_init(ap->device, &tf); 862 ata_tf_init(ap->device, &tf);
@@ -730,7 +865,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
730 865
731 rc = sata_std_hardreset(ap, class); 866 rc = sata_std_hardreset(ap, class);
732 867
733 ahci_start_engine(ap); 868 ahci_start_engine(port_mmio);
734 869
735 if (rc == 0 && ata_port_online(ap)) 870 if (rc == 0 && ata_port_online(ap))
736 *class = ahci_dev_classify(ap); 871 *class = ahci_dev_classify(ap);
@@ -904,7 +1039,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
904 1039
905static void ahci_host_intr(struct ata_port *ap) 1040static void ahci_host_intr(struct ata_port *ap)
906{ 1041{
907 void __iomem *mmio = ap->host_set->mmio_base; 1042 void __iomem *mmio = ap->host->mmio_base;
908 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1043 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
909 struct ata_eh_info *ehi = &ap->eh_info; 1044 struct ata_eh_info *ehi = &ap->eh_info;
910 u32 status, qc_active; 1045 u32 status, qc_active;
@@ -940,7 +1075,7 @@ static void ahci_host_intr(struct ata_port *ap)
940 return; 1075 return;
941 1076
942 /* ignore interim PIO setup fis interrupts */ 1077 /* ignore interim PIO setup fis interrupts */
943 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) 1078 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
944 return; 1079 return;
945 1080
946 if (ata_ratelimit()) 1081 if (ata_ratelimit())
@@ -956,7 +1091,7 @@ static void ahci_irq_clear(struct ata_port *ap)
956 1091
957static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 1092static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
958{ 1093{
959 struct ata_host_set *host_set = dev_instance; 1094 struct ata_host *host = dev_instance;
960 struct ahci_host_priv *hpriv; 1095 struct ahci_host_priv *hpriv;
961 unsigned int i, handled = 0; 1096 unsigned int i, handled = 0;
962 void __iomem *mmio; 1097 void __iomem *mmio;
@@ -964,8 +1099,8 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
964 1099
965 VPRINTK("ENTER\n"); 1100 VPRINTK("ENTER\n");
966 1101
967 hpriv = host_set->private_data; 1102 hpriv = host->private_data;
968 mmio = host_set->mmio_base; 1103 mmio = host->mmio_base;
969 1104
970 /* sigh. 0xffffffff is a valid return from h/w */ 1105 /* sigh. 0xffffffff is a valid return from h/w */
971 irq_stat = readl(mmio + HOST_IRQ_STAT); 1106 irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -973,22 +1108,22 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
973 if (!irq_stat) 1108 if (!irq_stat)
974 return IRQ_NONE; 1109 return IRQ_NONE;
975 1110
976 spin_lock(&host_set->lock); 1111 spin_lock(&host->lock);
977 1112
978 for (i = 0; i < host_set->n_ports; i++) { 1113 for (i = 0; i < host->n_ports; i++) {
979 struct ata_port *ap; 1114 struct ata_port *ap;
980 1115
981 if (!(irq_stat & (1 << i))) 1116 if (!(irq_stat & (1 << i)))
982 continue; 1117 continue;
983 1118
984 ap = host_set->ports[i]; 1119 ap = host->ports[i];
985 if (ap) { 1120 if (ap) {
986 ahci_host_intr(ap); 1121 ahci_host_intr(ap);
987 VPRINTK("port %u\n", i); 1122 VPRINTK("port %u\n", i);
988 } else { 1123 } else {
989 VPRINTK("port %u (no irq)\n", i); 1124 VPRINTK("port %u (no irq)\n", i);
990 if (ata_ratelimit()) 1125 if (ata_ratelimit())
991 dev_printk(KERN_WARNING, host_set->dev, 1126 dev_printk(KERN_WARNING, host->dev,
992 "interrupt on disabled port %u\n", i); 1127 "interrupt on disabled port %u\n", i);
993 } 1128 }
994 1129
@@ -1000,7 +1135,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
1000 handled = 1; 1135 handled = 1;
1001 } 1136 }
1002 1137
1003 spin_unlock(&host_set->lock); 1138 spin_unlock(&host->lock);
1004 1139
1005 VPRINTK("EXIT\n"); 1140 VPRINTK("EXIT\n");
1006 1141
@@ -1022,7 +1157,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1022 1157
1023static void ahci_freeze(struct ata_port *ap) 1158static void ahci_freeze(struct ata_port *ap)
1024{ 1159{
1025 void __iomem *mmio = ap->host_set->mmio_base; 1160 void __iomem *mmio = ap->host->mmio_base;
1026 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1027 1162
1028 /* turn IRQ off */ 1163 /* turn IRQ off */
@@ -1031,7 +1166,7 @@ static void ahci_freeze(struct ata_port *ap)
1031 1166
1032static void ahci_thaw(struct ata_port *ap) 1167static void ahci_thaw(struct ata_port *ap)
1033{ 1168{
1034 void __iomem *mmio = ap->host_set->mmio_base; 1169 void __iomem *mmio = ap->host->mmio_base;
1035 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1170 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1036 u32 tmp; 1171 u32 tmp;
1037 1172
@@ -1046,10 +1181,13 @@ static void ahci_thaw(struct ata_port *ap)
1046 1181
1047static void ahci_error_handler(struct ata_port *ap) 1182static void ahci_error_handler(struct ata_port *ap)
1048{ 1183{
1184 void __iomem *mmio = ap->host->mmio_base;
1185 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1186
1049 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1187 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1050 /* restart engine */ 1188 /* restart engine */
1051 ahci_stop_engine(ap); 1189 ahci_stop_engine(port_mmio);
1052 ahci_start_engine(ap); 1190 ahci_start_engine(port_mmio);
1053 } 1191 }
1054 1192
1055 /* perform recovery */ 1193 /* perform recovery */
@@ -1060,15 +1198,176 @@ static void ahci_error_handler(struct ata_port *ap)
1060static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1198static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1061{ 1199{
1062 struct ata_port *ap = qc->ap; 1200 struct ata_port *ap = qc->ap;
1201 void __iomem *mmio = ap->host->mmio_base;
1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1063 1203
1064 if (qc->flags & ATA_QCFLAG_FAILED) 1204 if (qc->flags & ATA_QCFLAG_FAILED)
1065 qc->err_mask |= AC_ERR_OTHER; 1205 qc->err_mask |= AC_ERR_OTHER;
1066 1206
1067 if (qc->err_mask) { 1207 if (qc->err_mask) {
1068 /* make DMA engine forget about the failed command */ 1208 /* make DMA engine forget about the failed command */
1069 ahci_stop_engine(ap); 1209 ahci_stop_engine(port_mmio);
1070 ahci_start_engine(ap); 1210 ahci_start_engine(port_mmio);
1211 }
1212}
1213
1214static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1215{
1216 struct ahci_host_priv *hpriv = ap->host->private_data;
1217 struct ahci_port_priv *pp = ap->private_data;
1218 void __iomem *mmio = ap->host->mmio_base;
1219 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1220 const char *emsg = NULL;
1221 int rc;
1222
1223 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1224 if (rc) {
1225 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1226 ahci_init_port(port_mmio, hpriv->cap,
1227 pp->cmd_slot_dma, pp->rx_fis_dma);
1228 }
1229
1230 return rc;
1231}
1232
1233static int ahci_port_resume(struct ata_port *ap)
1234{
1235 struct ahci_port_priv *pp = ap->private_data;
1236 struct ahci_host_priv *hpriv = ap->host->private_data;
1237 void __iomem *mmio = ap->host->mmio_base;
1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1239
1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1241
1242 return 0;
1243}
1244
1245static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1246{
1247 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1248 void __iomem *mmio = host->mmio_base;
1249 u32 ctl;
1250
1251 if (mesg.event == PM_EVENT_SUSPEND) {
1252 /* AHCI spec rev1.1 section 8.3.3:
1253 * Software must disable interrupts prior to requesting a
1254 * transition of the HBA to D3 state.
1255 */
1256 ctl = readl(mmio + HOST_CTL);
1257 ctl &= ~HOST_IRQ_EN;
1258 writel(ctl, mmio + HOST_CTL);
1259 readl(mmio + HOST_CTL); /* flush */
1260 }
1261
1262 return ata_pci_device_suspend(pdev, mesg);
1263}
1264
1265static int ahci_pci_device_resume(struct pci_dev *pdev)
1266{
1267 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1268 struct ahci_host_priv *hpriv = host->private_data;
1269 void __iomem *mmio = host->mmio_base;
1270 int rc;
1271
1272 ata_pci_device_do_resume(pdev);
1273
1274 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1275 rc = ahci_reset_controller(mmio, pdev);
1276 if (rc)
1277 return rc;
1278
1279 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
1071 } 1280 }
1281
1282 ata_host_resume(host);
1283
1284 return 0;
1285}
1286
1287static int ahci_port_start(struct ata_port *ap)
1288{
1289 struct device *dev = ap->host->dev;
1290 struct ahci_host_priv *hpriv = ap->host->private_data;
1291 struct ahci_port_priv *pp;
1292 void __iomem *mmio = ap->host->mmio_base;
1293 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1294 void *mem;
1295 dma_addr_t mem_dma;
1296 int rc;
1297
1298 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1299 if (!pp)
1300 return -ENOMEM;
1301 memset(pp, 0, sizeof(*pp));
1302
1303 rc = ata_pad_alloc(ap, dev);
1304 if (rc) {
1305 kfree(pp);
1306 return rc;
1307 }
1308
1309 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1310 if (!mem) {
1311 ata_pad_free(ap, dev);
1312 kfree(pp);
1313 return -ENOMEM;
1314 }
1315 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1316
1317 /*
1318 * First item in chunk of DMA memory: 32-slot command table,
1319 * 32 bytes each in size
1320 */
1321 pp->cmd_slot = mem;
1322 pp->cmd_slot_dma = mem_dma;
1323
1324 mem += AHCI_CMD_SLOT_SZ;
1325 mem_dma += AHCI_CMD_SLOT_SZ;
1326
1327 /*
1328 * Second item: Received-FIS area
1329 */
1330 pp->rx_fis = mem;
1331 pp->rx_fis_dma = mem_dma;
1332
1333 mem += AHCI_RX_FIS_SZ;
1334 mem_dma += AHCI_RX_FIS_SZ;
1335
1336 /*
1337 * Third item: data area for storing a single command
1338 * and its scatter-gather table
1339 */
1340 pp->cmd_tbl = mem;
1341 pp->cmd_tbl_dma = mem_dma;
1342
1343 ap->private_data = pp;
1344
1345 /* initialize port */
1346 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1347
1348 return 0;
1349}
1350
1351static void ahci_port_stop(struct ata_port *ap)
1352{
1353 struct device *dev = ap->host->dev;
1354 struct ahci_host_priv *hpriv = ap->host->private_data;
1355 struct ahci_port_priv *pp = ap->private_data;
1356 void __iomem *mmio = ap->host->mmio_base;
1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1358 const char *emsg = NULL;
1359 int rc;
1360
1361 /* de-initialize port */
1362 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1363 if (rc)
1364 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1365
1366 ap->private_data = NULL;
1367 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1368 pp->cmd_slot, pp->cmd_slot_dma);
1369 ata_pad_free(ap, dev);
1370 kfree(pp);
1072} 1371}
1073 1372
1074static void ahci_setup_port(struct ata_ioports *port, unsigned long base, 1373static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
@@ -1089,47 +1388,12 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1089 struct ahci_host_priv *hpriv = probe_ent->private_data; 1388 struct ahci_host_priv *hpriv = probe_ent->private_data;
1090 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 1389 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1091 void __iomem *mmio = probe_ent->mmio_base; 1390 void __iomem *mmio = probe_ent->mmio_base;
1092 u32 tmp, cap_save; 1391 unsigned int i, using_dac;
1093 unsigned int i, j, using_dac;
1094 int rc; 1392 int rc;
1095 void __iomem *port_mmio;
1096
1097 cap_save = readl(mmio + HOST_CAP);
1098 cap_save &= ( (1<<28) | (1<<17) );
1099 cap_save |= (1 << 27);
1100 1393
1101 /* global controller reset */ 1394 rc = ahci_reset_controller(mmio, pdev);
1102 tmp = readl(mmio + HOST_CTL); 1395 if (rc)
1103 if ((tmp & HOST_RESET) == 0) { 1396 return rc;
1104 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1105 readl(mmio + HOST_CTL); /* flush */
1106 }
1107
1108 /* reset must complete within 1 second, or
1109 * the hardware should be considered fried.
1110 */
1111 ssleep(1);
1112
1113 tmp = readl(mmio + HOST_CTL);
1114 if (tmp & HOST_RESET) {
1115 dev_printk(KERN_ERR, &pdev->dev,
1116 "controller reset failed (0x%x)\n", tmp);
1117 return -EIO;
1118 }
1119
1120 writel(HOST_AHCI_EN, mmio + HOST_CTL);
1121 (void) readl(mmio + HOST_CTL); /* flush */
1122 writel(cap_save, mmio + HOST_CAP);
1123 writel(0xf, mmio + HOST_PORTS_IMPL);
1124 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
1125
1126 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1127 u16 tmp16;
1128
1129 pci_read_config_word(pdev, 0x92, &tmp16);
1130 tmp16 |= 0xf;
1131 pci_write_config_word(pdev, 0x92, tmp16);
1132 }
1133 1397
1134 hpriv->cap = readl(mmio + HOST_CAP); 1398 hpriv->cap = readl(mmio + HOST_CAP);
1135 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL); 1399 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
@@ -1165,63 +1429,10 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1165 } 1429 }
1166 } 1430 }
1167 1431
1168 for (i = 0; i < probe_ent->n_ports; i++) { 1432 for (i = 0; i < probe_ent->n_ports; i++)
1169#if 0 /* BIOSen initialize this incorrectly */ 1433 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1170 if (!(hpriv->port_map & (1 << i)))
1171 continue;
1172#endif
1173
1174 port_mmio = ahci_port_base(mmio, i);
1175 VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
1176
1177 ahci_setup_port(&probe_ent->port[i],
1178 (unsigned long) mmio, i);
1179 1434
1180 /* make sure port is not active */ 1435 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1181 tmp = readl(port_mmio + PORT_CMD);
1182 VPRINTK("PORT_CMD 0x%x\n", tmp);
1183 if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1184 PORT_CMD_FIS_RX | PORT_CMD_START)) {
1185 tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1186 PORT_CMD_FIS_RX | PORT_CMD_START);
1187 writel(tmp, port_mmio + PORT_CMD);
1188 readl(port_mmio + PORT_CMD); /* flush */
1189
1190 /* spec says 500 msecs for each bit, so
1191 * this is slightly incorrect.
1192 */
1193 msleep(500);
1194 }
1195
1196 writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);
1197
1198 j = 0;
1199 while (j < 100) {
1200 msleep(10);
1201 tmp = readl(port_mmio + PORT_SCR_STAT);
1202 if ((tmp & 0xf) == 0x3)
1203 break;
1204 j++;
1205 }
1206
1207 tmp = readl(port_mmio + PORT_SCR_ERR);
1208 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1209 writel(tmp, port_mmio + PORT_SCR_ERR);
1210
1211 /* ack any pending irq events for this port */
1212 tmp = readl(port_mmio + PORT_IRQ_STAT);
1213 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1214 if (tmp)
1215 writel(tmp, port_mmio + PORT_IRQ_STAT);
1216
1217 writel(1 << i, mmio + HOST_IRQ_STAT);
1218 }
1219
1220 tmp = readl(mmio + HOST_CTL);
1221 VPRINTK("HOST_CTL 0x%x\n", tmp);
1222 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1223 tmp = readl(mmio + HOST_CTL);
1224 VPRINTK("HOST_CTL 0x%x\n", tmp);
1225 1436
1226 pci_set_master(pdev); 1437 pci_set_master(pdev);
1227 1438
@@ -1370,7 +1581,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1370 memset(hpriv, 0, sizeof(*hpriv)); 1581 memset(hpriv, 0, sizeof(*hpriv));
1371 1582
1372 probe_ent->sht = ahci_port_info[board_idx].sht; 1583 probe_ent->sht = ahci_port_info[board_idx].sht;
1373 probe_ent->host_flags = ahci_port_info[board_idx].host_flags; 1584 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1374 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask; 1585 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1375 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask; 1586 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1376 probe_ent->port_ops = ahci_port_info[board_idx].port_ops; 1587 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
@@ -1388,9 +1599,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1388 if (rc) 1599 if (rc)
1389 goto err_out_hpriv; 1600 goto err_out_hpriv;
1390 1601
1391 if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) && 1602 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1392 (hpriv->cap & HOST_CAP_NCQ)) 1603 (hpriv->cap & HOST_CAP_NCQ))
1393 probe_ent->host_flags |= ATA_FLAG_NCQ; 1604 probe_ent->port_flags |= ATA_FLAG_NCQ;
1394 1605
1395 ahci_print_info(probe_ent); 1606 ahci_print_info(probe_ent);
1396 1607
@@ -1421,27 +1632,27 @@ err_out:
1421static void ahci_remove_one (struct pci_dev *pdev) 1632static void ahci_remove_one (struct pci_dev *pdev)
1422{ 1633{
1423 struct device *dev = pci_dev_to_dev(pdev); 1634 struct device *dev = pci_dev_to_dev(pdev);
1424 struct ata_host_set *host_set = dev_get_drvdata(dev); 1635 struct ata_host *host = dev_get_drvdata(dev);
1425 struct ahci_host_priv *hpriv = host_set->private_data; 1636 struct ahci_host_priv *hpriv = host->private_data;
1426 unsigned int i; 1637 unsigned int i;
1427 int have_msi; 1638 int have_msi;
1428 1639
1429 for (i = 0; i < host_set->n_ports; i++) 1640 for (i = 0; i < host->n_ports; i++)
1430 ata_port_detach(host_set->ports[i]); 1641 ata_port_detach(host->ports[i]);
1431 1642
1432 have_msi = hpriv->flags & AHCI_FLAG_MSI; 1643 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1433 free_irq(host_set->irq, host_set); 1644 free_irq(host->irq, host);
1434 1645
1435 for (i = 0; i < host_set->n_ports; i++) { 1646 for (i = 0; i < host->n_ports; i++) {
1436 struct ata_port *ap = host_set->ports[i]; 1647 struct ata_port *ap = host->ports[i];
1437 1648
1438 ata_scsi_release(ap->host); 1649 ata_scsi_release(ap->scsi_host);
1439 scsi_host_put(ap->host); 1650 scsi_host_put(ap->scsi_host);
1440 } 1651 }
1441 1652
1442 kfree(hpriv); 1653 kfree(hpriv);
1443 pci_iounmap(pdev, host_set->mmio_base); 1654 pci_iounmap(pdev, host->mmio_base);
1444 kfree(host_set); 1655 kfree(host);
1445 1656
1446 if (have_msi) 1657 if (have_msi)
1447 pci_disable_msi(pdev); 1658 pci_disable_msi(pdev);
@@ -1454,7 +1665,7 @@ static void ahci_remove_one (struct pci_dev *pdev)
1454 1665
1455static int __init ahci_init(void) 1666static int __init ahci_init(void)
1456{ 1667{
1457 return pci_module_init(&ahci_pci_driver); 1668 return pci_register_driver(&ahci_pci_driver);
1458} 1669}
1459 1670
1460static void __exit ahci_exit(void) 1671static void __exit ahci_exit(void)
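The hunks above replace the open-coded controller reset and per-port spin-up loop in ahci_host_init() with calls to ahci_reset_controller(), ahci_setup_port() and ahci_init_controller(). The bodies of the new helpers are not shown in this excerpt; a minimal sketch of the reset half, reconstructed from the deleted inline code (the real implementation may differ in detail):

    /* Sketch only -- reconstructed from the inline code removed above. */
    static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
    {
            u32 cap_save, tmp;

            /* preserve selected HOST_CAP bits across the reset */
            cap_save = readl(mmio + HOST_CAP);
            cap_save &= ((1 << 28) | (1 << 17));
            cap_save |= (1 << 27);

            /* global controller reset, unless one is already in progress */
            tmp = readl(mmio + HOST_CTL);
            if ((tmp & HOST_RESET) == 0) {
                    writel(tmp | HOST_RESET, mmio + HOST_CTL);
                    readl(mmio + HOST_CTL);         /* flush */
            }

            /* reset must complete within 1 second */
            ssleep(1);
            tmp = readl(mmio + HOST_CTL);
            if (tmp & HOST_RESET) {
                    dev_printk(KERN_ERR, &pdev->dev,
                               "controller reset failed (0x%x)\n", tmp);
                    return -EIO;
            }

            /* turn AHCI mode back on and restore the saved bits */
            writel(HOST_AHCI_EN, mmio + HOST_CTL);
            (void) readl(mmio + HOST_CTL);          /* flush */
            writel(cap_save, mmio + HOST_CAP);
            writel(0xf, mmio + HOST_PORTS_IMPL);
            (void) readl(mmio + HOST_PORTS_IMPL);   /* flush */

            if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                    u16 tmp16;

                    /* Intel parts mirror the port enables in config space */
                    pci_read_config_word(pdev, 0x92, &tmp16);
                    tmp16 |= 0xf;
                    pci_write_config_word(pdev, 0x92, tmp16);
            }

            return 0;
    }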
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
new file mode 100644
index 000000000000..1d1c30a2fcd0
--- /dev/null
+++ b/drivers/ata/ata_generic.c
@@ -0,0 +1,252 @@
1/*
2 * ata_generic.c - Generic PATA/SATA controller driver.
3 * Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved.
4 *
5 * Elements from ide/pci/generic.c
6 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
7 * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
8 *
9 * May be copied or modified under the terms of the GNU General Public License
10 *
11 * Driver for PCI IDE interfaces implementing the standard bus mastering
12 * interface functionality. This assumes the BIOS did the drive set up and
13 * tuning for us. By default we do not grab all IDE class devices as they
14 * may have other drivers or need fixups to avoid problems. Instead we keep
15 * a default list of stuff without documentation/driver that appears to
16 * work.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/pci.h>
22#include <linux/init.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <scsi/scsi_host.h>
26#include <linux/libata.h>
27
28#define DRV_NAME "ata_generic"
29#define DRV_VERSION "0.2.6"
30
31/*
32 * A generic parallel ATA driver using libata
33 */
34
35/**
36 * generic_pre_reset - probe begin
37 * @ap: ATA port
38 *
39 * Set up cable type and use generic probe init
40 */
41
42static int generic_pre_reset(struct ata_port *ap)
43{
44 ap->cbl = ATA_CBL_PATA80;
45 return ata_std_prereset(ap);
46}
47
48
49/**
 50 * generic_error_handler - error handling for generic PATA interfaces
 51 * @ap: Port the error was raised on
 52 *
53 *
54 * LOCKING:
55 * None (inherited from caller).
56 */
57
58
59static void generic_error_handler(struct ata_port *ap)
60{
61 ata_bmdma_drive_eh(ap, generic_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
62}
63
64/**
65 * generic_set_mode - mode setting
66 * @ap: interface to set up
67 *
 68 * Use a non-standard set_mode function. We don't want to be tuned.
69 * The BIOS configured everything. Our job is not to fiddle. We
70 * read the dma enabled bits from the PCI configuration of the device
71 * and respect them.
72 */
73
74static void generic_set_mode(struct ata_port *ap)
75{
76 int dma_enabled = 0;
77 int i;
78
79 /* Bits 5 and 6 indicate if DMA is active on master/slave */
80 if (ap->ioaddr.bmdma_addr)
81 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
82
83 for (i = 0; i < ATA_MAX_DEVICES; i++) {
84 struct ata_device *dev = &ap->device[i];
85 if (ata_dev_enabled(dev)) {
86 /* We don't really care */
87 dev->pio_mode = XFER_PIO_0;
88 dev->dma_mode = XFER_MW_DMA_0;
89 /* We do need the right mode information for DMA or PIO
90 and this comes from the current configuration flags */
91 if (dma_enabled & (1 << (5 + i))) {
92 dev->xfer_mode = XFER_MW_DMA_0;
93 dev->xfer_shift = ATA_SHIFT_MWDMA;
94 dev->flags &= ~ATA_DFLAG_PIO;
95 } else {
96 dev->xfer_mode = XFER_PIO_0;
97 dev->xfer_shift = ATA_SHIFT_PIO;
98 dev->flags |= ATA_DFLAG_PIO;
99 }
100 }
101 }
102}
103
104static struct scsi_host_template generic_sht = {
105 .module = THIS_MODULE,
106 .name = DRV_NAME,
107 .ioctl = ata_scsi_ioctl,
108 .queuecommand = ata_scsi_queuecmd,
109 .can_queue = ATA_DEF_QUEUE,
110 .this_id = ATA_SHT_THIS_ID,
111 .sg_tablesize = LIBATA_MAX_PRD,
112 .max_sectors = ATA_MAX_SECTORS,
113 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
114 .emulated = ATA_SHT_EMULATED,
115 .use_clustering = ATA_SHT_USE_CLUSTERING,
116 .proc_name = DRV_NAME,
117 .dma_boundary = ATA_DMA_BOUNDARY,
118 .slave_configure = ata_scsi_slave_config,
119 .bios_param = ata_std_bios_param,
120};
121
122static struct ata_port_operations generic_port_ops = {
123 .set_mode = generic_set_mode,
124
125 .port_disable = ata_port_disable,
126 .tf_load = ata_tf_load,
127 .tf_read = ata_tf_read,
128 .check_status = ata_check_status,
129 .exec_command = ata_exec_command,
130 .dev_select = ata_std_dev_select,
131
132 .bmdma_setup = ata_bmdma_setup,
133 .bmdma_start = ata_bmdma_start,
134 .bmdma_stop = ata_bmdma_stop,
135 .bmdma_status = ata_bmdma_status,
136
137 .data_xfer = ata_pio_data_xfer,
138
139 .freeze = ata_bmdma_freeze,
140 .thaw = ata_bmdma_thaw,
141 .error_handler = generic_error_handler,
142 .post_internal_cmd = ata_bmdma_post_internal_cmd,
143
144 .qc_prep = ata_qc_prep,
145 .qc_issue = ata_qc_issue_prot,
146 .eng_timeout = ata_eng_timeout,
147 .irq_handler = ata_interrupt,
148 .irq_clear = ata_bmdma_irq_clear,
149
150 .port_start = ata_port_start,
151 .port_stop = ata_port_stop,
152 .host_stop = ata_host_stop
153};
154
155static int all_generic_ide; /* Set to claim all devices */
156
157/**
158 * ata_generic_init - attach generic IDE
159 * @dev: PCI device found
160 * @id: match entry
161 *
162 * Called each time a matching IDE interface is found. We check if the
163 * interface is one we wish to claim and if so we perform any chip
164 * specific hacks then let the ATA layer do the heavy lifting.
165 */
166
167static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
168{
169 u16 command;
170 static struct ata_port_info info = {
171 .sht = &generic_sht,
172 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
173 .pio_mask = 0x1f,
174 .mwdma_mask = 0x07,
175 .udma_mask = 0x3f,
176 .port_ops = &generic_port_ops
177 };
178 static struct ata_port_info *port_info[2] = { &info, &info };
179
180 /* Don't use the generic entry unless instructed to do so */
181 if (id->driver_data == 1 && all_generic_ide == 0)
182 return -ENODEV;
183
184 /* Devices that need care */
185 if (dev->vendor == PCI_VENDOR_ID_UMC &&
186 dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
187 (!(PCI_FUNC(dev->devfn) & 1)))
188 return -ENODEV;
189
190 if (dev->vendor == PCI_VENDOR_ID_OPTI &&
191 dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
192 (!(PCI_FUNC(dev->devfn) & 1)))
193 return -ENODEV;
194
195 /* Don't re-enable devices in generic mode or we will break some
196 motherboards with disabled and unused IDE controllers */
197 pci_read_config_word(dev, PCI_COMMAND, &command);
198 if (!(command & PCI_COMMAND_IO))
199 return -ENODEV;
200
201 if (dev->vendor == PCI_VENDOR_ID_AL)
202 ata_pci_clear_simplex(dev);
203
204 return ata_pci_init_one(dev, port_info, 2);
205}
206
207static struct pci_device_id ata_generic[] = {
208 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
209 { PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
210 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
211 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), },
212 { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), },
213 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
214 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
215 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
216 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
217 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
218 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
219 /* Must come last. If you add entries adjust this table appropriately */
220 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
221 { 0, },
222};
223
224static struct pci_driver ata_generic_pci_driver = {
225 .name = DRV_NAME,
226 .id_table = ata_generic,
227 .probe = ata_generic_init_one,
228 .remove = ata_pci_remove_one
229};
230
231static int __init ata_generic_init(void)
232{
233 return pci_module_init(&ata_generic_pci_driver);
234}
235
236
237static void __exit ata_generic_exit(void)
238{
239 pci_unregister_driver(&ata_generic_pci_driver);
240}
241
242
243MODULE_AUTHOR("Alan Cox");
244MODULE_DESCRIPTION("low-level driver for generic ATA");
245MODULE_LICENSE("GPL");
246MODULE_DEVICE_TABLE(pci, ata_generic);
247MODULE_VERSION(DRV_VERSION);
248
249module_init(ata_generic_init);
250module_exit(ata_generic_exit);
251
252module_param(all_generic_ide, int, 0);
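generic_set_mode() above does no tuning of its own: it only reads back which drives the firmware left with DMA switched on and records matching transfer modes. A minimal sketch of the decode it relies on (the helper name is illustrative, not part of the driver): bit 5 of the BMDMA status byte marks drive 0 as DMA-capable, bit 6 marks drive 1.

    /* Illustrative helper only -- not in ata_generic.c. */
    static inline int bmdma_drive_dma_enabled(u8 bmdma_status, int devno)
    {
            return (bmdma_status & (1 << (5 + devno))) != 0;
    }

The catch-all PCI_CLASS_STORAGE_IDE entry at the end of the ID table is only honoured when the all_generic_ide module parameter is set, so controllers without a dedicated driver stay untouched unless explicitly requested.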
diff --git a/drivers/scsi/ata_piix.c b/drivers/ata/ata_piix.c
index a9bb3cb7e89b..ab2ecccf7798 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -93,7 +93,7 @@
93#include <linux/libata.h> 93#include <linux/libata.h>
94 94
95#define DRV_NAME "ata_piix" 95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.00" 96#define DRV_VERSION "2.00ac6"
97 97
98enum { 98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ 99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@@ -116,15 +116,18 @@ enum {
116 PIIX_80C_SEC = (1 << 7) | (1 << 6), 116 PIIX_80C_SEC = (1 << 7) | (1 << 6),
117 117
118 /* controller IDs */ 118 /* controller IDs */
 119 piix4_pata = 0, 119 piix_pata_33 = 0, /* PIIX3 or 4 at 33MHz */
120 ich5_pata = 1, 120 ich_pata_33 = 1, /* ICH up to UDMA 33 only */
 121 ich5_sata = 2, 121 ich_pata_66 = 2, /* ICH up to 66 MHz */
122 esb_sata = 3, 122 ich_pata_100 = 3, /* ICH up to UDMA 100 */
123 ich6_sata = 4, 123 ich_pata_133 = 4, /* ICH up to UDMA 133 */
124 ich6_sata_ahci = 5, 124 ich5_sata = 5,
125 ich6m_sata_ahci = 6, 125 esb_sata = 6,
126 ich7m_sata_ahci = 7, 126 ich6_sata = 7,
127 ich8_sata_ahci = 8, 127 ich6_sata_ahci = 8,
128 ich6m_sata_ahci = 9,
129 ich7m_sata_ahci = 10,
130 ich8_sata_ahci = 11,
128 131
129 /* constants for mapping table */ 132 /* constants for mapping table */
130 P0 = 0, /* port 0 */ 133 P0 = 0, /* port 0 */
@@ -152,20 +155,55 @@ struct piix_host_priv {
152 155
153static int piix_init_one (struct pci_dev *pdev, 156static int piix_init_one (struct pci_dev *pdev,
154 const struct pci_device_id *ent); 157 const struct pci_device_id *ent);
155static void piix_host_stop(struct ata_host_set *host_set); 158static void piix_host_stop(struct ata_host *host);
156static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
157static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
158static void piix_pata_error_handler(struct ata_port *ap); 159static void piix_pata_error_handler(struct ata_port *ap);
160static void ich_pata_error_handler(struct ata_port *ap);
159static void piix_sata_error_handler(struct ata_port *ap); 161static void piix_sata_error_handler(struct ata_port *ap);
162static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
163static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
164static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
160 165
161static unsigned int in_module_init = 1; 166static unsigned int in_module_init = 1;
162 167
163static const struct pci_device_id piix_pci_tbl[] = { 168static const struct pci_device_id piix_pci_tbl[] = {
164#ifdef ATA_ENABLE_PATA 169#ifdef ATA_ENABLE_PATA
165 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata }, 170 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
166 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, 171 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
167 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, 172 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
168 { 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, 173 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
174 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
175 /* Intel PIIX4 */
176 { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
177 /* Intel PIIX4 */
178 { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
179 /* Intel PIIX */
180 { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
181 /* Intel ICH (i810, i815, i840) UDMA 66*/
182 { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
183 /* Intel ICH0 : UDMA 33*/
184 { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
185 /* Intel ICH2M */
186 { 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
187 /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
188 { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
189 /* Intel ICH3M */
190 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
191 /* Intel ICH3 (E7500/1) UDMA 100 */
192 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
193 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
194 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
195 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
196 /* Intel ICH5 */
197 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
198 /* C-ICH (i810E2) */
199 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
200 /* ESB (855GME/875P + 6300ESB) UDMA 100 */
201 { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
202 /* ICH6 (and 6) (i915) UDMA 100 */
203 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
204 /* ICH7/7-R (i945, i975) UDMA 100*/
205 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
206 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
169#endif 207#endif
170 208
171 /* NOTE: The following PCI ids must be kept in sync with the 209 /* NOTE: The following PCI ids must be kept in sync with the
@@ -264,6 +302,39 @@ static const struct ata_port_operations piix_pata_ops = {
264 .host_stop = piix_host_stop, 302 .host_stop = piix_host_stop,
265}; 303};
266 304
305static const struct ata_port_operations ich_pata_ops = {
306 .port_disable = ata_port_disable,
307 .set_piomode = piix_set_piomode,
308 .set_dmamode = ich_set_dmamode,
309 .mode_filter = ata_pci_default_filter,
310
311 .tf_load = ata_tf_load,
312 .tf_read = ata_tf_read,
313 .check_status = ata_check_status,
314 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select,
316
317 .bmdma_setup = ata_bmdma_setup,
318 .bmdma_start = ata_bmdma_start,
319 .bmdma_stop = ata_bmdma_stop,
320 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot,
323 .data_xfer = ata_pio_data_xfer,
324
325 .freeze = ata_bmdma_freeze,
326 .thaw = ata_bmdma_thaw,
327 .error_handler = ich_pata_error_handler,
328 .post_internal_cmd = ata_bmdma_post_internal_cmd,
329
330 .irq_handler = ata_interrupt,
331 .irq_clear = ata_bmdma_irq_clear,
332
333 .port_start = ata_port_start,
334 .port_stop = ata_port_stop,
335 .host_stop = ata_host_stop,
336};
337
267static const struct ata_port_operations piix_sata_ops = { 338static const struct ata_port_operations piix_sata_ops = {
268 .port_disable = ata_port_disable, 339 .port_disable = ata_port_disable,
269 340
@@ -379,38 +450,59 @@ static const struct piix_map_db *piix_map_db_table[] = {
379}; 450};
380 451
381static struct ata_port_info piix_port_info[] = { 452static struct ata_port_info piix_port_info[] = {
382 /* piix4_pata */ 453 /* piix_pata_33: 0: PIIX3 or 4 at 33MHz */
383 { 454 {
384 .sht = &piix_sht, 455 .sht = &piix_sht,
385 .host_flags = ATA_FLAG_SLAVE_POSS, 456 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
386 .pio_mask = 0x1f, /* pio0-4 */ 457 .pio_mask = 0x1f, /* pio0-4 */
387#if 0 458 .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
388 .mwdma_mask = 0x06, /* mwdma1-2 */
389#else
390 .mwdma_mask = 0x00, /* mwdma broken */
391#endif
392 .udma_mask = ATA_UDMA_MASK_40C, 459 .udma_mask = ATA_UDMA_MASK_40C,
393 .port_ops = &piix_pata_ops, 460 .port_ops = &piix_pata_ops,
394 }, 461 },
395 462
 396 /* ich5_pata */ 463 /* ich_pata_33: 1 ICH0 - ICH at 33 MHz */
464 {
465 .sht = &piix_sht,
466 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
467 .pio_mask = 0x1f, /* pio 0-4 */
468 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
469 .udma_mask = ATA_UDMA2, /* UDMA33 */
470 .port_ops = &ich_pata_ops,
471 },
472 /* ich_pata_66: 2 ICH controllers up to 66MHz */
473 {
474 .sht = &piix_sht,
475 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
476 .pio_mask = 0x1f, /* pio 0-4 */
477 .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
478 .udma_mask = ATA_UDMA4,
479 .port_ops = &ich_pata_ops,
480 },
481
482 /* ich_pata_100: 3 */
397 { 483 {
398 .sht = &piix_sht, 484 .sht = &piix_sht,
399 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, 485 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
400 .pio_mask = 0x1f, /* pio0-4 */ 486 .pio_mask = 0x1f, /* pio0-4 */
401#if 0
402 .mwdma_mask = 0x06, /* mwdma1-2 */ 487 .mwdma_mask = 0x06, /* mwdma1-2 */
403#else 488 .udma_mask = ATA_UDMA5, /* udma0-5 */
404 .mwdma_mask = 0x00, /* mwdma broken */ 489 .port_ops = &ich_pata_ops,
405#endif 490 },
406 .udma_mask = 0x3f, /* udma0-5 */ 491
407 .port_ops = &piix_pata_ops, 492 /* ich_pata_133: 4 ICH with full UDMA6 */
493 {
494 .sht = &piix_sht,
495 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
496 .pio_mask = 0x1f, /* pio 0-4 */
497 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
498 .udma_mask = ATA_UDMA6, /* UDMA133 */
499 .port_ops = &ich_pata_ops,
408 }, 500 },
409 501
410 /* ich5_sata */ 502 /* ich5_sata: 5 */
411 { 503 {
412 .sht = &piix_sht, 504 .sht = &piix_sht,
413 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | 505 .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
414 PIIX_FLAG_IGNORE_PCS, 506 PIIX_FLAG_IGNORE_PCS,
415 .pio_mask = 0x1f, /* pio0-4 */ 507 .pio_mask = 0x1f, /* pio0-4 */
416 .mwdma_mask = 0x07, /* mwdma0-2 */ 508 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -418,10 +510,10 @@ static struct ata_port_info piix_port_info[] = {
418 .port_ops = &piix_sata_ops, 510 .port_ops = &piix_sata_ops,
419 }, 511 },
420 512
421 /* i6300esb_sata */ 513 /* i6300esb_sata: 6 */
422 { 514 {
423 .sht = &piix_sht, 515 .sht = &piix_sht,
424 .host_flags = ATA_FLAG_SATA | 516 .flags = ATA_FLAG_SATA |
425 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, 517 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
426 .pio_mask = 0x1f, /* pio0-4 */ 518 .pio_mask = 0x1f, /* pio0-4 */
427 .mwdma_mask = 0x07, /* mwdma0-2 */ 519 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -429,10 +521,10 @@ static struct ata_port_info piix_port_info[] = {
429 .port_ops = &piix_sata_ops, 521 .port_ops = &piix_sata_ops,
430 }, 522 },
431 523
432 /* ich6_sata */ 524 /* ich6_sata: 7 */
433 { 525 {
434 .sht = &piix_sht, 526 .sht = &piix_sht,
435 .host_flags = ATA_FLAG_SATA | 527 .flags = ATA_FLAG_SATA |
436 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, 528 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
437 .pio_mask = 0x1f, /* pio0-4 */ 529 .pio_mask = 0x1f, /* pio0-4 */
438 .mwdma_mask = 0x07, /* mwdma0-2 */ 530 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -440,10 +532,10 @@ static struct ata_port_info piix_port_info[] = {
440 .port_ops = &piix_sata_ops, 532 .port_ops = &piix_sata_ops,
441 }, 533 },
442 534
443 /* ich6_sata_ahci */ 535 /* ich6_sata_ahci: 8 */
444 { 536 {
445 .sht = &piix_sht, 537 .sht = &piix_sht,
446 .host_flags = ATA_FLAG_SATA | 538 .flags = ATA_FLAG_SATA |
447 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 539 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
448 PIIX_FLAG_AHCI, 540 PIIX_FLAG_AHCI,
449 .pio_mask = 0x1f, /* pio0-4 */ 541 .pio_mask = 0x1f, /* pio0-4 */
@@ -452,10 +544,10 @@ static struct ata_port_info piix_port_info[] = {
452 .port_ops = &piix_sata_ops, 544 .port_ops = &piix_sata_ops,
453 }, 545 },
454 546
455 /* ich6m_sata_ahci */ 547 /* ich6m_sata_ahci: 9 */
456 { 548 {
457 .sht = &piix_sht, 549 .sht = &piix_sht,
458 .host_flags = ATA_FLAG_SATA | 550 .flags = ATA_FLAG_SATA |
459 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 551 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
460 PIIX_FLAG_AHCI, 552 PIIX_FLAG_AHCI,
461 .pio_mask = 0x1f, /* pio0-4 */ 553 .pio_mask = 0x1f, /* pio0-4 */
@@ -464,10 +556,10 @@ static struct ata_port_info piix_port_info[] = {
464 .port_ops = &piix_sata_ops, 556 .port_ops = &piix_sata_ops,
465 }, 557 },
466 558
467 /* ich7m_sata_ahci */ 559 /* ich7m_sata_ahci: 10 */
468 { 560 {
469 .sht = &piix_sht, 561 .sht = &piix_sht,
470 .host_flags = ATA_FLAG_SATA | 562 .flags = ATA_FLAG_SATA |
471 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 563 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
472 PIIX_FLAG_AHCI, 564 PIIX_FLAG_AHCI,
473 .pio_mask = 0x1f, /* pio0-4 */ 565 .pio_mask = 0x1f, /* pio0-4 */
@@ -476,10 +568,10 @@ static struct ata_port_info piix_port_info[] = {
476 .port_ops = &piix_sata_ops, 568 .port_ops = &piix_sata_ops,
477 }, 569 },
478 570
479 /* ich8_sata_ahci */ 571 /* ich8_sata_ahci: 11 */
480 { 572 {
481 .sht = &piix_sht, 573 .sht = &piix_sht,
482 .host_flags = ATA_FLAG_SATA | 574 .flags = ATA_FLAG_SATA |
483 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 575 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
484 PIIX_FLAG_AHCI, 576 PIIX_FLAG_AHCI,
485 .pio_mask = 0x1f, /* pio0-4 */ 577 .pio_mask = 0x1f, /* pio0-4 */
@@ -487,6 +579,7 @@ static struct ata_port_info piix_port_info[] = {
487 .udma_mask = 0x7f, /* udma0-6 */ 579 .udma_mask = 0x7f, /* udma0-6 */
488 .port_ops = &piix_sata_ops, 580 .port_ops = &piix_sata_ops,
489 }, 581 },
582
490}; 583};
491 584
492static struct pci_bits piix_enable_bits[] = { 585static struct pci_bits piix_enable_bits[] = {
@@ -515,9 +608,10 @@ MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
515 * LOCKING: 608 * LOCKING:
516 * None (inherited from caller). 609 * None (inherited from caller).
517 */ 610 */
518static void piix_pata_cbl_detect(struct ata_port *ap) 611
612static void ich_pata_cbl_detect(struct ata_port *ap)
519{ 613{
520 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 614 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
521 u8 tmp, mask; 615 u8 tmp, mask;
522 616
523 /* no 80c support in host controller? */ 617 /* no 80c support in host controller? */
@@ -525,7 +619,7 @@ static void piix_pata_cbl_detect(struct ata_port *ap)
525 goto cbl40; 619 goto cbl40;
526 620
527 /* check BIOS cable detect results */ 621 /* check BIOS cable detect results */
528 mask = ap->hard_port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; 622 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
529 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); 623 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
530 if ((tmp & mask) == 0) 624 if ((tmp & mask) == 0)
531 goto cbl40; 625 goto cbl40;
@@ -535,30 +629,26 @@ static void piix_pata_cbl_detect(struct ata_port *ap)
535 629
536cbl40: 630cbl40:
537 ap->cbl = ATA_CBL_PATA40; 631 ap->cbl = ATA_CBL_PATA40;
538 ap->udma_mask &= ATA_UDMA_MASK_40C;
539} 632}
540 633
541/** 634/**
542 * piix_pata_prereset - prereset for PATA host controller 635 * piix_pata_prereset - prereset for PATA host controller
543 * @ap: Target port 636 * @ap: Target port
544 * 637 *
545 * Prereset including cable detection.
546 * 638 *
547 * LOCKING: 639 * LOCKING:
548 * None (inherited from caller). 640 * None (inherited from caller).
549 */ 641 */
550static int piix_pata_prereset(struct ata_port *ap) 642static int piix_pata_prereset(struct ata_port *ap)
551{ 643{
552 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 644 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
553 645
554 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 646 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
555 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n"); 647 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
556 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 648 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
557 return 0; 649 return 0;
558 } 650 }
559 651 ap->cbl = ATA_CBL_PATA40;
560 piix_pata_cbl_detect(ap);
561
562 return ata_std_prereset(ap); 652 return ata_std_prereset(ap);
563} 653}
564 654
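piix_pata_prereset() now just records a 40-wire cable instead of calling the cable detect routine, and ich_pata_cbl_detect() no longer clamps ap->udma_mask itself. The filtering that used to happen here is left to the core; a minimal sketch of the equivalent clamp (libata applies it in ata_dev_xfermask(), visible later in this diff), with an illustrative helper name:

    /* Sketch only: drop UDMA modes above UDMA2 on a 40-wire cable. */
    static unsigned int clamp_udma_for_40wire(unsigned int xfer_mask, int cbl)
    {
            if (cbl == ATA_CBL_PATA40)
                    xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
            return xfer_mask;
    }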
@@ -568,6 +658,36 @@ static void piix_pata_error_handler(struct ata_port *ap)
568 ata_std_postreset); 658 ata_std_postreset);
569} 659}
570 660
661
662/**
663 * ich_pata_prereset - prereset for PATA host controller
664 * @ap: Target port
665 *
666 *
667 * LOCKING:
668 * None (inherited from caller).
669 */
670static int ich_pata_prereset(struct ata_port *ap)
671{
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
673
674 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
675 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
676 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
677 return 0;
678 }
679
680 ich_pata_cbl_detect(ap);
681
682 return ata_std_prereset(ap);
683}
684
685static void ich_pata_error_handler(struct ata_port *ap)
686{
687 ata_bmdma_drive_eh(ap, ich_pata_prereset, ata_std_softreset, NULL,
688 ata_std_postreset);
689}
690
571/** 691/**
572 * piix_sata_present_mask - determine present mask for SATA host controller 692 * piix_sata_present_mask - determine present mask for SATA host controller
573 * @ap: Target port 693 * @ap: Target port
@@ -583,10 +703,10 @@ static void piix_pata_error_handler(struct ata_port *ap)
583 */ 703 */
584static unsigned int piix_sata_present_mask(struct ata_port *ap) 704static unsigned int piix_sata_present_mask(struct ata_port *ap)
585{ 705{
586 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 706 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
587 struct piix_host_priv *hpriv = ap->host_set->private_data; 707 struct piix_host_priv *hpriv = ap->host->private_data;
588 const unsigned int *map = hpriv->map; 708 const unsigned int *map = hpriv->map;
589 int base = 2 * ap->hard_port_no; 709 int base = 2 * ap->port_no;
590 unsigned int present_mask = 0; 710 unsigned int present_mask = 0;
591 int port, i; 711 int port, i;
592 u16 pcs; 712 u16 pcs;
@@ -663,12 +783,19 @@ static void piix_sata_error_handler(struct ata_port *ap)
663static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) 783static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
664{ 784{
665 unsigned int pio = adev->pio_mode - XFER_PIO_0; 785 unsigned int pio = adev->pio_mode - XFER_PIO_0;
666 struct pci_dev *dev = to_pci_dev(ap->host_set->dev); 786 struct pci_dev *dev = to_pci_dev(ap->host->dev);
667 unsigned int is_slave = (adev->devno != 0); 787 unsigned int is_slave = (adev->devno != 0);
668 unsigned int master_port= ap->hard_port_no ? 0x42 : 0x40; 788 unsigned int master_port= ap->port_no ? 0x42 : 0x40;
669 unsigned int slave_port = 0x44; 789 unsigned int slave_port = 0x44;
670 u16 master_data; 790 u16 master_data;
671 u8 slave_data; 791 u8 slave_data;
792 u8 udma_enable;
793 int control = 0;
794
795 /*
 796 * See Intel Document 298600-004 for the timing programming rules
797 * for ICH controllers.
798 */
672 799
673 static const /* ISP RTC */ 800 static const /* ISP RTC */
674 u8 timings[][2] = { { 0, 0 }, 801 u8 timings[][2] = { { 0, 0 },
@@ -677,20 +804,30 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
677 { 2, 1 }, 804 { 2, 1 },
678 { 2, 3 }, }; 805 { 2, 3 }, };
679 806
807 if (pio >= 2)
808 control |= 1; /* TIME1 enable */
809 if (ata_pio_need_iordy(adev))
810 control |= 2; /* IE enable */
811
812 /* Intel specifies that the PPE functionality is for disk only */
813 if (adev->class == ATA_DEV_ATA)
814 control |= 4; /* PPE enable */
815
680 pci_read_config_word(dev, master_port, &master_data); 816 pci_read_config_word(dev, master_port, &master_data);
681 if (is_slave) { 817 if (is_slave) {
 818 /* Enable SITRE (separate slave timing register) */
682 master_data |= 0x4000; 819 master_data |= 0x4000;
683 /* enable PPE, IE and TIME */ 820 /* enable PPE1, IE1 and TIME1 as needed */
684 master_data |= 0x0070; 821 master_data |= (control << 4);
685 pci_read_config_byte(dev, slave_port, &slave_data); 822 pci_read_config_byte(dev, slave_port, &slave_data);
686 slave_data &= (ap->hard_port_no ? 0x0f : 0xf0); 823 slave_data &= (ap->port_no ? 0x0f : 0xf0);
687 slave_data |= 824 /* Load the timing nibble for this slave */
688 (timings[pio][0] << 2) | 825 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
689 (timings[pio][1] << (ap->hard_port_no ? 4 : 0));
690 } else { 826 } else {
827 /* Master keeps the bits in a different format */
691 master_data &= 0xccf8; 828 master_data &= 0xccf8;
692 /* enable PPE, IE and TIME */ 829 /* Enable PPE, IE and TIME as appropriate */
693 master_data |= 0x0007; 830 master_data |= control;
694 master_data |= 831 master_data |=
695 (timings[pio][0] << 12) | 832 (timings[pio][0] << 12) |
696 (timings[pio][1] << 8); 833 (timings[pio][1] << 8);
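A worked example of the control nibble assembled above, with an illustrative helper name: a PIO4 ATA disk that needs IORDY gets TIME1, IE and PPE all set, i.e. control = 0x7, and its ISP/RTC pair comes from timings[4] = { 2, 3 }.

    /* Illustrative only -- mirrors the control-bit logic in piix_set_piomode(). */
    static u8 piix_pio_control(int pio, int needs_iordy, int is_disk)
    {
            u8 control = 0;

            if (pio >= 2)
                    control |= 1;   /* TIME1: fast timing enable */
            if (needs_iordy)
                    control |= 2;   /* IE: IORDY sampling */
            if (is_disk)
                    control |= 4;   /* PPE: prefetch/posting, disks only */

            return control;         /* PIO4 disk with IORDY -> 0x7 */
    }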
@@ -698,13 +835,23 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
698 pci_write_config_word(dev, master_port, master_data); 835 pci_write_config_word(dev, master_port, master_data);
699 if (is_slave) 836 if (is_slave)
700 pci_write_config_byte(dev, slave_port, slave_data); 837 pci_write_config_byte(dev, slave_port, slave_data);
838
839 /* Ensure the UDMA bit is off - it will be turned back on if
840 UDMA is selected */
841
842 if (ap->udma_mask) {
843 pci_read_config_byte(dev, 0x48, &udma_enable);
844 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
845 pci_write_config_byte(dev, 0x48, udma_enable);
846 }
701} 847}
702 848
703/** 849/**
704 * piix_set_dmamode - Initialize host controller PATA PIO timings 850 * do_pata_set_dmamode - Initialize host controller PATA PIO timings
705 * @ap: Port whose timings we are configuring 851 * @ap: Port whose timings we are configuring
706 * @adev: um 852 * @adev: Drive in question
707 * @udma: udma mode, 0 - 6 853 * @udma: udma mode, 0 - 6
854 * @is_ich: set if the chip is an ICH device
708 * 855 *
709 * Set UDMA mode for device, in host controller PCI config space. 856 * Set UDMA mode for device, in host controller PCI config space.
710 * 857 *
@@ -712,70 +859,140 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
712 * None (inherited from caller). 859 * None (inherited from caller).
713 */ 860 */
714 861
715static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) 862static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich)
716{ 863{
717 unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */ 864 struct pci_dev *dev = to_pci_dev(ap->host->dev);
718 struct pci_dev *dev = to_pci_dev(ap->host_set->dev); 865 u8 master_port = ap->port_no ? 0x42 : 0x40;
719 u8 maslave = ap->hard_port_no ? 0x42 : 0x40; 866 u16 master_data;
720 u8 speed = udma; 867 u8 speed = adev->dma_mode;
721 unsigned int drive_dn = (ap->hard_port_no ? 2 : 0) + adev->devno; 868 int devid = adev->devno + 2 * ap->port_no;
722 int a_speed = 3 << (drive_dn * 4); 869 u8 udma_enable;
723 int u_flag = 1 << drive_dn; 870
724 int v_flag = 0x01 << drive_dn; 871 static const /* ISP RTC */
725 int w_flag = 0x10 << drive_dn; 872 u8 timings[][2] = { { 0, 0 },
726 int u_speed = 0; 873 { 0, 0 },
727 int sitre; 874 { 1, 0 },
728 u16 reg4042, reg4a; 875 { 2, 1 },
729 u8 reg48, reg54, reg55; 876 { 2, 3 }, };
730 877
731 pci_read_config_word(dev, maslave, &reg4042); 878 pci_read_config_word(dev, master_port, &master_data);
732 DPRINTK("reg4042 = 0x%04x\n", reg4042); 879 pci_read_config_byte(dev, 0x48, &udma_enable);
733 sitre = (reg4042 & 0x4000) ? 1 : 0;
734 pci_read_config_byte(dev, 0x48, &reg48);
735 pci_read_config_word(dev, 0x4a, &reg4a);
736 pci_read_config_byte(dev, 0x54, &reg54);
737 pci_read_config_byte(dev, 0x55, &reg55);
738
739 switch(speed) {
740 case XFER_UDMA_4:
741 case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
742 case XFER_UDMA_6:
743 case XFER_UDMA_5:
744 case XFER_UDMA_3:
745 case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
746 case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
747 case XFER_MW_DMA_2:
748 case XFER_MW_DMA_1: break;
749 default:
750 BUG();
751 return;
752 }
753 880
754 if (speed >= XFER_UDMA_0) { 881 if (speed >= XFER_UDMA_0) {
755 if (!(reg48 & u_flag)) 882 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
756 pci_write_config_byte(dev, 0x48, reg48 | u_flag); 883 u16 udma_timing;
757 if (speed == XFER_UDMA_5) { 884 u16 ideconf;
758 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); 885 int u_clock, u_speed;
759 } else { 886
760 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 887 /*
888 * UDMA is handled by a combination of clock switching and
889 * selection of dividers
890 *
891 * Handy rule: Odd modes are UDMATIMx 01, even are 02
892 * except UDMA0 which is 00
893 */
894 u_speed = min(2 - (udma & 1), udma);
895 if (udma == 5)
 896 u_clock = 0x1000; /* 100MHz */
 897 else if (udma > 2)
 898 u_clock = 1; /* 66MHz */
 899 else
 900 u_clock = 0; /* 33MHz */
901
902 udma_enable |= (1 << devid);
903
904 /* Load the CT/RP selection */
905 pci_read_config_word(dev, 0x4A, &udma_timing);
906 udma_timing &= ~(3 << (4 * devid));
907 udma_timing |= u_speed << (4 * devid);
908 pci_write_config_word(dev, 0x4A, udma_timing);
909
910 if (isich) {
 911 /* Select a 33/66/100MHz clock */
912 pci_read_config_word(dev, 0x54, &ideconf);
913 ideconf &= ~(0x1001 << devid);
914 ideconf |= u_clock << devid;
915 /* For ICH or later we should set bit 10 for better
916 performance (WR_PingPong_En) */
917 pci_write_config_word(dev, 0x54, ideconf);
761 } 918 }
762 if ((reg4a & a_speed) != u_speed)
763 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
764 if (speed > XFER_UDMA_2) {
765 if (!(reg54 & v_flag))
766 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
767 } else
768 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
769 } else { 919 } else {
770 if (reg48 & u_flag) 920 /*
771 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); 921 * MWDMA is driven by the PIO timings. We must also enable
772 if (reg4a & a_speed) 922 * IORDY unconditionally along with TIME1. PPE has already
773 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); 923 * been set when the PIO timing was set.
774 if (reg54 & v_flag) 924 */
775 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); 925 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
776 if (reg55 & w_flag) 926 unsigned int control;
777 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 927 u8 slave_data;
928 const unsigned int needed_pio[3] = {
929 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
930 };
931 int pio = needed_pio[mwdma] - XFER_PIO_0;
932
933 control = 3; /* IORDY|TIME1 */
934
935 /* If the drive MWDMA is faster than it can do PIO then
936 we must force PIO into PIO0 */
937
938 if (adev->pio_mode < needed_pio[mwdma])
939 /* Enable DMA timing only */
940 control |= 8; /* PIO cycles in PIO0 */
941
942 if (adev->devno) { /* Slave */
943 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
944 master_data |= control << 4;
945 pci_read_config_byte(dev, 0x44, &slave_data);
946 slave_data &= (0x0F + 0xE1 * ap->port_no);
947 /* Load the matching timing */
948 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
949 pci_write_config_byte(dev, 0x44, slave_data);
950 } else { /* Master */
951 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
952 and master timing bits */
953 master_data |= control;
954 master_data |=
955 (timings[pio][0] << 12) |
956 (timings[pio][1] << 8);
957 }
958 udma_enable &= ~(1 << devid);
959 pci_write_config_word(dev, master_port, master_data);
778 } 960 }
961 /* Don't scribble on 0x48 if the controller does not support UDMA */
962 if (ap->udma_mask)
963 pci_write_config_byte(dev, 0x48, udma_enable);
964}
965
966/**
967 * piix_set_dmamode - Initialize host controller PATA DMA timings
968 * @ap: Port whose timings we are configuring
 969 * @adev: Drive in question
970 *
971 * Set MW/UDMA mode for device, in host controller PCI config space.
972 *
973 * LOCKING:
974 * None (inherited from caller).
975 */
976
977static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
978{
979 do_pata_set_dmamode(ap, adev, 0);
980}
981
982/**
983 * ich_set_dmamode - Initialize host controller PATA DMA timings
984 * @ap: Port whose timings we are configuring
 985 * @adev: Drive in question
986 *
987 * Set MW/UDMA mode for device, in host controller PCI config space.
988 *
989 * LOCKING:
990 * None (inherited from caller).
991 */
992
993static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
994{
995 do_pata_set_dmamode(ap, adev, 1);
779} 996}
780 997
781#define AHCI_PCI_BAR 5 998#define AHCI_PCI_BAR 5
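A worked example of the UDMA rule used by do_pata_set_dmamode() above, with an illustrative helper name: UDMA0 selects CT/RP field 0 on the 33 MHz base clock, UDMA2 selects 2 at 33 MHz, UDMA4 selects 2 at 66 MHz, and UDMA5 selects 1 at 100 MHz.

    /* Illustrative only -- per-mode CT/RP and base-clock selection. */
    static void piix_udma_fields(unsigned int udma, int *u_speed, int *u_clock)
    {
            /* odd modes -> 01, even -> 02, except UDMA0 -> 00 */
            *u_speed = udma ? (2 - (udma & 1)) : 0;

            if (udma == 5)
                    *u_clock = 0x1000;      /* 100 MHz */
            else if (udma > 2)
                    *u_clock = 1;           /* 66 MHz */
            else
                    *u_clock = 0;           /* 33 MHz */
    }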
@@ -867,13 +1084,13 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
867 if (force_pcs == 1) { 1084 if (force_pcs == 1) {
868 dev_printk(KERN_INFO, &pdev->dev, 1085 dev_printk(KERN_INFO, &pdev->dev,
869 "force ignoring PCS (0x%x)\n", new_pcs); 1086 "force ignoring PCS (0x%x)\n", new_pcs);
870 pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; 1087 pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
871 pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; 1088 pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
872 } else if (force_pcs == 2) { 1089 } else if (force_pcs == 2) {
873 dev_printk(KERN_INFO, &pdev->dev, 1090 dev_printk(KERN_INFO, &pdev->dev,
874 "force honoring PCS (0x%x)\n", new_pcs); 1091 "force honoring PCS (0x%x)\n", new_pcs);
875 pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; 1092 pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
876 pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; 1093 pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
877 } 1094 }
878} 1095}
879 1096
@@ -904,7 +1121,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
904 1121
905 case IDE: 1122 case IDE:
906 WARN_ON((i & 1) || map[i + 1] != IDE); 1123 WARN_ON((i & 1) || map[i + 1] != IDE);
907 pinfo[i / 2] = piix_port_info[ich5_pata]; 1124 pinfo[i / 2] = piix_port_info[ich_pata_100];
908 pinfo[i / 2].private_data = hpriv; 1125 pinfo[i / 2].private_data = hpriv;
909 i++; 1126 i++;
910 printk(" IDE IDE"); 1127 printk(" IDE IDE");
@@ -913,7 +1130,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
913 default: 1130 default:
914 printk(" P%d", map[i]); 1131 printk(" P%d", map[i]);
915 if (i & 1) 1132 if (i & 1)
916 pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS; 1133 pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
917 break; 1134 break;
918 } 1135 }
919 } 1136 }
@@ -948,7 +1165,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
948 struct ata_port_info port_info[2]; 1165 struct ata_port_info port_info[2];
949 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 1166 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
950 struct piix_host_priv *hpriv; 1167 struct piix_host_priv *hpriv;
951 unsigned long host_flags; 1168 unsigned long port_flags;
952 1169
953 if (!printed_version++) 1170 if (!printed_version++)
954 dev_printk(KERN_DEBUG, &pdev->dev, 1171 dev_printk(KERN_DEBUG, &pdev->dev,
@@ -967,9 +1184,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
967 port_info[0].private_data = hpriv; 1184 port_info[0].private_data = hpriv;
968 port_info[1].private_data = hpriv; 1185 port_info[1].private_data = hpriv;
969 1186
970 host_flags = port_info[0].host_flags; 1187 port_flags = port_info[0].flags;
971 1188
972 if (host_flags & PIIX_FLAG_AHCI) { 1189 if (port_flags & PIIX_FLAG_AHCI) {
973 u8 tmp; 1190 u8 tmp;
974 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 1191 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
975 if (tmp == PIIX_AHCI_DEVICE) { 1192 if (tmp == PIIX_AHCI_DEVICE) {
@@ -980,7 +1197,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
980 } 1197 }
981 1198
982 /* Initialize SATA map */ 1199 /* Initialize SATA map */
983 if (host_flags & ATA_FLAG_SATA) { 1200 if (port_flags & ATA_FLAG_SATA) {
984 piix_init_sata_map(pdev, port_info, 1201 piix_init_sata_map(pdev, port_info,
985 piix_map_db_table[ent->driver_data]); 1202 piix_map_db_table[ent->driver_data]);
986 piix_init_pcs(pdev, port_info, 1203 piix_init_pcs(pdev, port_info,
@@ -993,7 +1210,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
993 * MSI is disabled (and it is disabled, as we don't use 1210 * MSI is disabled (and it is disabled, as we don't use
994 * message-signalled interrupts currently). 1211 * message-signalled interrupts currently).
995 */ 1212 */
996 if (host_flags & PIIX_FLAG_CHECKINTR) 1213 if (port_flags & PIIX_FLAG_CHECKINTR)
997 pci_intx(pdev, 1); 1214 pci_intx(pdev, 1);
998 1215
999 if (piix_check_450nx_errata(pdev)) { 1216 if (piix_check_450nx_errata(pdev)) {
@@ -1008,19 +1225,21 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1008 return ata_pci_init_one(pdev, ppinfo, 2); 1225 return ata_pci_init_one(pdev, ppinfo, 2);
1009} 1226}
1010 1227
1011static void piix_host_stop(struct ata_host_set *host_set) 1228static void piix_host_stop(struct ata_host *host)
1012{ 1229{
1013 if (host_set->next == NULL) 1230 struct piix_host_priv *hpriv = host->private_data;
1014 kfree(host_set->private_data); 1231
1015 ata_host_stop(host_set); 1232 ata_host_stop(host);
1233
1234 kfree(hpriv);
1016} 1235}
1017 1236
1018static int __init piix_init(void) 1237static int __init piix_init(void)
1019{ 1238{
1020 int rc; 1239 int rc;
1021 1240
1022 DPRINTK("pci_module_init\n"); 1241 DPRINTK("pci_register_driver\n");
1023 rc = pci_module_init(&piix_pci_driver); 1242 rc = pci_register_driver(&piix_pci_driver);
1024 if (rc) 1243 if (rc)
1025 return rc; 1244 return rc;
1026 1245
@@ -1037,4 +1256,3 @@ static void __exit piix_exit(void)
1037 1256
1038module_init(piix_init); 1257module_init(piix_init);
1039module_exit(piix_exit); 1258module_exit(piix_exit);
1040
diff --git a/drivers/scsi/libata-core.c b/drivers/ata/libata-core.c
index 427b73a3886a..753b0152afd1 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -50,7 +50,6 @@
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/scatterlist.h> 51#include <linux/scatterlist.h>
52#include <scsi/scsi.h> 52#include <scsi/scsi.h>
53#include "scsi_priv.h"
54#include <scsi/scsi_cmnd.h> 53#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_host.h> 54#include <scsi/scsi_host.h>
56#include <linux/libata.h> 55#include <linux/libata.h>
@@ -387,9 +386,13 @@ static const char *ata_mode_string(unsigned int xfer_mask)
387 "PIO2", 386 "PIO2",
388 "PIO3", 387 "PIO3",
389 "PIO4", 388 "PIO4",
389 "PIO5",
390 "PIO6",
390 "MWDMA0", 391 "MWDMA0",
391 "MWDMA1", 392 "MWDMA1",
392 "MWDMA2", 393 "MWDMA2",
394 "MWDMA3",
395 "MWDMA4",
393 "UDMA/16", 396 "UDMA/16",
394 "UDMA/25", 397 "UDMA/25",
395 "UDMA/33", 398 "UDMA/33",
@@ -613,8 +616,11 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
613 if (r_err) 616 if (r_err)
614 *r_err = err; 617 *r_err = err;
615 618
616 /* see if device passed diags */ 619 /* see if device passed diags: if master then continue and warn later */
617 if (err == 1) 620 if (err == 0 && device == 0)
621 /* diagnostic fail : do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
623 else if (err == 1)
618 /* do nothing */ ; 624 /* do nothing */ ;
619 else if ((device == 0) && (err == 0x81)) 625 else if ((device == 0) && (err == 0x81))
620 /* do nothing */ ; 626 /* do nothing */ ;
@@ -876,6 +882,23 @@ static unsigned int ata_id_xfermask(const u16 *id)
876 882
877 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 883 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
878 884
885 if (ata_id_is_cfa(id)) {
886 /*
887 * Process compact flash extended modes
888 */
889 int pio = id[163] & 0x7;
890 int dma = (id[163] >> 3) & 7;
891
892 if (pio)
893 pio_mask |= (1 << 5);
894 if (pio > 1)
895 pio_mask |= (1 << 6);
896 if (dma)
897 mwdma_mask |= (1 << 3);
898 if (dma > 1)
899 mwdma_mask |= (1 << 4);
900 }
901
879 udma_mask = 0; 902 udma_mask = 0;
880 if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 903 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
881 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 904 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
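A worked example of the CompactFlash decode above, with an illustrative helper name: a card whose identify word 163 reports two advanced PIO modes and two advanced DMA modes gains PIO5, PIO6, MWDMA3 and MWDMA4 in its masks.

    /* Illustrative only -- mirrors the word-163 handling in ata_id_xfermask(). */
    static void cfa_extended_masks(u16 word163,
                                   unsigned int *pio_mask,
                                   unsigned int *mwdma_mask)
    {
            int pio = word163 & 0x7;        /* advanced PIO modes supported */
            int dma = (word163 >> 3) & 0x7; /* advanced MWDMA modes supported */

            if (pio)
                    *pio_mask |= (1 << 5);          /* PIO5 */
            if (pio > 1)
                    *pio_mask |= (1 << 6);          /* PIO6 */
            if (dma)
                    *mwdma_mask |= (1 << 3);        /* MWDMA3 */
            if (dma > 1)
                    *mwdma_mask |= (1 << 4);        /* MWDMA4 */
    }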
@@ -1320,7 +1343,7 @@ static void ata_dev_config_ncq(struct ata_device *dev,
1320 } 1343 }
1321 1344
1322 if (ap->flags & ATA_FLAG_NCQ) { 1345 if (ap->flags & ATA_FLAG_NCQ) {
1323 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1); 1346 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1324 dev->flags |= ATA_DFLAG_NCQ; 1347 dev->flags |= ATA_DFLAG_NCQ;
1325 } 1348 }
1326 1349
@@ -1334,12 +1357,13 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
1334{ 1357{
1335 int i; 1358 int i;
1336 1359
1337 if (ap->host) { 1360 if (ap->scsi_host) {
1338 ap->host->max_cmd_len = 0; 1361 unsigned int len = 0;
1362
1339 for (i = 0; i < ATA_MAX_DEVICES; i++) 1363 for (i = 0; i < ATA_MAX_DEVICES; i++)
1340 ap->host->max_cmd_len = max_t(unsigned int, 1364 len = max(len, ap->device[i].cdb_len);
1341 ap->host->max_cmd_len, 1365
1342 ap->device[i].cdb_len); 1366 ap->scsi_host->max_cmd_len = len;
1343 } 1367 }
1344} 1368}
1345 1369
@@ -1362,6 +1386,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1362 struct ata_port *ap = dev->ap; 1386 struct ata_port *ap = dev->ap;
1363 const u16 *id = dev->id; 1387 const u16 *id = dev->id;
1364 unsigned int xfer_mask; 1388 unsigned int xfer_mask;
1389 char revbuf[7]; /* XYZ-99\0 */
1365 int rc; 1390 int rc;
1366 1391
1367 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1392 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
@@ -1405,6 +1430,15 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1405 1430
1406 /* ATA-specific feature tests */ 1431 /* ATA-specific feature tests */
1407 if (dev->class == ATA_DEV_ATA) { 1432 if (dev->class == ATA_DEV_ATA) {
1433 if (ata_id_is_cfa(id)) {
1434 if (id[162] & 1) /* CPRM may make this media unusable */
 1435 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1436 ap->id, dev->devno);
1437 snprintf(revbuf, 7, "CFA");
1438 }
1439 else
1440 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1441
1408 dev->n_sectors = ata_id_n_sectors(id); 1442 dev->n_sectors = ata_id_n_sectors(id);
1409 1443
1410 if (ata_id_has_lba(id)) { 1444 if (ata_id_has_lba(id)) {
@@ -1423,9 +1457,9 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1423 1457
1424 /* print device info to dmesg */ 1458 /* print device info to dmesg */
1425 if (ata_msg_drv(ap) && print_info) 1459 if (ata_msg_drv(ap) && print_info)
1426 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1460 ata_dev_printk(dev, KERN_INFO, "%s, "
1427 "max %s, %Lu sectors: %s %s\n", 1461 "max %s, %Lu sectors: %s %s\n",
1428 ata_id_major_version(id), 1462 revbuf,
1429 ata_mode_string(xfer_mask), 1463 ata_mode_string(xfer_mask),
1430 (unsigned long long)dev->n_sectors, 1464 (unsigned long long)dev->n_sectors,
1431 lba_desc, ncq_desc); 1465 lba_desc, ncq_desc);
@@ -1446,9 +1480,9 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1446 1480
1447 /* print device info to dmesg */ 1481 /* print device info to dmesg */
1448 if (ata_msg_drv(ap) && print_info) 1482 if (ata_msg_drv(ap) && print_info)
1449 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1483 ata_dev_printk(dev, KERN_INFO, "%s, "
1450 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1484 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1451 ata_id_major_version(id), 1485 revbuf,
1452 ata_mode_string(xfer_mask), 1486 ata_mode_string(xfer_mask),
1453 (unsigned long long)dev->n_sectors, 1487 (unsigned long long)dev->n_sectors,
1454 dev->cylinders, dev->heads, 1488 dev->cylinders, dev->heads,
@@ -1492,6 +1526,18 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1492 cdb_intr_string); 1526 cdb_intr_string);
1493 } 1527 }
1494 1528
1529 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1530 /* Let the user know. We don't want to disallow opens for
1531 rescue purposes, or in case the vendor is just a blithering
1532 idiot */
1533 if (print_info) {
1534 ata_dev_printk(dev, KERN_WARNING,
1535"Drive reports diagnostics failure. This may indicate a drive\n");
1536 ata_dev_printk(dev, KERN_WARNING,
1537"fault or invalid emulation. Contact drive vendor for information.\n");
1538 }
1539 }
1540
1495 ata_set_port_max_cmd_len(ap); 1541 ata_set_port_max_cmd_len(ap);
1496 1542
1497 /* limit bridge transfers to udma5, 200 sectors */ 1543 /* limit bridge transfers to udma5, 200 sectors */
@@ -1533,7 +1579,7 @@ err_out_nosup:
1533 * Zero on success, negative errno otherwise. 1579 * Zero on success, negative errno otherwise.
1534 */ 1580 */
1535 1581
1536static int ata_bus_probe(struct ata_port *ap) 1582int ata_bus_probe(struct ata_port *ap)
1537{ 1583{
1538 unsigned int classes[ATA_MAX_DEVICES]; 1584 unsigned int classes[ATA_MAX_DEVICES];
1539 int tries[ATA_MAX_DEVICES]; 1585 int tries[ATA_MAX_DEVICES];
@@ -1637,7 +1683,7 @@ static int ata_bus_probe(struct ata_port *ap)
1637 * Modify @ap data structure such that the system 1683 * Modify @ap data structure such that the system
1638 * thinks that the entire port is enabled. 1684 * thinks that the entire port is enabled.
1639 * 1685 *
1640 * LOCKING: host_set lock, or some other form of 1686 * LOCKING: host lock, or some other form of
1641 * serialization. 1687 * serialization.
1642 */ 1688 */
1643 1689
@@ -1775,7 +1821,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
1775 * never attempt to probe or communicate with devices 1821 * never attempt to probe or communicate with devices
1776 * on this port. 1822 * on this port.
1777 * 1823 *
1778 * LOCKING: host_set lock, or some other form of 1824 * LOCKING: host lock, or some other form of
1779 * serialization. 1825 * serialization.
1780 */ 1826 */
1781 1827
@@ -1906,10 +1952,11 @@ int sata_set_spd(struct ata_port *ap)
1906 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 1952 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1907 */ 1953 */
1908/* 1954/*
1909 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 1955 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1910 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 1956 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1911 * for PIO 5, which is a nonstandard extension and UDMA6, which 1957 * for UDMA6, which is currently supported only by Maxtor drives.
1912 * is currently supported only by Maxtor drives. 1958 *
1959 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
1913 */ 1960 */
1914 1961
1915static const struct ata_timing ata_timing[] = { 1962static const struct ata_timing ata_timing[] = {
@@ -1919,6 +1966,8 @@ static const struct ata_timing ata_timing[] = {
1919 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 1966 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1920 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 1967 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1921 1968
1969 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1970 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1922 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 1971 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1923 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 1972 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1924 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 1973 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
@@ -1933,7 +1982,8 @@ static const struct ata_timing ata_timing[] = {
1933 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 1982 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1934 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 1983 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1935 1984
1936/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */ 1985 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1986 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1937 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 1987 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1938 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 1988 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1939 1989
@@ -2229,8 +2279,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2229 /* Record simplex status. If we selected DMA then the other 2279 /* Record simplex status. If we selected DMA then the other
2230 * host channels are not permitted to do so. 2280 * host channels are not permitted to do so.
2231 */ 2281 */
2232 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) 2282 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2233 ap->host_set->simplex_claimed = 1; 2283 ap->host->simplex_claimed = 1;
2234 2284
2235 /* step5: chip specific finalisation */ 2285 /* step5: chip specific finalisation */
2236 if (ap->ops->post_set_mode) 2286 if (ap->ops->post_set_mode)
@@ -2252,7 +2302,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2252 * other threads. 2302 * other threads.
2253 * 2303 *
2254 * LOCKING: 2304 * LOCKING:
2255 * spin_lock_irqsave(host_set lock) 2305 * spin_lock_irqsave(host lock)
2256 */ 2306 */
2257 2307
2258static inline void ata_tf_to_host(struct ata_port *ap, 2308static inline void ata_tf_to_host(struct ata_port *ap,
@@ -2416,7 +2466,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2416 * 2466 *
2417 * LOCKING: 2467 * LOCKING:
2418 * PCI/etc. bus probe sem. 2468 * PCI/etc. bus probe sem.
2419 * Obtains host_set lock. 2469 * Obtains host lock.
2420 * 2470 *
2421 * SIDE EFFECTS: 2471 * SIDE EFFECTS:
2422 * Sets ATA_FLAG_DISABLED if bus reset fails. 2472 * Sets ATA_FLAG_DISABLED if bus reset fails.
@@ -3045,20 +3095,16 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
3045 * known limits including host controller limits, device 3095 * known limits including host controller limits, device
3046 * blacklist, etc... 3096 * blacklist, etc...
3047 * 3097 *
3048 * FIXME: The current implementation limits all transfer modes to
3049 * the fastest of the lowested device on the port. This is not
3050 * required on most controllers.
3051 *
3052 * LOCKING: 3098 * LOCKING:
3053 * None. 3099 * None.
3054 */ 3100 */
3055static void ata_dev_xfermask(struct ata_device *dev) 3101static void ata_dev_xfermask(struct ata_device *dev)
3056{ 3102{
3057 struct ata_port *ap = dev->ap; 3103 struct ata_port *ap = dev->ap;
3058 struct ata_host_set *hs = ap->host_set; 3104 struct ata_host *host = ap->host;
3059 unsigned long xfer_mask; 3105 unsigned long xfer_mask;
3060 int i;
3061 3106
3107 /* controller modes available */
3062 xfer_mask = ata_pack_xfermask(ap->pio_mask, 3108 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3063 ap->mwdma_mask, ap->udma_mask); 3109 ap->mwdma_mask, ap->udma_mask);
3064 3110
@@ -3068,34 +3114,31 @@ static void ata_dev_xfermask(struct ata_device *dev)
3068 if (ap->cbl == ATA_CBL_PATA40) 3114 if (ap->cbl == ATA_CBL_PATA40)
3069 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 3115 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3070 3116
3071 /* FIXME: Use port-wide xfermask for now */ 3117 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3072 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3118 dev->mwdma_mask, dev->udma_mask);
3073 struct ata_device *d = &ap->device[i]; 3119 xfer_mask &= ata_id_xfermask(dev->id);
3074 3120
3075 if (ata_dev_absent(d)) 3121 /*
3076 continue; 3122 * CFA Advanced TrueIDE timings are not allowed on a shared
3077 3123 * cable
3078 if (ata_dev_disabled(d)) { 3124 */
3079 /* to avoid violating device selection timing */ 3125 if (ata_dev_pair(dev)) {
3080 xfer_mask &= ata_pack_xfermask(d->pio_mask, 3126 /* No PIO5 or PIO6 */
3081 UINT_MAX, UINT_MAX); 3127 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3082 continue; 3128 /* No MWDMA3 or MWDMA 4 */
3083 } 3129 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3084
3085 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3086 d->mwdma_mask, d->udma_mask);
3087 xfer_mask &= ata_id_xfermask(d->id);
3088 if (ata_dma_blacklisted(d))
3089 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3090 } 3130 }
3091 3131
3092 if (ata_dma_blacklisted(dev)) 3132 if (ata_dma_blacklisted(dev)) {
3133 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3093 ata_dev_printk(dev, KERN_WARNING, 3134 ata_dev_printk(dev, KERN_WARNING,
3094 "device is on DMA blacklist, disabling DMA\n"); 3135 "device is on DMA blacklist, disabling DMA\n");
3136 }
3095 3137
3096 if (hs->flags & ATA_HOST_SIMPLEX) { 3138 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3097 if (hs->simplex_claimed) 3139 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3098 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3140 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3141 "other device, disabling DMA\n");
3099 } 3142 }
3100 3143
3101 if (ap->ops->mode_filter) 3144 if (ap->ops->mode_filter)
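The reworked per-device logic operates purely on packed transfer-mode masks, one bit per mode per class starting at ATA_SHIFT_PIO, ATA_SHIFT_MWDMA and ATA_SHIFT_UDMA. Restating the CFA clamp above in those terms (comment-only sketch of what the hunk already does):

	/* Bit (ATA_SHIFT_PIO + 5) is PIO5 and (ATA_SHIFT_PIO + 6) is PIO6,
	 * so clearing two adjacent bits removes both CFA-only PIO modes;
	 * likewise for MWDMA3/4.  ata_dev_pair() returns the other device
	 * sharing the cable, so the clamp only applies when the channel
	 * really is shared. */
	xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
	xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));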
@@ -3185,7 +3228,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
3185 * Unmap all mapped DMA memory associated with this command. 3228 * Unmap all mapped DMA memory associated with this command.
3186 * 3229 *
3187 * LOCKING: 3230 * LOCKING:
3188 * spin_lock_irqsave(host_set lock) 3231 * spin_lock_irqsave(host lock)
3189 */ 3232 */
3190 3233
3191static void ata_sg_clean(struct ata_queued_cmd *qc) 3234static void ata_sg_clean(struct ata_queued_cmd *qc)
@@ -3245,7 +3288,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
3245 * associated with the current disk command. 3288 * associated with the current disk command.
3246 * 3289 *
3247 * LOCKING: 3290 * LOCKING:
3248 * spin_lock_irqsave(host_set lock) 3291 * spin_lock_irqsave(host lock)
3249 * 3292 *
3250 */ 3293 */
3251static void ata_fill_sg(struct ata_queued_cmd *qc) 3294static void ata_fill_sg(struct ata_queued_cmd *qc)
@@ -3297,7 +3340,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
3297 * supplied PACKET command. 3340 * supplied PACKET command.
3298 * 3341 *
3299 * LOCKING: 3342 * LOCKING:
3300 * spin_lock_irqsave(host_set lock) 3343 * spin_lock_irqsave(host lock)
3301 * 3344 *
3302 * RETURNS: 0 when ATAPI DMA can be used 3345 * RETURNS: 0 when ATAPI DMA can be used
3303 * nonzero otherwise 3346 * nonzero otherwise
@@ -3319,7 +3362,7 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3319 * Prepare ATA taskfile for submission. 3362 * Prepare ATA taskfile for submission.
3320 * 3363 *
3321 * LOCKING: 3364 * LOCKING:
3322 * spin_lock_irqsave(host_set lock) 3365 * spin_lock_irqsave(host lock)
3323 */ 3366 */
3324void ata_qc_prep(struct ata_queued_cmd *qc) 3367void ata_qc_prep(struct ata_queued_cmd *qc)
3325{ 3368{
@@ -3341,7 +3384,7 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3341 * to point to a single memory buffer, @buf of byte length @buflen. 3384 * to point to a single memory buffer, @buf of byte length @buflen.
3342 * 3385 *
3343 * LOCKING: 3386 * LOCKING:
3344 * spin_lock_irqsave(host_set lock) 3387 * spin_lock_irqsave(host lock)
3345 */ 3388 */
3346 3389
3347void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 3390void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
@@ -3372,7 +3415,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3372 * elements. 3415 * elements.
3373 * 3416 *
3374 * LOCKING: 3417 * LOCKING:
3375 * spin_lock_irqsave(host_set lock) 3418 * spin_lock_irqsave(host lock)
3376 */ 3419 */
3377 3420
3378void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 3421void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@@ -3391,7 +3434,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3391 * DMA-map the memory buffer associated with queued_cmd @qc. 3434 * DMA-map the memory buffer associated with queued_cmd @qc.
3392 * 3435 *
3393 * LOCKING: 3436 * LOCKING:
3394 * spin_lock_irqsave(host_set lock) 3437 * spin_lock_irqsave(host lock)
3395 * 3438 *
3396 * RETURNS: 3439 * RETURNS:
3397 * Zero on success, negative on error. 3440 * Zero on success, negative on error.
@@ -3460,7 +3503,7 @@ skip_map:
3460 * DMA-map the scatter-gather table associated with queued_cmd @qc. 3503 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3461 * 3504 *
3462 * LOCKING: 3505 * LOCKING:
3463 * spin_lock_irqsave(host_set lock) 3506 * spin_lock_irqsave(host lock)
3464 * 3507 *
3465 * RETURNS: 3508 * RETURNS:
3466 * Zero on success, negative on error. 3509 * Zero on success, negative on error.
@@ -3969,7 +4012,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
3969 * Finish @qc which is running on standard HSM. 4012 * Finish @qc which is running on standard HSM.
3970 * 4013 *
3971 * LOCKING: 4014 * LOCKING:
3972 * If @in_wq is zero, spin_lock_irqsave(host_set lock). 4015 * If @in_wq is zero, spin_lock_irqsave(host lock).
3973 * Otherwise, none on entry and grabs host lock. 4016 * Otherwise, none on entry and grabs host lock.
3974 */ 4017 */
3975static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 4018static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
@@ -3981,8 +4024,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3981 if (in_wq) { 4024 if (in_wq) {
3982 spin_lock_irqsave(ap->lock, flags); 4025 spin_lock_irqsave(ap->lock, flags);
3983 4026
3984 /* EH might have kicked in while host_set lock 4027 /* EH might have kicked in while host lock is
3985 * is released. 4028 * released.
3986 */ 4029 */
3987 qc = ata_qc_from_tag(ap, qc->tag); 4030 qc = ata_qc_from_tag(ap, qc->tag);
3988 if (qc) { 4031 if (qc) {
@@ -4347,7 +4390,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4347 * in case something prevents using it. 4390 * in case something prevents using it.
4348 * 4391 *
4349 * LOCKING: 4392 * LOCKING:
4350 * spin_lock_irqsave(host_set lock) 4393 * spin_lock_irqsave(host lock)
4351 */ 4394 */
4352void ata_qc_free(struct ata_queued_cmd *qc) 4395void ata_qc_free(struct ata_queued_cmd *qc)
4353{ 4396{
@@ -4400,7 +4443,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4400 * command has completed, with either an ok or not-ok status. 4443 * command has completed, with either an ok or not-ok status.
4401 * 4444 *
4402 * LOCKING: 4445 * LOCKING:
4403 * spin_lock_irqsave(host_set lock) 4446 * spin_lock_irqsave(host lock)
4404 */ 4447 */
4405void ata_qc_complete(struct ata_queued_cmd *qc) 4448void ata_qc_complete(struct ata_queued_cmd *qc)
4406{ 4449{
@@ -4463,7 +4506,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4463 * and commands are completed accordingly. 4506 * and commands are completed accordingly.
4464 * 4507 *
4465 * LOCKING: 4508 * LOCKING:
4466 * spin_lock_irqsave(host_set lock) 4509 * spin_lock_irqsave(host lock)
4467 * 4510 *
4468 * RETURNS: 4511 * RETURNS:
4469 * Number of completed commands on success, -errno otherwise. 4512 * Number of completed commands on success, -errno otherwise.
@@ -4534,7 +4577,7 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4534 * writing the taskfile to hardware, starting the command. 4577 * writing the taskfile to hardware, starting the command.
4535 * 4578 *
4536 * LOCKING: 4579 * LOCKING:
4537 * spin_lock_irqsave(host_set lock) 4580 * spin_lock_irqsave(host lock)
4538 */ 4581 */
4539void ata_qc_issue(struct ata_queued_cmd *qc) 4582void ata_qc_issue(struct ata_queued_cmd *qc)
4540{ 4583{
@@ -4595,7 +4638,7 @@ err:
4595 * May be used as the qc_issue() entry in ata_port_operations. 4638 * May be used as the qc_issue() entry in ata_port_operations.
4596 * 4639 *
4597 * LOCKING: 4640 * LOCKING:
4598 * spin_lock_irqsave(host_set lock) 4641 * spin_lock_irqsave(host lock)
4599 * 4642 *
4600 * RETURNS: 4643 * RETURNS:
4601 * Zero on success, AC_ERR_* mask on failure 4644 * Zero on success, AC_ERR_* mask on failure
@@ -4724,7 +4767,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4724 * handled via polling with interrupts disabled (nIEN bit). 4767 * handled via polling with interrupts disabled (nIEN bit).
4725 * 4768 *
4726 * LOCKING: 4769 * LOCKING:
4727 * spin_lock_irqsave(host_set lock) 4770 * spin_lock_irqsave(host lock)
4728 * 4771 *
4729 * RETURNS: 4772 * RETURNS:
4730 * One if interrupt was handled, zero if not (shared irq). 4773 * One if interrupt was handled, zero if not (shared irq).
@@ -4811,14 +4854,14 @@ idle_irq:
4811/** 4854/**
4812 * ata_interrupt - Default ATA host interrupt handler 4855 * ata_interrupt - Default ATA host interrupt handler
4813 * @irq: irq line (unused) 4856 * @irq: irq line (unused)
4814 * @dev_instance: pointer to our ata_host_set information structure 4857 * @dev_instance: pointer to our ata_host information structure
4815 * @regs: unused 4858 * @regs: unused
4816 * 4859 *
4817 * Default interrupt handler for PCI IDE devices. Calls 4860 * Default interrupt handler for PCI IDE devices. Calls
4818 * ata_host_intr() for each port that is not disabled. 4861 * ata_host_intr() for each port that is not disabled.
4819 * 4862 *
4820 * LOCKING: 4863 * LOCKING:
4821 * Obtains host_set lock during operation. 4864 * Obtains host lock during operation.
4822 * 4865 *
4823 * RETURNS: 4866 * RETURNS:
4824 * IRQ_NONE or IRQ_HANDLED. 4867 * IRQ_NONE or IRQ_HANDLED.
@@ -4826,18 +4869,18 @@ idle_irq:
4826 4869
4827irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 4870irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4828{ 4871{
4829 struct ata_host_set *host_set = dev_instance; 4872 struct ata_host *host = dev_instance;
4830 unsigned int i; 4873 unsigned int i;
4831 unsigned int handled = 0; 4874 unsigned int handled = 0;
4832 unsigned long flags; 4875 unsigned long flags;
4833 4876
4834 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 4877 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4835 spin_lock_irqsave(&host_set->lock, flags); 4878 spin_lock_irqsave(&host->lock, flags);
4836 4879
4837 for (i = 0; i < host_set->n_ports; i++) { 4880 for (i = 0; i < host->n_ports; i++) {
4838 struct ata_port *ap; 4881 struct ata_port *ap;
4839 4882
4840 ap = host_set->ports[i]; 4883 ap = host->ports[i];
4841 if (ap && 4884 if (ap &&
4842 !(ap->flags & ATA_FLAG_DISABLED)) { 4885 !(ap->flags & ATA_FLAG_DISABLED)) {
4843 struct ata_queued_cmd *qc; 4886 struct ata_queued_cmd *qc;
@@ -4849,7 +4892,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4849 } 4892 }
4850 } 4893 }
4851 4894
4852 spin_unlock_irqrestore(&host_set->lock, flags); 4895 spin_unlock_irqrestore(&host->lock, flags);
4853 4896
4854 return IRQ_RETVAL(handled); 4897 return IRQ_RETVAL(handled);
4855} 4898}
@@ -5014,15 +5057,15 @@ int ata_flush_cache(struct ata_device *dev)
5014 return 0; 5057 return 0;
5015} 5058}
5016 5059
5017static int ata_host_set_request_pm(struct ata_host_set *host_set, 5060static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5018 pm_message_t mesg, unsigned int action, 5061 unsigned int action, unsigned int ehi_flags,
5019 unsigned int ehi_flags, int wait) 5062 int wait)
5020{ 5063{
5021 unsigned long flags; 5064 unsigned long flags;
5022 int i, rc; 5065 int i, rc;
5023 5066
5024 for (i = 0; i < host_set->n_ports; i++) { 5067 for (i = 0; i < host->n_ports; i++) {
5025 struct ata_port *ap = host_set->ports[i]; 5068 struct ata_port *ap = host->ports[i];
5026 5069
5027 /* Previous resume operation might still be in 5070 /* Previous resume operation might still be in
5028 * progress. Wait for PM_PENDING to clear. 5071 * progress. Wait for PM_PENDING to clear.
@@ -5062,11 +5105,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
5062} 5105}
5063 5106
5064/** 5107/**
5065 * ata_host_set_suspend - suspend host_set 5108 * ata_host_suspend - suspend host
5066 * @host_set: host_set to suspend 5109 * @host: host to suspend
5067 * @mesg: PM message 5110 * @mesg: PM message
5068 * 5111 *
5069 * Suspend @host_set. Actual operation is performed by EH. This 5112 * Suspend @host. Actual operation is performed by EH. This
5070 * function requests EH to perform PM operations and waits for EH 5113 * function requests EH to perform PM operations and waits for EH
5071 * to finish. 5114 * to finish.
5072 * 5115 *
@@ -5076,11 +5119,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
5076 * RETURNS: 5119 * RETURNS:
5077 * 0 on success, -errno on failure. 5120 * 0 on success, -errno on failure.
5078 */ 5121 */
5079int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg) 5122int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5080{ 5123{
5081 int i, j, rc; 5124 int i, j, rc;
5082 5125
5083 rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1); 5126 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5084 if (rc) 5127 if (rc)
5085 goto fail; 5128 goto fail;
5086 5129
@@ -5088,8 +5131,8 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
5088 * This happens if hotplug occurs between completion of device 5131 * This happens if hotplug occurs between completion of device
5089 * suspension and here. 5132 * suspension and here.
5090 */ 5133 */
5091 for (i = 0; i < host_set->n_ports; i++) { 5134 for (i = 0; i < host->n_ports; i++) {
5092 struct ata_port *ap = host_set->ports[i]; 5135 struct ata_port *ap = host->ports[i];
5093 5136
5094 for (j = 0; j < ATA_MAX_DEVICES; j++) { 5137 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5095 struct ata_device *dev = &ap->device[j]; 5138 struct ata_device *dev = &ap->device[j];
@@ -5104,30 +5147,30 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
5104 } 5147 }
5105 } 5148 }
5106 5149
5107 host_set->dev->power.power_state = mesg; 5150 host->dev->power.power_state = mesg;
5108 return 0; 5151 return 0;
5109 5152
5110 fail: 5153 fail:
5111 ata_host_set_resume(host_set); 5154 ata_host_resume(host);
5112 return rc; 5155 return rc;
5113} 5156}
5114 5157
5115/** 5158/**
5116 * ata_host_set_resume - resume host_set 5159 * ata_host_resume - resume host
5117 * @host_set: host_set to resume 5160 * @host: host to resume
5118 * 5161 *
5119 * Resume @host_set. Actual operation is performed by EH. This 5162 * Resume @host. Actual operation is performed by EH. This
5120 * function requests EH to perform PM operations and returns. 5163 * function requests EH to perform PM operations and returns.
5121 * Note that all resume operations are performed parallely. 5164 * Note that all resume operations are performed parallely.
5122 * 5165 *
5123 * LOCKING: 5166 * LOCKING:
5124 * Kernel thread context (may sleep). 5167 * Kernel thread context (may sleep).
5125 */ 5168 */
5126void ata_host_set_resume(struct ata_host_set *host_set) 5169void ata_host_resume(struct ata_host *host)
5127{ 5170{
5128 ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET, 5171 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5129 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5172 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5130 host_set->dev->power.power_state = PMSG_ON; 5173 host->dev->power.power_state = PMSG_ON;
5131} 5174}
5132 5175
5133/** 5176/**
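The intended calling order is: hand the whole host to EH first, and only then touch controller power; resume is the mirror image. The PCI helpers reworked later in this diff are the canonical users. A minimal sketch for a hypothetical non-PCI controller driver (my_ctrl and its power helpers are made up):

static int my_ata_suspend(struct my_ctrl *ctrl, pm_message_t mesg)
{
	int rc;

	rc = ata_host_suspend(ctrl->host, mesg);	/* EH quiesces the devices */
	if (rc)
		return rc;
	my_ctrl_power_down(ctrl);			/* then cut controller power */
	return 0;
}

static int my_ata_resume(struct my_ctrl *ctrl)
{
	my_ctrl_power_up(ctrl);				/* controller first */
	ata_host_resume(ctrl->host);			/* then let EH revive the devices */
	return 0;
}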
@@ -5184,10 +5227,10 @@ void ata_port_stop (struct ata_port *ap)
5184 ata_pad_free(ap, dev); 5227 ata_pad_free(ap, dev);
5185} 5228}
5186 5229
5187void ata_host_stop (struct ata_host_set *host_set) 5230void ata_host_stop (struct ata_host *host)
5188{ 5231{
5189 if (host_set->mmio_base) 5232 if (host->mmio_base)
5190 iounmap(host_set->mmio_base); 5233 iounmap(host->mmio_base);
5191} 5234}
5192 5235
5193/** 5236/**
@@ -5209,7 +5252,7 @@ void ata_dev_init(struct ata_device *dev)
5209 5252
5210 /* High bits of dev->flags are used to record warm plug 5253 /* High bits of dev->flags are used to record warm plug
5211 * requests which occur asynchronously. Synchronize using 5254 * requests which occur asynchronously. Synchronize using
5212 * host_set lock. 5255 * host lock.
5213 */ 5256 */
5214 spin_lock_irqsave(ap->lock, flags); 5257 spin_lock_irqsave(ap->lock, flags);
5215 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5258 dev->flags &= ~ATA_DFLAG_INIT_MASK;
@@ -5223,46 +5266,42 @@ void ata_dev_init(struct ata_device *dev)
5223} 5266}
5224 5267
5225/** 5268/**
5226 * ata_host_init - Initialize an ata_port structure 5269 * ata_port_init - Initialize an ata_port structure
5227 * @ap: Structure to initialize 5270 * @ap: Structure to initialize
5228 * @host: associated SCSI mid-layer structure 5271 * @host: Collection of hosts to which @ap belongs
5229 * @host_set: Collection of hosts to which @ap belongs
5230 * @ent: Probe information provided by low-level driver 5272 * @ent: Probe information provided by low-level driver
5231 * @port_no: Port number associated with this ata_port 5273 * @port_no: Port number associated with this ata_port
5232 * 5274 *
5233 * Initialize a new ata_port structure, and its associated 5275 * Initialize a new ata_port structure.
5234 * scsi_host.
5235 * 5276 *
5236 * LOCKING: 5277 * LOCKING:
5237 * Inherited from caller. 5278 * Inherited from caller.
5238 */ 5279 */
5239static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 5280void ata_port_init(struct ata_port *ap, struct ata_host *host,
5240 struct ata_host_set *host_set, 5281 const struct ata_probe_ent *ent, unsigned int port_no)
5241 const struct ata_probe_ent *ent, unsigned int port_no)
5242{ 5282{
5243 unsigned int i; 5283 unsigned int i;
5244 5284
5245 host->max_id = 16; 5285 ap->lock = &host->lock;
5246 host->max_lun = 1;
5247 host->max_channel = 1;
5248 host->unique_id = ata_unique_id++;
5249 host->max_cmd_len = 12;
5250
5251 ap->lock = &host_set->lock;
5252 ap->flags = ATA_FLAG_DISABLED; 5286 ap->flags = ATA_FLAG_DISABLED;
5253 ap->id = host->unique_id; 5287 ap->id = ata_unique_id++;
5254 ap->host = host;
5255 ap->ctl = ATA_DEVCTL_OBS; 5288 ap->ctl = ATA_DEVCTL_OBS;
5256 ap->host_set = host_set; 5289 ap->host = host;
5257 ap->dev = ent->dev; 5290 ap->dev = ent->dev;
5258 ap->port_no = port_no; 5291 ap->port_no = port_no;
5259 ap->hard_port_no = 5292 if (port_no == 1 && ent->pinfo2) {
5260 ent->legacy_mode ? ent->hard_port_no : port_no; 5293 ap->pio_mask = ent->pinfo2->pio_mask;
5261 ap->pio_mask = ent->pio_mask; 5294 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5262 ap->mwdma_mask = ent->mwdma_mask; 5295 ap->udma_mask = ent->pinfo2->udma_mask;
5263 ap->udma_mask = ent->udma_mask; 5296 ap->flags |= ent->pinfo2->flags;
5264 ap->flags |= ent->host_flags; 5297 ap->ops = ent->pinfo2->port_ops;
5265 ap->ops = ent->port_ops; 5298 } else {
5299 ap->pio_mask = ent->pio_mask;
5300 ap->mwdma_mask = ent->mwdma_mask;
5301 ap->udma_mask = ent->udma_mask;
5302 ap->flags |= ent->port_flags;
5303 ap->ops = ent->port_ops;
5304 }
5266 ap->hw_sata_spd_limit = UINT_MAX; 5305 ap->hw_sata_spd_limit = UINT_MAX;
5267 ap->active_tag = ATA_TAG_POISON; 5306 ap->active_tag = ATA_TAG_POISON;
5268 ap->last_ctl = 0xFF; 5307 ap->last_ctl = 0xFF;
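The new pinfo2 member exists for mixed-capability legacy hardware where the secondary channel must be driven differently from the primary: port 0 keeps the masks and ops copied into the probe entry itself, while port 1 takes them from pinfo2 when it is set. Hypothetical probe fragment (everything except the pinfo2 field is made up):

	static struct ata_port_info my_secondary_info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,			/* PIO0-4 only on channel 1 */
		.port_ops	= &my_secondary_ops,	/* hypothetical */
	};
	...
	probe_ent->pinfo2 = &my_secondary_info;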
@@ -5303,9 +5342,30 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5303} 5342}
5304 5343
5305/** 5344/**
5306 * ata_host_add - Attach low-level ATA driver to system 5345 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5346 * @ap: ATA port to initialize SCSI host for
5347 * @shost: SCSI host associated with @ap
5348 *
5349 * Initialize SCSI host @shost associated with ATA port @ap.
5350 *
5351 * LOCKING:
5352 * Inherited from caller.
5353 */
5354static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5355{
5356 ap->scsi_host = shost;
5357
5358 shost->unique_id = ap->id;
5359 shost->max_id = 16;
5360 shost->max_lun = 1;
5361 shost->max_channel = 1;
5362 shost->max_cmd_len = 12;
5363}
5364
5365/**
5366 * ata_port_add - Attach low-level ATA driver to system
5307 * @ent: Information provided by low-level driver 5367 * @ent: Information provided by low-level driver
5308 * @host_set: Collections of ports to which we add 5368 * @host: Collections of ports to which we add
5309 * @port_no: Port number associated with this host 5369 * @port_no: Port number associated with this host
5310 * 5370 *
5311 * Attach low-level ATA driver to system. 5371 * Attach low-level ATA driver to system.
@@ -5316,43 +5376,55 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5316 * RETURNS: 5376 * RETURNS:
5317 * New ata_port on success, for NULL on error. 5377 * New ata_port on success, for NULL on error.
5318 */ 5378 */
5319 5379static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5320static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, 5380 struct ata_host *host,
5321 struct ata_host_set *host_set,
5322 unsigned int port_no) 5381 unsigned int port_no)
5323{ 5382{
5324 struct Scsi_Host *host; 5383 struct Scsi_Host *shost;
5325 struct ata_port *ap; 5384 struct ata_port *ap;
5326 int rc;
5327 5385
5328 DPRINTK("ENTER\n"); 5386 DPRINTK("ENTER\n");
5329 5387
5330 if (!ent->port_ops->error_handler && 5388 if (!ent->port_ops->error_handler &&
5331 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5389 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5332 printk(KERN_ERR "ata%u: no reset mechanism available\n", 5390 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5333 port_no); 5391 port_no);
5334 return NULL; 5392 return NULL;
5335 } 5393 }
5336 5394
5337 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port)); 5395 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5338 if (!host) 5396 if (!shost)
5339 return NULL; 5397 return NULL;
5340 5398
5341 host->transportt = &ata_scsi_transport_template; 5399 shost->transportt = &ata_scsi_transport_template;
5342 5400
5343 ap = ata_shost_to_port(host); 5401 ap = ata_shost_to_port(shost);
5344 5402
5345 ata_host_init(ap, host, host_set, ent, port_no); 5403 ata_port_init(ap, host, ent, port_no);
5346 5404 ata_port_init_shost(ap, shost);
5347 rc = ap->ops->port_start(ap);
5348 if (rc)
5349 goto err_out;
5350 5405
5351 return ap; 5406 return ap;
5407}
5352 5408
5353err_out: 5409/**
5354 scsi_host_put(host); 5410 * ata_sas_host_init - Initialize a host struct
5355 return NULL; 5411 * @host: host to initialize
5412 * @dev: device host is attached to
5413 * @flags: host flags
5414 * @ops: port_ops
5415 *
5416 * LOCKING:
5417 * PCI/etc. bus probe sem.
5418 *
5419 */
5420
5421void ata_host_init(struct ata_host *host, struct device *dev,
5422 unsigned long flags, const struct ata_port_operations *ops)
5423{
5424 spin_lock_init(&host->lock);
5425 host->dev = dev;
5426 host->flags = flags;
5427 host->ops = ops;
5356} 5428}
5357 5429
5358/** 5430/**
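Factoring ata_host_init() out like this (and exporting it, per the new EXPORT_SYMBOL_GPL further down) lets drivers that never build an ata_probe_ent — SAS LLDs with embedded ATA ports being the expected case, going by the kerneldoc above — set up the shared fields themselves. Hypothetical fragment:

	/* The LLD owns the allocation; only the common fields come from
	 * libata.  Names other than ata_host_init() are made up. */
	struct ata_host *host;

	host = kzalloc(sizeof(*host) + n_ports * sizeof(void *), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	ata_host_init(host, &pdev->dev, 0 /* no host-wide flags */, &my_sas_ata_ops);
	host->n_ports = n_ports;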
@@ -5375,78 +5447,106 @@ err_out:
5375 */ 5447 */
5376int ata_device_add(const struct ata_probe_ent *ent) 5448int ata_device_add(const struct ata_probe_ent *ent)
5377{ 5449{
5378 unsigned int count = 0, i; 5450 unsigned int i;
5379 struct device *dev = ent->dev; 5451 struct device *dev = ent->dev;
5380 struct ata_host_set *host_set; 5452 struct ata_host *host;
5381 int rc; 5453 int rc;
5382 5454
5383 DPRINTK("ENTER\n"); 5455 DPRINTK("ENTER\n");
5384 /* alloc a container for our list of ATA ports (buses) */ 5456 /* alloc a container for our list of ATA ports (buses) */
5385 host_set = kzalloc(sizeof(struct ata_host_set) + 5457 host = kzalloc(sizeof(struct ata_host) +
5386 (ent->n_ports * sizeof(void *)), GFP_KERNEL); 5458 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5387 if (!host_set) 5459 if (!host)
5388 return 0; 5460 return 0;
5389 spin_lock_init(&host_set->lock);
5390 5461
5391 host_set->dev = dev; 5462 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5392 host_set->n_ports = ent->n_ports; 5463 host->n_ports = ent->n_ports;
5393 host_set->irq = ent->irq; 5464 host->irq = ent->irq;
5394 host_set->mmio_base = ent->mmio_base; 5465 host->irq2 = ent->irq2;
5395 host_set->private_data = ent->private_data; 5466 host->mmio_base = ent->mmio_base;
5396 host_set->ops = ent->port_ops; 5467 host->private_data = ent->private_data;
5397 host_set->flags = ent->host_set_flags;
5398 5468
5399 /* register each port bound to this device */ 5469 /* register each port bound to this device */
5400 for (i = 0; i < ent->n_ports; i++) { 5470 for (i = 0; i < host->n_ports; i++) {
5401 struct ata_port *ap; 5471 struct ata_port *ap;
5402 unsigned long xfer_mode_mask; 5472 unsigned long xfer_mode_mask;
5473 int irq_line = ent->irq;
5403 5474
5404 ap = ata_host_add(ent, host_set, i); 5475 ap = ata_port_add(ent, host, i);
5405 if (!ap) 5476 if (!ap)
5406 goto err_out; 5477 goto err_out;
5407 5478
5408 host_set->ports[i] = ap; 5479 host->ports[i] = ap;
5480
5481 /* dummy? */
5482 if (ent->dummy_port_mask & (1 << i)) {
5483 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5484 ap->ops = &ata_dummy_port_ops;
5485 continue;
5486 }
5487
5488 /* start port */
5489 rc = ap->ops->port_start(ap);
5490 if (rc) {
5491 host->ports[i] = NULL;
5492 scsi_host_put(ap->scsi_host);
5493 goto err_out;
5494 }
5495
5496 /* Report the secondary IRQ for second channel legacy */
5497 if (i == 1 && ent->irq2)
5498 irq_line = ent->irq2;
5499
5409 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) | 5500 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5410 (ap->mwdma_mask << ATA_SHIFT_MWDMA) | 5501 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5411 (ap->pio_mask << ATA_SHIFT_PIO); 5502 (ap->pio_mask << ATA_SHIFT_PIO);
5412 5503
5413 /* print per-port info to dmesg */ 5504 /* print per-port info to dmesg */
5414 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX " 5505 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5415 "ctl 0x%lX bmdma 0x%lX irq %lu\n", 5506 "ctl 0x%lX bmdma 0x%lX irq %d\n",
5416 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5507 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5417 ata_mode_string(xfer_mode_mask), 5508 ata_mode_string(xfer_mode_mask),
5418 ap->ioaddr.cmd_addr, 5509 ap->ioaddr.cmd_addr,
5419 ap->ioaddr.ctl_addr, 5510 ap->ioaddr.ctl_addr,
5420 ap->ioaddr.bmdma_addr, 5511 ap->ioaddr.bmdma_addr,
5421 ent->irq); 5512 irq_line);
5422 5513
5423 ata_chk_status(ap); 5514 ata_chk_status(ap);
5424 host_set->ops->irq_clear(ap); 5515 host->ops->irq_clear(ap);
5425 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */ 5516 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5426 count++;
5427 } 5517 }
5428 5518
5429 if (!count) 5519 /* obtain irq, that may be shared between channels */
5430 goto err_free_ret;
5431
5432 /* obtain irq, that is shared between channels */
5433 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5520 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5434 DRV_NAME, host_set); 5521 DRV_NAME, host);
5435 if (rc) { 5522 if (rc) {
5436 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", 5523 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5437 ent->irq, rc); 5524 ent->irq, rc);
5438 goto err_out; 5525 goto err_out;
5439 } 5526 }
5440 5527
5528 /* do we have a second IRQ for the other channel, eg legacy mode */
5529 if (ent->irq2) {
5530 /* We will get weird core code crashes later if this is true
5531 so trap it now */
5532 BUG_ON(ent->irq == ent->irq2);
5533
5534 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
5535 DRV_NAME, host);
5536 if (rc) {
5537 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5538 ent->irq2, rc);
5539 goto err_out_free_irq;
5540 }
5541 }
5542
5441 /* perform each probe synchronously */ 5543 /* perform each probe synchronously */
5442 DPRINTK("probe begin\n"); 5544 DPRINTK("probe begin\n");
5443 for (i = 0; i < count; i++) { 5545 for (i = 0; i < host->n_ports; i++) {
5444 struct ata_port *ap; 5546 struct ata_port *ap = host->ports[i];
5445 u32 scontrol; 5547 u32 scontrol;
5446 int rc; 5548 int rc;
5447 5549
5448 ap = host_set->ports[i];
5449
5450 /* init sata_spd_limit to the current value */ 5550 /* init sata_spd_limit to the current value */
5451 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) { 5551 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5452 int spd = (scontrol >> 4) & 0xf; 5552 int spd = (scontrol >> 4) & 0xf;
@@ -5454,7 +5554,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
5454 } 5554 }
5455 ap->sata_spd_limit = ap->hw_sata_spd_limit; 5555 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5456 5556
5457 rc = scsi_add_host(ap->host, dev); 5557 rc = scsi_add_host(ap->scsi_host, dev);
5458 if (rc) { 5558 if (rc) {
5459 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n"); 5559 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5460 /* FIXME: do something useful here */ 5560 /* FIXME: do something useful here */
@@ -5502,27 +5602,29 @@ int ata_device_add(const struct ata_probe_ent *ent)
5502 5602
5503 /* probes are done, now scan each port's disk(s) */ 5603 /* probes are done, now scan each port's disk(s) */
5504 DPRINTK("host probe begin\n"); 5604 DPRINTK("host probe begin\n");
5505 for (i = 0; i < count; i++) { 5605 for (i = 0; i < host->n_ports; i++) {
5506 struct ata_port *ap = host_set->ports[i]; 5606 struct ata_port *ap = host->ports[i];
5507 5607
5508 ata_scsi_scan_host(ap); 5608 ata_scsi_scan_host(ap);
5509 } 5609 }
5510 5610
5511 dev_set_drvdata(dev, host_set); 5611 dev_set_drvdata(dev, host);
5512 5612
5513 VPRINTK("EXIT, returning %u\n", ent->n_ports); 5613 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5514 return ent->n_ports; /* success */ 5614 return ent->n_ports; /* success */
5515 5615
5616err_out_free_irq:
5617 free_irq(ent->irq, host);
5516err_out: 5618err_out:
5517 for (i = 0; i < count; i++) { 5619 for (i = 0; i < host->n_ports; i++) {
5518 struct ata_port *ap = host_set->ports[i]; 5620 struct ata_port *ap = host->ports[i];
5519 if (ap) { 5621 if (ap) {
5520 ap->ops->port_stop(ap); 5622 ap->ops->port_stop(ap);
5521 scsi_host_put(ap->host); 5623 scsi_host_put(ap->scsi_host);
5522 } 5624 }
5523 } 5625 }
5524err_free_ret: 5626
5525 kfree(host_set); 5627 kfree(host);
5526 VPRINTK("EXIT, returning 0\n"); 5628 VPRINTK("EXIT, returning 0\n");
5527 return 0; 5629 return 0;
5528} 5630}
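Two of the probe-entry fields introduced above deserve a note: dummy_port_mask lets a driver register a channel it cannot drive (the port is given ata_dummy_port_ops, defined near the end of this file), and irq2 carries the secondary interrupt of a legacy-mode controller. Hypothetical legacy-mode probe fragment:

	probe_ent->irq  = 14;			/* primary channel */
	probe_ent->irq2 = 15;			/* secondary channel; 0 if unused */

	if (!my_secondary_enabled(pdev))	/* hypothetical helper */
		probe_ent->dummy_port_mask |= 1 << 1;	/* park port 1 as a dummy */

	rc = ata_device_add(probe_ent);
	kfree(probe_ent);			/* callers in this era free the probe entry themselves */
	if (!rc)
		return -ENODEV;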
@@ -5582,12 +5684,12 @@ void ata_port_detach(struct ata_port *ap)
5582 5684
5583 skip_eh: 5685 skip_eh:
5584 /* remove the associated SCSI host */ 5686 /* remove the associated SCSI host */
5585 scsi_remove_host(ap->host); 5687 scsi_remove_host(ap->scsi_host);
5586} 5688}
5587 5689
5588/** 5690/**
5589 * ata_host_set_remove - PCI layer callback for device removal 5691 * ata_host_remove - PCI layer callback for device removal
5590 * @host_set: ATA host set that was removed 5692 * @host: ATA host set that was removed
5591 * 5693 *
5592 * Unregister all objects associated with this host set. Free those 5694 * Unregister all objects associated with this host set. Free those
5593 * objects. 5695 * objects.
@@ -5596,36 +5698,39 @@ void ata_port_detach(struct ata_port *ap)
5596 * Inherited from calling layer (may sleep). 5698 * Inherited from calling layer (may sleep).
5597 */ 5699 */
5598 5700
5599void ata_host_set_remove(struct ata_host_set *host_set) 5701void ata_host_remove(struct ata_host *host)
5600{ 5702{
5601 unsigned int i; 5703 unsigned int i;
5602 5704
5603 for (i = 0; i < host_set->n_ports; i++) 5705 for (i = 0; i < host->n_ports; i++)
5604 ata_port_detach(host_set->ports[i]); 5706 ata_port_detach(host->ports[i]);
5605 5707
5606 free_irq(host_set->irq, host_set); 5708 free_irq(host->irq, host);
5709 if (host->irq2)
5710 free_irq(host->irq2, host);
5607 5711
5608 for (i = 0; i < host_set->n_ports; i++) { 5712 for (i = 0; i < host->n_ports; i++) {
5609 struct ata_port *ap = host_set->ports[i]; 5713 struct ata_port *ap = host->ports[i];
5610 5714
5611 ata_scsi_release(ap->host); 5715 ata_scsi_release(ap->scsi_host);
5612 5716
5613 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { 5717 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5614 struct ata_ioports *ioaddr = &ap->ioaddr; 5718 struct ata_ioports *ioaddr = &ap->ioaddr;
5615 5719
5616 if (ioaddr->cmd_addr == 0x1f0) 5720 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5617 release_region(0x1f0, 8); 5721 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5618 else if (ioaddr->cmd_addr == 0x170) 5722 release_region(ATA_PRIMARY_CMD, 8);
5619 release_region(0x170, 8); 5723 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5724 release_region(ATA_SECONDARY_CMD, 8);
5620 } 5725 }
5621 5726
5622 scsi_host_put(ap->host); 5727 scsi_host_put(ap->scsi_host);
5623 } 5728 }
5624 5729
5625 if (host_set->ops->host_stop) 5730 if (host->ops->host_stop)
5626 host_set->ops->host_stop(host_set); 5731 host->ops->host_stop(host);
5627 5732
5628 kfree(host_set); 5733 kfree(host);
5629} 5734}
5630 5735
5631/** 5736/**
@@ -5642,9 +5747,9 @@ void ata_host_set_remove(struct ata_host_set *host_set)
5642 * One. 5747 * One.
5643 */ 5748 */
5644 5749
5645int ata_scsi_release(struct Scsi_Host *host) 5750int ata_scsi_release(struct Scsi_Host *shost)
5646{ 5751{
5647 struct ata_port *ap = ata_shost_to_port(host); 5752 struct ata_port *ap = ata_shost_to_port(shost);
5648 5753
5649 DPRINTK("ENTER\n"); 5754 DPRINTK("ENTER\n");
5650 5755
@@ -5655,6 +5760,31 @@ int ata_scsi_release(struct Scsi_Host *host)
5655 return 1; 5760 return 1;
5656} 5761}
5657 5762
5763struct ata_probe_ent *
5764ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5765{
5766 struct ata_probe_ent *probe_ent;
5767
5768 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5769 if (!probe_ent) {
5770 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5771 kobject_name(&(dev->kobj)));
5772 return NULL;
5773 }
5774
5775 INIT_LIST_HEAD(&probe_ent->node);
5776 probe_ent->dev = dev;
5777
5778 probe_ent->sht = port->sht;
5779 probe_ent->port_flags = port->flags;
5780 probe_ent->pio_mask = port->pio_mask;
5781 probe_ent->mwdma_mask = port->mwdma_mask;
5782 probe_ent->udma_mask = port->udma_mask;
5783 probe_ent->port_ops = port->port_ops;
5784
5785 return probe_ent;
5786}
5787
5658/** 5788/**
5659 * ata_std_ports - initialize ioaddr with standard port offsets. 5789 * ata_std_ports - initialize ioaddr with standard port offsets.
5660 * @ioaddr: IO address structure to be initialized 5790 * @ioaddr: IO address structure to be initialized
@@ -5684,11 +5814,11 @@ void ata_std_ports(struct ata_ioports *ioaddr)
5684 5814
5685#ifdef CONFIG_PCI 5815#ifdef CONFIG_PCI
5686 5816
5687void ata_pci_host_stop (struct ata_host_set *host_set) 5817void ata_pci_host_stop (struct ata_host *host)
5688{ 5818{
5689 struct pci_dev *pdev = to_pci_dev(host_set->dev); 5819 struct pci_dev *pdev = to_pci_dev(host->dev);
5690 5820
5691 pci_iounmap(pdev, host_set->mmio_base); 5821 pci_iounmap(pdev, host->mmio_base);
5692} 5822}
5693 5823
5694/** 5824/**
@@ -5708,12 +5838,9 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
5708void ata_pci_remove_one (struct pci_dev *pdev) 5838void ata_pci_remove_one (struct pci_dev *pdev)
5709{ 5839{
5710 struct device *dev = pci_dev_to_dev(pdev); 5840 struct device *dev = pci_dev_to_dev(pdev);
5711 struct ata_host_set *host_set = dev_get_drvdata(dev); 5841 struct ata_host *host = dev_get_drvdata(dev);
5712 struct ata_host_set *host_set2 = host_set->next;
5713 5842
5714 ata_host_set_remove(host_set); 5843 ata_host_remove(host);
5715 if (host_set2)
5716 ata_host_set_remove(host_set2);
5717 5844
5718 pci_release_regions(pdev); 5845 pci_release_regions(pdev);
5719 pci_disable_device(pdev); 5846 pci_disable_device(pdev);
@@ -5754,11 +5881,11 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5754 return (tmp == bits->val) ? 1 : 0; 5881 return (tmp == bits->val) ? 1 : 0;
5755} 5882}
5756 5883
5757void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state) 5884void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5758{ 5885{
5759 pci_save_state(pdev); 5886 pci_save_state(pdev);
5760 5887
5761 if (state.event == PM_EVENT_SUSPEND) { 5888 if (mesg.event == PM_EVENT_SUSPEND) {
5762 pci_disable_device(pdev); 5889 pci_disable_device(pdev);
5763 pci_set_power_state(pdev, PCI_D3hot); 5890 pci_set_power_state(pdev, PCI_D3hot);
5764 } 5891 }
@@ -5772,37 +5899,26 @@ void ata_pci_device_do_resume(struct pci_dev *pdev)
5772 pci_set_master(pdev); 5899 pci_set_master(pdev);
5773} 5900}
5774 5901
5775int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) 5902int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5776{ 5903{
5777 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 5904 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5778 int rc = 0; 5905 int rc = 0;
5779 5906
5780 rc = ata_host_set_suspend(host_set, state); 5907 rc = ata_host_suspend(host, mesg);
5781 if (rc) 5908 if (rc)
5782 return rc; 5909 return rc;
5783 5910
5784 if (host_set->next) { 5911 ata_pci_device_do_suspend(pdev, mesg);
5785 rc = ata_host_set_suspend(host_set->next, state);
5786 if (rc) {
5787 ata_host_set_resume(host_set);
5788 return rc;
5789 }
5790 }
5791
5792 ata_pci_device_do_suspend(pdev, state);
5793 5912
5794 return 0; 5913 return 0;
5795} 5914}
5796 5915
5797int ata_pci_device_resume(struct pci_dev *pdev) 5916int ata_pci_device_resume(struct pci_dev *pdev)
5798{ 5917{
5799 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 5918 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5800 5919
5801 ata_pci_device_do_resume(pdev); 5920 ata_pci_device_do_resume(pdev);
5802 ata_host_set_resume(host_set); 5921 ata_host_resume(host);
5803 if (host_set->next)
5804 ata_host_set_resume(host_set->next);
5805
5806 return 0; 5922 return 0;
5807} 5923}
5808#endif /* CONFIG_PCI */ 5924#endif /* CONFIG_PCI */
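With the host_set->next chaining gone, the PCI suspend/resume helpers can be plugged straight into a pci_driver by any libata PCI driver. Typical hookup (the driver name and tables are hypothetical; the callbacks are the ones in this file):

	static struct pci_driver my_pata_pci_driver = {
		.name		= "my_pata",
		.id_table	= my_pata_pci_ids,
		.probe		= my_pata_init_one,
		.remove		= ata_pci_remove_one,
		.suspend	= ata_pci_device_suspend,
		.resume		= ata_pci_device_resume,
	};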
@@ -5902,6 +6018,39 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5902} 6018}
5903 6019
5904/* 6020/*
6021 * Dummy port_ops
6022 */
6023static void ata_dummy_noret(struct ata_port *ap) { }
6024static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6025static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6026
6027static u8 ata_dummy_check_status(struct ata_port *ap)
6028{
6029 return ATA_DRDY;
6030}
6031
6032static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6033{
6034 return AC_ERR_SYSTEM;
6035}
6036
6037const struct ata_port_operations ata_dummy_port_ops = {
6038 .port_disable = ata_port_disable,
6039 .check_status = ata_dummy_check_status,
6040 .check_altstatus = ata_dummy_check_status,
6041 .dev_select = ata_noop_dev_select,
6042 .qc_prep = ata_noop_qc_prep,
6043 .qc_issue = ata_dummy_qc_issue,
6044 .freeze = ata_dummy_noret,
6045 .thaw = ata_dummy_noret,
6046 .error_handler = ata_dummy_noret,
6047 .post_internal_cmd = ata_dummy_qc_noret,
6048 .irq_clear = ata_dummy_noret,
6049 .port_start = ata_dummy_ret0,
6050 .port_stop = ata_dummy_noret,
6051};
6052
6053/*
5905 * libata is essentially a library of internal helper functions for 6054 * libata is essentially a library of internal helper functions for
5906 * low-level ATA host controller drivers. As such, the API/ABI is 6055 * low-level ATA host controller drivers. As such, the API/ABI is
5907 * likely to change as new drivers are added and updated. 6056 * likely to change as new drivers are added and updated.
@@ -5911,11 +6060,13 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5911EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6060EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
5912EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6061EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
5913EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6062EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6063EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
5914EXPORT_SYMBOL_GPL(ata_std_bios_param); 6064EXPORT_SYMBOL_GPL(ata_std_bios_param);
5915EXPORT_SYMBOL_GPL(ata_std_ports); 6065EXPORT_SYMBOL_GPL(ata_std_ports);
6066EXPORT_SYMBOL_GPL(ata_host_init);
5916EXPORT_SYMBOL_GPL(ata_device_add); 6067EXPORT_SYMBOL_GPL(ata_device_add);
5917EXPORT_SYMBOL_GPL(ata_port_detach); 6068EXPORT_SYMBOL_GPL(ata_port_detach);
5918EXPORT_SYMBOL_GPL(ata_host_set_remove); 6069EXPORT_SYMBOL_GPL(ata_host_remove);
5919EXPORT_SYMBOL_GPL(ata_sg_init); 6070EXPORT_SYMBOL_GPL(ata_sg_init);
5920EXPORT_SYMBOL_GPL(ata_sg_init_one); 6071EXPORT_SYMBOL_GPL(ata_sg_init_one);
5921EXPORT_SYMBOL_GPL(ata_hsm_move); 6072EXPORT_SYMBOL_GPL(ata_hsm_move);
@@ -5982,8 +6133,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
5982EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6133EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5983EXPORT_SYMBOL_GPL(ata_port_online); 6134EXPORT_SYMBOL_GPL(ata_port_online);
5984EXPORT_SYMBOL_GPL(ata_port_offline); 6135EXPORT_SYMBOL_GPL(ata_port_offline);
5985EXPORT_SYMBOL_GPL(ata_host_set_suspend); 6136EXPORT_SYMBOL_GPL(ata_host_suspend);
5986EXPORT_SYMBOL_GPL(ata_host_set_resume); 6137EXPORT_SYMBOL_GPL(ata_host_resume);
5987EXPORT_SYMBOL_GPL(ata_id_string); 6138EXPORT_SYMBOL_GPL(ata_id_string);
5988EXPORT_SYMBOL_GPL(ata_id_c_string); 6139EXPORT_SYMBOL_GPL(ata_id_c_string);
5989EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6140EXPORT_SYMBOL_GPL(ata_scsi_simulate);
diff --git a/drivers/scsi/libata-eh.c b/drivers/ata/libata-eh.c
index 2c34af99627d..3fa80f09f2ae 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -38,7 +38,7 @@
38#include <scsi/scsi_eh.h> 38#include <scsi/scsi_eh.h>
39#include <scsi/scsi_device.h> 39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h> 40#include <scsi/scsi_cmnd.h>
41#include "scsi_transport_api.h" 41#include "../scsi/scsi_transport_api.h"
42 42
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
@@ -199,7 +199,7 @@ void ata_scsi_error(struct Scsi_Host *host)
199 /* synchronize with port task */ 199 /* synchronize with port task */
200 ata_port_flush_task(ap); 200 ata_port_flush_task(ap);
201 201
202 /* synchronize with host_set lock and sort out timeouts */ 202 /* synchronize with host lock and sort out timeouts */
203 203
204 /* For new EH, all qcs are finished in one of three ways - 204 /* For new EH, all qcs are finished in one of three ways -
205 * normal completion, error completion, and SCSI timeout. 205 * normal completion, error completion, and SCSI timeout.
@@ -376,7 +376,7 @@ void ata_port_wait_eh(struct ata_port *ap)
376 spin_unlock_irqrestore(ap->lock, flags); 376 spin_unlock_irqrestore(ap->lock, flags);
377 377
378 /* make sure SCSI EH is complete */ 378 /* make sure SCSI EH is complete */
379 if (scsi_host_in_recovery(ap->host)) { 379 if (scsi_host_in_recovery(ap->scsi_host)) {
380 msleep(10); 380 msleep(10);
381 goto retry; 381 goto retry;
382 } 382 }
@@ -485,7 +485,7 @@ void ata_eng_timeout(struct ata_port *ap)
485 * other commands are drained. 485 * other commands are drained.
486 * 486 *
487 * LOCKING: 487 * LOCKING:
488 * spin_lock_irqsave(host_set lock) 488 * spin_lock_irqsave(host lock)
489 */ 489 */
490void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 490void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
491{ 491{
@@ -512,14 +512,14 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
512 * all commands are drained. 512 * all commands are drained.
513 * 513 *
514 * LOCKING: 514 * LOCKING:
515 * spin_lock_irqsave(host_set lock) 515 * spin_lock_irqsave(host lock)
516 */ 516 */
517void ata_port_schedule_eh(struct ata_port *ap) 517void ata_port_schedule_eh(struct ata_port *ap)
518{ 518{
519 WARN_ON(!ap->ops->error_handler); 519 WARN_ON(!ap->ops->error_handler);
520 520
521 ap->pflags |= ATA_PFLAG_EH_PENDING; 521 ap->pflags |= ATA_PFLAG_EH_PENDING;
522 scsi_schedule_eh(ap->host); 522 scsi_schedule_eh(ap->scsi_host);
523 523
524 DPRINTK("port EH scheduled\n"); 524 DPRINTK("port EH scheduled\n");
525} 525}
@@ -531,7 +531,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
531 * Abort all active qc's of @ap and schedule EH. 531 * Abort all active qc's of @ap and schedule EH.
532 * 532 *
533 * LOCKING: 533 * LOCKING:
534 * spin_lock_irqsave(host_set lock) 534 * spin_lock_irqsave(host lock)
535 * 535 *
536 * RETURNS: 536 * RETURNS:
537 * Number of aborted qc's. 537 * Number of aborted qc's.
@@ -574,7 +574,7 @@ int ata_port_abort(struct ata_port *ap)
574 * is frozen. 574 * is frozen.
575 * 575 *
576 * LOCKING: 576 * LOCKING:
577 * spin_lock_irqsave(host_set lock) 577 * spin_lock_irqsave(host lock)
578 */ 578 */
579static void __ata_port_freeze(struct ata_port *ap) 579static void __ata_port_freeze(struct ata_port *ap)
580{ 580{
@@ -595,7 +595,7 @@ static void __ata_port_freeze(struct ata_port *ap)
595 * Abort and freeze @ap. 595 * Abort and freeze @ap.
596 * 596 *
597 * LOCKING: 597 * LOCKING:
598 * spin_lock_irqsave(host_set lock) 598 * spin_lock_irqsave(host lock)
599 * 599 *
600 * RETURNS: 600 * RETURNS:
601 * Number of aborted commands. 601 * Number of aborted commands.
diff --git a/drivers/scsi/libata-scsi.c b/drivers/ata/libata-scsi.c
index e92c31d698ff..3986ec8741b4 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -321,7 +321,7 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
321 * current command. 321 * current command.
322 * 322 *
323 * LOCKING: 323 * LOCKING:
324 * spin_lock_irqsave(host_set lock) 324 * spin_lock_irqsave(host lock)
325 * 325 *
326 * RETURNS: 326 * RETURNS:
327 * Command allocated, or %NULL if none available. 327 * Command allocated, or %NULL if none available.
@@ -400,7 +400,7 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
400/** 400/**
401 * ata_scsi_device_suspend - suspend ATA device associated with sdev 401 * ata_scsi_device_suspend - suspend ATA device associated with sdev
402 * @sdev: the SCSI device to suspend 402 * @sdev: the SCSI device to suspend
403 * @state: target power management state 403 * @mesg: target power management message
404 * 404 *
405 * Request suspend EH action on the ATA device associated with 405 * Request suspend EH action on the ATA device associated with
406 * @sdev and wait for the operation to complete. 406 * @sdev and wait for the operation to complete.
@@ -411,7 +411,7 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
411 * RETURNS: 411 * RETURNS:
412 * 0 on success, -errno otherwise. 412 * 0 on success, -errno otherwise.
413 */ 413 */
414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg)
415{ 415{
416 struct ata_port *ap = ata_shost_to_port(sdev->host); 416 struct ata_port *ap = ata_shost_to_port(sdev->host);
417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
@@ -438,7 +438,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
438 438
439 /* request suspend */ 439 /* request suspend */
440 action = ATA_EH_SUSPEND; 440 action = ATA_EH_SUSPEND;
441 if (state.event != PM_EVENT_SUSPEND) 441 if (mesg.event != PM_EVENT_SUSPEND)
442 action |= ATA_EH_PM_FREEZE; 442 action |= ATA_EH_PM_FREEZE;
443 ap->eh_info.dev_action[dev->devno] |= action; 443 ap->eh_info.dev_action[dev->devno] |= action;
444 ap->eh_info.flags |= ATA_EHI_QUIET; 444 ap->eh_info.flags |= ATA_EHI_QUIET;
@@ -463,7 +463,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
463 spin_unlock_irqrestore(ap->lock, flags); 463 spin_unlock_irqrestore(ap->lock, flags);
464 out: 464 out:
465 if (rc == 0) 465 if (rc == 0)
466 sdev->sdev_gendev.power.power_state = state; 466 sdev->sdev_gendev.power.power_state = mesg;
467 return rc; 467 return rc;
468} 468}
469 469
@@ -537,7 +537,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
537 * format sense blocks. 537 * format sense blocks.
538 * 538 *
539 * LOCKING: 539 * LOCKING:
540 * spin_lock_irqsave(host_set lock) 540 * spin_lock_irqsave(host lock)
541 */ 541 */
542void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 542void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
543 u8 *ascq, int verbose) 543 u8 *ascq, int verbose)
@@ -649,7 +649,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
649 * block. Clear sense key, ASC & ASCQ if there is no error. 649 * block. Clear sense key, ASC & ASCQ if there is no error.
650 * 650 *
651 * LOCKING: 651 * LOCKING:
652 * spin_lock_irqsave(host_set lock) 652 * spin_lock_irqsave(host lock)
653 */ 653 */
654void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 654void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
655{ 655{
@@ -918,7 +918,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
918 * [See SAT revision 5 at www.t10.org] 918 * [See SAT revision 5 at www.t10.org]
919 * 919 *
920 * LOCKING: 920 * LOCKING:
921 * spin_lock_irqsave(host_set lock) 921 * spin_lock_irqsave(host lock)
922 * 922 *
923 * RETURNS: 923 * RETURNS:
924 * Zero on success, non-zero on error. 924 * Zero on success, non-zero on error.
@@ -986,7 +986,7 @@ invalid_fld:
986 * FLUSH CACHE EXT. 986 * FLUSH CACHE EXT.
987 * 987 *
988 * LOCKING: 988 * LOCKING:
989 * spin_lock_irqsave(host_set lock) 989 * spin_lock_irqsave(host lock)
990 * 990 *
991 * RETURNS: 991 * RETURNS:
992 * Zero on success, non-zero on error. 992 * Zero on success, non-zero on error.
@@ -1109,7 +1109,7 @@ static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command. 1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1110 * 1110 *
1111 * LOCKING: 1111 * LOCKING:
1112 * spin_lock_irqsave(host_set lock) 1112 * spin_lock_irqsave(host lock)
1113 * 1113 *
1114 * RETURNS: 1114 * RETURNS:
1115 * Zero on success, non-zero on error. 1115 * Zero on success, non-zero on error.
@@ -1233,7 +1233,7 @@ nothing_to_do:
1233 * %WRITE_16 are currently supported. 1233 * %WRITE_16 are currently supported.
1234 * 1234 *
1235 * LOCKING: 1235 * LOCKING:
1236 * spin_lock_irqsave(host_set lock) 1236 * spin_lock_irqsave(host lock)
1237 * 1237 *
1238 * RETURNS: 1238 * RETURNS:
1239 * Zero on success, non-zero on error. 1239 * Zero on success, non-zero on error.
@@ -1467,7 +1467,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1467 * issued to @dev. 1467 * issued to @dev.
1468 * 1468 *
1469 * LOCKING: 1469 * LOCKING:
1470 * spin_lock_irqsave(host_set lock) 1470 * spin_lock_irqsave(host lock)
1471 * 1471 *
1472 * RETURNS: 1472 * RETURNS:
1473 * 1 if deferring is needed, 0 otherwise. 1473 * 1 if deferring is needed, 0 otherwise.
@@ -1510,7 +1510,7 @@ static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1510 * termination. 1510 * termination.
1511 * 1511 *
1512 * LOCKING: 1512 * LOCKING:
1513 * spin_lock_irqsave(host_set lock) 1513 * spin_lock_irqsave(host lock)
1514 * 1514 *
1515 * RETURNS: 1515 * RETURNS:
1516 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command 1516 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
@@ -1589,7 +1589,7 @@ defer:
1589 * Maps buffer contained within SCSI command @cmd. 1589 * Maps buffer contained within SCSI command @cmd.
1590 * 1590 *
1591 * LOCKING: 1591 * LOCKING:
1592 * spin_lock_irqsave(host_set lock) 1592 * spin_lock_irqsave(host lock)
1593 * 1593 *
1594 * RETURNS: 1594 * RETURNS:
1595 * Length of response buffer. 1595 * Length of response buffer.
@@ -1623,7 +1623,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1623 * Unmaps response buffer contained within @cmd. 1623 * Unmaps response buffer contained within @cmd.
1624 * 1624 *
1625 * LOCKING: 1625 * LOCKING:
1626 * spin_lock_irqsave(host_set lock) 1626 * spin_lock_irqsave(host lock)
1627 */ 1627 */
1628 1628
1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) 1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
@@ -1649,7 +1649,7 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1649 * and sense buffer are assumed to be set). 1649 * and sense buffer are assumed to be set).
1650 * 1650 *
1651 * LOCKING: 1651 * LOCKING:
1652 * spin_lock_irqsave(host_set lock) 1652 * spin_lock_irqsave(host lock)
1653 */ 1653 */
1654 1654
1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
@@ -1680,7 +1680,7 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1680 * with non-VPD INQUIRY command output. 1680 * with non-VPD INQUIRY command output.
1681 * 1681 *
1682 * LOCKING: 1682 * LOCKING:
1683 * spin_lock_irqsave(host_set lock) 1683 * spin_lock_irqsave(host lock)
1684 */ 1684 */
1685 1685
1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -1736,7 +1736,7 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1736 * Returns list of inquiry VPD pages available. 1736 * Returns list of inquiry VPD pages available.
1737 * 1737 *
1738 * LOCKING: 1738 * LOCKING:
1739 * spin_lock_irqsave(host_set lock) 1739 * spin_lock_irqsave(host lock)
1740 */ 1740 */
1741 1741
1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, 1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
@@ -1764,7 +1764,7 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1764 * Returns ATA device serial number. 1764 * Returns ATA device serial number.
1765 * 1765 *
1766 * LOCKING: 1766 * LOCKING:
1767 * spin_lock_irqsave(host_set lock) 1767 * spin_lock_irqsave(host lock)
1768 */ 1768 */
1769 1769
1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, 1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
@@ -1797,7 +1797,7 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1797 * name ("ATA "), model and serial numbers. 1797 * name ("ATA "), model and serial numbers.
1798 * 1798 *
1799 * LOCKING: 1799 * LOCKING:
1800 * spin_lock_irqsave(host_set lock) 1800 * spin_lock_irqsave(host lock)
1801 */ 1801 */
1802 1802
1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, 1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
@@ -1849,7 +1849,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1849 * that the caller should successfully complete this SCSI command. 1849 * that the caller should successfully complete this SCSI command.
1850 * 1850 *
1851 * LOCKING: 1851 * LOCKING:
1852 * spin_lock_irqsave(host_set lock) 1852 * spin_lock_irqsave(host lock)
1853 */ 1853 */
1854 1854
1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, 1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
@@ -1990,7 +1990,7 @@ static int ata_dev_supports_fua(u16 *id)
1990 * descriptor for other device types. 1990 * descriptor for other device types.
1991 * 1991 *
1992 * LOCKING: 1992 * LOCKING:
1993 * spin_lock_irqsave(host_set lock) 1993 * spin_lock_irqsave(host lock)
1994 */ 1994 */
1995 1995
1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, 1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
@@ -2129,7 +2129,7 @@ saving_not_supp:
2129 * Simulate READ CAPACITY commands. 2129 * Simulate READ CAPACITY commands.
2130 * 2130 *
2131 * LOCKING: 2131 * LOCKING:
2132 * spin_lock_irqsave(host_set lock) 2132 * spin_lock_irqsave(host lock)
2133 */ 2133 */
2134 2134
2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
@@ -2204,7 +2204,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2204 * Simulate REPORT LUNS command. 2204 * Simulate REPORT LUNS command.
2205 * 2205 *
2206 * LOCKING: 2206 * LOCKING:
2207 * spin_lock_irqsave(host_set lock) 2207 * spin_lock_irqsave(host lock)
2208 */ 2208 */
2209 2209
2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, 2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
@@ -2256,7 +2256,7 @@ void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2256 * and the specified additional sense codes. 2256 * and the specified additional sense codes.
2257 * 2257 *
2258 * LOCKING: 2258 * LOCKING:
2259 * spin_lock_irqsave(host_set lock) 2259 * spin_lock_irqsave(host lock)
2260 */ 2260 */
2261 2261
2262void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 2262void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
@@ -2421,7 +2421,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2421 * @scsicmd: SCSI CDB associated with this PACKET command 2421 * @scsicmd: SCSI CDB associated with this PACKET command
2422 * 2422 *
2423 * LOCKING: 2423 * LOCKING:
2424 * spin_lock_irqsave(host_set lock) 2424 * spin_lock_irqsave(host lock)
2425 * 2425 *
2426 * RETURNS: 2426 * RETURNS:
2427 * Zero on success, non-zero on failure. 2427 * Zero on success, non-zero on failure.
@@ -2500,7 +2500,7 @@ static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2500 * Determine if commands should be sent to the specified device. 2500 * Determine if commands should be sent to the specified device.
2501 * 2501 *
2502 * LOCKING: 2502 * LOCKING:
2503 * spin_lock_irqsave(host_set lock) 2503 * spin_lock_irqsave(host lock)
2504 * 2504 *
2505 * RETURNS: 2505 * RETURNS:
2506 * 0 if commands are not allowed / 1 if commands are allowed 2506 * 0 if commands are not allowed / 1 if commands are allowed
@@ -2534,7 +2534,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev)
2534 * SCSI command to be sent. 2534 * SCSI command to be sent.
2535 * 2535 *
2536 * LOCKING: 2536 * LOCKING:
2537 * spin_lock_irqsave(host_set lock) 2537 * spin_lock_irqsave(host lock)
2538 * 2538 *
2539 * RETURNS: 2539 * RETURNS:
2540 * Associated ATA device, or %NULL if not found. 2540 * Associated ATA device, or %NULL if not found.
@@ -2808,7 +2808,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2808 * ATA and ATAPI devices appearing as SCSI devices. 2808 * ATA and ATAPI devices appearing as SCSI devices.
2809 * 2809 *
2810 * LOCKING: 2810 * LOCKING:
2811 * Releases scsi-layer-held lock, and obtains host_set lock. 2811 * Releases scsi-layer-held lock, and obtains host lock.
2812 * 2812 *
2813 * RETURNS: 2813 * RETURNS:
2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
@@ -2852,7 +2852,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2852 * that can be handled internally. 2852 * that can be handled internally.
2853 * 2853 *
2854 * LOCKING: 2854 * LOCKING:
2855 * spin_lock_irqsave(host_set lock) 2855 * spin_lock_irqsave(host lock)
2856 */ 2856 */
2857 2857
2858void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 2858void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
@@ -2944,7 +2944,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
2944 if (!ata_dev_enabled(dev) || dev->sdev) 2944 if (!ata_dev_enabled(dev) || dev->sdev)
2945 continue; 2945 continue;
2946 2946
2947 sdev = __scsi_add_device(ap->host, 0, i, 0, NULL); 2947 sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
2948 if (!IS_ERR(sdev)) { 2948 if (!IS_ERR(sdev)) {
2949 dev->sdev = sdev; 2949 dev->sdev = sdev;
2950 scsi_device_put(sdev); 2950 scsi_device_put(sdev);
@@ -2958,11 +2958,11 @@ void ata_scsi_scan_host(struct ata_port *ap)
2958 * 2958 *
2959 * This function is called from ata_eh_hotplug() and responsible 2959 * This function is called from ata_eh_hotplug() and responsible
2960 * for taking the SCSI device attached to @dev offline. This 2960 * for taking the SCSI device attached to @dev offline. This
2961 * function is called with host_set lock which protects dev->sdev 2961 * function is called with host lock which protects dev->sdev
2962 * against clearing. 2962 * against clearing.
2963 * 2963 *
2964 * LOCKING: 2964 * LOCKING:
2965 * spin_lock_irqsave(host_set lock) 2965 * spin_lock_irqsave(host lock)
2966 * 2966 *
2967 * RETURNS: 2967 * RETURNS:
2968 * 1 if attached SCSI device exists, 0 otherwise. 2968 * 1 if attached SCSI device exists, 0 otherwise.
@@ -2998,16 +2998,16 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
2998 * be removed if there is __scsi_device_get() interface which 2998 * be removed if there is __scsi_device_get() interface which
2999 * increments reference counts regardless of device state. 2999 * increments reference counts regardless of device state.
3000 */ 3000 */
3001 mutex_lock(&ap->host->scan_mutex); 3001 mutex_lock(&ap->scsi_host->scan_mutex);
3002 spin_lock_irqsave(ap->lock, flags); 3002 spin_lock_irqsave(ap->lock, flags);
3003 3003
3004 /* clearing dev->sdev is protected by host_set lock */ 3004 /* clearing dev->sdev is protected by host lock */
3005 sdev = dev->sdev; 3005 sdev = dev->sdev;
3006 dev->sdev = NULL; 3006 dev->sdev = NULL;
3007 3007
3008 if (sdev) { 3008 if (sdev) {
3009 /* If user initiated unplug races with us, sdev can go 3009 /* If user initiated unplug races with us, sdev can go
3010 * away underneath us after the host_set lock and 3010 * away underneath us after the host lock and
3011 * scan_mutex are released. Hold onto it. 3011 * scan_mutex are released. Hold onto it.
3012 */ 3012 */
3013 if (scsi_device_get(sdev) == 0) { 3013 if (scsi_device_get(sdev) == 0) {
@@ -3024,7 +3024,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
3024 } 3024 }
3025 3025
3026 spin_unlock_irqrestore(ap->lock, flags); 3026 spin_unlock_irqrestore(ap->lock, flags);
3027 mutex_unlock(&ap->host->scan_mutex); 3027 mutex_unlock(&ap->scsi_host->scan_mutex);
3028 3028
3029 if (sdev) { 3029 if (sdev) {
3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", 3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
@@ -3171,3 +3171,152 @@ void ata_scsi_dev_rescan(void *data)
3171 scsi_rescan_device(&(dev->sdev->sdev_gendev)); 3171 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3172 } 3172 }
3173} 3173}
3174
3175/**
3176 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
3177 * @host: ATA host that the allocated port is attached to
3178 * @port_info: Information from low-level host driver
3179 * @shost: SCSI host that the scsi device is attached to
3180 *
3181 * LOCKING:
3182 * PCI/etc. bus probe sem.
3183 *
3184 * RETURNS:
3185 * ata_port pointer on success / NULL on failure.
3186 */
3187
3188struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3189 struct ata_port_info *port_info,
3190 struct Scsi_Host *shost)
3191{
3192 struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
3193 struct ata_probe_ent *ent;
3194
3195 if (!ap)
3196 return NULL;
3197
3198 ent = ata_probe_ent_alloc(host->dev, port_info);
3199 if (!ent) {
3200 kfree(ap);
3201 return NULL;
3202 }
3203
3204 ata_port_init(ap, host, ent, 0);
3205 ap->lock = shost->host_lock;
3206 kfree(ent);
3207 return ap;
3208}
3209EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3210
3211/**
3212 * ata_sas_port_start - Set port up for dma.
3213 * @ap: Port to initialize
3214 *
3215 * Called just after data structures for each port are
3216 * initialized. Allocates DMA pad.
3217 *
3218 * May be used as the port_start() entry in ata_port_operations.
3219 *
3220 * LOCKING:
3221 * Inherited from caller.
3222 */
3223int ata_sas_port_start(struct ata_port *ap)
3224{
3225 return ata_pad_alloc(ap, ap->dev);
3226}
3227EXPORT_SYMBOL_GPL(ata_sas_port_start);
3228
3229/**
3230 * ata_sas_port_stop - Undo ata_sas_port_start()
3231 * @ap: Port to shut down
3232 *
3233 * Frees the DMA pad.
3234 *
3235 * May be used as the port_stop() entry in ata_port_operations.
3236 *
3237 * LOCKING:
3238 * Inherited from caller.
3239 */
3240
3241void ata_sas_port_stop(struct ata_port *ap)
3242{
3243 ata_pad_free(ap, ap->dev);
3244}
3245EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3246
3247/**
3248 * ata_sas_port_init - Initialize a SATA device
3249 * @ap: SATA port to initialize
3250 *
3251 * LOCKING:
3252 * PCI/etc. bus probe sem.
3253 *
3254 * RETURNS:
3255 * Zero on success, non-zero on error.
3256 */
3257
3258int ata_sas_port_init(struct ata_port *ap)
3259{
3260 int rc = ap->ops->port_start(ap);
3261
3262 if (!rc)
3263 rc = ata_bus_probe(ap);
3264
3265 return rc;
3266}
3267EXPORT_SYMBOL_GPL(ata_sas_port_init);
3268
3269/**
3270 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
3271 * @ap: SATA port to destroy
3272 *
3273 */
3274
3275void ata_sas_port_destroy(struct ata_port *ap)
3276{
3277 ap->ops->port_stop(ap);
3278 kfree(ap);
3279}
3280EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3281
3282/**
3283 * ata_sas_slave_configure - Default slave_config routine for libata devices
3284 * @sdev: SCSI device to configure
3285 * @ap: ATA port to which SCSI device is attached
3286 *
3287 * RETURNS:
3288 * Zero.
3289 */
3290
3291int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3292{
3293 ata_scsi_sdev_config(sdev);
3294 ata_scsi_dev_config(sdev, ap->device);
3295 return 0;
3296}
3297EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3298
3299/**
3300 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3301 * @cmd: SCSI command to be sent
3302 * @done: Completion function, called when command is complete
3303 * @ap: ATA port to which the command is being sent
3304 *
3305 * RETURNS:
3306 * Zero.
3307 */
3308
3309int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3310 struct ata_port *ap)
3311{
3312 ata_scsi_dump_cdb(ap, cmd);
3313
3314 if (likely(ata_scsi_dev_enabled(ap->device)))
3315 __ata_scsi_queuecmd(cmd, done, ap->device);
3316 else {
3317 cmd->result = (DID_BAD_TARGET << 16);
3318 done(cmd);
3319 }
3320 return 0;
3321}
3322EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
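
For context, the exports above are what a SAS low-level driver would call to hang a libata port off its own Scsi_Host. The fragment below is only a hedged sketch of that wiring, not part of this patch: everything prefixed my_ (my_phy, my_sata_ops, my_attach_sata_phy, my_queuecommand, my_phy_from_cmd) is an invented name, the usual <linux/libata.h> and SCSI headers are assumed, and only the ata_sas_* / ATA_* symbols come from libata.

/*
 * Illustrative sketch only: wiring the new ata_sas_* helpers into a
 * hypothetical SAS LLDD.  Invented names are marked in the comments.
 */
struct my_phy {
	struct ata_port *ap;		/* libata port behind this PHY */
};

static struct ata_port_operations my_sata_ops = {
	/* a real driver fills in tf_load, qc_issue, etc.; only the port
	   lifetime hooks matter for this sketch */
	.port_start	= ata_sas_port_start,
	.port_stop	= ata_sas_port_stop,
};

static struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= 0x1f,
	.udma_mask	= 0x7f,
	.port_ops	= &my_sata_ops,
};

static int my_attach_sata_phy(struct ata_host *host, struct Scsi_Host *shost,
			      struct my_phy *phy)
{
	struct ata_port *ap = ata_sas_port_alloc(host, &my_port_info, shost);

	if (!ap)
		return -ENOMEM;
	phy->ap = ap;

	/* runs ->port_start() and then probes the device behind the PHY */
	return ata_sas_port_init(ap);
}

static int my_queuecommand(struct scsi_cmnd *cmd,
			   void (*done)(struct scsi_cmnd *))
{
	struct my_phy *phy = my_phy_from_cmd(cmd);	/* invented lookup */

	/* libata dispatches the CDB, or fails it with DID_BAD_TARGET */
	return ata_sas_queuecmd(cmd, done, phy->ap);
}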
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/ata/libata-sff.c
index 9ce221f25954..688bb55e197a 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/ata/libata-sff.c
@@ -193,7 +193,7 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
193 * synchronization with interrupt handler / other threads. 193 * synchronization with interrupt handler / other threads.
194 * 194 *
195 * LOCKING: 195 * LOCKING:
196 * spin_lock_irqsave(host_set lock) 196 * spin_lock_irqsave(host lock)
197 */ 197 */
198 198
199static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) 199static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
@@ -216,7 +216,7 @@ static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile
216 * FIXME: missing write posting for 400nS delay enforcement 216 * FIXME: missing write posting for 400nS delay enforcement
217 * 217 *
218 * LOCKING: 218 * LOCKING:
219 * spin_lock_irqsave(host_set lock) 219 * spin_lock_irqsave(host lock)
220 */ 220 */
221 221
222static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 222static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
@@ -237,7 +237,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
237 * synchronization with interrupt handler / other threads. 237 * synchronization with interrupt handler / other threads.
238 * 238 *
239 * LOCKING: 239 * LOCKING:
240 * spin_lock_irqsave(host_set lock) 240 * spin_lock_irqsave(host lock)
241 */ 241 */
242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243{ 243{
@@ -422,7 +422,7 @@ u8 ata_altstatus(struct ata_port *ap)
422 * @qc: Info associated with this ATA transaction. 422 * @qc: Info associated with this ATA transaction.
423 * 423 *
424 * LOCKING: 424 * LOCKING:
425 * spin_lock_irqsave(host_set lock) 425 * spin_lock_irqsave(host lock)
426 */ 426 */
427 427
428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) 428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@@ -452,7 +452,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
452 * @qc: Info associated with this ATA transaction. 452 * @qc: Info associated with this ATA transaction.
453 * 453 *
454 * LOCKING: 454 * LOCKING:
455 * spin_lock_irqsave(host_set lock) 455 * spin_lock_irqsave(host lock)
456 */ 456 */
457 457
458static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) 458static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
@@ -483,7 +483,7 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
483 * @qc: Info associated with this ATA transaction. 483 * @qc: Info associated with this ATA transaction.
484 * 484 *
485 * LOCKING: 485 * LOCKING:
486 * spin_lock_irqsave(host_set lock) 486 * spin_lock_irqsave(host lock)
487 */ 487 */
488 488
489static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) 489static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
@@ -511,7 +511,7 @@ static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
511 * @qc: Info associated with this ATA transaction. 511 * @qc: Info associated with this ATA transaction.
512 * 512 *
513 * LOCKING: 513 * LOCKING:
514 * spin_lock_irqsave(host_set lock) 514 * spin_lock_irqsave(host lock)
515 */ 515 */
516 516
517static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) 517static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
@@ -535,7 +535,7 @@ static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
535 * May be used as the bmdma_start() entry in ata_port_operations. 535 * May be used as the bmdma_start() entry in ata_port_operations.
536 * 536 *
537 * LOCKING: 537 * LOCKING:
538 * spin_lock_irqsave(host_set lock) 538 * spin_lock_irqsave(host lock)
539 */ 539 */
540void ata_bmdma_start(struct ata_queued_cmd *qc) 540void ata_bmdma_start(struct ata_queued_cmd *qc)
541{ 541{
@@ -557,7 +557,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
557 * May be used as the bmdma_setup() entry in ata_port_operations. 557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 * 558 *
559 * LOCKING: 559 * LOCKING:
560 * spin_lock_irqsave(host_set lock) 560 * spin_lock_irqsave(host lock)
561 */ 561 */
562void ata_bmdma_setup(struct ata_queued_cmd *qc) 562void ata_bmdma_setup(struct ata_queued_cmd *qc)
563{ 563{
@@ -577,7 +577,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
577 * May be used as the irq_clear() entry in ata_port_operations. 577 * May be used as the irq_clear() entry in ata_port_operations.
578 * 578 *
579 * LOCKING: 579 * LOCKING:
580 * spin_lock_irqsave(host_set lock) 580 * spin_lock_irqsave(host lock)
581 */ 581 */
582 582
583void ata_bmdma_irq_clear(struct ata_port *ap) 583void ata_bmdma_irq_clear(struct ata_port *ap)
@@ -605,7 +605,7 @@ void ata_bmdma_irq_clear(struct ata_port *ap)
605 * May be used as the bmdma_status() entry in ata_port_operations. 605 * May be used as the bmdma_status() entry in ata_port_operations.
606 * 606 *
607 * LOCKING: 607 * LOCKING:
608 * spin_lock_irqsave(host_set lock) 608 * spin_lock_irqsave(host lock)
609 */ 609 */
610 610
611u8 ata_bmdma_status(struct ata_port *ap) 611u8 ata_bmdma_status(struct ata_port *ap)
@@ -629,7 +629,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
629 * May be used as the bmdma_stop() entry in ata_port_operations. 629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 * 630 *
631 * LOCKING: 631 * LOCKING:
632 * spin_lock_irqsave(host_set lock) 632 * spin_lock_irqsave(host lock)
633 */ 633 */
634 634
635void ata_bmdma_stop(struct ata_queued_cmd *qc) 635void ata_bmdma_stop(struct ata_queued_cmd *qc)
@@ -797,32 +797,6 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
797} 797}
798 798
799#ifdef CONFIG_PCI 799#ifdef CONFIG_PCI
800static struct ata_probe_ent *
801ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
802{
803 struct ata_probe_ent *probe_ent;
804
805 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
806 if (!probe_ent) {
807 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
808 kobject_name(&(dev->kobj)));
809 return NULL;
810 }
811
812 INIT_LIST_HEAD(&probe_ent->node);
813 probe_ent->dev = dev;
814
815 probe_ent->sht = port->sht;
816 probe_ent->host_flags = port->host_flags;
817 probe_ent->pio_mask = port->pio_mask;
818 probe_ent->mwdma_mask = port->mwdma_mask;
819 probe_ent->udma_mask = port->udma_mask;
820 probe_ent->port_ops = port->port_ops;
821
822 return probe_ent;
823}
824
825
826/** 800/**
827 * ata_pci_init_native_mode - Initialize native-mode driver 801 * ata_pci_init_native_mode - Initialize native-mode driver
828 * @pdev: pci device to be initialized 802 * @pdev: pci device to be initialized
@@ -864,7 +838,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
864 bmdma = pci_resource_start(pdev, 4); 838 bmdma = pci_resource_start(pdev, 4);
865 if (bmdma) { 839 if (bmdma) {
866 if (inb(bmdma + 2) & 0x80) 840 if (inb(bmdma + 2) & 0x80)
867 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 841 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
868 probe_ent->port[p].bmdma_addr = bmdma; 842 probe_ent->port[p].bmdma_addr = bmdma;
869 } 843 }
870 ata_std_ports(&probe_ent->port[p]); 844 ata_std_ports(&probe_ent->port[p]);
@@ -880,10 +854,11 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
880 if (bmdma) { 854 if (bmdma) {
881 bmdma += 8; 855 bmdma += 8;
882 if(inb(bmdma + 2) & 0x80) 856 if(inb(bmdma + 2) & 0x80)
883 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 857 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
884 probe_ent->port[p].bmdma_addr = bmdma; 858 probe_ent->port[p].bmdma_addr = bmdma;
885 } 859 }
886 ata_std_ports(&probe_ent->port[p]); 860 ata_std_ports(&probe_ent->port[p]);
861 probe_ent->pinfo2 = port[1];
887 p++; 862 p++;
888 } 863 }
889 864
@@ -893,44 +868,49 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
893 868
894 869
895static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, 870static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
896 struct ata_port_info *port, int port_num) 871 struct ata_port_info **port, int port_mask)
897{ 872{
898 struct ata_probe_ent *probe_ent; 873 struct ata_probe_ent *probe_ent;
899 unsigned long bmdma; 874 unsigned long bmdma = pci_resource_start(pdev, 4);
900 875
901 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port); 876 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
902 if (!probe_ent) 877 if (!probe_ent)
903 return NULL; 878 return NULL;
904 879
905 probe_ent->legacy_mode = 1; 880 probe_ent->n_ports = 2;
906 probe_ent->n_ports = 1; 881 probe_ent->private_data = port[0]->private_data;
907 probe_ent->hard_port_no = port_num;
908 probe_ent->private_data = port->private_data;
909
910 switch(port_num)
911 {
912 case 0:
913 probe_ent->irq = 14;
914 probe_ent->port[0].cmd_addr = 0x1f0;
915 probe_ent->port[0].altstatus_addr =
916 probe_ent->port[0].ctl_addr = 0x3f6;
917 break;
918 case 1:
919 probe_ent->irq = 15;
920 probe_ent->port[0].cmd_addr = 0x170;
921 probe_ent->port[0].altstatus_addr =
922 probe_ent->port[0].ctl_addr = 0x376;
923 break;
924 }
925 882
926 bmdma = pci_resource_start(pdev, 4); 883 if (port_mask & ATA_PORT_PRIMARY) {
927 if (bmdma != 0) { 884 probe_ent->irq = 14;
928 bmdma += 8 * port_num; 885 probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
929 probe_ent->port[0].bmdma_addr = bmdma; 886 probe_ent->port[0].altstatus_addr =
930 if (inb(bmdma + 2) & 0x80) 887 probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
931 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 888 if (bmdma) {
932 } 889 probe_ent->port[0].bmdma_addr = bmdma;
933 ata_std_ports(&probe_ent->port[0]); 890 if (inb(bmdma + 2) & 0x80)
891 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
892 }
893 ata_std_ports(&probe_ent->port[0]);
894 } else
895 probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
896
897 if (port_mask & ATA_PORT_SECONDARY) {
898 if (probe_ent->irq)
899 probe_ent->irq2 = 15;
900 else
901 probe_ent->irq = 15;
902 probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
903 probe_ent->port[1].altstatus_addr =
904 probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
905 if (bmdma) {
906 probe_ent->port[1].bmdma_addr = bmdma + 8;
907 if (inb(bmdma + 10) & 0x80)
908 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
909 }
910 ata_std_ports(&probe_ent->port[1]);
911 probe_ent->pinfo2 = port[1];
912 } else
913 probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
934 914
935 return probe_ent; 915 return probe_ent;
936} 916}
@@ -950,6 +930,10 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
950 * regions, sets the dma mask, enables bus master mode, and calls 930 * regions, sets the dma mask, enables bus master mode, and calls
951 * ata_device_add() 931 * ata_device_add()
952 * 932 *
933 * ASSUMPTION:
934 * Nobody makes a single channel controller that appears solely as
935 * the secondary legacy port on PCI.
936 *
953 * LOCKING: 937 * LOCKING:
954 * Inherited from PCI layer (may sleep). 938 * Inherited from PCI layer (may sleep).
955 * 939 *
@@ -960,7 +944,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
960int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 944int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
961 unsigned int n_ports) 945 unsigned int n_ports)
962{ 946{
963 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL; 947 struct ata_probe_ent *probe_ent = NULL;
964 struct ata_port_info *port[2]; 948 struct ata_port_info *port[2];
965 u8 tmp8, mask; 949 u8 tmp8, mask;
966 unsigned int legacy_mode = 0; 950 unsigned int legacy_mode = 0;
@@ -975,7 +959,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
975 else 959 else
976 port[1] = port[0]; 960 port[1] = port[0];
977 961
978 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 962 if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
979 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 963 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
980 /* TODO: What if one channel is in native mode ... */ 964 /* TODO: What if one channel is in native mode ... */
981 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 965 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
@@ -1009,35 +993,44 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1009 goto err_out; 993 goto err_out;
1010 } 994 }
1011 995
1012 /* FIXME: Should use platform specific mappers for legacy port ranges */
1013 if (legacy_mode) { 996 if (legacy_mode) {
1014 if (!request_region(0x1f0, 8, "libata")) { 997 if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
1015 struct resource *conflict, res; 998 struct resource *conflict, res;
1016 res.start = 0x1f0; 999 res.start = ATA_PRIMARY_CMD;
1017 res.end = 0x1f0 + 8 - 1; 1000 res.end = ATA_PRIMARY_CMD + 8 - 1;
1018 conflict = ____request_resource(&ioport_resource, &res); 1001 conflict = ____request_resource(&ioport_resource, &res);
1002 while (conflict->child)
1003 conflict = ____request_resource(conflict, &res);
1019 if (!strcmp(conflict->name, "libata")) 1004 if (!strcmp(conflict->name, "libata"))
1020 legacy_mode |= (1 << 0); 1005 legacy_mode |= ATA_PORT_PRIMARY;
1021 else { 1006 else {
1022 disable_dev_on_err = 0; 1007 disable_dev_on_err = 0;
1023 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n"); 1008 printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
1009 "ata: conflict with %s\n",
1010 ATA_PRIMARY_CMD,
1011 conflict->name);
1024 } 1012 }
1025 } else 1013 } else
1026 legacy_mode |= (1 << 0); 1014 legacy_mode |= ATA_PORT_PRIMARY;
1027 1015
1028 if (!request_region(0x170, 8, "libata")) { 1016 if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
1029 struct resource *conflict, res; 1017 struct resource *conflict, res;
1030 res.start = 0x170; 1018 res.start = ATA_SECONDARY_CMD;
1031 res.end = 0x170 + 8 - 1; 1019 res.end = ATA_SECONDARY_CMD + 8 - 1;
1032 conflict = ____request_resource(&ioport_resource, &res); 1020 conflict = ____request_resource(&ioport_resource, &res);
1021 while (conflict->child)
1022 conflict = ____request_resource(conflict, &res);
1033 if (!strcmp(conflict->name, "libata")) 1023 if (!strcmp(conflict->name, "libata"))
1034 legacy_mode |= (1 << 1); 1024 legacy_mode |= ATA_PORT_SECONDARY;
1035 else { 1025 else {
1036 disable_dev_on_err = 0; 1026 disable_dev_on_err = 0;
1037 printk(KERN_WARNING "ata: 0x170 IDE port busy\n"); 1027 printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
1028 "ata: conflict with %s\n",
1029 ATA_SECONDARY_CMD,
1030 conflict->name);
1038 } 1031 }
1039 } else 1032 } else
1040 legacy_mode |= (1 << 1); 1033 legacy_mode |= ATA_PORT_SECONDARY;
1041 } 1034 }
1042 1035
1043 /* we have legacy mode, but all ports are unavailable */ 1036 /* we have legacy mode, but all ports are unavailable */
@@ -1055,17 +1048,14 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1055 goto err_out_regions; 1048 goto err_out_regions;
1056 1049
1057 if (legacy_mode) { 1050 if (legacy_mode) {
1058 if (legacy_mode & (1 << 0)) 1051 probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
1059 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
1060 if (legacy_mode & (1 << 1))
1061 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
1062 } else { 1052 } else {
1063 if (n_ports == 2) 1053 if (n_ports == 2)
1064 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 1054 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1065 else 1055 else
1066 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY); 1056 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1067 } 1057 }
1068 if (!probe_ent && !probe_ent2) { 1058 if (!probe_ent) {
1069 rc = -ENOMEM; 1059 rc = -ENOMEM;
1070 goto err_out_regions; 1060 goto err_out_regions;
1071 } 1061 }
@@ -1073,35 +1063,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1073 pci_set_master(pdev); 1063 pci_set_master(pdev);
1074 1064
1075 /* FIXME: check ata_device_add return */ 1065 /* FIXME: check ata_device_add return */
1076 if (legacy_mode) { 1066 ata_device_add(probe_ent);
1077 struct device *dev = &pdev->dev;
1078 struct ata_host_set *host_set = NULL;
1079
1080 if (legacy_mode & (1 << 0)) {
1081 ata_device_add(probe_ent);
1082 host_set = dev_get_drvdata(dev);
1083 }
1084
1085 if (legacy_mode & (1 << 1)) {
1086 ata_device_add(probe_ent2);
1087 if (host_set) {
1088 host_set->next = dev_get_drvdata(dev);
1089 dev_set_drvdata(dev, host_set);
1090 }
1091 }
1092 } else
1093 ata_device_add(probe_ent);
1094 1067
1095 kfree(probe_ent); 1068 kfree(probe_ent);
1096 kfree(probe_ent2);
1097 1069
1098 return 0; 1070 return 0;
1099 1071
1100err_out_regions: 1072err_out_regions:
1101 if (legacy_mode & (1 << 0)) 1073 if (legacy_mode & ATA_PORT_PRIMARY)
1102 release_region(0x1f0, 8); 1074 release_region(ATA_PRIMARY_CMD, 8);
1103 if (legacy_mode & (1 << 1)) 1075 if (legacy_mode & ATA_PORT_SECONDARY)
1104 release_region(0x170, 8); 1076 release_region(ATA_SECONDARY_CMD, 8);
1105 pci_release_regions(pdev); 1077 pci_release_regions(pdev);
1106err_out: 1078err_out:
1107 if (disable_dev_on_err) 1079 if (disable_dev_on_err)
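
One detail worth calling out in the hunks above: both the native-mode and legacy-mode probe paths read bit 7 of the SFF-8038i bus master status register (offset 2 of the BMDMA I/O range) before setting ATA_HOST_SIMPLEX. A tiny sketch of that check follows, purely for illustration; the helper name is invented and is not added by this patch.

/*
 * Illustrative helper: bit 7 of the bus master status register is the
 * "simplex only" bit.  When set, the DMA engine is shared between the
 * two channels, which libata records as ATA_HOST_SIMPLEX so it never
 * runs DMA on both ports at once.
 */
static int bmdma_is_simplex(unsigned long bmdma_base)
{
	return (inb(bmdma_base + 2) & 0x80) != 0;
}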
diff --git a/drivers/scsi/libata.h b/drivers/ata/libata.h
index c325679d9b54..a5ecb71390a9 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/ata/libata.h
@@ -69,6 +69,10 @@ extern int ata_flush_cache(struct ata_device *dev);
69extern void ata_dev_init(struct ata_device *dev); 69extern void ata_dev_init(struct ata_device *dev);
70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
72extern void ata_port_init(struct ata_port *ap, struct ata_host *host,
73 const struct ata_probe_ent *ent, unsigned int port_no);
74extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
75 const struct ata_port_info *port);
72 76
73 77
74/* libata-scsi.c */ 78/* libata-scsi.c */
@@ -107,6 +111,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
107 u8 *rbuf, unsigned int buflen)); 111 u8 *rbuf, unsigned int buflen));
108extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 112extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
109extern void ata_scsi_dev_rescan(void *data); 113extern void ata_scsi_dev_rescan(void *data);
114extern int ata_bus_probe(struct ata_port *ap);
110 115
111/* libata-eh.c */ 116/* libata-eh.c */
112extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 117extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
new file mode 100644
index 000000000000..8448ee6e0eed
--- /dev/null
+++ b/drivers/ata/pata_ali.c
@@ -0,0 +1,679 @@
1/*
2 * pata_ali.c - ALI 15x3 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based in part upon
7 * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
8 *
9 * Copyright (C) 1998-2000 Michel Aubry, Maintainer
10 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
11 * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
12 *
13 * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
14 * May be copied or modified under the terms of the GNU General Public License
15 * Copyright (C) 2002 Alan Cox <alan@redhat.com>
16 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
17 *
18 * Documentation
19 * Chipset documentation available under NDA only
20 *
21 * TODO/CHECK
22 * Cannot have ATAPI on both master & slave for rev < c2 (???) but
23 * otherwise should do atapi DMA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34#include <linux/dmi.h>
35
36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.6.5"
38
39/*
40 * Cable special cases
41 */
42
43static struct dmi_system_id cable_dmi_table[] = {
44 {
45 .ident = "HP Pavilion N5430",
46 .matches = {
47 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
48 DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
49 },
50 },
51 { }
52};
53
54static int ali_cable_override(struct pci_dev *pdev)
55{
56 /* Fujitsu P2000 */
57 if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
58 return 1;
59 /* Systems by DMI */
60 if (dmi_check_system(cable_dmi_table))
61 return 1;
62 return 0;
63}
64
65/**
66 * ali_c2_cable_detect - cable detection
67 * @ap: ATA port
68 *
69 * Perform cable detection for C2 and later revisions
70 */
71
72static int ali_c2_cable_detect(struct ata_port *ap)
73{
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 u8 ata66;
76
77 /* Certain laptops use short but suitable cables and don't
78 implement the detect logic */
79
80 if (ali_cable_override(pdev))
81 return ATA_CBL_PATA80;
82
83 /* Host view cable detect 0x4A bit 0 primary bit 1 secondary
84 Bit set for 40 pin */
85 pci_read_config_byte(pdev, 0x4A, &ata66);
86 if (ata66 & (1 << ap->port_no))
87 return ATA_CBL_PATA40;
88 else
89 return ATA_CBL_PATA80;
90}
91
92/**
 93 * ali_c2_pre_reset - reset for C2 and later chips
94 * @ap: ATA port
95 *
96 * Handle the reset callback for the later chips with cable detect
97 */
98
99static int ali_c2_pre_reset(struct ata_port *ap)
100{
101 ap->cbl = ali_c2_cable_detect(ap);
102 return ata_std_prereset(ap);
103}
104
105static void ali_c2_error_handler(struct ata_port *ap)
106{
107 ata_bmdma_drive_eh(ap, ali_c2_pre_reset,
108 ata_std_softreset, NULL,
109 ata_std_postreset);
110}
111
112/**
113 * ali_early_cable_detect - cable detection
114 * @ap: ATA port
115 *
116 * Perform cable detection for older chipsets. This turns out to be
117 * rather easy to implement
118 */
119
120static int ali_early_cable_detect(struct ata_port *ap)
121{
122 return ATA_CBL_PATA40;
123}
124
125/**
 126 * ali_early_pre_reset - reset for early chip
127 * @ap: ATA port
128 *
129 * Handle the reset callback for the early (pre cable detect) chips.
130 */
131
132static int ali_early_pre_reset(struct ata_port *ap)
133{
134 ap->cbl = ali_early_cable_detect(ap);
135 return ata_std_prereset(ap);
136}
137
138static void ali_early_error_handler(struct ata_port *ap)
139{
140 return ata_bmdma_drive_eh(ap, ali_early_pre_reset,
141 ata_std_softreset, NULL,
142 ata_std_postreset);
143}
144
145/**
146 * ali_20_filter - filter for earlier ALI DMA
147 * @ap: ALi ATA port
148 * @adev: attached device
149 *
150 * Ensure that we do not do DMA on CD devices. We may be able to
151 * fix that later on. Also ensure we do not do UDMA on WDC drives
152 */
153
154static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
155{
156 char model_num[40];
157 /* No DMA on anything but a disk for now */
158 if (adev->class != ATA_DEV_ATA)
159 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
160 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
161 if (strstr(model_num, "WDC"))
162 return mask &= ~ATA_MASK_UDMA;
163 return ata_pci_default_filter(ap, adev, mask);
164}
165
166/**
167 * ali_fifo_control - FIFO manager
168 * @ap: ALi channel to control
169 * @adev: device for FIFO control
170 * @on: 0 for off 1 for on
171 *
172 * Enable or disable the FIFO on a given device. Because of the way the
173 * ALi FIFO works it provides a boost on ATA disk but can be confused by
174 * ATAPI and we must therefore manage it.
175 */
176
177static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
178{
179 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
180 int pio_fifo = 0x54 + ap->port_no;
181 u8 fifo;
182 int shift = 4 * adev->devno;
183
184 /* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to
185 0x00. Not all the docs agree but the behaviour we now use is the
186 one stated in the BIOS Programming Guide */
187
188 pci_read_config_byte(pdev, pio_fifo, &fifo);
189 fifo &= ~(0x0F << shift);
190 if (on)
191 fifo |= (on << shift);
192 pci_write_config_byte(pdev, pio_fifo, fifo);
193}
194
195/**
196 * ali_program_modes - load mode registers
197 * @ap: ALi channel to load
198 * @adev: Device the timing is for
199 * @cmd: Command timing
200 * @data: Data timing
201 * @ultra: UDMA timing or zero for off
202 *
203 * Loads the timing registers for cmd/data and disable UDMA if
204 * ultra is zero. If ultra is set then load and enable the UDMA
205 * timing but do not touch the command/data timing.
206 */
207
208static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
209{
210 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
211 int cas = 0x58 + 4 * ap->port_no; /* Command timing */
212 int cbt = 0x59 + 4 * ap->port_no; /* Command timing */
213 int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
214 int udmat = 0x56 + ap->port_no; /* UDMA timing */
215 int shift = 4 * adev->devno;
216 u8 udma;
217
218 if (t != NULL) {
219 t->setup = FIT(t->setup, 1, 8) & 7;
220 t->act8b = FIT(t->act8b, 1, 8) & 7;
221 t->rec8b = FIT(t->rec8b, 1, 16) & 15;
222 t->active = FIT(t->active, 1, 8) & 7;
223 t->recover = FIT(t->recover, 1, 16) & 15;
224
225 pci_write_config_byte(pdev, cas, t->setup);
226 pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
227 pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
228 }
229
230 /* Set up the UDMA enable */
231 pci_read_config_byte(pdev, udmat, &udma);
232 udma &= ~(0x0F << shift);
233 udma |= ultra << shift;
234 pci_write_config_byte(pdev, udmat, udma);
235}
236
237/**
238 * ali_set_piomode - set initial PIO mode data
239 * @ap: ATA interface
240 * @adev: ATA device
241 *
242 * Program the ALi registers for PIO mode. FIXME: add timings for
243 * PIO5.
244 */
245
246static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
247{
248 struct ata_device *pair = ata_dev_pair(adev);
249 struct ata_timing t;
250 unsigned long T = 1000000000 / 33333; /* PCI clock based */
251
252 ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
253 if (pair) {
254 struct ata_timing p;
255 ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
256 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
257 if (pair->dma_mode) {
258 ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
259 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
260 }
261 }
262
263 /* PIO FIFO is only permitted on ATA disk */
264 if (adev->class != ATA_DEV_ATA)
265 ali_fifo_control(ap, adev, 0x00);
266 ali_program_modes(ap, adev, &t, 0);
267 if (adev->class == ATA_DEV_ATA)
268 ali_fifo_control(ap, adev, 0x05);
269
270}
271
272/**
273 * ali_set_dmamode - set initial DMA mode data
274 * @ap: ATA interface
275 * @adev: ATA device
276 *
277 * FIXME: MWDMA timings
278 */
279
280static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
281{
282 static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
283 struct ata_device *pair = ata_dev_pair(adev);
284 struct ata_timing t;
285 unsigned long T = 1000000000 / 33333; /* PCI clock based */
286 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
287
288
289 if (adev->class == ATA_DEV_ATA)
290 ali_fifo_control(ap, adev, 0x08);
291
292 if (adev->dma_mode >= XFER_UDMA_0) {
293 ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
294 if (adev->dma_mode >= XFER_UDMA_3) {
295 u8 reg4b;
296 pci_read_config_byte(pdev, 0x4B, &reg4b);
297 reg4b |= 1;
298 pci_write_config_byte(pdev, 0x4B, reg4b);
299 }
300 } else {
301 ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
302 if (pair) {
303 struct ata_timing p;
304 ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
305 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
306 if (pair->dma_mode) {
307 ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
308 ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
309 }
310 }
311 ali_program_modes(ap, adev, &t, 0);
312 }
313}
314
315/**
316 * ali_lock_sectors - Keep older devices to 255 sector mode
317 * @ap: ATA port
318 * @adev: Device
319 *
320 * Called during the bus probe for each device that is found. We use
321 * this call to lock the sector count of the device to 255 or less on
322 * older ALi controllers. If we didn't do this then large I/O's would
 323 * require LBA48 commands, which the older ALi requires be issued via
 324 * slower PIO methods.
325 */
326
327static void ali_lock_sectors(struct ata_port *ap, struct ata_device *adev)
328{
329 adev->max_sectors = 255;
330}
331
332static struct scsi_host_template ali_sht = {
333 .module = THIS_MODULE,
334 .name = DRV_NAME,
335 .ioctl = ata_scsi_ioctl,
336 .queuecommand = ata_scsi_queuecmd,
337 .can_queue = ATA_DEF_QUEUE,
338 .this_id = ATA_SHT_THIS_ID,
339 .sg_tablesize = LIBATA_MAX_PRD,
 340 /* Keep LBA28 counts so large I/O's don't get turned into LBA48 commands,
 341 which older controllers must issue via slow PIO. Not locked so will grow on C5 or later */
342 .max_sectors = 255,
343 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
344 .emulated = ATA_SHT_EMULATED,
345 .use_clustering = ATA_SHT_USE_CLUSTERING,
346 .proc_name = DRV_NAME,
347 .dma_boundary = ATA_DMA_BOUNDARY,
348 .slave_configure = ata_scsi_slave_config,
349 .bios_param = ata_std_bios_param,
350};
351
352/*
353 * Port operations for PIO only ALi
354 */
355
356static struct ata_port_operations ali_early_port_ops = {
357 .port_disable = ata_port_disable,
358 .set_piomode = ali_set_piomode,
359 .tf_load = ata_tf_load,
360 .tf_read = ata_tf_read,
361 .check_status = ata_check_status,
362 .exec_command = ata_exec_command,
363 .dev_select = ata_std_dev_select,
364
365 .freeze = ata_bmdma_freeze,
366 .thaw = ata_bmdma_thaw,
367 .error_handler = ali_early_error_handler,
368 .post_internal_cmd = ata_bmdma_post_internal_cmd,
369
370 .qc_prep = ata_qc_prep,
371 .qc_issue = ata_qc_issue_prot,
372 .eng_timeout = ata_eng_timeout,
373 .data_xfer = ata_pio_data_xfer,
374
375 .irq_handler = ata_interrupt,
376 .irq_clear = ata_bmdma_irq_clear,
377
378 .port_start = ata_port_start,
379 .port_stop = ata_port_stop,
380 .host_stop = ata_host_stop
381};
382
383/*
384 * Port operations for DMA capable ALi without cable
385 * detect
386 */
387static struct ata_port_operations ali_20_port_ops = {
388 .port_disable = ata_port_disable,
389
390 .set_piomode = ali_set_piomode,
391 .set_dmamode = ali_set_dmamode,
392 .mode_filter = ali_20_filter,
393
394 .tf_load = ata_tf_load,
395 .tf_read = ata_tf_read,
396 .check_status = ata_check_status,
397 .exec_command = ata_exec_command,
398 .dev_select = ata_std_dev_select,
399 .dev_config = ali_lock_sectors,
400
401 .freeze = ata_bmdma_freeze,
402 .thaw = ata_bmdma_thaw,
403 .error_handler = ali_early_error_handler,
404 .post_internal_cmd = ata_bmdma_post_internal_cmd,
405
406 .bmdma_setup = ata_bmdma_setup,
407 .bmdma_start = ata_bmdma_start,
408 .bmdma_stop = ata_bmdma_stop,
409 .bmdma_status = ata_bmdma_status,
410
411 .qc_prep = ata_qc_prep,
412 .qc_issue = ata_qc_issue_prot,
413 .eng_timeout = ata_eng_timeout,
414 .data_xfer = ata_pio_data_xfer,
415
416 .irq_handler = ata_interrupt,
417 .irq_clear = ata_bmdma_irq_clear,
418
419 .port_start = ata_port_start,
420 .port_stop = ata_port_stop,
421 .host_stop = ata_host_stop
422};
423
424/*
425 * Port operations for DMA capable ALi with cable detect
426 */
427static struct ata_port_operations ali_c2_port_ops = {
428 .port_disable = ata_port_disable,
429 .set_piomode = ali_set_piomode,
430 .set_dmamode = ali_set_dmamode,
431 .mode_filter = ata_pci_default_filter,
432 .tf_load = ata_tf_load,
433 .tf_read = ata_tf_read,
434 .check_status = ata_check_status,
435 .exec_command = ata_exec_command,
436 .dev_select = ata_std_dev_select,
437 .dev_config = ali_lock_sectors,
438
439 .freeze = ata_bmdma_freeze,
440 .thaw = ata_bmdma_thaw,
441 .error_handler = ali_c2_error_handler,
442 .post_internal_cmd = ata_bmdma_post_internal_cmd,
443
444 .bmdma_setup = ata_bmdma_setup,
445 .bmdma_start = ata_bmdma_start,
446 .bmdma_stop = ata_bmdma_stop,
447 .bmdma_status = ata_bmdma_status,
448
449 .qc_prep = ata_qc_prep,
450 .qc_issue = ata_qc_issue_prot,
451 .eng_timeout = ata_eng_timeout,
452 .data_xfer = ata_pio_data_xfer,
453
454 .irq_handler = ata_interrupt,
455 .irq_clear = ata_bmdma_irq_clear,
456
457 .port_start = ata_port_start,
458 .port_stop = ata_port_stop,
459 .host_stop = ata_host_stop
460};
461
462/*
463 * Port operations for DMA capable ALi with cable detect and LBA48
464 */
465static struct ata_port_operations ali_c5_port_ops = {
466 .port_disable = ata_port_disable,
467 .set_piomode = ali_set_piomode,
468 .set_dmamode = ali_set_dmamode,
469 .mode_filter = ata_pci_default_filter,
470 .tf_load = ata_tf_load,
471 .tf_read = ata_tf_read,
472 .check_status = ata_check_status,
473 .exec_command = ata_exec_command,
474 .dev_select = ata_std_dev_select,
475
476 .freeze = ata_bmdma_freeze,
477 .thaw = ata_bmdma_thaw,
478 .error_handler = ali_c2_error_handler,
479 .post_internal_cmd = ata_bmdma_post_internal_cmd,
480
481 .bmdma_setup = ata_bmdma_setup,
482 .bmdma_start = ata_bmdma_start,
483 .bmdma_stop = ata_bmdma_stop,
484 .bmdma_status = ata_bmdma_status,
485
486 .qc_prep = ata_qc_prep,
487 .qc_issue = ata_qc_issue_prot,
488 .eng_timeout = ata_eng_timeout,
489 .data_xfer = ata_pio_data_xfer,
490
491 .irq_handler = ata_interrupt,
492 .irq_clear = ata_bmdma_irq_clear,
493
494 .port_start = ata_port_start,
495 .port_stop = ata_port_stop,
496 .host_stop = ata_host_stop
497};
498
499/**
500 * ali_init_one - discovery callback
501 * @pdev: PCI device ID
502 * @id: PCI table info
503 *
504 * An ALi IDE interface has been discovered. Figure out what revision
505 * and perform configuration work before handing it to the ATA layer
506 */
507
508static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
509{
510 static struct ata_port_info info_early = {
511 .sht = &ali_sht,
512 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
513 .pio_mask = 0x1f,
514 .port_ops = &ali_early_port_ops
515 };
516 /* Revision 0x20 added DMA */
517 static struct ata_port_info info_20 = {
518 .sht = &ali_sht,
519 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
520 .pio_mask = 0x1f,
521 .mwdma_mask = 0x07,
522 .port_ops = &ali_20_port_ops
523 };
524 /* Revision 0x20 with support logic added UDMA */
525 static struct ata_port_info info_20_udma = {
526 .sht = &ali_sht,
527 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
528 .pio_mask = 0x1f,
529 .mwdma_mask = 0x07,
530 .udma_mask = 0x07, /* UDMA33 */
531 .port_ops = &ali_20_port_ops
532 };
533 /* Revision 0xC2 adds UDMA66 */
534 static struct ata_port_info info_c2 = {
535 .sht = &ali_sht,
536 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
537 .pio_mask = 0x1f,
538 .mwdma_mask = 0x07,
539 .udma_mask = 0x1f,
540 .port_ops = &ali_c2_port_ops
541 };
542 /* Revision 0xC3 is UDMA100 */
543 static struct ata_port_info info_c3 = {
544 .sht = &ali_sht,
545 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
546 .pio_mask = 0x1f,
547 .mwdma_mask = 0x07,
548 .udma_mask = 0x3f,
549 .port_ops = &ali_c2_port_ops
550 };
551 /* Revision 0xC4 is UDMA133 */
552 static struct ata_port_info info_c4 = {
553 .sht = &ali_sht,
554 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
555 .pio_mask = 0x1f,
556 .mwdma_mask = 0x07,
557 .udma_mask = 0x7f,
558 .port_ops = &ali_c2_port_ops
559 };
560 /* Revision 0xC5 is UDMA133 with LBA48 DMA */
561 static struct ata_port_info info_c5 = {
562 .sht = &ali_sht,
563 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
564 .pio_mask = 0x1f,
565 .mwdma_mask = 0x07,
566 .udma_mask = 0x7f,
567 .port_ops = &ali_c5_port_ops
568 };
569
570 static struct ata_port_info *port_info[2];
571 u8 rev, tmp;
572 struct pci_dev *north, *isa_bridge;
573
574 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
575
576 /*
577 * The chipset revision selects the driver operations and
578 * mode data.
579 */
580
581 if (rev < 0x20) {
582 port_info[0] = port_info[1] = &info_early;
583 } else if (rev < 0xC2) {
584 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
585 pci_read_config_byte(pdev, 0x4B, &tmp);
586 /* Clear CD-ROM DMA write bit */
587 tmp &= 0x7F;
588 pci_write_config_byte(pdev, 0x4B, tmp);
589 port_info[0] = port_info[1] = &info_20;
590 } else if (rev == 0xC2) {
591 port_info[0] = port_info[1] = &info_c2;
592 } else if (rev == 0xC3) {
593 port_info[0] = port_info[1] = &info_c3;
594 } else if (rev == 0xC4) {
595 port_info[0] = port_info[1] = &info_c4;
596 } else
597 port_info[0] = port_info[1] = &info_c5;
598
599 if (rev >= 0xC2) {
600 /* Enable cable detection logic */
601 pci_read_config_byte(pdev, 0x4B, &tmp);
602 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
603 }
604
605 north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
606 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
607
608 if (north && north->vendor == PCI_VENDOR_ID_AL) {
609 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
610 Set the south bridge enable bit */
611 pci_read_config_byte(isa_bridge, 0x79, &tmp);
612 if (rev == 0xC2)
613 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
614 else if (rev > 0xC2)
615 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
616 }
617
618 if (rev >= 0x20) {
619 if (rev < 0xC2) {
620 /* Are we paired with a UDMA capable chip */
621 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
622 if ((tmp & 0x1E) == 0x12)
623 port_info[0] = port_info[1] = &info_20_udma;
624 }
625 /*
626 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
627 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
628 * via 0x54/55.
629 */
630 pci_read_config_byte(pdev, 0x53, &tmp);
631 if (rev <= 0x20)
632 tmp &= ~0x02;
633 if (rev == 0xc7)
634 tmp |= 0x03;
635 else
636 tmp |= 0x01; /* CD_ROM enable for DMA */
637 pci_write_config_byte(pdev, 0x53, tmp);
638 }
639
640 pci_dev_put(isa_bridge);
641 pci_dev_put(north);
642
643 ata_pci_clear_simplex(pdev);
644 return ata_pci_init_one(pdev, port_info, 2);
645}
646
647static struct pci_device_id ali[] = {
648 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5228), },
649 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229), },
650 { 0, },
651};
652
653static struct pci_driver ali_pci_driver = {
654 .name = DRV_NAME,
655 .id_table = ali,
656 .probe = ali_init_one,
657 .remove = ata_pci_remove_one
658};
659
660static int __init ali_init(void)
661{
662 return pci_register_driver(&ali_pci_driver);
663}
664
665
666static void __exit ali_exit(void)
667{
668 pci_unregister_driver(&ali_pci_driver);
669}
670
671
672MODULE_AUTHOR("Alan Cox");
673MODULE_DESCRIPTION("low-level driver for ALi PATA");
674MODULE_LICENSE("GPL");
675MODULE_DEVICE_TABLE(pci, ali);
676MODULE_VERSION(DRV_VERSION);
677
678module_init(ali_init);
679module_exit(ali_exit);
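
As a side note on ali_fifo_control() in the file above: the FIFO setting is a 4-bit field per device in PCI config byte 0x54 + port (0x05 turns the FIFO on for an ATA disk, 0x00 turns it off for ATAPI devices that the FIFO confuses). The helper below is an invented illustration of just that nibble update, not code from the driver:

/* Illustrative only: the nibble arithmetic ali_fifo_control() performs
 * on the config byte before writing it back. */
static u8 ali_fifo_merge(u8 old, int devno, u8 setting)
{
	int shift = 4 * devno;

	old &= ~(0x0F << shift);		/* clear this device's nibble */
	old |= (setting & 0x0F) << shift;	/* install the new setting   */
	return old;
}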
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
new file mode 100644
index 000000000000..3293cf9a7eb5
--- /dev/null
+++ b/drivers/ata/pata_amd.c
@@ -0,0 +1,718 @@
1/*
2 * pata_amd.c - AMD PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on pata-sil680. Errata information is taken from data sheets
7 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
8 * claimed by sata-nv.c.
9 *
10 * TODO:
11 * Variable system clock when/if it makes sense
12 * Power management on ports
13 *
14 *
 15 * Documentation publicly available.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_amd"
28#define DRV_VERSION "0.2.3"
29
30/**
31 * timing_setup - shared timing computation and load
32 * @ap: ATA port being set up
33 * @adev: drive being configured
34 * @offset: port offset
35 * @speed: target speed
36 * @clock: clock multiplier (number of times 33MHz for this part)
37 *
38 * Perform the actual timing set up for Nvidia or AMD PATA devices.
39 * The actual devices vary so they all call into this helper function
 40 * providing the clock multiplier and offset (because AMD and Nvidia put
41 * the ports at different locations).
42 */
43
44static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
45{
46 static const unsigned char amd_cyc2udma[] = {
47 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
48 };
49
50 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
51 struct ata_device *peer = ata_dev_pair(adev);
52 int dn = ap->port_no * 2 + adev->devno;
53 struct ata_timing at, apeer;
54 int T, UT;
55 const int amd_clock = 33333; /* KHz. */
56 u8 t;
57
58 T = 1000000000 / amd_clock;
59 UT = T / min_t(int, max_t(int, clock, 1), 2);
60
61 if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
62 dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
63 return;
64 }
65
66 if (peer) {
67 /* This may be over conservative */
68 if (peer->dma_mode) {
69 ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
70 ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
71 }
72 ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
73 ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
74 }
75
76 if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
77 if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
78
79 /*
80 * Now do the setup work
81 */
82
83 /* Configure the address set up timing */
84 pci_read_config_byte(pdev, offset + 0x0C, &t);
85 t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
86 pci_write_config_byte(pdev, offset + 0x0C , t);
87
88 /* Configure the 8bit I/O timing */
89 pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
90 ((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
91
92 /* Drive timing */
93 pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
94 ((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
95
96 switch (clock) {
97 case 1:
98 t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
99 break;
100
101 case 2:
102 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
103 break;
104
105 case 3:
106 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
107 break;
108
109 case 4:
110 t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
111 break;
112
113 default:
114 return;
115 }
116
117 /* UDMA timing */
118 pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
119}
120
121/**
 122 * amd_pre_reset - cable detection
123 * @ap: ATA port
124 *
125 * Perform cable detection. The BIOS stores this in PCI config
126 * space for us.
127 */
128
129static int amd_pre_reset(struct ata_port *ap)
130{
131 static const u32 bitmask[2] = {0x03, 0xC0};
132 static const struct pci_bits amd_enable_bits[] = {
133 { 0x40, 1, 0x02, 0x02 },
134 { 0x40, 1, 0x01, 0x01 }
135 };
136
137 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
138 u8 ata66;
139
140 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
141 ata_port_disable(ap);
142 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
143 return 0;
144 }
145
146 pci_read_config_byte(pdev, 0x42, &ata66);
147 if (ata66 & bitmask[ap->port_no])
148 ap->cbl = ATA_CBL_PATA80;
149 else
150 ap->cbl = ATA_CBL_PATA40;
151 return ata_std_prereset(ap);
152
153}
154
155static void amd_error_handler(struct ata_port *ap)
156{
157 return ata_bmdma_drive_eh(ap, amd_pre_reset,
158 ata_std_softreset, NULL,
159 ata_std_postreset);
160}
161
162static int amd_early_pre_reset(struct ata_port *ap)
163{
164 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
165 static struct pci_bits amd_enable_bits[] = {
166 { 0x40, 1, 0x02, 0x02 },
167 { 0x40, 1, 0x01, 0x01 }
168 };
169
170 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
171 ata_port_disable(ap);
172 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
173 return 0;
174 }
175 /* No host side cable detection */
176 ap->cbl = ATA_CBL_PATA80;
177 return ata_std_prereset(ap);
178
179}
180
181static void amd_early_error_handler(struct ata_port *ap)
182{
183 ata_bmdma_drive_eh(ap, amd_early_pre_reset,
184 ata_std_softreset, NULL,
185 ata_std_postreset);
186}
187
188/**
189 * amd33_set_piomode - set initial PIO mode data
190 * @ap: ATA interface
191 * @adev: ATA device
192 *
193 * Program the AMD registers for PIO mode.
194 */
195
196static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
197{
198 timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
199}
200
201static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
202{
203 timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
204}
205
206static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
207{
208 timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
209}
210
211static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
212{
213 timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
214}
215
216/**
217 * amd33_set_dmamode - set initial DMA mode data
218 * @ap: ATA interface
219 * @adev: ATA device
220 *
221 * Program the MWDMA/UDMA modes for the AMD and Nvidia
222 * chipset.
223 */
224
225static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
226{
227 timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
228}
229
230static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
231{
232 timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
233}
234
235static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
236{
237 timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
238}
239
240static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
241{
242 timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
243}
244
245
246/**
 247 * nv_pre_reset - cable detection
248 * @ap: ATA port
249 *
250 * Perform cable detection. The BIOS stores this in PCI config
251 * space for us.
252 */
253
254static int nv_pre_reset(struct ata_port *ap) {
255 static const u8 bitmask[2] = {0x03, 0xC0};
256 static const struct pci_bits nv_enable_bits[] = {
257 { 0x50, 1, 0x02, 0x02 },
258 { 0x50, 1, 0x01, 0x01 }
259 };
260
261 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
262 u8 ata66;
263 u16 udma;
264
265 if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) {
266 ata_port_disable(ap);
267 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
268 return 0;
269 }
270
271
272 pci_read_config_byte(pdev, 0x52, &ata66);
273 if (ata66 & bitmask[ap->port_no])
274 ap->cbl = ATA_CBL_PATA80;
275 else
276 ap->cbl = ATA_CBL_PATA40;
277
278	/* We now have to double check because the Nvidia boxes' BIOS
279 doesn't always set the cable bits but does set mode bits */
280
281 pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
282 if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
283 ap->cbl = ATA_CBL_PATA80;
284 return ata_std_prereset(ap);
285}
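/*
 * Sketch of the fallback above, assuming one byte of BIOS UDMA setup per
 * drive in the word at 0x62 (primary) or 0x60 (secondary): if either byte
 * carries the 0xC4 pattern (UDMA enabled at a cable-sensitive mode) we
 * trust that over the cable bits in 0x52, since some Nvidia BIOSes program
 * the modes but never fill in the cable register.
 */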
286
287static void nv_error_handler(struct ata_port *ap)
288{
289 ata_bmdma_drive_eh(ap, nv_pre_reset,
290 ata_std_softreset, NULL,
291 ata_std_postreset);
292}
293/**
294 * nv100_set_piomode - set initial PIO mode data
295 * @ap: ATA interface
296 * @adev: ATA device
297 *
298 * Program the AMD registers for PIO mode.
299 */
300
301static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
302{
303 timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
304}
305
306static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
307{
308 timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
309}
310
311/**
312 * nv100_set_dmamode - set initial DMA mode data
313 * @ap: ATA interface
314 * @adev: ATA device
315 *
316 * Program the MWDMA/UDMA modes for the AMD and Nvidia
317 * chipset.
318 */
319
320static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
321{
322 timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
323}
324
325static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
326{
327 timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
328}
329
330static struct scsi_host_template amd_sht = {
331 .module = THIS_MODULE,
332 .name = DRV_NAME,
333 .ioctl = ata_scsi_ioctl,
334 .queuecommand = ata_scsi_queuecmd,
335 .can_queue = ATA_DEF_QUEUE,
336 .this_id = ATA_SHT_THIS_ID,
337 .sg_tablesize = LIBATA_MAX_PRD,
338 .max_sectors = ATA_MAX_SECTORS,
339 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
340 .emulated = ATA_SHT_EMULATED,
341 .use_clustering = ATA_SHT_USE_CLUSTERING,
342 .proc_name = DRV_NAME,
343 .dma_boundary = ATA_DMA_BOUNDARY,
344 .slave_configure = ata_scsi_slave_config,
345 .bios_param = ata_std_bios_param,
346};
347
348static struct ata_port_operations amd33_port_ops = {
349 .port_disable = ata_port_disable,
350 .set_piomode = amd33_set_piomode,
351 .set_dmamode = amd33_set_dmamode,
352 .mode_filter = ata_pci_default_filter,
353 .tf_load = ata_tf_load,
354 .tf_read = ata_tf_read,
355 .check_status = ata_check_status,
356 .exec_command = ata_exec_command,
357 .dev_select = ata_std_dev_select,
358
359 .freeze = ata_bmdma_freeze,
360 .thaw = ata_bmdma_thaw,
361 .error_handler = amd_early_error_handler,
362 .post_internal_cmd = ata_bmdma_post_internal_cmd,
363
364 .bmdma_setup = ata_bmdma_setup,
365 .bmdma_start = ata_bmdma_start,
366 .bmdma_stop = ata_bmdma_stop,
367 .bmdma_status = ata_bmdma_status,
368
369 .qc_prep = ata_qc_prep,
370 .qc_issue = ata_qc_issue_prot,
371 .eng_timeout = ata_eng_timeout,
372 .data_xfer = ata_pio_data_xfer,
373
374 .irq_handler = ata_interrupt,
375 .irq_clear = ata_bmdma_irq_clear,
376
377 .port_start = ata_port_start,
378 .port_stop = ata_port_stop,
379 .host_stop = ata_host_stop
380};
381
382static struct ata_port_operations amd66_port_ops = {
383 .port_disable = ata_port_disable,
384 .set_piomode = amd66_set_piomode,
385 .set_dmamode = amd66_set_dmamode,
386 .mode_filter = ata_pci_default_filter,
387 .tf_load = ata_tf_load,
388 .tf_read = ata_tf_read,
389 .check_status = ata_check_status,
390 .exec_command = ata_exec_command,
391 .dev_select = ata_std_dev_select,
392
393 .freeze = ata_bmdma_freeze,
394 .thaw = ata_bmdma_thaw,
395 .error_handler = amd_early_error_handler,
396 .post_internal_cmd = ata_bmdma_post_internal_cmd,
397
398 .bmdma_setup = ata_bmdma_setup,
399 .bmdma_start = ata_bmdma_start,
400 .bmdma_stop = ata_bmdma_stop,
401 .bmdma_status = ata_bmdma_status,
402
403 .qc_prep = ata_qc_prep,
404 .qc_issue = ata_qc_issue_prot,
405 .eng_timeout = ata_eng_timeout,
406 .data_xfer = ata_pio_data_xfer,
407
408 .irq_handler = ata_interrupt,
409 .irq_clear = ata_bmdma_irq_clear,
410
411 .port_start = ata_port_start,
412 .port_stop = ata_port_stop,
413 .host_stop = ata_host_stop
414};
415
416static struct ata_port_operations amd100_port_ops = {
417 .port_disable = ata_port_disable,
418 .set_piomode = amd100_set_piomode,
419 .set_dmamode = amd100_set_dmamode,
420 .mode_filter = ata_pci_default_filter,
421 .tf_load = ata_tf_load,
422 .tf_read = ata_tf_read,
423 .check_status = ata_check_status,
424 .exec_command = ata_exec_command,
425 .dev_select = ata_std_dev_select,
426
427 .freeze = ata_bmdma_freeze,
428 .thaw = ata_bmdma_thaw,
429 .error_handler = amd_error_handler,
430 .post_internal_cmd = ata_bmdma_post_internal_cmd,
431
432 .bmdma_setup = ata_bmdma_setup,
433 .bmdma_start = ata_bmdma_start,
434 .bmdma_stop = ata_bmdma_stop,
435 .bmdma_status = ata_bmdma_status,
436
437 .qc_prep = ata_qc_prep,
438 .qc_issue = ata_qc_issue_prot,
439 .eng_timeout = ata_eng_timeout,
440 .data_xfer = ata_pio_data_xfer,
441
442 .irq_handler = ata_interrupt,
443 .irq_clear = ata_bmdma_irq_clear,
444
445 .port_start = ata_port_start,
446 .port_stop = ata_port_stop,
447 .host_stop = ata_host_stop
448};
449
450static struct ata_port_operations amd133_port_ops = {
451 .port_disable = ata_port_disable,
452 .set_piomode = amd133_set_piomode,
453 .set_dmamode = amd133_set_dmamode,
454 .mode_filter = ata_pci_default_filter,
455 .tf_load = ata_tf_load,
456 .tf_read = ata_tf_read,
457 .check_status = ata_check_status,
458 .exec_command = ata_exec_command,
459 .dev_select = ata_std_dev_select,
460
461 .freeze = ata_bmdma_freeze,
462 .thaw = ata_bmdma_thaw,
463 .error_handler = amd_error_handler,
464 .post_internal_cmd = ata_bmdma_post_internal_cmd,
465
466 .bmdma_setup = ata_bmdma_setup,
467 .bmdma_start = ata_bmdma_start,
468 .bmdma_stop = ata_bmdma_stop,
469 .bmdma_status = ata_bmdma_status,
470
471 .qc_prep = ata_qc_prep,
472 .qc_issue = ata_qc_issue_prot,
473 .eng_timeout = ata_eng_timeout,
474 .data_xfer = ata_pio_data_xfer,
475
476 .irq_handler = ata_interrupt,
477 .irq_clear = ata_bmdma_irq_clear,
478
479 .port_start = ata_port_start,
480 .port_stop = ata_port_stop,
481 .host_stop = ata_host_stop
482};
483
484static struct ata_port_operations nv100_port_ops = {
485 .port_disable = ata_port_disable,
486 .set_piomode = nv100_set_piomode,
487 .set_dmamode = nv100_set_dmamode,
488 .mode_filter = ata_pci_default_filter,
489 .tf_load = ata_tf_load,
490 .tf_read = ata_tf_read,
491 .check_status = ata_check_status,
492 .exec_command = ata_exec_command,
493 .dev_select = ata_std_dev_select,
494
495 .freeze = ata_bmdma_freeze,
496 .thaw = ata_bmdma_thaw,
497 .error_handler = nv_error_handler,
498 .post_internal_cmd = ata_bmdma_post_internal_cmd,
499
500 .bmdma_setup = ata_bmdma_setup,
501 .bmdma_start = ata_bmdma_start,
502 .bmdma_stop = ata_bmdma_stop,
503 .bmdma_status = ata_bmdma_status,
504
505 .qc_prep = ata_qc_prep,
506 .qc_issue = ata_qc_issue_prot,
507 .eng_timeout = ata_eng_timeout,
508 .data_xfer = ata_pio_data_xfer,
509
510 .irq_handler = ata_interrupt,
511 .irq_clear = ata_bmdma_irq_clear,
512
513 .port_start = ata_port_start,
514 .port_stop = ata_port_stop,
515 .host_stop = ata_host_stop
516};
517
518static struct ata_port_operations nv133_port_ops = {
519 .port_disable = ata_port_disable,
520 .set_piomode = nv133_set_piomode,
521 .set_dmamode = nv133_set_dmamode,
522 .mode_filter = ata_pci_default_filter,
523 .tf_load = ata_tf_load,
524 .tf_read = ata_tf_read,
525 .check_status = ata_check_status,
526 .exec_command = ata_exec_command,
527 .dev_select = ata_std_dev_select,
528
529 .freeze = ata_bmdma_freeze,
530 .thaw = ata_bmdma_thaw,
531 .error_handler = nv_error_handler,
532 .post_internal_cmd = ata_bmdma_post_internal_cmd,
533
534 .bmdma_setup = ata_bmdma_setup,
535 .bmdma_start = ata_bmdma_start,
536 .bmdma_stop = ata_bmdma_stop,
537 .bmdma_status = ata_bmdma_status,
538
539 .qc_prep = ata_qc_prep,
540 .qc_issue = ata_qc_issue_prot,
541 .eng_timeout = ata_eng_timeout,
542 .data_xfer = ata_pio_data_xfer,
543
544 .irq_handler = ata_interrupt,
545 .irq_clear = ata_bmdma_irq_clear,
546
547 .port_start = ata_port_start,
548 .port_stop = ata_port_stop,
549 .host_stop = ata_host_stop
550};
551
552static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
553{
554 static struct ata_port_info info[10] = {
555 { /* 0: AMD 7401 */
556 .sht = &amd_sht,
557 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
558 .pio_mask = 0x1f,
559 .mwdma_mask = 0x07, /* No SWDMA */
560 .udma_mask = 0x07, /* UDMA 33 */
561 .port_ops = &amd33_port_ops
562 },
563 { /* 1: Early AMD7409 - no swdma */
564 .sht = &amd_sht,
565 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
566 .pio_mask = 0x1f,
567 .mwdma_mask = 0x07,
568 .udma_mask = 0x1f, /* UDMA 66 */
569 .port_ops = &amd66_port_ops
570 },
571 { /* 2: AMD 7409, no swdma errata */
572 .sht = &amd_sht,
573 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
574 .pio_mask = 0x1f,
575 .mwdma_mask = 0x07,
576 .udma_mask = 0x1f, /* UDMA 66 */
577 .port_ops = &amd66_port_ops
578 },
579 { /* 3: AMD 7411 */
580 .sht = &amd_sht,
581 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
582 .pio_mask = 0x1f,
583 .mwdma_mask = 0x07,
584 .udma_mask = 0x3f, /* UDMA 100 */
585 .port_ops = &amd100_port_ops
586 },
587 { /* 4: AMD 7441 */
588 .sht = &amd_sht,
589 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
590 .pio_mask = 0x1f,
591 .mwdma_mask = 0x07,
592 .udma_mask = 0x3f, /* UDMA 100 */
593 .port_ops = &amd100_port_ops
594 },
595 { /* 5: AMD 8111*/
596 .sht = &amd_sht,
597 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
598 .pio_mask = 0x1f,
599 .mwdma_mask = 0x07,
600 .udma_mask = 0x7f, /* UDMA 133, no swdma */
601 .port_ops = &amd133_port_ops
602 },
603 { /* 6: AMD 8111 UDMA 100 (Serenade) */
604 .sht = &amd_sht,
605 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
606 .pio_mask = 0x1f,
607 .mwdma_mask = 0x07,
608 .udma_mask = 0x3f, /* UDMA 100, no swdma */
609 .port_ops = &amd133_port_ops
610 },
611 { /* 7: Nvidia Nforce */
612 .sht = &amd_sht,
613 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
614 .pio_mask = 0x1f,
615 .mwdma_mask = 0x07,
616 .udma_mask = 0x3f, /* UDMA 100 */
617 .port_ops = &nv100_port_ops
618 },
619 { /* 8: Nvidia Nforce2 and later */
620 .sht = &amd_sht,
621 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
622 .pio_mask = 0x1f,
623 .mwdma_mask = 0x07,
624 .udma_mask = 0x7f, /* UDMA 133, no swdma */
625 .port_ops = &nv133_port_ops
626 },
627 { /* 9: AMD CS5536 (Geode companion) */
628 .sht = &amd_sht,
629 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
630 .pio_mask = 0x1f,
631 .mwdma_mask = 0x07,
632 .udma_mask = 0x3f, /* UDMA 100 */
633 .port_ops = &amd100_port_ops
634 }
635 };
636 static struct ata_port_info *port_info[2];
637 static int printed_version;
638 int type = id->driver_data;
639 u8 rev;
640 u8 fifo;
641
642 if (!printed_version++)
643 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
644
645 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
646 pci_read_config_byte(pdev, 0x41, &fifo);
647
648 /* Check for AMD7409 without swdma errata and if found adjust type */
649 if (type == 1 && rev > 0x7)
650 type = 2;
651
652 /* Check for AMD7411 */
653 if (type == 3)
654 /* FIFO is broken */
655 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
656 else
657 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
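	/*
	 * The top nibble of register 0x41 is presumably the per-drive FIFO
	 * enable bits: cleared above for the 7411, whose FIFO is broken,
	 * and forced on for every other supported chip.
	 */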
658
659 /* Serenade ? */
660 if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
661 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
662 type = 6; /* UDMA 100 only */
663
664 if (type < 3)
665 ata_pci_clear_simplex(pdev);
666
667 /* And fire it up */
668
669 port_info[0] = port_info[1] = &info[type];
670 return ata_pci_init_one(pdev, port_info, 2);
671}
672
673static const struct pci_device_id amd[] = {
674 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_COBRA_7401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
675 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
676 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
677 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
678 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
679 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
680 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
681 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
682 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
683 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
684 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
685 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
686 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
687 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
688 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
689 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
690 { 0, },
691};
692
693static struct pci_driver amd_pci_driver = {
694 .name = DRV_NAME,
695 .id_table = amd,
696 .probe = amd_init_one,
697 .remove = ata_pci_remove_one
698};
699
700static int __init amd_init(void)
701{
702 return pci_register_driver(&amd_pci_driver);
703}
704
705static void __exit amd_exit(void)
706{
707 pci_unregister_driver(&amd_pci_driver);
708}
709
710
711MODULE_AUTHOR("Alan Cox");
712MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
713MODULE_LICENSE("GPL");
714MODULE_DEVICE_TABLE(pci, amd);
715MODULE_VERSION(DRV_VERSION);
716
717module_init(amd_init);
718module_exit(amd_exit);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
new file mode 100644
index 000000000000..d6ef3bf1bac7
--- /dev/null
+++ b/drivers/ata/pata_artop.c
@@ -0,0 +1,518 @@
1/*
2 * pata_artop.c - ARTOP ATA controller driver
3 *
4 * (C) 2006 Red Hat <alan@redhat.com>
5 *
6 * Based in part on drivers/ide/pci/aec62xx.c
7 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
8 * 865/865R fixes for Macintosh card version from a patch to the old
9 * driver by Thibaut VARENE <varenet@parisc-linux.org>
10 * When setting the PCI latency we must set 0x80 or higher for burst
11 * performance Alessandro Zummo <alessandro.zummo@towertech.it>
12 *
13 * TODO
14 * 850 serialization once the core supports it
15 * Investigate no_dsc on 850R
16 * Clock detect
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/pci.h>
22#include <linux/init.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/device.h>
26#include <scsi/scsi_host.h>
27#include <linux/libata.h>
28#include <linux/ata.h>
29
30#define DRV_NAME "pata_artop"
31#define DRV_VERSION "0.4.1"
32
33/*
34 * The ARTOP has 33 MHz and "over clocked" timing tables. Until we
35 * get PCI bus speed functionality we leave this as 0. It's a variable
36 * for when we get the functionality and also for folks wanting to
37 * test stuff.
38 */
39
40static int clock = 0;
41
42static int artop6210_pre_reset(struct ata_port *ap)
43{
44 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
45 const struct pci_bits artop_enable_bits[] = {
46 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
47 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
48 };
49
50 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
51 ata_port_disable(ap);
52 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
53 return 0;
54 }
55 ap->cbl = ATA_CBL_PATA40;
56 return ata_std_prereset(ap);
57}
58
59/**
60 * artop6210_error_handler - Probe specified port on PATA host controller
61 * @ap: Port to probe
62 *
63 * LOCKING:
64 * None (inherited from caller).
65 */
66
67static void artop6210_error_handler(struct ata_port *ap)
68{
69 ata_bmdma_drive_eh(ap, artop6210_pre_reset,
70 ata_std_softreset, NULL,
71 ata_std_postreset);
72}
73
74/**
75 * artop6260_pre_reset - check for 40/80 pin
76 * @ap: Port
77 *
78 * The ARTOP hardware reports the cable detect bits in register 0x49.
79 * Nothing complicated needed here.
80 */
81
82static int artop6260_pre_reset(struct ata_port *ap)
83{
84 static const struct pci_bits artop_enable_bits[] = {
85 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
86 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
87 };
88
89 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
90 u8 tmp;
91
92 /* Odd numbered device ids are the units with enable bits (the -R cards) */
93	if (pdev->device % 2 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
94 ata_port_disable(ap);
95 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
96 return 0;
97 }
98 pci_read_config_byte(pdev, 0x49, &tmp);
99	if (tmp & (1 << ap->port_no))
100 ap->cbl = ATA_CBL_PATA40;
101 else
102 ap->cbl = ATA_CBL_PATA80;
103 return ata_std_prereset(ap);
104}
105
106/**
107 * artop6260_error_handler - Probe specified port on PATA host controller
108 * @ap: Port to probe
109 *
110 * LOCKING:
111 * None (inherited from caller).
112 */
113
114static void artop6260_error_handler(struct ata_port *ap)
115{
116 ata_bmdma_drive_eh(ap, artop6260_pre_reset,
117 ata_std_softreset, NULL,
118 ata_std_postreset);
119}
120
121/**
122 * artop6210_load_piomode - Load a set of PATA PIO timings
123 * @ap: Port whose timings we are configuring
124 * @adev: Device
125 * @pio: PIO mode
126 *
127 * Set PIO mode for device, in host controller PCI config space. This
128 * is used both to set PIO timings in PIO mode and also to set the
129 * matching PIO clocking for UDMA, as well as the MWDMA timings.
130 *
131 * LOCKING:
132 * None (inherited from caller).
133 */
134
135static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
136{
137 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
138 int dn = adev->devno + 2 * ap->port_no;
139 const u16 timing[2][5] = {
140 { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
141 { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
142
143 };
144 /* Load the PIO timing active/recovery bits */
145 pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
146}
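/*
 * Layout implied by the write above: dn runs 0-3 (primary master, primary
 * slave, secondary master, secondary slave), so the per-drive timing words
 * sit at 0x40, 0x42, 0x44 and 0x46. For example, a PIO4 drive as secondary
 * master with clock == 0 gets 0x0301 written to config offset 0x44.
 */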
147
148/**
149 * artop6210_set_piomode - Initialize host controller PATA PIO timings
150 * @ap: Port whose timings we are configuring
151 * @adev: Device we are configuring
152 *
153 * Set PIO mode for device, in host controller PCI config space. For
154 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
155 * the event UDMA is used the later call to set_dmamode will set the
156 * bits as required.
157 *
158 * LOCKING:
159 * None (inherited from caller).
160 */
161
162static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
163{
164 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
165 int dn = adev->devno + 2 * ap->port_no;
166 u8 ultra;
167
168 artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
169
170 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
171 pci_read_config_byte(pdev, 0x54, &ultra);
172 ultra &= ~(3 << (2 * dn));
173 pci_write_config_byte(pdev, 0x54, ultra);
174}
175
176/**
177 * artop6260_load_piomode - Initialize host controller PATA PIO timings
178 * @ap: Port whose timings we are configuring
179 * @adev: Device we are configuring
180 * @pio: PIO mode
181 *
182 * Set PIO mode for device, in host controller PCI config space. The
183 * ARTOP6260 and relatives store the timing data differently.
184 *
185 * LOCKING:
186 * None (inherited from caller).
187 */
188
189static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
190{
191 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
192 int dn = adev->devno + 2 * ap->port_no;
193 const u8 timing[2][5] = {
194 { 0x00, 0x0A, 0x08, 0x33, 0x31 },
195 { 0x70, 0x7A, 0x78, 0x43, 0x41 }
196
197 };
198 /* Load the PIO timing active/recovery bits */
199 pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
200}
201
202/**
203 * artop6260_set_piomode - Initialize host controller PATA PIO timings
204 * @ap: Port whose timings we are configuring
205 * @adev: Device we are configuring
206 *
207 * Set PIO mode for device, in host controller PCI config space. For
208 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
209 * the event UDMA is used the later call to set_dmamode will set the
210 * bits as required.
211 *
212 * LOCKING:
213 * None (inherited from caller).
214 */
215
216static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
217{
218 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
219 u8 ultra;
220
221 artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
222
223 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
224 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
225 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
226 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
227}
228
229/**
230 *	artop6210_set_dmamode - Initialize host controller PATA DMA timings
231 *	@ap: Port whose timings we are configuring
232 *	@adev: Device we are configuring
233 *
234 * Set DMA mode for device, in host controller PCI config space.
235 *
236 * LOCKING:
237 * None (inherited from caller).
238 */
239
240static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
241{
242 unsigned int pio;
243 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
244 int dn = adev->devno + 2 * ap->port_no;
245 u8 ultra;
246
247 if (adev->dma_mode == XFER_MW_DMA_0)
248 pio = 1;
249 else
250 pio = 4;
251
252 /* Load the PIO timing active/recovery bits */
253 artop6210_load_piomode(ap, adev, pio);
254
255 pci_read_config_byte(pdev, 0x54, &ultra);
256 ultra &= ~(3 << (2 * dn));
257
258 /* Add ultra DMA bits if in UDMA mode */
259 if (adev->dma_mode >= XFER_UDMA_0) {
260 u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
261 if (mode == 0)
262 mode = 1;
263 ultra |= (mode << (2 * dn));
264 }
265 pci_write_config_byte(pdev, 0x54, ultra);
266}
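/*
 * The UDMA field at 0x54 is two bits per drive: the value written is
 * (UDMA mode + 1 - clock), floored at 1, so with clock == 0 a UDMA2 drive
 * at dn == 1 contributes 3 << 2, i.e. 0x0C.
 */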
267
268/**
269 *	artop6260_set_dmamode - Initialize host controller PATA DMA timings
270 * @ap: Port whose timings we are configuring
271 * @adev: Device we are configuring
272 *
273 * Set DMA mode for device, in host controller PCI config space. The
274 * ARTOP6260 and relatives store the timing data differently.
275 *
276 * LOCKING:
277 * None (inherited from caller).
278 */
279
280static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
281{
282	unsigned int pio;
283 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
284 u8 ultra;
285
286 if (adev->dma_mode == XFER_MW_DMA_0)
287 pio = 1;
288 else
289 pio = 4;
290
291 /* Load the PIO timing active/recovery bits */
292 artop6260_load_piomode(ap, adev, pio);
293
294 /* Add ultra DMA bits if in UDMA mode */
295 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
296 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
297 if (adev->dma_mode >= XFER_UDMA_0) {
298 u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
299 if (mode == 0)
300 mode = 1;
301 ultra |= (mode << (4 * adev->devno));
302 }
303 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
304}
305
306static struct scsi_host_template artop_sht = {
307 .module = THIS_MODULE,
308 .name = DRV_NAME,
309 .ioctl = ata_scsi_ioctl,
310 .queuecommand = ata_scsi_queuecmd,
311 .can_queue = ATA_DEF_QUEUE,
312 .this_id = ATA_SHT_THIS_ID,
313 .sg_tablesize = LIBATA_MAX_PRD,
314 .max_sectors = ATA_MAX_SECTORS,
315 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
316 .emulated = ATA_SHT_EMULATED,
317 .use_clustering = ATA_SHT_USE_CLUSTERING,
318 .proc_name = DRV_NAME,
319 .dma_boundary = ATA_DMA_BOUNDARY,
320 .slave_configure = ata_scsi_slave_config,
321 .bios_param = ata_std_bios_param,
322};
323
324static const struct ata_port_operations artop6210_ops = {
325 .port_disable = ata_port_disable,
326 .set_piomode = artop6210_set_piomode,
327 .set_dmamode = artop6210_set_dmamode,
328 .mode_filter = ata_pci_default_filter,
329
330 .tf_load = ata_tf_load,
331 .tf_read = ata_tf_read,
332 .check_status = ata_check_status,
333 .exec_command = ata_exec_command,
334 .dev_select = ata_std_dev_select,
335
336 .freeze = ata_bmdma_freeze,
337 .thaw = ata_bmdma_thaw,
338 .error_handler = artop6210_error_handler,
339 .post_internal_cmd = ata_bmdma_post_internal_cmd,
340
341 .bmdma_setup = ata_bmdma_setup,
342 .bmdma_start = ata_bmdma_start,
343 .bmdma_stop = ata_bmdma_stop,
344 .bmdma_status = ata_bmdma_status,
345 .qc_prep = ata_qc_prep,
346 .qc_issue = ata_qc_issue_prot,
347 .eng_timeout = ata_eng_timeout,
348 .data_xfer = ata_pio_data_xfer,
349
350 .irq_handler = ata_interrupt,
351 .irq_clear = ata_bmdma_irq_clear,
352
353 .port_start = ata_port_start,
354 .port_stop = ata_port_stop,
355 .host_stop = ata_host_stop,
356};
357
358static const struct ata_port_operations artop6260_ops = {
359 .port_disable = ata_port_disable,
360 .set_piomode = artop6260_set_piomode,
361 .set_dmamode = artop6260_set_dmamode,
362
363 .tf_load = ata_tf_load,
364 .tf_read = ata_tf_read,
365 .check_status = ata_check_status,
366 .exec_command = ata_exec_command,
367 .dev_select = ata_std_dev_select,
368
369 .freeze = ata_bmdma_freeze,
370 .thaw = ata_bmdma_thaw,
371 .error_handler = artop6260_error_handler,
372 .post_internal_cmd = ata_bmdma_post_internal_cmd,
373
374 .bmdma_setup = ata_bmdma_setup,
375 .bmdma_start = ata_bmdma_start,
376 .bmdma_stop = ata_bmdma_stop,
377 .bmdma_status = ata_bmdma_status,
378 .qc_prep = ata_qc_prep,
379 .qc_issue = ata_qc_issue_prot,
380 .data_xfer = ata_pio_data_xfer,
381
382 .eng_timeout = ata_eng_timeout,
383
384 .irq_handler = ata_interrupt,
385 .irq_clear = ata_bmdma_irq_clear,
386
387 .port_start = ata_port_start,
388 .port_stop = ata_port_stop,
389 .host_stop = ata_host_stop,
390};
391
392
393/**
394 * artop_init_one - Register ARTOP ATA PCI device with kernel services
395 * @pdev: PCI device to register
396 * @ent: Entry in artop_pci_tbl matching with @pdev
397 *
398 * Called from kernel PCI layer.
399 *
400 * LOCKING:
401 * Inherited from PCI layer (may sleep).
402 *
403 * RETURNS:
404 * Zero on success, or -ERRNO value.
405 */
406
407static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
408{
409 static int printed_version;
410 static struct ata_port_info info_6210 = {
411 .sht = &artop_sht,
412 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
413 .pio_mask = 0x1f, /* pio0-4 */
414 .mwdma_mask = 0x07, /* mwdma0-2 */
415 .udma_mask = ATA_UDMA2,
416 .port_ops = &artop6210_ops,
417 };
418 static struct ata_port_info info_626x = {
419 .sht = &artop_sht,
420 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
421 .pio_mask = 0x1f, /* pio0-4 */
422 .mwdma_mask = 0x07, /* mwdma0-2 */
423 .udma_mask = ATA_UDMA4,
424 .port_ops = &artop6260_ops,
425 };
426 static struct ata_port_info info_626x_fast = {
427 .sht = &artop_sht,
428 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
429 .pio_mask = 0x1f, /* pio0-4 */
430 .mwdma_mask = 0x07, /* mwdma0-2 */
431 .udma_mask = ATA_UDMA5,
432 .port_ops = &artop6260_ops,
433 };
434 struct ata_port_info *port_info[2];
435 struct ata_port_info *info;
436 int ports = 2;
437
438 if (!printed_version++)
439 dev_printk(KERN_DEBUG, &pdev->dev,
440 "version " DRV_VERSION "\n");
441
442 if (id->driver_data == 0) { /* 6210 variant */
443 info = &info_6210;
444 /* BIOS may have left us in UDMA, clear it before libata probe */
445 pci_write_config_byte(pdev, 0x54, 0);
446 /* For the moment (also lacks dsc) */
447 printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
448 printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
449 ports = 1;
450 }
451 else if (id->driver_data == 1) /* 6260 */
452 info = &info_626x;
453 else if (id->driver_data == 2) { /* 6260 or 6260 + fast */
454 unsigned long io = pci_resource_start(pdev, 4);
455 u8 reg;
456
457 info = &info_626x;
458 if (inb(io) & 0x10)
459 info = &info_626x_fast;
460		/* Mac systems come up with some registers not set the way
461		   we will need them */
462
463 /* Clear reset & test bits */
464 pci_read_config_byte(pdev, 0x49, &reg);
465 pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
466
467 /* PCI latency must be > 0x80 for burst mode, tweak it
468 * if required.
469 */
470 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
471 if (reg <= 0x80)
472 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
473
474 /* Enable IRQ output and burst mode */
475 pci_read_config_byte(pdev, 0x4a, &reg);
476 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
477
478 }
479 port_info[0] = port_info[1] = info;
480 return ata_pci_init_one(pdev, port_info, ports);
481}
482
483static const struct pci_device_id artop_pci_tbl[] = {
484 { 0x1191, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
485 { 0x1191, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
486 { 0x1191, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
487 { 0x1191, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
488 { 0x1191, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
489 { } /* terminate list */
490};
491
492static struct pci_driver artop_pci_driver = {
493 .name = DRV_NAME,
494 .id_table = artop_pci_tbl,
495 .probe = artop_init_one,
496 .remove = ata_pci_remove_one,
497};
498
499static int __init artop_init(void)
500{
501 return pci_register_driver(&artop_pci_driver);
502}
503
504static void __exit artop_exit(void)
505{
506 pci_unregister_driver(&artop_pci_driver);
507}
508
509
510module_init(artop_init);
511module_exit(artop_exit);
512
513MODULE_AUTHOR("Alan Cox");
514MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
515MODULE_LICENSE("GPL");
516MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
517MODULE_VERSION(DRV_VERSION);
518
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
new file mode 100644
index 000000000000..3f78a1e54a75
--- /dev/null
+++ b/drivers/ata/pata_atiixp.c
@@ -0,0 +1,306 @@
1/*
2 * pata_atiixp.c - ATI PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on
7 *
8 * linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
9 *
10 * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
11 * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <linux/delay.h>
21#include <scsi/scsi_host.h>
22#include <linux/libata.h>
23
24#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.2"
26
27enum {
28 ATIIXP_IDE_PIO_TIMING = 0x40,
29 ATIIXP_IDE_MWDMA_TIMING = 0x44,
30 ATIIXP_IDE_PIO_CONTROL = 0x48,
31 ATIIXP_IDE_PIO_MODE = 0x4a,
32 ATIIXP_IDE_UDMA_CONTROL = 0x54,
33 ATIIXP_IDE_UDMA_MODE = 0x56
34};
35
36static int atiixp_pre_reset(struct ata_port *ap)
37{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 static struct pci_bits atiixp_enable_bits[] = {
40 { 0x48, 1, 0x01, 0x00 },
41 { 0x48, 1, 0x08, 0x00 }
42 };
43
44 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) {
45 ata_port_disable(ap);
46 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
47 return 0;
48 }
49 ap->cbl = ATA_CBL_PATA80;
50 return ata_std_prereset(ap);
51}
52
53static void atiixp_error_handler(struct ata_port *ap)
54{
55 ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
56}
57
58/**
59 * atiixp_set_pio_timing - set initial PIO mode data
60 * @ap: ATA interface
61 * @adev: ATA device
62 *
63 * Called by both the pio and dma setup functions to set the controller
64 * timings for PIO transfers. We must load both the mode number and
65 * timing values into the controller.
66 */
67
68static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
69{
70 static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
71
72 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
73 int dn = 2 * ap->port_no + adev->devno;
74
75 /* Check this is correct - the order is odd in both drivers */
76 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
77 u16 pio_mode_data, pio_timing_data;
78
79 pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
80 pio_mode_data &= ~(0x7 << (4 * dn));
81 pio_mode_data |= pio << (4 * dn);
82 pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
83
84 pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
85	pio_timing_data &= ~(0xFF << timing_shift);
86	pio_timing_data |= (pio_timings[pio] << timing_shift);
87 pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
88}
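/*
 * Worked example of the shift above: with the devno ^ 1 twist the primary
 * master's timing byte occupies bits 8-15 and the primary slave's bits 0-7
 * of the word at 0x40 (hence the "order is odd" remark), so a PIO4 primary
 * master ends up with 0x20 in bits 8-15.
 */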
89
90/**
91 * atiixp_set_piomode - set initial PIO mode data
92 * @ap: ATA interface
93 * @adev: ATA device
94 *
95 * Called to do the PIO mode setup. We use a shared helper for this
96 * as the DMA setup must also adjust the PIO timing information.
97 */
98
99static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
100{
101 atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
102}
103
104/**
105 * atiixp_set_dmamode - set initial DMA mode data
106 * @ap: ATA interface
107 * @adev: ATA device
108 *
109 * Called to do the DMA mode setup. We use timing tables for most
110 * modes but must tune an appropriate PIO mode to match.
111 */
112
113static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
114{
115	static u8 mwdma_timings[3] = { 0x77, 0x21, 0x20 };
116
117 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
118 int dma = adev->dma_mode;
119 int dn = 2 * ap->port_no + adev->devno;
120 int wanted_pio;
121
122 if (adev->dma_mode >= XFER_UDMA_0) {
123 u16 udma_mode_data;
124
125 dma -= XFER_UDMA_0;
126
127 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
128 udma_mode_data &= ~(0x7 << (4 * dn));
129 udma_mode_data |= dma << (4 * dn);
130 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
131 } else {
132 u16 mwdma_timing_data;
133 /* Check this is correct - the order is odd in both drivers */
134 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
135
136 dma -= XFER_MW_DMA_0;
137
138 pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
139 mwdma_timing_data &= ~(0xFF << timing_shift);
140 mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
141 pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
142 }
143 /*
144 * We must now look at the PIO mode situation. We may need to
145 * adjust the PIO mode to keep the timings acceptable
146 */
147 if (adev->dma_mode >= XFER_MW_DMA_2)
148 wanted_pio = 4;
149 else if (adev->dma_mode == XFER_MW_DMA_1)
150 wanted_pio = 3;
151 else if (adev->dma_mode == XFER_MW_DMA_0)
152 wanted_pio = 0;
153 else BUG();
154
155 if (adev->pio_mode != wanted_pio)
156 atiixp_set_pio_timing(ap, adev, wanted_pio);
157}
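/*
 * In practice the mapping above keeps PIO4-class command timings for
 * MWDMA2 and all UDMA modes, drops to PIO3 for MWDMA1 and PIO0 for MWDMA0,
 * and only reprograms the timings when the currently set PIO mode differs.
 */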
158
159/**
160 * atiixp_bmdma_start - DMA start callback
161 * @qc: Command in progress
162 *
163 * When DMA begins we need to ensure that the UDMA control
164 * register for the channel is correctly set.
165 */
166
167static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
168{
169 struct ata_port *ap = qc->ap;
170 struct ata_device *adev = qc->dev;
171
172 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
173 int dn = (2 * ap->port_no) + adev->devno;
174 u16 tmp16;
175
176 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
177 if (adev->dma_mode >= XFER_UDMA_0)
178 tmp16 |= (1 << dn);
179 else
180 tmp16 &= ~(1 << dn);
181 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
182 ata_bmdma_start(qc);
183}
184
185/**
186 *	atiixp_bmdma_stop	-	DMA stop callback
187 * @qc: Command in progress
188 *
189 * DMA has completed. Clear the UDMA flag as the next operations will
190 * be PIO ones not UDMA data transfer.
191 */
192
193static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
194{
195 struct ata_port *ap = qc->ap;
196 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
197 int dn = (2 * ap->port_no) + qc->dev->devno;
198 u16 tmp16;
199
200 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
201 tmp16 &= ~(1 << dn);
202 pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
203 ata_bmdma_stop(qc);
204}
205
206static struct scsi_host_template atiixp_sht = {
207 .module = THIS_MODULE,
208 .name = DRV_NAME,
209 .ioctl = ata_scsi_ioctl,
210 .queuecommand = ata_scsi_queuecmd,
211 .can_queue = ATA_DEF_QUEUE,
212 .this_id = ATA_SHT_THIS_ID,
213 .sg_tablesize = LIBATA_MAX_PRD,
214 .max_sectors = ATA_MAX_SECTORS,
215 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
216 .emulated = ATA_SHT_EMULATED,
217 .use_clustering = ATA_SHT_USE_CLUSTERING,
218 .proc_name = DRV_NAME,
219 .dma_boundary = ATA_DMA_BOUNDARY,
220 .slave_configure = ata_scsi_slave_config,
221 .bios_param = ata_std_bios_param,
222};
223
224static struct ata_port_operations atiixp_port_ops = {
225 .port_disable = ata_port_disable,
226 .set_piomode = atiixp_set_piomode,
227 .set_dmamode = atiixp_set_dmamode,
228 .mode_filter = ata_pci_default_filter,
229 .tf_load = ata_tf_load,
230 .tf_read = ata_tf_read,
231 .check_status = ata_check_status,
232 .exec_command = ata_exec_command,
233 .dev_select = ata_std_dev_select,
234
235 .freeze = ata_bmdma_freeze,
236 .thaw = ata_bmdma_thaw,
237 .error_handler = atiixp_error_handler,
238 .post_internal_cmd = ata_bmdma_post_internal_cmd,
239
240 .bmdma_setup = ata_bmdma_setup,
241 .bmdma_start = atiixp_bmdma_start,
242 .bmdma_stop = atiixp_bmdma_stop,
243 .bmdma_status = ata_bmdma_status,
244
245 .qc_prep = ata_qc_prep,
246 .qc_issue = ata_qc_issue_prot,
247 .eng_timeout = ata_eng_timeout,
248 .data_xfer = ata_pio_data_xfer,
249
250 .irq_handler = ata_interrupt,
251 .irq_clear = ata_bmdma_irq_clear,
252
253 .port_start = ata_port_start,
254 .port_stop = ata_port_stop,
255 .host_stop = ata_host_stop
256};
257
258static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
259{
260 static struct ata_port_info info = {
261 .sht = &atiixp_sht,
262 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
263 .pio_mask = 0x1f,
264 .mwdma_mask = 0x06, /* No MWDMA0 support */
265 .udma_mask = 0x3F,
266 .port_ops = &atiixp_port_ops
267 };
268 static struct ata_port_info *port_info[2] = { &info, &info };
269 return ata_pci_init_one(dev, port_info, 2);
270}
271
272static struct pci_device_id atiixp[] = {
273 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
274 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
275 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
276 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
277 { 0, },
278};
279
280static struct pci_driver atiixp_pci_driver = {
281 .name = DRV_NAME,
282 .id_table = atiixp,
283 .probe = atiixp_init_one,
284 .remove = ata_pci_remove_one
285};
286
287static int __init atiixp_init(void)
288{
289 return pci_register_driver(&atiixp_pci_driver);
290}
291
292
293static void __exit atiixp_exit(void)
294{
295 pci_unregister_driver(&atiixp_pci_driver);
296}
297
298
299MODULE_AUTHOR("Alan Cox");
300MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400/600");
301MODULE_LICENSE("GPL");
302MODULE_DEVICE_TABLE(pci, atiixp);
303MODULE_VERSION(DRV_VERSION);
304
305module_init(atiixp_init);
306module_exit(atiixp_exit);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
new file mode 100644
index 000000000000..abf1bb7bd322
--- /dev/null
+++ b/drivers/ata/pata_cmd64x.c
@@ -0,0 +1,505 @@
1/*
2 *	pata_cmd64x.c	- CMD64x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based upon
7 * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
8 *
9 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
10 * Note, this driver is not used at all on other systems because
11 * there the "BIOS" has done all of the following already.
12 * Due to massive hardware bugs, UltraDMA is only supported
13 * on the 646U2 and not on the 646U.
14 *
15 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
16 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
17 *
18 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
19 *
20 * TODO
21 * Testing work
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <scsi/scsi_host.h>
31#include <linux/libata.h>
32
33#define DRV_NAME "pata_cmd64x"
34#define DRV_VERSION "0.2.1"
35
36/*
37 * CMD64x specific registers definition.
38 */
39
40enum {
41 CFR = 0x50,
42 CFR_INTR_CH0 = 0x02,
43 CNTRL = 0x51,
44 CNTRL_DIS_RA0 = 0x40,
45 CNTRL_DIS_RA1 = 0x80,
46 CNTRL_ENA_2ND = 0x08,
47 CMDTIM = 0x52,
48 ARTTIM0 = 0x53,
49 DRWTIM0 = 0x54,
50 ARTTIM1 = 0x55,
51 DRWTIM1 = 0x56,
52 ARTTIM23 = 0x57,
53 ARTTIM23_DIS_RA2 = 0x04,
54 ARTTIM23_DIS_RA3 = 0x08,
55 ARTTIM23_INTR_CH1 = 0x10,
56 ARTTIM2 = 0x57,
57 ARTTIM3 = 0x57,
58 DRWTIM23 = 0x58,
59 DRWTIM2 = 0x58,
60 BRST = 0x59,
61 DRWTIM3 = 0x5b,
62 BMIDECR0 = 0x70,
63 MRDMODE = 0x71,
64 MRDMODE_INTR_CH0 = 0x04,
65 MRDMODE_INTR_CH1 = 0x08,
66 MRDMODE_BLK_CH0 = 0x10,
67 MRDMODE_BLK_CH1 = 0x20,
68 BMIDESR0 = 0x72,
69 UDIDETCR0 = 0x73,
70 DTPR0 = 0x74,
71 BMIDECR1 = 0x78,
72 BMIDECSR = 0x79,
73 BMIDESR1 = 0x7A,
74 UDIDETCR1 = 0x7B,
75 DTPR1 = 0x7C
76};
77
78static int cmd64x_pre_reset(struct ata_port *ap)
79{
80 ap->cbl = ATA_CBL_PATA40;
81 return ata_std_prereset(ap);
82}
83
84static int cmd648_pre_reset(struct ata_port *ap)
85{
86 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
87 u8 r;
88
89 /* Check cable detect bits */
90 pci_read_config_byte(pdev, BMIDECSR, &r);
91 if (r & (1 << ap->port_no))
92 ap->cbl = ATA_CBL_PATA80;
93 else
94 ap->cbl = ATA_CBL_PATA40;
95
96 return ata_std_prereset(ap);
97}
98
99static void cmd64x_error_handler(struct ata_port *ap)
100{
101	ata_bmdma_drive_eh(ap, cmd64x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
102}
103
104static void cmd648_error_handler(struct ata_port *ap)
105{
106 ata_bmdma_drive_eh(ap, cmd648_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
107}
108
109/**
110 * cmd64x_set_piomode - set initial PIO mode data
111 * @ap: ATA interface
112 * @adev: ATA device
113 *
114 * Called to do the PIO mode setup.
115 */
116
117static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
118{
119 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
120 struct ata_timing t;
121 const unsigned long T = 1000000 / 33;
122 const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
123
124 u8 reg;
125
126 /* Port layout is not logical so use a table */
127 const u8 arttim_port[2][2] = {
128 { ARTTIM0, ARTTIM1 },
129 { ARTTIM23, ARTTIM23 }
130 };
131 const u8 drwtim_port[2][2] = {
132 { DRWTIM0, DRWTIM1 },
133 { DRWTIM2, DRWTIM3 }
134 };
135
136 int arttim = arttim_port[ap->port_no][adev->devno];
137 int drwtim = drwtim_port[ap->port_no][adev->devno];
138
139
140 if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
141 printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
142 return;
143 }
144 if (ap->port_no) {
145 /* Slave has shared address setup */
146 struct ata_device *pair = ata_dev_pair(adev);
147
148 if (pair) {
149 struct ata_timing tp;
150 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
151 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
152 }
153 }
154
155 printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
156 t.active, t.recover, t.setup);
157 if (t.recover > 16) {
158 t.active += t.recover - 16;
159 t.recover = 16;
160 }
161 if (t.active > 16)
162 t.active = 16;
163
164 /* Now convert the clocks into values we can actually stuff into
165 the chip */
166
167 if (t.recover > 1)
168 t.recover--;
169 else
170 t.recover = 15;
171
172 if (t.setup > 4)
173 t.setup = 0xC0;
174 else
175 t.setup = setup_data[t.setup];
176
177 t.active &= 0x0F; /* 0 = 16 */
178
179 /* Load setup timing */
180 pci_read_config_byte(pdev, arttim, &reg);
181 reg &= 0x3F;
182 reg |= t.setup;
183 pci_write_config_byte(pdev, arttim, reg);
184
185 /* Load active/recovery */
186 pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
187}
188
189/**
190 * cmd64x_set_dmamode - set initial DMA mode data
191 * @ap: ATA interface
192 * @adev: ATA device
193 *
194 * Called to do the DMA mode setup.
195 */
196
197static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
198{
199 static const u8 udma_data[] = {
200 0x31, 0x21, 0x11, 0x25, 0x15, 0x05
201 };
202 static const u8 mwdma_data[] = {
203 0x30, 0x20, 0x10
204 };
205
206 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
207 u8 regU, regD;
208
209 int pciU = UDIDETCR0 + 8 * ap->port_no;
210 int pciD = BMIDESR0 + 8 * ap->port_no;
211 int shift = 2 * adev->devno;
212
213 pci_read_config_byte(pdev, pciD, &regD);
214 pci_read_config_byte(pdev, pciU, &regU);
215
216 regD &= ~(0x20 << shift);
217 regU &= ~(0x35 << shift);
218
219 if (adev->dma_mode >= XFER_UDMA_0)
220 regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
221 else
222 regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift;
223
224 regD |= 0x20 << adev->devno;
225
226 pci_write_config_byte(pdev, pciU, regU);
227 pci_write_config_byte(pdev, pciD, regD);
228}
229
230/**
231 *	cmd648_bmdma_stop	-	DMA stop callback
232 * @qc: Command in progress
233 *
234 * DMA has completed.
235 */
236
237static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
238{
239 struct ata_port *ap = qc->ap;
240 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
241 u8 dma_intr;
242	int dma_reg = ap->port_no ? ARTTIM2 : CFR;
243	int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
244
245 ata_bmdma_stop(qc);
246
247 pci_read_config_byte(pdev, dma_reg, &dma_intr);
248 pci_write_config_byte(pdev, dma_reg, dma_intr | dma_mask);
249}
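/*
 * The read-modify-write above looks like a write-one-to-clear of the
 * channel's interrupt latch: CFR bit 0x02 for the primary channel and
 * ARTTIM23 bit 0x10 for the secondary, mirroring the MRDMODE_INTR_* bits
 * in the register enum.
 */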
250
251/**
252 *	cmd646r1_bmdma_stop	-	DMA stop callback
253 * @qc: Command in progress
254 *
255 * Stub for now while investigating the r1 quirk in the old driver.
256 */
257
258static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
259{
260 ata_bmdma_stop(qc);
261}
262
263static struct scsi_host_template cmd64x_sht = {
264 .module = THIS_MODULE,
265 .name = DRV_NAME,
266 .ioctl = ata_scsi_ioctl,
267 .queuecommand = ata_scsi_queuecmd,
268 .can_queue = ATA_DEF_QUEUE,
269 .this_id = ATA_SHT_THIS_ID,
270 .sg_tablesize = LIBATA_MAX_PRD,
271 .max_sectors = ATA_MAX_SECTORS,
272 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
273 .emulated = ATA_SHT_EMULATED,
274 .use_clustering = ATA_SHT_USE_CLUSTERING,
275 .proc_name = DRV_NAME,
276 .dma_boundary = ATA_DMA_BOUNDARY,
277 .slave_configure = ata_scsi_slave_config,
278 .bios_param = ata_std_bios_param,
279};
280
281static struct ata_port_operations cmd64x_port_ops = {
282 .port_disable = ata_port_disable,
283 .set_piomode = cmd64x_set_piomode,
284 .set_dmamode = cmd64x_set_dmamode,
285 .mode_filter = ata_pci_default_filter,
286 .tf_load = ata_tf_load,
287 .tf_read = ata_tf_read,
288 .check_status = ata_check_status,
289 .exec_command = ata_exec_command,
290 .dev_select = ata_std_dev_select,
291
292 .freeze = ata_bmdma_freeze,
293 .thaw = ata_bmdma_thaw,
294 .error_handler = cmd64x_error_handler,
295 .post_internal_cmd = ata_bmdma_post_internal_cmd,
296
297 .bmdma_setup = ata_bmdma_setup,
298 .bmdma_start = ata_bmdma_start,
299 .bmdma_stop = ata_bmdma_stop,
300 .bmdma_status = ata_bmdma_status,
301
302 .qc_prep = ata_qc_prep,
303 .qc_issue = ata_qc_issue_prot,
304 .eng_timeout = ata_eng_timeout,
305 .data_xfer = ata_pio_data_xfer,
306
307 .irq_handler = ata_interrupt,
308 .irq_clear = ata_bmdma_irq_clear,
309
310 .port_start = ata_port_start,
311 .port_stop = ata_port_stop,
312 .host_stop = ata_host_stop
313};
314
315static struct ata_port_operations cmd646r1_port_ops = {
316 .port_disable = ata_port_disable,
317 .set_piomode = cmd64x_set_piomode,
318 .set_dmamode = cmd64x_set_dmamode,
319 .mode_filter = ata_pci_default_filter,
320 .tf_load = ata_tf_load,
321 .tf_read = ata_tf_read,
322 .check_status = ata_check_status,
323 .exec_command = ata_exec_command,
324 .dev_select = ata_std_dev_select,
325
326 .freeze = ata_bmdma_freeze,
327 .thaw = ata_bmdma_thaw,
328 .error_handler = cmd64x_error_handler,
329 .post_internal_cmd = ata_bmdma_post_internal_cmd,
330
331 .bmdma_setup = ata_bmdma_setup,
332 .bmdma_start = ata_bmdma_start,
333 .bmdma_stop = cmd646r1_bmdma_stop,
334 .bmdma_status = ata_bmdma_status,
335
336 .qc_prep = ata_qc_prep,
337 .qc_issue = ata_qc_issue_prot,
338 .eng_timeout = ata_eng_timeout,
339 .data_xfer = ata_pio_data_xfer,
340
341 .irq_handler = ata_interrupt,
342 .irq_clear = ata_bmdma_irq_clear,
343
344 .port_start = ata_port_start,
345 .port_stop = ata_port_stop,
346 .host_stop = ata_host_stop
347};
348
349static struct ata_port_operations cmd648_port_ops = {
350 .port_disable = ata_port_disable,
351 .set_piomode = cmd64x_set_piomode,
352 .set_dmamode = cmd64x_set_dmamode,
353 .mode_filter = ata_pci_default_filter,
354 .tf_load = ata_tf_load,
355 .tf_read = ata_tf_read,
356 .check_status = ata_check_status,
357 .exec_command = ata_exec_command,
358 .dev_select = ata_std_dev_select,
359
360 .freeze = ata_bmdma_freeze,
361 .thaw = ata_bmdma_thaw,
362 .error_handler = cmd648_error_handler,
363 .post_internal_cmd = ata_bmdma_post_internal_cmd,
364
365 .bmdma_setup = ata_bmdma_setup,
366 .bmdma_start = ata_bmdma_start,
367 .bmdma_stop = cmd648_bmdma_stop,
368 .bmdma_status = ata_bmdma_status,
369
370 .qc_prep = ata_qc_prep,
371 .qc_issue = ata_qc_issue_prot,
372 .eng_timeout = ata_eng_timeout,
373 .data_xfer = ata_pio_data_xfer,
374
375 .irq_handler = ata_interrupt,
376 .irq_clear = ata_bmdma_irq_clear,
377
378 .port_start = ata_port_start,
379 .port_stop = ata_port_stop,
380 .host_stop = ata_host_stop
381};
382
383static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
384{
385 u32 class_rev;
386
387 static struct ata_port_info cmd_info[6] = {
388 { /* CMD 643 - no UDMA */
389 .sht = &cmd64x_sht,
390 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
391 .pio_mask = 0x1f,
392 .mwdma_mask = 0x07,
393 .port_ops = &cmd64x_port_ops
394 },
395 { /* CMD 646 with broken UDMA */
396 .sht = &cmd64x_sht,
397 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
398 .pio_mask = 0x1f,
399 .mwdma_mask = 0x07,
400 .port_ops = &cmd64x_port_ops
401 },
402 { /* CMD 646 with working UDMA */
403 .sht = &cmd64x_sht,
404 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
405 .pio_mask = 0x1f,
406 .mwdma_mask = 0x07,
407 .udma_mask = ATA_UDMA1,
408 .port_ops = &cmd64x_port_ops
409 },
410 { /* CMD 646 rev 1 */
411 .sht = &cmd64x_sht,
412 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
413 .pio_mask = 0x1f,
414 .mwdma_mask = 0x07,
415 .port_ops = &cmd646r1_port_ops
416 },
417 { /* CMD 648 */
418 .sht = &cmd64x_sht,
419 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
420 .pio_mask = 0x1f,
421 .mwdma_mask = 0x07,
422 .udma_mask = ATA_UDMA2,
423 .port_ops = &cmd648_port_ops
424 },
425 { /* CMD 649 */
426 .sht = &cmd64x_sht,
427 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
428 .pio_mask = 0x1f,
429 .mwdma_mask = 0x07,
430 .udma_mask = ATA_UDMA3,
431 .port_ops = &cmd648_port_ops
432 }
433 };
434 static struct ata_port_info *port_info[2], *info;
435 u8 mrdmode;
436
437 info = &cmd_info[id->driver_data];
438
439 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
440 class_rev &= 0xFF;
441
442 if (id->driver_data == 0) /* 643 */
443 ata_pci_clear_simplex(pdev);
444
445 if (pdev->device == PCI_DEVICE_ID_CMD_646) {
446 /* Does UDMA work ? */
447 if (class_rev > 4)
448 info = &cmd_info[2];
449 /* Early rev with other problems ? */
450 else if (class_rev == 1)
451 info = &cmd_info[3];
452 }
453
454 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
455 pci_read_config_byte(pdev, MRDMODE, &mrdmode);
456 mrdmode &= ~ 0x30; /* IRQ set up */
457 mrdmode |= 0x02; /* Memory read line enable */
458 pci_write_config_byte(pdev, MRDMODE, mrdmode);
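	/*
	 * Per the register enum above, the ~0x30 clears MRDMODE_BLK_CH0 and
	 * MRDMODE_BLK_CH1 so neither channel's interrupt is blocked; the
	 * 0x02 is the memory read line enable noted in the comment.
	 */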
459
460 /* Force PIO 0 here.. */
461
462 /* PPC specific fixup copied from old driver */
463#ifdef CONFIG_PPC
464 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
465#endif
466
467 port_info[0] = port_info[1] = info;
468 return ata_pci_init_one(pdev, port_info, 2);
469}
470
471static struct pci_device_id cmd64x[] = {
472 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
473 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
474 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
475 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
476 { 0, },
477};
478
479static struct pci_driver cmd64x_pci_driver = {
480 .name = DRV_NAME,
481 .id_table = cmd64x,
482 .probe = cmd64x_init_one,
483 .remove = ata_pci_remove_one
484};
485
486static int __init cmd64x_init(void)
487{
488 return pci_register_driver(&cmd64x_pci_driver);
489}
490
491
492static void __exit cmd64x_exit(void)
493{
494 pci_unregister_driver(&cmd64x_pci_driver);
495}
496
497
498MODULE_AUTHOR("Alan Cox");
499MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
500MODULE_LICENSE("GPL");
501MODULE_DEVICE_TABLE(pci, cmd64x);
502MODULE_VERSION(DRV_VERSION);
503
504module_init(cmd64x_init);
505module_exit(cmd64x_exit);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
new file mode 100644
index 000000000000..792ce4828510
--- /dev/null
+++ b/drivers/ata/pata_cs5520.c
@@ -0,0 +1,336 @@
1/*
2 * IDE tuning and bus mastering support for the CS5510/CS5520
3 * chipsets
4 *
5 * The CS5510/CS5520 are slightly unusual devices. Unlike the
6 * typical IDE controllers they do bus mastering with the drive in
7 * PIO mode and smarter silicon.
8 *
9 * The practical upshot of this is that we must always tune the
10 * drive for the right PIO mode. We must also ignore all the blacklists
11 * and the drive bus mastering DMA information. Also to confuse matters
12 * further we can do DMA on PIO only drives.
13 *
14 * DMA on the 5510 also requires we disable_hlt() during DMA on early
15 * revisions.
16 *
17 * *** This driver is strictly experimental ***
18 *
19 * (c) Copyright Red Hat Inc 2002
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2, or (at your option) any
24 * later version.
25 *
26 * This program is distributed in the hope that it will be useful, but
27 * WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
29 * General Public License for more details.
30 *
31 * Documentation:
32 *	Not publicly available.
33 */
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <scsi/scsi_host.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "pata_cs5520"
44#define DRV_VERSION "0.6.2"
45
46struct pio_clocks
47{
48 int address;
49 int assert;
50 int recovery;
51};
52
53static const struct pio_clocks cs5520_pio_clocks[]={
54 {3, 6, 11},
55 {2, 5, 6},
56 {1, 4, 3},
57 {1, 3, 2},
58 {1, 2, 1}
59};
60
61/**
62 * cs5520_set_timings - program PIO timings
63 * @ap: ATA port
64 * @adev: ATA device
65 *
66 * Program the PIO mode timings for the controller according to the pio
67 * clocking table.
68 */
69
70static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
71{
72 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
73 int slave = adev->devno;
74
75 pio -= XFER_PIO_0;
76
77 /* Channel command timing */
78 pci_write_config_byte(pdev, 0x62 + ap->port_no,
79 (cs5520_pio_clocks[pio].recovery << 4) |
80 (cs5520_pio_clocks[pio].assert));
81 /* FIXME: should these use address ? */
82 /* Read command timing */
83 pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave,
84 (cs5520_pio_clocks[pio].recovery << 4) |
85 (cs5520_pio_clocks[pio].assert));
86 /* Write command timing */
87 pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave,
88 (cs5520_pio_clocks[pio].recovery << 4) |
89 (cs5520_pio_clocks[pio].assert));
90}
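/*
 * Each timing byte packs recovery clocks in the high nibble and assert
 * clocks in the low nibble, so PIO4 (assert 2, recovery 1) is written as
 * 0x12 to the channel command register and to both the read and write
 * command registers for the drive.
 */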
91
92/**
93 * cs5520_enable_dma - turn on DMA bits
94 *
95 * Turn on the DMA bits for this disk. Needed because the BIOS probably
96 * has not done the work for us. Belongs in the core SATA code.
97 */
98
99static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
100{
101 /* Set the DMA enable/disable flag */
102 u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02);
103 reg |= 1<<(adev->devno + 5);
104 outb(reg, ap->ioaddr.bmdma_addr + 0x02);
105}
106
107/**
108 * cs5520_set_dmamode - program DMA timings
109 * @ap: ATA port
110 * @adev: ATA device
111 *
112 * Program the DMA mode timings for the controller according to the pio
113 * clocking table. Note that this device sets the DMA timings to PIO
114 * mode values. This may seem bizarre but the 5520 architecture talks
115 * PIO mode to the disk and DMA mode to the controller, so the underlying
116 * transfers are PIO timed.
117 */
118
119static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev)
120{
121 static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
122 cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode - XFER_MW_DMA_0]);
123 cs5520_enable_dma(ap, adev);
124}
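/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Because the 5510/5520 clock "DMA" transfers with PIO timings, each
 * MWDMA mode is simply mapped onto a PIO timing before programming the
 * chip, as the dma_xlate[] table above does. The helper name is
 * hypothetical.
 */
static inline int cs5520_dma_to_pio(u8 dma_mode)
{
	static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
	return dma_xlate[dma_mode - XFER_MW_DMA_0];	/* e.g. XFER_MW_DMA_2 -> XFER_PIO_4 */
}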
125
126/**
127 * cs5520_set_piomode - program PIO timings
128 * @ap: ATA port
129 * @adev: ATA device
130 *
131 * Program the PIO mode timings for the controller according to the pio
132 * clocking table. We know pio_mode will equal dma_mode because of the
133 * CS5520 architecture, at least once DMA has been turned on and a
134 * mode setter has run.
135 */
136
137static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
138{
139 cs5520_set_timings(ap, adev, adev->pio_mode);
140}
141
142
143static int cs5520_pre_reset(struct ata_port *ap)
144{
145 ap->cbl = ATA_CBL_PATA40;
146 return ata_std_prereset(ap);
147}
148
149static void cs5520_error_handler(struct ata_port *ap)
150{
151 return ata_bmdma_drive_eh(ap, cs5520_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
152}
153
154static struct scsi_host_template cs5520_sht = {
155 .module = THIS_MODULE,
156 .name = DRV_NAME,
157 .ioctl = ata_scsi_ioctl,
158 .queuecommand = ata_scsi_queuecmd,
159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING,
166 .proc_name = DRV_NAME,
167 .dma_boundary = ATA_DMA_BOUNDARY,
168 .slave_configure = ata_scsi_slave_config,
169 .bios_param = ata_std_bios_param,
170};
171
172static struct ata_port_operations cs5520_port_ops = {
173 .port_disable = ata_port_disable,
174 .set_piomode = cs5520_set_piomode,
175 .set_dmamode = cs5520_set_dmamode,
176
177 .tf_load = ata_tf_load,
178 .tf_read = ata_tf_read,
179 .check_status = ata_check_status,
180 .exec_command = ata_exec_command,
181 .dev_select = ata_std_dev_select,
182
183 .freeze = ata_bmdma_freeze,
184 .thaw = ata_bmdma_thaw,
185 .error_handler = cs5520_error_handler,
186 .post_internal_cmd = ata_bmdma_post_internal_cmd,
187
188 .bmdma_setup = ata_bmdma_setup,
189 .bmdma_start = ata_bmdma_start,
190 .bmdma_stop = ata_bmdma_stop,
191 .bmdma_status = ata_bmdma_status,
192 .qc_prep = ata_qc_prep,
193 .qc_issue = ata_qc_issue_prot,
194 .data_xfer = ata_pio_data_xfer,
195
196 .eng_timeout = ata_eng_timeout,
197
198 .irq_handler = ata_interrupt,
199 .irq_clear = ata_bmdma_irq_clear,
200
201 .port_start = ata_port_start,
202 .port_stop = ata_port_stop,
203 .host_stop = ata_host_stop,
204};
205
206static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
207{
208 u8 pcicfg;
209 static struct ata_probe_ent probe[2];
210 int ports = 0;
211
212 /* IDE port enable bits */
213 pci_read_config_byte(dev, 0x60, &pcicfg);
214
215 /* Check if the ATA ports are enabled */
216 if ((pcicfg & 3) == 0)
217 return -ENODEV;
218
219 if ((pcicfg & 0x40) == 0) {
220 printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
221 pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
222 }
223
224 /* Perform set up for DMA */
225 if (pci_enable_device_bars(dev, 1<<2)) {
226 printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
227 return -ENODEV;
228 }
229 pci_set_master(dev);
230 if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
231 printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
232 return -ENODEV;
233 }
234 if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
235 printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
236 return -ENODEV;
237 }
238
239 /* We have to do our own plumbing as the PCI setup for this
240 chipset is non-standard, so we can't punt to the libata code */
241
242 INIT_LIST_HEAD(&probe[0].node);
243 probe[0].dev = pci_dev_to_dev(dev);
244 probe[0].port_ops = &cs5520_port_ops;
245 probe[0].sht = &cs5520_sht;
246 probe[0].pio_mask = 0x1F;
247 probe[0].mwdma_mask = id->driver_data;
248 probe[0].irq = 14;
249 probe[0].irq_flags = 0;
250 probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
251 probe[0].n_ports = 1;
252 probe[0].port[0].cmd_addr = 0x1F0;
253 probe[0].port[0].ctl_addr = 0x3F6;
254 probe[0].port[0].altstatus_addr = 0x3F6;
255 probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2);
256
257 /* The secondary lurks at different addresses but is otherwise
258 the same beastie */
259
260 probe[1] = probe[0];
261 INIT_LIST_HEAD(&probe[1].node);
262 probe[1].irq = 15;
263 probe[1].port[0].cmd_addr = 0x170;
264 probe[1].port[0].ctl_addr = 0x376;
265 probe[1].port[0].altstatus_addr = 0x376;
266 probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8;
267
268 /* Let libata fill in the port details */
269 ata_std_ports(&probe[0].port[0]);
270 ata_std_ports(&probe[1].port[0]);
271
272 /* Now add the ports that are active */
273 if (pcicfg & 1)
274 ports += ata_device_add(&probe[0]);
275 if (pcicfg & 2)
276 ports += ata_device_add(&probe[1]);
277 if (ports)
278 return 0;
279 return -ENODEV;
280}
281
282/**
283 * cs5520_remove_one - device unload
284 * @pdev: PCI device being removed
285 *
286 * Handle an unplug/unload event for a PCI device. Unload the
287 * PCI driver but do not use the default handler as we manage
288 * resources ourselves and *MUST NOT* disable the device as it has
289 * other functions.
290 */
291
292static void __devexit cs5520_remove_one(struct pci_dev *pdev)
293{
294 struct device *dev = pci_dev_to_dev(pdev);
295 struct ata_host *host = dev_get_drvdata(dev);
296
297 ata_host_remove(host);
298 dev_set_drvdata(dev, NULL);
299}
300
301/* For now keep DMA off. We can set it for all but the A rev CS5510 once the
302 core ATA code can handle it */
303
304static struct pci_device_id pata_cs5520[] = {
305 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
306 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
307 { 0, },
308};
309
310static struct pci_driver cs5520_pci_driver = {
311 .name = DRV_NAME,
312 .id_table = pata_cs5520,
313 .probe = cs5520_init_one,
314 .remove = cs5520_remove_one
315};
316
317
318static int __init cs5520_init(void)
319{
320 return pci_register_driver(&cs5520_pci_driver);
321}
322
323static void __exit cs5520_exit(void)
324{
325 pci_unregister_driver(&cs5520_pci_driver);
326}
327
328MODULE_AUTHOR("Alan Cox");
329MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
330MODULE_LICENSE("GPL");
331MODULE_DEVICE_TABLE(pci, pata_cs5520);
332MODULE_VERSION(DRV_VERSION);
333
334module_init(cs5520_init);
335module_exit(cs5520_exit);
336
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
new file mode 100644
index 000000000000..f3d8a3bc1e78
--- /dev/null
+++ b/drivers/ata/pata_cs5530.c
@@ -0,0 +1,387 @@
1/*
2 * pata_cs5530.c - CS5530 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon cs5530.c by Mark Lord.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Loosely based on the piix & svwks drivers.
22 *
23 * Documentation:
24 * Available from AMD web site.
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35#include <linux/dmi.h>
36
37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.6"
39
40/**
41 * cs5530_set_piomode - PIO setup
42 * @ap: ATA interface
43 * @adev: device on the interface
44 *
45 * Set our PIO requirements. This is fairly simple on the CS5530
46 * chips.
47 */
48
49static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
50{
51 static const unsigned int cs5530_pio_timings[2][5] = {
52 {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
53 {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
54 };
55 unsigned long base = (ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
56 u32 tuning;
57 int format;
58
59 /* Find out which table to use */
60 tuning = inl(base + 0x04);
61 format = (tuning & 0x80000000UL) ? 1 : 0;
62
63 /* Now load the right timing register */
64 if (adev->devno)
65 base += 0x08;
66
67 outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
68}
69
70/**
71 * cs5530_set_dmamode - DMA timing setup
72 * @ap: ATA interface
73 * @adev: Device being configured
74 *
75 * We cannot mix MWDMA and UDMA without reloading timings each time we
76 * switch between master and slave. We track the last DMA setup in order
77 * to minimise reloads.
78 */
79
80static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
81{
82 unsigned long base = (ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
83 u32 tuning, timing = 0;
84 u8 reg;
85
86 /* Find out which table to use */
87 tuning = inl(base + 0x04);
88
89 switch(adev->dma_mode) {
90 case XFER_UDMA_0:
91 timing = 0x00921250;break;
92 case XFER_UDMA_1:
93 timing = 0x00911140;break;
94 case XFER_UDMA_2:
95 timing = 0x00911030;break;
96 case XFER_MW_DMA_0:
97 timing = 0x00077771;break;
98 case XFER_MW_DMA_1:
99 timing = 0x00012121;break;
100 case XFER_MW_DMA_2:
101 timing = 0x00002020;break;
102 default:
103 BUG();
104 }
105 /* Merge in the PIO format bit */
106 timing |= (tuning & 0x80000000UL);
107 if (adev->devno == 0) /* Master */
108 outl(timing, base + 0x04);
109 else {
110 if (timing & 0x00100000)
111 tuning |= 0x00100000; /* UDMA for both */
112 else
113 tuning &= ~0x00100000; /* MWDMA for both */
114 outl(tuning, base + 0x04);
115 outl(timing, base + 0x0C);
116 }
117
118 /* Set the DMA capable bit in the BMDMA area */
119 reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
120 reg |= (1 << (5 + adev->devno));
121 outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
122
123 /* Remember the last DMA setup we did */
124
125 ap->private_data = adev;
126}
127
128/**
129 * cs5530_qc_issue_prot - command issue
130 * @qc: command pending
131 *
132 * Called when the libata layer is about to issue a command. We wrap
133 * this interface so that we can load the correct ATA timings if
134 * necessary. Specifically we have a problem that there is only
135 * one MWDMA/UDMA bit.
136 */
137
138static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
139{
140 struct ata_port *ap = qc->ap;
141 struct ata_device *adev = qc->dev;
142 struct ata_device *prev = ap->private_data;
143
144 /* See if the DMA settings could be wrong */
145 if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
146 /* Maybe, but do the channels match MWDMA/UDMA ? */
147 if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
148 (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
149 /* Switch the mode bits */
150 cs5530_set_dmamode(ap, adev);
151 }
152
153 return ata_qc_issue_prot(qc);
154}
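/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * The reload above is only needed when the current and previous devices
 * sit on opposite sides of the UDMA boundary, because the channel has a
 * single MWDMA/UDMA select bit. A hypothetical standalone predicate:
 */
static inline int cs5530_needs_reload(u8 new_mode, u8 old_mode)
{
	return (new_mode >= XFER_UDMA_0) != (old_mode >= XFER_UDMA_0);
}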
155
156static int cs5530_pre_reset(struct ata_port *ap)
157{
158 ap->cbl = ATA_CBL_PATA40;
159 return ata_std_prereset(ap);
160}
161
162static void cs5530_error_handler(struct ata_port *ap)
163{
164 return ata_bmdma_drive_eh(ap, cs5530_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
165}
166
167
168static struct scsi_host_template cs5530_sht = {
169 .module = THIS_MODULE,
170 .name = DRV_NAME,
171 .ioctl = ata_scsi_ioctl,
172 .queuecommand = ata_scsi_queuecmd,
173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD,
176 .max_sectors = ATA_MAX_SECTORS,
177 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
178 .emulated = ATA_SHT_EMULATED,
179 .use_clustering = ATA_SHT_USE_CLUSTERING,
180 .proc_name = DRV_NAME,
181 .dma_boundary = ATA_DMA_BOUNDARY,
182 .slave_configure = ata_scsi_slave_config,
183 .bios_param = ata_std_bios_param,
184};
185
186static struct ata_port_operations cs5530_port_ops = {
187 .port_disable = ata_port_disable,
188 .set_piomode = cs5530_set_piomode,
189 .set_dmamode = cs5530_set_dmamode,
190 .mode_filter = ata_pci_default_filter,
191
192 .tf_load = ata_tf_load,
193 .tf_read = ata_tf_read,
194 .check_status = ata_check_status,
195 .exec_command = ata_exec_command,
196 .dev_select = ata_std_dev_select,
197
198 .bmdma_setup = ata_bmdma_setup,
199 .bmdma_start = ata_bmdma_start,
200 .bmdma_stop = ata_bmdma_stop,
201 .bmdma_status = ata_bmdma_status,
202
203 .freeze = ata_bmdma_freeze,
204 .thaw = ata_bmdma_thaw,
205 .error_handler = cs5530_error_handler,
206 .post_internal_cmd = ata_bmdma_post_internal_cmd,
207
208 .qc_prep = ata_qc_prep,
209 .qc_issue = cs5530_qc_issue_prot,
210 .eng_timeout = ata_eng_timeout,
211 .data_xfer = ata_pio_data_xfer,
212
213 .irq_handler = ata_interrupt,
214 .irq_clear = ata_bmdma_irq_clear,
215
216 .port_start = ata_port_start,
217 .port_stop = ata_port_stop,
218 .host_stop = ata_host_stop
219};
220
221static struct dmi_system_id palmax_dmi_table[] = {
222 {
223 .ident = "Palmax PD1100",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
226 DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
227 },
228 },
229 { }
230};
231
232static int cs5530_is_palmax(void)
233{
234 if (dmi_check_system(palmax_dmi_table)) {
235 printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
236 return 1;
237 }
238 return 0;
239}
240
241/**
242 * cs5530_init_one - Initialise a CS5530
243 * @dev: PCI device
244 * @id: Entry in match table
245 *
246 * Install a driver for the newly found CS5530 companion chip. Most of
247 * this is just housekeeping. We have to set the chip up correctly and
248 * turn off various bits of emulation magic.
249 */
250
251static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
252{
253 int compiler_warning_pointless_fix;
254 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
255 static struct ata_port_info info = {
256 .sht = &cs5530_sht,
257 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
258 .pio_mask = 0x1f,
259 .mwdma_mask = 0x07,
260 .udma_mask = 0x07,
261 .port_ops = &cs5530_port_ops
262 };
263 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
264 static struct ata_port_info info_palmax_secondary = {
265 .sht = &cs5530_sht,
266 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
267 .pio_mask = 0x1f,
268 .port_ops = &cs5530_port_ops
269 };
270 static struct ata_port_info *port_info[2] = { &info, &info };
271
272 dev = NULL;
273 while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
274 switch (dev->device) {
275 case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
276 master_0 = pci_dev_get(dev);
277 break;
278 case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
279 cs5530_0 = pci_dev_get(dev);
280 break;
281 }
282 }
283 if (!master_0) {
284 printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
285 goto fail_put;
286 }
287 if (!cs5530_0) {
288 printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
289 goto fail_put;
290 }
291
292 pci_set_master(cs5530_0);
293 compiler_warning_pointless_fix = pci_set_mwi(cs5530_0);
294
295 /*
296 * Set PCI CacheLineSize to 16-bytes:
297 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
298 *
299 * Note: This value is constant because the 5530 is only a Geode companion
300 */
301
302 pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
303
304 /*
305 * Disable trapping of UDMA register accesses (Win98 hack):
306 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
307 */
308
309 pci_write_config_word(cs5530_0, 0xd0, 0x5006);
310
311 /*
312 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
313 * The other settings are what is necessary to get the register
314 * into a sane state for IDE DMA operation.
315 */
316
317 pci_write_config_byte(master_0, 0x40, 0x1e);
318
319 /*
320 * Set max PCI burst size (16-bytes seems to work best):
321 * 16bytes: set bit-1 at 0x41 (reg value of 0x16)
322 * all others: clear bit-1 at 0x41, and do:
323 * 128bytes: OR 0x00 at 0x41
324 * 256bytes: OR 0x04 at 0x41
325 * 512bytes: OR 0x08 at 0x41
326 * 1024bytes: OR 0x0c at 0x41
327 */
328
329 pci_write_config_byte(master_0, 0x41, 0x14);
330
331 /*
332 * These settings are necessary to get the chip
333 * into a sane state for IDE DMA operation.
334 */
335
336 pci_write_config_byte(master_0, 0x42, 0x00);
337 pci_write_config_byte(master_0, 0x43, 0xc1);
338
339 pci_dev_put(master_0);
340 pci_dev_put(cs5530_0);
341
342 if (cs5530_is_palmax())
343 port_info[1] = &info_palmax_secondary;
344
345 /* Now kick off ATA set up */
346 return ata_pci_init_one(dev, port_info, 2);
347
348fail_put:
349 if (master_0)
350 pci_dev_put(master_0);
351 if (cs5530_0)
352 pci_dev_put(cs5530_0);
353 return -ENODEV;
354}
355
356static struct pci_device_id cs5530[] = {
357 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
358 { 0, },
359};
360
361static struct pci_driver cs5530_pci_driver = {
362 .name = DRV_NAME,
363 .id_table = cs5530,
364 .probe = cs5530_init_one,
365 .remove = ata_pci_remove_one
366};
367
368static int __init cs5530_init(void)
369{
370 return pci_register_driver(&cs5530_pci_driver);
371}
372
373
374static void __exit cs5530_exit(void)
375{
376 pci_unregister_driver(&cs5530_pci_driver);
377}
378
379
380MODULE_AUTHOR("Alan Cox");
381MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
382MODULE_LICENSE("GPL");
383MODULE_DEVICE_TABLE(pci, cs5530);
384MODULE_VERSION(DRV_VERSION);
385
386module_init(cs5530_init);
387module_exit(cs5530_exit);
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
new file mode 100644
index 000000000000..69d6b4258724
--- /dev/null
+++ b/drivers/ata/pata_cs5535.c
@@ -0,0 +1,291 @@
1/*
2 * pata_cs5535.c - CS5535 PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
7 * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
8 * and Alexander Kiausch <alex.kiausch@t-online.de>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Loosely based on the piix & svwks drivers.
24 *
25 * Documentation:
26 * Available from AMD web site.
27 * TODO
28 * Review errata to see if serializing is necessary
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <scsi/scsi_host.h>
38#include <linux/libata.h>
39#include <asm/msr.h>
40
41#define DRV_NAME "cs5535"
42#define DRV_VERSION "0.2.10"
43
44/*
45 * The Geode (Aka Athlon GX now) uses an internal MSR based
46 * bus system for control. Demented but there you go.
47 */
48
49#define MSR_ATAC_BASE 0x51300000
50#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
51#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
52#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
53#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
54#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
55#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
56#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
57#define ATAC_RESET (MSR_ATAC_BASE+0x10)
58#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
59#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
60#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
61#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
62#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
63
64#define ATAC_BM0_CMD_PRIM 0x00
65#define ATAC_BM0_STS_PRIM 0x02
66#define ATAC_BM0_PRD 0x04
67
68#define CS5535_CABLE_DETECT 0x48
69
70#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 )
71
72/**
73 * cs5535_pre_reset - detect cable type
74 * @ap: Port to detect on
75 *
76 * Perform cable detection for ATA66 capable cable. Return a libata
77 * cable type.
78 */
79
80static int cs5535_pre_reset(struct ata_port *ap)
81{
82 u8 cable;
83 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
84
85 pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
86 if (cable & 1)
87 ap->cbl = ATA_CBL_PATA80;
88 else
89 ap->cbl = ATA_CBL_PATA40;
90 return ata_std_prereset(ap);
91}
92
93/**
94 * cs5535_error_handler - reset/probe
95 * @ap: Port to reset
96 *
97 * Reset and configure a port
98 */
99
100static void cs5535_error_handler(struct ata_port *ap)
101{
102 ata_bmdma_drive_eh(ap, cs5535_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
103}
104
105/**
106 * cs5535_set_piomode - PIO setup
107 * @ap: ATA interface
108 * @adev: device on the interface
109 *
110 * Set our PIO requirements. The CS5535 is pretty clean about all this
111 */
112
113static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
114{
115 static const u16 pio_timings[5] = {
116 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
117 };
118 static const u16 pio_cmd_timings[5] = {
119 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
120 };
121 u32 reg, dummy;
122 struct ata_device *pair = ata_dev_pair(adev);
123
124 int mode = adev->pio_mode - XFER_PIO_0;
125 int cmdmode = mode;
126
127 /* Command timing has to be for the lowest of the pair of devices */
128 if (pair) {
129 int pairmode = pair->pio_mode - XFER_PIO_0;
130 cmdmode = min(mode, pairmode);
131 /* Write the other drive timing register if it changed */
132 if (cmdmode < pairmode)
133 wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
134 pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
135 }
136 /* Write the drive timing register */
137 wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
138 pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
139
140 /* Set the PIO "format 1" bit in the DMA timing register */
141 rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
142 wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
143}
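/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Each drive has its own PIO timing MSR; the command (taskfile) timing
 * sits in the upper 16 bits and the data timing in the lower 16 bits,
 * which is what the wrmsr() calls above assemble. The helper name is
 * hypothetical.
 */
static inline u32 cs5535_pio_msr_word(u16 cmd_timing, u16 data_timing)
{
	return ((u32)cmd_timing << 16) | data_timing;
}
/* Example: PIO4 for both fields -> (0x1131 << 16) | 0x1131 == 0x11311131 */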
144
145/**
146 * cs5535_set_dmamode - DMA timing setup
147 * @ap: ATA interface
148 * @adev: Device being configured
149 *
150 */
151
152static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
153{
154 static const u32 udma_timings[5] = {
155 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
156 };
157 static const u32 mwdma_timings[3] = {
158 0x7F0FFFF3, 0x7F035352, 0x7F024241
159 };
160 u32 reg, dummy;
161 int mode = adev->dma_mode;
162
163 rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
164 reg &= 0x80000000UL;
165 if (mode >= XFER_UDMA_0)
166 reg |= udma_timings[mode - XFER_UDMA_0];
167 else
168 reg |= mwdma_timings[mode - XFER_MW_DMA_0];
169 wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
170}
171
172static struct scsi_host_template cs5535_sht = {
173 .module = THIS_MODULE,
174 .name = DRV_NAME,
175 .ioctl = ata_scsi_ioctl,
176 .queuecommand = ata_scsi_queuecmd,
177 .can_queue = ATA_DEF_QUEUE,
178 .this_id = ATA_SHT_THIS_ID,
179 .sg_tablesize = LIBATA_MAX_PRD,
180 .max_sectors = ATA_MAX_SECTORS,
181 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
182 .emulated = ATA_SHT_EMULATED,
183 .use_clustering = ATA_SHT_USE_CLUSTERING,
184 .proc_name = DRV_NAME,
185 .dma_boundary = ATA_DMA_BOUNDARY,
186 .slave_configure = ata_scsi_slave_config,
187 .bios_param = ata_std_bios_param,
188};
189
190static struct ata_port_operations cs5535_port_ops = {
191 .port_disable = ata_port_disable,
192 .set_piomode = cs5535_set_piomode,
193 .set_dmamode = cs5535_set_dmamode,
194 .mode_filter = ata_pci_default_filter,
195
196 .tf_load = ata_tf_load,
197 .tf_read = ata_tf_read,
198 .check_status = ata_check_status,
199 .exec_command = ata_exec_command,
200 .dev_select = ata_std_dev_select,
201
202 .freeze = ata_bmdma_freeze,
203 .thaw = ata_bmdma_thaw,
204 .error_handler = cs5535_error_handler,
205 .post_internal_cmd = ata_bmdma_post_internal_cmd,
206
207 .bmdma_setup = ata_bmdma_setup,
208 .bmdma_start = ata_bmdma_start,
209 .bmdma_stop = ata_bmdma_stop,
210 .bmdma_status = ata_bmdma_status,
211
212 .qc_prep = ata_qc_prep,
213 .qc_issue = ata_qc_issue_prot,
214 .eng_timeout = ata_eng_timeout,
215 .data_xfer = ata_pio_data_xfer,
216
217 .irq_handler = ata_interrupt,
218 .irq_clear = ata_bmdma_irq_clear,
219
220 .port_start = ata_port_start,
221 .port_stop = ata_port_stop,
222 .host_stop = ata_host_stop
223};
224
225/**
226 * cs5535_init_one - Initialise a CS5535
227 * @dev: PCI device
228 * @id: Entry in match table
229 *
230 * Install a driver for the newly found CS5535 companion chip. Most of
231 * this is just housekeeping. We have to set the chip up correctly and
232 * turn off various bits of emulation magic.
233 */
234
235static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
236{
237 static struct ata_port_info info = {
238 .sht = &cs5535_sht,
239 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
240 .pio_mask = 0x1f,
241 .mwdma_mask = 0x07,
242 .udma_mask = 0x1f,
243 .port_ops = &cs5535_port_ops
244 };
245 struct ata_port_info *ports[1] = { &info };
246
247 u32 timings, dummy;
248
249 /* Check that the BIOS set the initial timing clock. If not, set the
250 timings for PIO0 */
251 rdmsr(ATAC_CH0D0_PIO, timings, dummy);
252 if (CS5535_BAD_PIO(timings))
253 wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
254 rdmsr(ATAC_CH0D1_PIO, timings, dummy);
255 if (CS5535_BAD_PIO(timings))
256 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
257 return ata_pci_init_one(dev, ports, 1);
258}
259
260static struct pci_device_id cs5535[] = {
261 { PCI_DEVICE(PCI_VENDOR_ID_NS, 0x002D), },
262 { 0, },
263};
264
265static struct pci_driver cs5535_pci_driver = {
266 .name = DRV_NAME,
267 .id_table = cs5535,
268 .probe = cs5535_init_one,
269 .remove = ata_pci_remove_one
270};
271
272static int __init cs5535_init(void)
273{
274 return pci_register_driver(&cs5535_pci_driver);
275}
276
277
278static void __exit cs5535_exit(void)
279{
280 pci_unregister_driver(&cs5535_pci_driver);
281}
282
283
284MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgang Zuleger, Alexander Kiausch");
285MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
286MODULE_LICENSE("GPL");
287MODULE_DEVICE_TABLE(pci, cs5535);
288MODULE_VERSION(DRV_VERSION);
289
290module_init(cs5535_init);
291module_exit(cs5535_exit);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
new file mode 100644
index 000000000000..fd55474e0d15
--- /dev/null
+++ b/drivers/ata/pata_cypress.c
@@ -0,0 +1,227 @@
1/*
2 * pata_cypress.c - Cypress PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based heavily on
7 * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/blkdev.h>
16#include <linux/delay.h>
17#include <scsi/scsi_host.h>
18#include <linux/libata.h>
19
20#define DRV_NAME "pata_cypress"
21#define DRV_VERSION "0.1.2"
22
23/* here are the offset definitions for the registers */
24
25enum {
26 CY82_IDE_CMDREG = 0x04,
27 CY82_IDE_ADDRSETUP = 0x48,
28 CY82_IDE_MASTER_IOR = 0x4C,
29 CY82_IDE_MASTER_IOW = 0x4D,
30 CY82_IDE_SLAVE_IOR = 0x4E,
31 CY82_IDE_SLAVE_IOW = 0x4F,
32 CY82_IDE_MASTER_8BIT = 0x50,
33 CY82_IDE_SLAVE_8BIT = 0x51,
34
35 CY82_INDEX_PORT = 0x22,
36 CY82_DATA_PORT = 0x23,
37
38 CY82_INDEX_CTRLREG1 = 0x01,
39 CY82_INDEX_CHANNEL0 = 0x30,
40 CY82_INDEX_CHANNEL1 = 0x31,
41 CY82_INDEX_TIMEOUT = 0x32
42};
43
44static int cy82c693_pre_reset(struct ata_port *ap)
45{
46 ap->cbl = ATA_CBL_PATA40;
47 return ata_std_prereset(ap);
48}
49
50static void cy82c693_error_handler(struct ata_port *ap)
51{
52 ata_bmdma_drive_eh(ap, cy82c693_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
53}
54
55/**
56 * cy82c693_set_piomode - set initial PIO mode data
57 * @ap: ATA interface
58 * @adev: ATA device
59 *
60 * Called to do the PIO mode setup.
61 */
62
63static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
64{
65 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
66 struct ata_timing t;
67 const unsigned long T = 1000000 / 33;
68 short time_16, time_8;
69 u32 addr;
70
71 if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
72  printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
73 return;
74 }
75
76 time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
77 time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
78
79 if (adev->devno == 0) {
80 pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
81
82 addr &= ~0x0F; /* Mask bits */
83 addr |= FIT(t.setup, 0, 15);
84
85 pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
86 pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
87 pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
88 pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
89 } else {
90 pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
91
92 addr &= ~0xF0; /* Mask bits */
93 addr |= (FIT(t.setup, 0, 15) << 4);
94
95 pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
96 pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
97 pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
98 pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
99 }
100}
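/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * The 16-bit and 8-bit I/O timing bytes above pack two clamped clock
 * counts into one byte, low nibble plus high nibble, with FIT() used to
 * clamp each count to 0..15. The helper name is hypothetical.
 */
static inline u8 cy82c693_pack_nibbles(unsigned int lo, unsigned int hi)
{
	return FIT(lo, 0, 15) | (FIT(hi, 0, 15) << 4);
}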
101
102/**
103 * cy82c693_set_dmamode - set initial DMA mode data
104 * @ap: ATA interface
105 * @adev: ATA device
106 *
107 * Called to do the DMA mode setup.
108 */
109
110static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
111{
112 int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
113
114 /* Be afraid, be very afraid. Magic registers in low I/O space */
115 outb(reg, 0x22);
116 outb(adev->dma_mode - XFER_MW_DMA_0, 0x23);
117
118 /* 0x50 gives the best behaviour on the Alpha's using this chip */
119 outb(CY82_INDEX_TIMEOUT, 0x22);
120 outb(0x50, 0x23);
121}
122
123static struct scsi_host_template cy82c693_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING,
135 .proc_name = DRV_NAME,
136 .dma_boundary = ATA_DMA_BOUNDARY,
137 .slave_configure = ata_scsi_slave_config,
138 .bios_param = ata_std_bios_param,
139};
140
141static struct ata_port_operations cy82c693_port_ops = {
142 .port_disable = ata_port_disable,
143 .set_piomode = cy82c693_set_piomode,
144 .set_dmamode = cy82c693_set_dmamode,
145 .mode_filter = ata_pci_default_filter,
146
147 .tf_load = ata_tf_load,
148 .tf_read = ata_tf_read,
149 .check_status = ata_check_status,
150 .exec_command = ata_exec_command,
151 .dev_select = ata_std_dev_select,
152
153 .freeze = ata_bmdma_freeze,
154 .thaw = ata_bmdma_thaw,
155 .error_handler = cy82c693_error_handler,
156 .post_internal_cmd = ata_bmdma_post_internal_cmd,
157
158 .bmdma_setup = ata_bmdma_setup,
159 .bmdma_start = ata_bmdma_start,
160 .bmdma_stop = ata_bmdma_stop,
161 .bmdma_status = ata_bmdma_status,
162
163 .qc_prep = ata_qc_prep,
164 .qc_issue = ata_qc_issue_prot,
165 .eng_timeout = ata_eng_timeout,
166 .data_xfer = ata_pio_data_xfer,
167
168 .irq_handler = ata_interrupt,
169 .irq_clear = ata_bmdma_irq_clear,
170
171 .port_start = ata_port_start,
172 .port_stop = ata_port_stop,
173 .host_stop = ata_host_stop
174};
175
176static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
177{
178 static struct ata_port_info info = {
179 .sht = &cy82c693_sht,
180 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
181 .pio_mask = 0x1f,
182 .mwdma_mask = 0x07,
183 .port_ops = &cy82c693_port_ops
184 };
185 static struct ata_port_info *port_info[1] = { &info };
186
187 /* Devfn 1 is the ATA primary. The secondary is magic and on devfn2. For the
188 moment we don't handle the secondary. FIXME */
189
190 if (PCI_FUNC(pdev->devfn) != 1)
191 return -ENODEV;
192
193 return ata_pci_init_one(pdev, port_info, 1);
194}
195
196static struct pci_device_id cy82c693[] = {
197 { PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
198 { 0, },
199};
200
201static struct pci_driver cy82c693_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = cy82c693,
204 .probe = cy82c693_init_one,
205 .remove = ata_pci_remove_one
206};
207
208static int __init cy82c693_init(void)
209{
210 return pci_register_driver(&cy82c693_pci_driver);
211}
212
213
214static void __exit cy82c693_exit(void)
215{
216 pci_unregister_driver(&cy82c693_pci_driver);
217}
218
219
220MODULE_AUTHOR("Alan Cox");
221MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
222MODULE_LICENSE("GPL");
223MODULE_DEVICE_TABLE(pci, cy82c693);
224MODULE_VERSION(DRV_VERSION);
225
226module_init(cy82c693_init);
227module_exit(cy82c693_exit);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
new file mode 100644
index 000000000000..c30bc181304f
--- /dev/null
+++ b/drivers/ata/pata_efar.c
@@ -0,0 +1,342 @@
1/*
2 * pata_efar.c - EFAR PIIX clone controller driver
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
9 * Intel ICH controllers the EFAR widened the UDMA mode register bits
10 * and doesn't require the funky clock selection.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22#include <linux/ata.h>
23
24#define DRV_NAME "pata_efar"
25#define DRV_VERSION "0.4.1"
26
27/**
28 * efar_pre_reset - check for 40/80 pin
29 * @ap: Port
30 *
31 * Perform cable detection for the EFAR ATA interface. This is
32 * different to the PIIX arrangement
33 */
34
35static int efar_pre_reset(struct ata_port *ap)
36{
37 static const struct pci_bits efar_enable_bits[] = {
38 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
39 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
40 };
41
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43 u8 tmp;
44
45 if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) {
46 ata_port_disable(ap);
47 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
48 return 0;
49 }
50 pci_read_config_byte(pdev, 0x47, &tmp);
51 if (tmp & (2 >> ap->port_no))
52 ap->cbl = ATA_CBL_PATA40;
53 else
54 ap->cbl = ATA_CBL_PATA80;
55 return ata_std_prereset(ap);
56}
57
58/**
59 * efar_error_handler - Probe and reset specified port on PATA host controller
60 * @ap: Port to probe
61 *
62 * LOCKING:
63 * None (inherited from caller).
64 */
65
66static void efar_error_handler(struct ata_port *ap)
67{
68 ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
69}
70
71/**
72 * efar_set_piomode - Initialize host controller PATA PIO timings
73 * @ap: Port whose timings we are configuring
74 * @adev: Device whose timings we are configuring
75 *
76 * Set PIO mode for device, in host controller PCI config space.
77 *
78 * LOCKING:
79 * None (inherited from caller).
80 */
81
82static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
83{
84 unsigned int pio = adev->pio_mode - XFER_PIO_0;
85 struct pci_dev *dev = to_pci_dev(ap->host->dev);
86 unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
87 u16 idetm_data;
88 int control = 0;
89
90 /*
91 * See Intel Document 298600-004 for the timing programming rules
92 * for PIIX/ICH. The EFAR is a clone, so it is very similar.
93 */
94
95 static const /* ISP RTC */
96 u8 timings[][2] = { { 0, 0 },
97 { 0, 0 },
98 { 1, 0 },
99 { 2, 1 },
100 { 2, 3 }, };
101
102 if (pio > 2)
103 control |= 1; /* TIME1 enable */
104 if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
105 control |= 2; /* IE enable */
106 /* Intel specifies that the PPE functionality is for disk only */
107 if (adev->class == ATA_DEV_ATA)
108 control |= 4; /* PPE enable */
109
110 pci_read_config_word(dev, idetm_port, &idetm_data);
111
112 /* Enable PPE, IE and TIME as appropriate */
113
114 if (adev->devno == 0) {
115 idetm_data &= 0xCCF0;
116 idetm_data |= control;
117 idetm_data |= (timings[pio][0] << 12) |
118 (timings[pio][1] << 8);
119 } else {
120 int shift = 4 * ap->port_no;
121 u8 slave_data;
122
123 idetm_data &= 0xCC0F;
124 idetm_data |= (control << 4);
125
126  /* Slave timing in separate register */
127 pci_read_config_byte(dev, 0x44, &slave_data);
128 slave_data &= 0x0F << shift;
129 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
130 pci_write_config_byte(dev, 0x44, slave_data);
131 }
132
133 idetm_data |= 0x4000; /* Ensure SITRE is enabled */
134 pci_write_config_word(dev, idetm_port, idetm_data);
135}
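/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * For the master drive the newly programmed bits of the IDETIM word are
 * the ISP count in bits 13:12, the RTC count in bits 9:8 and the
 * control bits (TIME1/IE/PPE) in the low nibble. The helper name and
 * the worked example are hypothetical.
 */
static inline u16 efar_master_timing_bits(int pio, int control)
{
	static const u8 isp_rtc[5][2] =
		{ { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 } };
	return (isp_rtc[pio][0] << 12) | (isp_rtc[pio][1] << 8) | control;
}
/* Example: PIO4 ATA disk -> control = 7, bits = (2 << 12) | (3 << 8) | 7 = 0x2307 */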
136
137/**
138 * efar_set_dmamode - Initialize host controller PATA DMA timings
139 * @ap: Port whose timings we are configuring
140 * @adev: Device to program
141 *
142 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
143 *
144 * LOCKING:
145 * None (inherited from caller).
146 */
147
148static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
149{
150 struct pci_dev *dev = to_pci_dev(ap->host->dev);
151 u8 master_port = ap->port_no ? 0x42 : 0x40;
152 u16 master_data;
153 u8 speed = adev->dma_mode;
154 int devid = adev->devno + 2 * ap->port_no;
155 u8 udma_enable;
156
157 static const /* ISP RTC */
158 u8 timings[][2] = { { 0, 0 },
159 { 0, 0 },
160 { 1, 0 },
161 { 2, 1 },
162 { 2, 3 }, };
163
164 pci_read_config_word(dev, master_port, &master_data);
165 pci_read_config_byte(dev, 0x48, &udma_enable);
166
167 if (speed >= XFER_UDMA_0) {
168 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
169 u16 udma_timing;
170
171 udma_enable |= (1 << devid);
172
173 /* Load the UDMA mode number */
174 pci_read_config_word(dev, 0x4A, &udma_timing);
175 udma_timing &= ~(7 << (4 * devid));
176 udma_timing |= udma << (4 * devid);
177 pci_write_config_word(dev, 0x4A, udma_timing);
178 } else {
179 /*
180 * MWDMA is driven by the PIO timings. We must also enable
181 * IORDY unconditionally along with TIME1. PPE has already
182 * been set when the PIO timing was set.
183 */
184 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
185 unsigned int control;
186 u8 slave_data;
187 const unsigned int needed_pio[3] = {
188 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
189 };
190 int pio = needed_pio[mwdma] - XFER_PIO_0;
191
192 control = 3; /* IORDY|TIME1 */
193
194 /* If the drive MWDMA is faster than it can do PIO then
195 we must force PIO into PIO0 */
196
197 if (adev->pio_mode < needed_pio[mwdma])
198 /* Enable DMA timing only */
199 control |= 8; /* PIO cycles in PIO0 */
200
201 if (adev->devno) { /* Slave */
202 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
203 master_data |= control << 4;
204 pci_read_config_byte(dev, 0x44, &slave_data);
205 slave_data &= (0x0F + 0xE1 * ap->port_no);
206 /* Load the matching timing */
207 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
208 pci_write_config_byte(dev, 0x44, slave_data);
209 } else { /* Master */
210 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
211 and master timing bits */
212 master_data |= control;
213 master_data |=
214 (timings[pio][0] << 12) |
215 (timings[pio][1] << 8);
216 }
217 udma_enable &= ~(1 << devid);
218 pci_write_config_word(dev, master_port, master_data);
219 }
220 pci_write_config_byte(dev, 0x48, udma_enable);
221}
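/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Each of the four possible drives (devid = devno + 2 * port) owns one
 * enable bit in config register 0x48 and a 4-bit mode field in the word
 * at 0x4A, which is what the read-modify-write above updates.
 * Hypothetical helpers restating that arithmetic:
 */
static inline u8 efar_udma_enable_bit(int devid)
{
	return 1 << devid;
}

static inline u16 efar_udma_mode_bits(int devid, unsigned int udma)
{
	return udma << (4 * devid);	/* clear 7 << (4 * devid) before merging */
}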
222
223static struct scsi_host_template efar_sht = {
224 .module = THIS_MODULE,
225 .name = DRV_NAME,
226 .ioctl = ata_scsi_ioctl,
227 .queuecommand = ata_scsi_queuecmd,
228 .can_queue = ATA_DEF_QUEUE,
229 .this_id = ATA_SHT_THIS_ID,
230 .sg_tablesize = LIBATA_MAX_PRD,
231 .max_sectors = ATA_MAX_SECTORS,
232 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
233 .emulated = ATA_SHT_EMULATED,
234 .use_clustering = ATA_SHT_USE_CLUSTERING,
235 .proc_name = DRV_NAME,
236 .dma_boundary = ATA_DMA_BOUNDARY,
237 .slave_configure = ata_scsi_slave_config,
238 .bios_param = ata_std_bios_param,
239};
240
241static const struct ata_port_operations efar_ops = {
242 .port_disable = ata_port_disable,
243 .set_piomode = efar_set_piomode,
244 .set_dmamode = efar_set_dmamode,
245 .mode_filter = ata_pci_default_filter,
246
247 .tf_load = ata_tf_load,
248 .tf_read = ata_tf_read,
249 .check_status = ata_check_status,
250 .exec_command = ata_exec_command,
251 .dev_select = ata_std_dev_select,
252
253 .freeze = ata_bmdma_freeze,
254 .thaw = ata_bmdma_thaw,
255 .error_handler = efar_error_handler,
256 .post_internal_cmd = ata_bmdma_post_internal_cmd,
257
258 .bmdma_setup = ata_bmdma_setup,
259 .bmdma_start = ata_bmdma_start,
260 .bmdma_stop = ata_bmdma_stop,
261 .bmdma_status = ata_bmdma_status,
262 .qc_prep = ata_qc_prep,
263 .qc_issue = ata_qc_issue_prot,
264 .data_xfer = ata_pio_data_xfer,
265
266 .eng_timeout = ata_eng_timeout,
267
268 .irq_handler = ata_interrupt,
269 .irq_clear = ata_bmdma_irq_clear,
270
271 .port_start = ata_port_start,
272 .port_stop = ata_port_stop,
273 .host_stop = ata_host_stop,
274};
275
276
277/**
278 * efar_init_one - Register EFAR ATA PCI device with kernel services
279 * @pdev: PCI device to register
280 * @ent: Entry in efar_pci_tbl matching with @pdev
281 *
282 * Called from kernel PCI layer.
283 *
284 * LOCKING:
285 * Inherited from PCI layer (may sleep).
286 *
287 * RETURNS:
288 * Zero on success, or -ERRNO value.
289 */
290
291static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
292{
293 static int printed_version;
294 static struct ata_port_info info = {
295 .sht = &efar_sht,
296 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
297 .pio_mask = 0x1f, /* pio0-4 */
298 .mwdma_mask = 0x07, /* mwdma0-2 */
299 .udma_mask = 0x0f, /* UDMA 66 */
300 .port_ops = &efar_ops,
301 };
302 static struct ata_port_info *port_info[2] = { &info, &info };
303
304 if (!printed_version++)
305 dev_printk(KERN_DEBUG, &pdev->dev,
306 "version " DRV_VERSION "\n");
307
308 return ata_pci_init_one(pdev, port_info, 2);
309}
310
311static const struct pci_device_id efar_pci_tbl[] = {
312 { 0x1055, 0x9130, PCI_ANY_ID, PCI_ANY_ID, },
313 { } /* terminate list */
314};
315
316static struct pci_driver efar_pci_driver = {
317 .name = DRV_NAME,
318 .id_table = efar_pci_tbl,
319 .probe = efar_init_one,
320 .remove = ata_pci_remove_one,
321};
322
323static int __init efar_init(void)
324{
325 return pci_register_driver(&efar_pci_driver);
326}
327
328static void __exit efar_exit(void)
329{
330 pci_unregister_driver(&efar_pci_driver);
331}
332
333
334module_init(efar_init);
335module_exit(efar_exit);
336
337MODULE_AUTHOR("Alan Cox");
338MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
339MODULE_LICENSE("GPL");
340MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
341MODULE_VERSION(DRV_VERSION);
342
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
new file mode 100644
index 000000000000..94bb1dfc3f19
--- /dev/null
+++ b/drivers/ata/pata_hpt366.c
@@ -0,0 +1,478 @@
1/*
2 * Libata driver for the HighPoint 366 and 368 UDMA66 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 *
13 * TODO
14 * Maybe PLL mode
15 * Look into engine reset on timeout errors. Should not be
16 * required.
17 */
18
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/init.h>
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <scsi/scsi_host.h>
27#include <linux/libata.h>
28
29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.5"
31
32struct hpt_clock {
33 u8 xfer_speed;
34 u32 timing;
35};
36
37/* key for bus clock timings
38 * bit
39 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
40 * DMA. cycles = value + 1
41 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
42 * DMA. cycles = value + 1
43 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
44 * register access.
45 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
46 * register access.
47 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
48 * during task file register access.
49 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
50 * xfer.
51 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
52 * register access.
53 * 28 UDMA enable
54 * 29 DMA enable
55 * 30 PIO_MST enable. if set, the chip is in bus master mode during
56 * PIO.
57 * 31 FIFO enable.
58 */
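/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * A hypothetical decoder for the timing words in the tables below,
 * following the bit layout described in the key above; useful when
 * sanity-checking an entry by hand.
 */
struct hpt_timing_fields {
	unsigned int data_high, data_low, cmd_high, cmd_low;
	unsigned int udma_cycle, pre_high, cmd_pre_high;
	unsigned int udma_en, dma_en, pio_mst, fifo_en;
};

static void hpt_decode_timing(u32 t, struct hpt_timing_fields *f)
{
	f->data_high    = t & 0x0f;             /* bits 0:3   */
	f->data_low     = (t >> 4) & 0x1f;      /* bits 4:8   */
	f->cmd_high     = (t >> 9) & 0x0f;      /* bits 9:12  */
	f->cmd_low      = (t >> 13) & 0x1f;     /* bits 13:17 */
	f->udma_cycle   = (t >> 18) & 0x0f;     /* bits 18:21 */
	f->pre_high     = (t >> 22) & 0x07;     /* bits 22:24 */
	f->cmd_pre_high = (t >> 25) & 0x07;     /* bits 25:27 */
	f->udma_en      = (t >> 28) & 1;
	f->dma_en       = (t >> 29) & 1;
	f->pio_mst      = (t >> 30) & 1;
	f->fifo_en      = (t >> 31) & 1;
}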
59
60static const struct hpt_clock hpt366_40[] = {
61 { XFER_UDMA_4, 0x900fd943 },
62 { XFER_UDMA_3, 0x900ad943 },
63 { XFER_UDMA_2, 0x900bd943 },
64 { XFER_UDMA_1, 0x9008d943 },
65 { XFER_UDMA_0, 0x9008d943 },
66
67 { XFER_MW_DMA_2, 0xa008d943 },
68 { XFER_MW_DMA_1, 0xa010d955 },
69 { XFER_MW_DMA_0, 0xa010d9fc },
70
71 { XFER_PIO_4, 0xc008d963 },
72 { XFER_PIO_3, 0xc010d974 },
73 { XFER_PIO_2, 0xc010d997 },
74 { XFER_PIO_1, 0xc010d9c7 },
75 { XFER_PIO_0, 0xc018d9d9 },
76 { 0, 0x0120d9d9 }
77};
78
79static const struct hpt_clock hpt366_33[] = {
80 { XFER_UDMA_4, 0x90c9a731 },
81 { XFER_UDMA_3, 0x90cfa731 },
82 { XFER_UDMA_2, 0x90caa731 },
83 { XFER_UDMA_1, 0x90cba731 },
84 { XFER_UDMA_0, 0x90c8a731 },
85
86 { XFER_MW_DMA_2, 0xa0c8a731 },
87 { XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
88 { XFER_MW_DMA_0, 0xa0c8a797 },
89
90 { XFER_PIO_4, 0xc0c8a731 },
91 { XFER_PIO_3, 0xc0c8a742 },
92 { XFER_PIO_2, 0xc0d0a753 },
93 { XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
94 { XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
95 { 0, 0x0120a7a7 }
96};
97
98static const struct hpt_clock hpt366_25[] = {
99 { XFER_UDMA_4, 0x90c98521 },
100 { XFER_UDMA_3, 0x90cf8521 },
101 { XFER_UDMA_2, 0x90cf8521 },
102 { XFER_UDMA_1, 0x90cb8521 },
103 { XFER_UDMA_0, 0x90cb8521 },
104
105 { XFER_MW_DMA_2, 0xa0ca8521 },
106 { XFER_MW_DMA_1, 0xa0ca8532 },
107 { XFER_MW_DMA_0, 0xa0ca8575 },
108
109 { XFER_PIO_4, 0xc0ca8521 },
110 { XFER_PIO_3, 0xc0ca8532 },
111 { XFER_PIO_2, 0xc0ca8542 },
112 { XFER_PIO_1, 0xc0d08572 },
113 { XFER_PIO_0, 0xc0d08585 },
114 { 0, 0x01208585 }
115};
116
117static const char *bad_ata33[] = {
118 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
119 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
120 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
121 "Maxtor 90510D4",
122 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
123 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
124 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
125 NULL
126};
127
128static const char *bad_ata66_4[] = {
129 "IBM-DTLA-307075",
130 "IBM-DTLA-307060",
131 "IBM-DTLA-307045",
132 "IBM-DTLA-307030",
133 "IBM-DTLA-307020",
134 "IBM-DTLA-307015",
135 "IBM-DTLA-305040",
136 "IBM-DTLA-305030",
137 "IBM-DTLA-305020",
138 "IC35L010AVER07-0",
139 "IC35L020AVER07-0",
140 "IC35L030AVER07-0",
141 "IC35L040AVER07-0",
142 "IC35L060AVER07-0",
143 "WDC AC310200R",
144 NULL
145};
146
147static const char *bad_ata66_3[] = {
148 "WDC AC310200R",
149 NULL
150};
151
152static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
153{
154 unsigned char model_num[40];
155 char *s;
156 unsigned int len;
157 int i = 0;
158
159 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
160 s = &model_num[0];
161 len = strnlen(s, sizeof(model_num));
162
163 /* ATAPI specifies that empty space is blank-filled; remove blanks */
164 while ((len > 0) && (s[len - 1] == ' ')) {
165 len--;
166 s[len] = 0;
167 }
168
169 while(list[i] != NULL) {
170 if (!strncmp(list[i], s, len)) {
171 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
172 modestr, list[i]);
173 return 1;
174 }
175 i++;
176 }
177 return 0;
178}
179
180/**
181 * hpt366_filter - mode selection filter
182 * @ap: ATA interface
183 * @adev: ATA device
184 *
185 * Block UDMA on devices that cause trouble with this controller.
186 */
187
188static unsigned long hpt366_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
189{
190 if (adev->class == ATA_DEV_ATA) {
191 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
192 mask &= ~ATA_MASK_UDMA;
193 if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
194 mask &= ~(0x07 << ATA_SHIFT_UDMA);
195 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
196 mask &= ~(0x0F << ATA_SHIFT_UDMA);
197 }
198 return ata_pci_default_filter(ap, adev, mask);
199}
200
201/**
202 * hpt36x_find_mode - find the register programming for a mode
203 * @ap: ATA port
204 * @speed: transfer mode
205 *
206 * Return the 32bit register programming information for this channel
207 * that matches the speed provided.
208 */
209
210static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
211{
212 struct hpt_clock *clocks = ap->host->private_data;
213
214 while(clocks->xfer_speed) {
215 if (clocks->xfer_speed == speed)
216 return clocks->timing;
217 clocks++;
218 }
219 BUG();
220 return 0xffffffffU; /* silence compiler warning */
221}
222
223static int hpt36x_pre_reset(struct ata_port *ap)
224{
225 u8 ata66;
226 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
227
228 pci_read_config_byte(pdev, 0x5A, &ata66);
229 if (ata66 & (1 << ap->port_no))
230 ap->cbl = ATA_CBL_PATA40;
231 else
232 ap->cbl = ATA_CBL_PATA80;
233 return ata_std_prereset(ap);
234}
235
236/**
237 * hpt36x_error_handler - reset the hpt36x bus
238 * @ap: ATA port to reset
239 *
240 * Perform the reset handling for the 366/368
241 */
242
243static void hpt36x_error_handler(struct ata_port *ap)
244{
245 ata_bmdma_drive_eh(ap, hpt36x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
246}
247
248/**
249 * hpt366_set_piomode - PIO setup
250 * @ap: ATA interface
251 * @adev: device on the interface
252 *
253 * Perform PIO mode setup.
254 */
255
256static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
257{
258 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
259 u32 addr1, addr2;
260 u32 reg;
261 u32 mode;
262 u8 fast;
263
264 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
265 addr2 = 0x51 + 4 * ap->port_no;
266
267 /* Fast interrupt prediction disable, hold off interrupt disable */
268 pci_read_config_byte(pdev, addr2, &fast);
269 if (fast & 0x80) {
270 fast &= ~0x80;
271 pci_write_config_byte(pdev, addr2, fast);
272 }
273
274 pci_read_config_dword(pdev, addr1, &reg);
275 mode = hpt36x_find_mode(ap, adev->pio_mode);
276 mode &= ~0x8000000; /* No FIFO in PIO */
277 mode &= ~0x30070000; /* Leave config bits alone */
278 reg &= 0x30070000; /* Strip timing bits */
279 pci_write_config_dword(pdev, addr1, reg | mode);
280}
281
282/**
283 * hpt366_set_dmamode - DMA timing setup
284 * @ap: ATA interface
285 * @adev: Device being configured
286 *
287 * Set up the channel for MWDMA or UDMA modes. Much the same as with
288 * PIO, load the mode number and then set MWDMA or UDMA flag.
289 */
290
291static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
292{
293 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
294 u32 addr1, addr2;
295 u32 reg;
296 u32 mode;
297 u8 fast;
298
299 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
300 addr2 = 0x51 + 4 * ap->port_no;
301
302 /* Fast interrupt prediction disable, hold off interrupt disable */
303 pci_read_config_byte(pdev, addr2, &fast);
304 if (fast & 0x80) {
305 fast &= ~0x80;
306 pci_write_config_byte(pdev, addr2, fast);
307 }
308
309 pci_read_config_dword(pdev, addr1, &reg);
310 mode = hpt36x_find_mode(ap, adev->dma_mode);
311 mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
312 mode &= ~0xC0000000; /* Leave config bits alone */
313 reg &= 0xC0000000; /* Strip timing bits */
314 pci_write_config_dword(pdev, addr1, reg | mode);
315}
316
317static struct scsi_host_template hpt36x_sht = {
318 .module = THIS_MODULE,
319 .name = DRV_NAME,
320 .ioctl = ata_scsi_ioctl,
321 .queuecommand = ata_scsi_queuecmd,
322 .can_queue = ATA_DEF_QUEUE,
323 .this_id = ATA_SHT_THIS_ID,
324 .sg_tablesize = LIBATA_MAX_PRD,
325 .max_sectors = ATA_MAX_SECTORS,
326 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
327 .emulated = ATA_SHT_EMULATED,
328 .use_clustering = ATA_SHT_USE_CLUSTERING,
329 .proc_name = DRV_NAME,
330 .dma_boundary = ATA_DMA_BOUNDARY,
331 .slave_configure = ata_scsi_slave_config,
332 .bios_param = ata_std_bios_param,
333};
334
335/*
336 * Configuration for HPT366/68
337 */
338
339static struct ata_port_operations hpt366_port_ops = {
340 .port_disable = ata_port_disable,
341 .set_piomode = hpt366_set_piomode,
342 .set_dmamode = hpt366_set_dmamode,
343 .mode_filter = hpt366_filter,
344
345 .tf_load = ata_tf_load,
346 .tf_read = ata_tf_read,
347 .check_status = ata_check_status,
348 .exec_command = ata_exec_command,
349 .dev_select = ata_std_dev_select,
350
351 .freeze = ata_bmdma_freeze,
352 .thaw = ata_bmdma_thaw,
353 .error_handler = hpt36x_error_handler,
354 .post_internal_cmd = ata_bmdma_post_internal_cmd,
355
356 .bmdma_setup = ata_bmdma_setup,
357 .bmdma_start = ata_bmdma_start,
358 .bmdma_stop = ata_bmdma_stop,
359 .bmdma_status = ata_bmdma_status,
360
361 .qc_prep = ata_qc_prep,
362 .qc_issue = ata_qc_issue_prot,
363 .eng_timeout = ata_eng_timeout,
364 .data_xfer = ata_pio_data_xfer,
365
366 .irq_handler = ata_interrupt,
367 .irq_clear = ata_bmdma_irq_clear,
368
369 .port_start = ata_port_start,
370 .port_stop = ata_port_stop,
371 .host_stop = ata_host_stop
372};
373
374/**
375 * hpt36x_init_one - Initialise an HPT366/368
376 * @dev: PCI device
377 * @id: Entry in match table
378 *
379 * Initialise an HPT36x device. There are some interesting complications
380 * here. Firstly, the chip may report 366 and be one of several variants.
381 * Secondly, all the timings depend on the clock for the chip, which we
382 * must detect and look up.
383 *
384 * These are the known chip mappings. They may be missing a couple of later
385 * releases.
386 *
387 * Chip version PCI Rev Notes
388 * HPT366 4 (HPT366) 0 UDMA66
389 * HPT366 4 (HPT366) 1 UDMA66
390 * HPT368 4 (HPT366) 2 UDMA66
391 * HPT37x/30x 4 (HPT366) 3+ Other driver
392 *
393 */
394
395static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
396{
397 static struct ata_port_info info_hpt366 = {
398 .sht = &hpt36x_sht,
399 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
400 .pio_mask = 0x1f,
401 .mwdma_mask = 0x07,
402 .udma_mask = 0x1f,
403 .port_ops = &hpt366_port_ops
404 };
405 struct ata_port_info *port_info[2] = {&info_hpt366, &info_hpt366};
406
407 u32 class_rev;
408 u32 reg1;
409 u8 drive_fast;
410
411 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
412 class_rev &= 0xFF;
413
414 /* May be a later chip in disguise. Check */
415 /* Newer chips are not in the HPT36x driver. Ignore them */
416 if (class_rev > 2)
417 return -ENODEV;
418
419 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
420 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
421 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
422 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
423
424 pci_read_config_byte(dev, 0x51, &drive_fast);
425 if (drive_fast & 0x80)
426 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
427
428 pci_read_config_dword(dev, 0x40, &reg1);
429
430 /* PCI clocking determines the ATA timing values to use */
431 /* info_hpt366 is safe against re-entry so we can scribble on it */
432 switch(reg1 & 0x700) {
433 case 5:
434 info_hpt366.private_data = &hpt366_40;
435 break;
436 case 9:
437 info_hpt366.private_data = &hpt366_25;
438 break;
439 default:
440 info_hpt366.private_data = &hpt366_33;
441 break;
442 }
443 /* Now kick off ATA set up */
444 return ata_pci_init_one(dev, port_info, 2);
445}
446
447static struct pci_device_id hpt36x[] = {
448 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
449 { 0, },
450};
451
452static struct pci_driver hpt36x_pci_driver = {
453 .name = DRV_NAME,
454 .id_table = hpt36x,
455 .probe = hpt36x_init_one,
456 .remove = ata_pci_remove_one
457};
458
459static int __init hpt36x_init(void)
460{
461 return pci_register_driver(&hpt36x_pci_driver);
462}
463
464
465static void __exit hpt36x_exit(void)
466{
467 pci_unregister_driver(&hpt36x_pci_driver);
468}
469
470
471MODULE_AUTHOR("Alan Cox");
472MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
473MODULE_LICENSE("GPL");
474MODULE_DEVICE_TABLE(pci, hpt36x);
475MODULE_VERSION(DRV_VERSION);
476
477module_init(hpt36x_init);
478module_exit(hpt36x_exit);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
new file mode 100644
index 000000000000..532a7928f803
--- /dev/null
+++ b/drivers/ata/pata_hpt37x.c
@@ -0,0 +1,1257 @@
1/*
2 * Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 * TODO
13 * PLL mode
14 * Look into engine reset on timeout errors. Should not be
15 * required.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_hpt37x"
28#define DRV_VERSION "0.5"
29
30struct hpt_clock {
31 u8 xfer_speed;
32 u32 timing;
33};
34
35struct hpt_chip {
36 const char *name;
37 unsigned int base;
38 struct hpt_clock const *clocks[4];
39};
40
41/* key for bus clock timings
42 * bit
43 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
44 * DMA. cycles = value + 1
45 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
46 * DMA. cycles = value + 1
47 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
48 * register access.
49 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
50 * register access.
51 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
52 * during task file register access.
53 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
54 * xfer.
55 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
56 * register access.
57 * 28 UDMA enable
58 * 29 DMA enable
59 * 30 PIO_MST enable. if set, the chip is in bus master mode during
60 * PIO.
61 * 31 FIFO enable.
62 */
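/*
 * Editorial sketch (not part of the original driver): one way to read the
 * bit key above is as a set of shift/mask extractions.  The structure and
 * helper below are hypothetical, shown for illustration only; the driver
 * itself only ever programs whole pre-computed timing words.
 */
#if 0	/* illustration only, never compiled */
struct hpt_timing_fields {
	unsigned int data_high_time;		/* bits 0:3   */
	unsigned int data_low_time;		/* bits 4:8   */
	unsigned int cmd_high_time;		/* bits 9:12  */
	unsigned int cmd_low_time;		/* bits 13:17 */
	unsigned int udma_cycle_time;		/* bits 18:21 */
	unsigned int pre_high_time;		/* bits 22:24 */
	unsigned int cmd_pre_high_time;		/* bits 25:27 */
	unsigned int udma_en;			/* bit 28 */
	unsigned int dma_en;			/* bit 29 */
	unsigned int pio_mst_en;		/* bit 30 */
	unsigned int fifo_en;			/* bit 31 */
};

static void hpt_unpack_timing(u32 t, struct hpt_timing_fields *f)
{
	f->data_high_time	= t & 0x0F;
	f->data_low_time	= (t >> 4) & 0x1F;
	f->cmd_high_time	= (t >> 9) & 0x0F;
	f->cmd_low_time		= (t >> 13) & 0x1F;
	f->udma_cycle_time	= (t >> 18) & 0x0F;
	f->pre_high_time	= (t >> 22) & 0x07;
	f->cmd_pre_high_time	= (t >> 25) & 0x07;
	f->udma_en		= (t >> 28) & 1;
	f->dma_en		= (t >> 29) & 1;
	f->pio_mst_en		= (t >> 30) & 1;
	f->fifo_en		= (t >> 31) & 1;
}
#endif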
63
64/* from highpoint documentation. these are old values */
65static const struct hpt_clock hpt370_timings_33[] = {
66/* { XFER_UDMA_5, 0x1A85F442, 0x16454e31 }, */
67 { XFER_UDMA_5, 0x16454e31 },
68 { XFER_UDMA_4, 0x16454e31 },
69 { XFER_UDMA_3, 0x166d4e31 },
70 { XFER_UDMA_2, 0x16494e31 },
71 { XFER_UDMA_1, 0x164d4e31 },
72 { XFER_UDMA_0, 0x16514e31 },
73
74 { XFER_MW_DMA_2, 0x26514e21 },
75 { XFER_MW_DMA_1, 0x26514e33 },
76 { XFER_MW_DMA_0, 0x26514e97 },
77
78 { XFER_PIO_4, 0x06514e21 },
79 { XFER_PIO_3, 0x06514e22 },
80 { XFER_PIO_2, 0x06514e33 },
81 { XFER_PIO_1, 0x06914e43 },
82 { XFER_PIO_0, 0x06914e57 },
83 { 0, 0x06514e57 }
84};
85
86static const struct hpt_clock hpt370_timings_66[] = {
87 { XFER_UDMA_5, 0x14846231 },
88 { XFER_UDMA_4, 0x14886231 },
89 { XFER_UDMA_3, 0x148c6231 },
90 { XFER_UDMA_2, 0x148c6231 },
91 { XFER_UDMA_1, 0x14906231 },
92 { XFER_UDMA_0, 0x14986231 },
93
94 { XFER_MW_DMA_2, 0x26514e21 },
95 { XFER_MW_DMA_1, 0x26514e33 },
96 { XFER_MW_DMA_0, 0x26514e97 },
97
98 { XFER_PIO_4, 0x06514e21 },
99 { XFER_PIO_3, 0x06514e22 },
100 { XFER_PIO_2, 0x06514e33 },
101 { XFER_PIO_1, 0x06914e43 },
102 { XFER_PIO_0, 0x06914e57 },
103 { 0, 0x06514e57 }
104};
105
106/* these are the current (4 sep 2001) timings from highpoint */
107static const struct hpt_clock hpt370a_timings_33[] = {
108 { XFER_UDMA_5, 0x12446231 },
109 { XFER_UDMA_4, 0x12446231 },
110 { XFER_UDMA_3, 0x126c6231 },
111 { XFER_UDMA_2, 0x12486231 },
112 { XFER_UDMA_1, 0x124c6233 },
113 { XFER_UDMA_0, 0x12506297 },
114
115 { XFER_MW_DMA_2, 0x22406c31 },
116 { XFER_MW_DMA_1, 0x22406c33 },
117 { XFER_MW_DMA_0, 0x22406c97 },
118
119 { XFER_PIO_4, 0x06414e31 },
120 { XFER_PIO_3, 0x06414e42 },
121 { XFER_PIO_2, 0x06414e53 },
122 { XFER_PIO_1, 0x06814e93 },
123 { XFER_PIO_0, 0x06814ea7 },
124 { 0, 0x06814ea7 }
125};
126
127/* 2x 33MHz timings */
128static const struct hpt_clock hpt370a_timings_66[] = {
129 { XFER_UDMA_5, 0x1488e673 },
130 { XFER_UDMA_4, 0x1488e673 },
131 { XFER_UDMA_3, 0x1498e673 },
132 { XFER_UDMA_2, 0x1490e673 },
133 { XFER_UDMA_1, 0x1498e677 },
134 { XFER_UDMA_0, 0x14a0e73f },
135
136 { XFER_MW_DMA_2, 0x2480fa73 },
137 { XFER_MW_DMA_1, 0x2480fa77 },
138 { XFER_MW_DMA_0, 0x2480fb3f },
139
140 { XFER_PIO_4, 0x0c82be73 },
141 { XFER_PIO_3, 0x0c82be95 },
142 { XFER_PIO_2, 0x0c82beb7 },
143 { XFER_PIO_1, 0x0d02bf37 },
144 { XFER_PIO_0, 0x0d02bf5f },
145 { 0, 0x0d02bf5f }
146};
147
148static const struct hpt_clock hpt370a_timings_50[] = {
149 { XFER_UDMA_5, 0x12848242 },
150 { XFER_UDMA_4, 0x12ac8242 },
151 { XFER_UDMA_3, 0x128c8242 },
152 { XFER_UDMA_2, 0x120c8242 },
153 { XFER_UDMA_1, 0x12148254 },
154 { XFER_UDMA_0, 0x121882ea },
155
156 { XFER_MW_DMA_2, 0x22808242 },
157 { XFER_MW_DMA_1, 0x22808254 },
158 { XFER_MW_DMA_0, 0x228082ea },
159
160 { XFER_PIO_4, 0x0a81f442 },
161 { XFER_PIO_3, 0x0a81f443 },
162 { XFER_PIO_2, 0x0a81f454 },
163 { XFER_PIO_1, 0x0ac1f465 },
164 { XFER_PIO_0, 0x0ac1f48a },
165 { 0, 0x0ac1f48a }
166};
167
168static const struct hpt_clock hpt372_timings_33[] = {
169 { XFER_UDMA_6, 0x1c81dc62 },
170 { XFER_UDMA_5, 0x1c6ddc62 },
171 { XFER_UDMA_4, 0x1c8ddc62 },
172 { XFER_UDMA_3, 0x1c8edc62 }, /* checkme */
173 { XFER_UDMA_2, 0x1c91dc62 },
174 { XFER_UDMA_1, 0x1c9adc62 }, /* checkme */
175 { XFER_UDMA_0, 0x1c82dc62 }, /* checkme */
176
177 { XFER_MW_DMA_2, 0x2c829262 },
178 { XFER_MW_DMA_1, 0x2c829266 }, /* checkme */
179 { XFER_MW_DMA_0, 0x2c82922e }, /* checkme */
180
181 { XFER_PIO_4, 0x0c829c62 },
182 { XFER_PIO_3, 0x0c829c84 },
183 { XFER_PIO_2, 0x0c829ca6 },
184 { XFER_PIO_1, 0x0d029d26 },
185 { XFER_PIO_0, 0x0d029d5e },
186 { 0, 0x0d029d5e }
187};
188
189static const struct hpt_clock hpt372_timings_50[] = {
190 { XFER_UDMA_5, 0x12848242 },
191 { XFER_UDMA_4, 0x12ac8242 },
192 { XFER_UDMA_3, 0x128c8242 },
193 { XFER_UDMA_2, 0x120c8242 },
194 { XFER_UDMA_1, 0x12148254 },
195 { XFER_UDMA_0, 0x121882ea },
196
197 { XFER_MW_DMA_2, 0x22808242 },
198 { XFER_MW_DMA_1, 0x22808254 },
199 { XFER_MW_DMA_0, 0x228082ea },
200
201 { XFER_PIO_4, 0x0a81f442 },
202 { XFER_PIO_3, 0x0a81f443 },
203 { XFER_PIO_2, 0x0a81f454 },
204 { XFER_PIO_1, 0x0ac1f465 },
205 { XFER_PIO_0, 0x0ac1f48a },
206 { 0, 0x0a81f443 }
207};
208
209static const struct hpt_clock hpt372_timings_66[] = {
210 { XFER_UDMA_6, 0x1c869c62 },
211 { XFER_UDMA_5, 0x1cae9c62 },
212 { XFER_UDMA_4, 0x1c8a9c62 },
213 { XFER_UDMA_3, 0x1c8e9c62 },
214 { XFER_UDMA_2, 0x1c929c62 },
215 { XFER_UDMA_1, 0x1c9a9c62 },
216 { XFER_UDMA_0, 0x1c829c62 },
217
218 { XFER_MW_DMA_2, 0x2c829c62 },
219 { XFER_MW_DMA_1, 0x2c829c66 },
220 { XFER_MW_DMA_0, 0x2c829d2e },
221
222 { XFER_PIO_4, 0x0c829c62 },
223 { XFER_PIO_3, 0x0c829c84 },
224 { XFER_PIO_2, 0x0c829ca6 },
225 { XFER_PIO_1, 0x0d029d26 },
226 { XFER_PIO_0, 0x0d029d5e },
227 { 0, 0x0d029d26 }
228};
229
230static const struct hpt_clock hpt374_timings_33[] = {
231 { XFER_UDMA_6, 0x12808242 },
232 { XFER_UDMA_5, 0x12848242 },
233 { XFER_UDMA_4, 0x12ac8242 },
234 { XFER_UDMA_3, 0x128c8242 },
235 { XFER_UDMA_2, 0x120c8242 },
236 { XFER_UDMA_1, 0x12148254 },
237 { XFER_UDMA_0, 0x121882ea },
238
239 { XFER_MW_DMA_2, 0x22808242 },
240 { XFER_MW_DMA_1, 0x22808254 },
241 { XFER_MW_DMA_0, 0x228082ea },
242
243 { XFER_PIO_4, 0x0a81f442 },
244 { XFER_PIO_3, 0x0a81f443 },
245 { XFER_PIO_2, 0x0a81f454 },
246 { XFER_PIO_1, 0x0ac1f465 },
247 { XFER_PIO_0, 0x0ac1f48a },
248 { 0, 0x06814e93 }
249};
250
251static const struct hpt_chip hpt370 = {
252 "HPT370",
253 48,
254 {
255 hpt370_timings_33,
256 NULL,
257 NULL,
258 hpt370_timings_66
259 }
260};
261
262static const struct hpt_chip hpt370a = {
263 "HPT370A",
264 48,
265 {
266 hpt370a_timings_33,
267 NULL,
268 hpt370a_timings_50,
269 hpt370a_timings_66
270 }
271};
272
273static const struct hpt_chip hpt372 = {
274 "HPT372",
275 55,
276 {
277 hpt372_timings_33,
278 NULL,
279 hpt372_timings_50,
280 hpt372_timings_66
281 }
282};
283
284static const struct hpt_chip hpt302 = {
285 "HPT302",
286 66,
287 {
288 hpt372_timings_33,
289 NULL,
290 hpt372_timings_50,
291 hpt372_timings_66
292 }
293};
294
295static const struct hpt_chip hpt371 = {
296 "HPT371",
297 66,
298 {
299 hpt372_timings_33,
300 NULL,
301 hpt372_timings_50,
302 hpt372_timings_66
303 }
304};
305
306static const struct hpt_chip hpt372a = {
307 "HPT372A",
308 66,
309 {
310 hpt372_timings_33,
311 NULL,
312 hpt372_timings_50,
313 hpt372_timings_66
314 }
315};
316
317static const struct hpt_chip hpt374 = {
318 "HPT374",
319 48,
320 {
321 hpt374_timings_33,
322 NULL,
323 NULL,
324 NULL
325 }
326};
327
328/**
329 * hpt37x_find_mode - find the timing data for a transfer mode
330 * @ap: ATA port
331 * @speed: transfer mode
332 *
333 * Return the 32bit register programming information for this channel
334 * that matches the speed provided.
335 */
336
337static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
338{
339 struct hpt_clock *clocks = ap->host->private_data;
340
341 while(clocks->xfer_speed) {
342 if (clocks->xfer_speed == speed)
343 return clocks->timing;
344 clocks++;
345 }
346 BUG();
347 return 0xffffffffU; /* silence compiler warning */
348}
349
350static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
351{
352 unsigned char model_num[40];
353 char *s;
354 unsigned int len;
355 int i = 0;
356
357 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
358 sizeof(model_num));
359 s = &model_num[0];
360 len = strnlen(s, sizeof(model_num));
361
362 /* ATAPI specifies that empty space is blank-filled; remove blanks */
363 while ((len > 0) && (s[len - 1] == ' ')) {
364 len--;
365 s[len] = 0;
366 }
367
368 while(list[i] != NULL) {
369 if (!strncmp(list[i], s, len)) {
370 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
371 modestr, list[i]);
372 return 1;
373 }
374 i++;
375 }
376 return 0;
377}
378
379static const char *bad_ata33[] = {
380 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
381 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
382 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
383 "Maxtor 90510D4",
384 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
385 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
386 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
387 NULL
388};
389
390static const char *bad_ata100_5[] = {
391 "IBM-DTLA-307075",
392 "IBM-DTLA-307060",
393 "IBM-DTLA-307045",
394 "IBM-DTLA-307030",
395 "IBM-DTLA-307020",
396 "IBM-DTLA-307015",
397 "IBM-DTLA-305040",
398 "IBM-DTLA-305030",
399 "IBM-DTLA-305020",
400 "IC35L010AVER07-0",
401 "IC35L020AVER07-0",
402 "IC35L030AVER07-0",
403 "IC35L040AVER07-0",
404 "IC35L060AVER07-0",
405 "WDC AC310200R",
406 NULL
407};
408
409/**
410 * hpt370_filter - mode selection filter
411 * @ap: ATA interface
412 * @adev: ATA device
413 *
414 * Block UDMA on devices that cause trouble with this controller.
415 */
416
417static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
418{
419	if (adev->class == ATA_DEV_ATA) {
420 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
421 mask &= ~ATA_MASK_UDMA;
422 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
423 mask &= ~(0x1F << ATA_SHIFT_UDMA);
424 }
425 return ata_pci_default_filter(ap, adev, mask);
426}
427
428/**
429 * hpt370a_filter - mode selection filter
430 * @ap: ATA interface
431 * @adev: ATA device
432 *
433 * Block UDMA on devices that cause trouble with this controller.
434 */
435
436static unsigned long hpt370a_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
437{
438	if (adev->class == ATA_DEV_ATA) {
439 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
440 mask &= ~ (0x1F << ATA_SHIFT_UDMA);
441 }
442 return ata_pci_default_filter(ap, adev, mask);
443}
444
445/**
446 * hpt37x_pre_reset - reset the hpt37x bus
447 * @ap: ATA port to reset
448 *
449 * Perform the initial reset handling for the 370/372 and 374 func 0
450 */
451
452static int hpt37x_pre_reset(struct ata_port *ap)
453{
454 u8 scr2, ata66;
455 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
456
457 pci_read_config_byte(pdev, 0x5B, &scr2);
458 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
459 /* Cable register now active */
460 pci_read_config_byte(pdev, 0x5A, &ata66);
461 /* Restore state */
462 pci_write_config_byte(pdev, 0x5B, scr2);
463
464 if (ata66 & (1 << ap->port_no))
465 ap->cbl = ATA_CBL_PATA40;
466 else
467 ap->cbl = ATA_CBL_PATA80;
468
469 /* Reset the state machine */
470 pci_write_config_byte(pdev, 0x50, 0x37);
471 pci_write_config_byte(pdev, 0x54, 0x37);
472 udelay(100);
473
474 return ata_std_prereset(ap);
475}
476
477/**
478 * hpt37x_error_handler - reset the hpt374
479 * @ap: ATA port to reset
480 *
481 * Perform probe for HPT37x, except for HPT374 channel 2
482 */
483
484static void hpt37x_error_handler(struct ata_port *ap)
485{
486 ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
487}
488
489static int hpt374_pre_reset(struct ata_port *ap)
490{
491 u16 mcr3, mcr6;
492 u8 ata66;
493
494 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
495 /* Do the extra channel work */
496 pci_read_config_word(pdev, 0x52, &mcr3);
497 pci_read_config_word(pdev, 0x56, &mcr6);
498 /* Set bit 15 of 0x52 to enable TCBLID as input
499 Set bit 15 of 0x56 to enable FCBLID as input
500 */
501 pci_write_config_word(pdev, 0x52, mcr3 | 0x8000);
502 pci_write_config_word(pdev, 0x56, mcr6 | 0x8000);
503 pci_read_config_byte(pdev, 0x5A, &ata66);
504 /* Reset TCBLID/FCBLID to output */
505 pci_write_config_word(pdev, 0x52, mcr3);
506 pci_write_config_word(pdev, 0x56, mcr6);
507
508 if (ata66 & (1 << ap->port_no))
509 ap->cbl = ATA_CBL_PATA40;
510 else
511 ap->cbl = ATA_CBL_PATA80;
512
513 /* Reset the state machine */
514 pci_write_config_byte(pdev, 0x50, 0x37);
515 pci_write_config_byte(pdev, 0x54, 0x37);
516 udelay(100);
517
518 return ata_std_prereset(ap);
519}
520
521/**
522 * hpt374_error_handler - reset the hpt374
523 * @ap: ATA port to reset
524 *
525 * The 374 cable detect is a little different due to the extra
526 * channels. The function 0 channels work like usual but function 1
527 * is special
528 */
529
530static void hpt374_error_handler(struct ata_port *ap)
531{
532 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
533
534 if (!(PCI_FUNC(pdev->devfn) & 1))
535 hpt37x_error_handler(ap);
536 else
537 ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
538}
539
540/**
541 * hpt370_set_piomode - PIO setup
542 * @ap: ATA interface
543 * @adev: device on the interface
544 *
545 * Perform PIO mode setup.
546 */
547
548static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
549{
550 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
551 u32 addr1, addr2;
552 u32 reg;
553 u32 mode;
554 u8 fast;
555
556 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
557 addr2 = 0x51 + 4 * ap->port_no;
558
559 /* Fast interrupt prediction disable, hold off interrupt disable */
560 pci_read_config_byte(pdev, addr2, &fast);
561 fast &= ~0x02;
562 fast |= 0x01;
563 pci_write_config_byte(pdev, addr2, fast);
564
565 pci_read_config_dword(pdev, addr1, &reg);
566 mode = hpt37x_find_mode(ap, adev->pio_mode);
567	mode &= ~0x80000000;	/* No FIFO in PIO */
568 mode &= ~0x30070000; /* Leave config bits alone */
569 reg &= 0x30070000; /* Strip timing bits */
570 pci_write_config_dword(pdev, addr1, reg | mode);
571}
572
573/**
574 * hpt370_set_dmamode - DMA timing setup
575 * @ap: ATA interface
576 * @adev: Device being configured
577 *
578 * Set up the channel for MWDMA or UDMA modes. Much the same as with
579 * PIO, load the mode number and then set MWDMA or UDMA flag.
580 */
581
582static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
583{
584 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
585 u32 addr1, addr2;
586 u32 reg;
587 u32 mode;
588 u8 fast;
589
590 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
591 addr2 = 0x51 + 4 * ap->port_no;
592
593 /* Fast interrupt prediction disable, hold off interrupt disable */
594 pci_read_config_byte(pdev, addr2, &fast);
595 fast &= ~0x02;
596 fast |= 0x01;
597 pci_write_config_byte(pdev, addr2, fast);
598
599 pci_read_config_dword(pdev, addr1, &reg);
600 mode = hpt37x_find_mode(ap, adev->dma_mode);
601	mode &= ~0xC0000000;	/* Leave config bits alone */
602	mode |= 0x80000000;	/* FIFO in MWDMA or UDMA */
603 reg &= 0xC0000000; /* Strip timing bits */
604 pci_write_config_dword(pdev, addr1, reg | mode);
605}
606
607/**
608 * hpt370_bmdma_start - DMA engine begin
609 * @qc: ATA command
610 *
611 * The 370 and 370A want us to reset the DMA engine each time we
612 * use it. The 372 and later are fine.
613 */
614
615static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
616{
617 struct ata_port *ap = qc->ap;
618 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
619 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
620 udelay(10);
621 ata_bmdma_start(qc);
622}
623
624/**
625 * hpt370_bmdma_stop - DMA engine stop
626 * @qc: ATA command
627 *
628 * Work around the HPT370 DMA engine.
629 */
630
631static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
632{
633 struct ata_port *ap = qc->ap;
634 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
635 u8 dma_stat = inb(ap->ioaddr.bmdma_addr + 2);
636 u8 dma_cmd;
637 unsigned long bmdma = ap->ioaddr.bmdma_addr;
638
639 if (dma_stat & 0x01) {
640 udelay(20);
641 dma_stat = inb(bmdma + 2);
642 }
643 if (dma_stat & 0x01) {
644 /* Clear the engine */
645 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
646 udelay(10);
647 /* Stop DMA */
648		dma_cmd = inb(bmdma);
649 outb(dma_cmd & 0xFE, bmdma);
650 /* Clear Error */
651 dma_stat = inb(bmdma + 2);
652		outb(dma_stat | 0x06, bmdma + 2);
653 /* Clear the engine */
654 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
655 udelay(10);
656 }
657 ata_bmdma_stop(qc);
658}
659
660/**
661 * hpt372_set_piomode - PIO setup
662 * @ap: ATA interface
663 * @adev: device on the interface
664 *
665 * Perform PIO mode setup.
666 */
667
668static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
669{
670 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
671 u32 addr1, addr2;
672 u32 reg;
673 u32 mode;
674 u8 fast;
675
676 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
677 addr2 = 0x51 + 4 * ap->port_no;
678
679 /* Fast interrupt prediction disable, hold off interrupt disable */
680 pci_read_config_byte(pdev, addr2, &fast);
681 fast &= ~0x07;
682 pci_write_config_byte(pdev, addr2, fast);
683
684 pci_read_config_dword(pdev, addr1, &reg);
685 mode = hpt37x_find_mode(ap, adev->pio_mode);
686
687 printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
688 mode &= ~0x80000000; /* No FIFO in PIO */
689 mode &= ~0x30070000; /* Leave config bits alone */
690 reg &= 0x30070000; /* Strip timing bits */
691 pci_write_config_dword(pdev, addr1, reg | mode);
692}
693
694/**
695 * hpt372_set_dmamode - DMA timing setup
696 * @ap: ATA interface
697 * @adev: Device being configured
698 *
699 * Set up the channel for MWDMA or UDMA modes. Much the same as with
700 * PIO, load the mode number and then set MWDMA or UDMA flag.
701 */
702
703static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
704{
705 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
706 u32 addr1, addr2;
707 u32 reg;
708 u32 mode;
709 u8 fast;
710
711 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
712 addr2 = 0x51 + 4 * ap->port_no;
713
714 /* Fast interrupt prediction disable, hold off interrupt disable */
715 pci_read_config_byte(pdev, addr2, &fast);
716 fast &= ~0x07;
717 pci_write_config_byte(pdev, addr2, fast);
718
719 pci_read_config_dword(pdev, addr1, &reg);
720 mode = hpt37x_find_mode(ap, adev->dma_mode);
721 printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
722 mode &= ~0xC0000000; /* Leave config bits alone */
723 mode |= 0x80000000; /* FIFO in MWDMA or UDMA */
724 reg &= 0xC0000000; /* Strip timing bits */
725 pci_write_config_dword(pdev, addr1, reg | mode);
726}
727
728/**
729 * hpt37x_bmdma_stop - DMA engine stop
730 * @qc: ATA command
731 *
732 * Clean up after the HPT372 and later DMA engine
733 */
734
735static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
736{
737 struct ata_port *ap = qc->ap;
738 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
739 int mscreg = 0x50 + 2 * ap->port_no;
740 u8 bwsr_stat, msc_stat;
741
742 pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
743 pci_read_config_byte(pdev, mscreg, &msc_stat);
744 if (bwsr_stat & (1 << ap->port_no))
745 pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
746 ata_bmdma_stop(qc);
747}
748
749
750static struct scsi_host_template hpt37x_sht = {
751 .module = THIS_MODULE,
752 .name = DRV_NAME,
753 .ioctl = ata_scsi_ioctl,
754 .queuecommand = ata_scsi_queuecmd,
755 .can_queue = ATA_DEF_QUEUE,
756 .this_id = ATA_SHT_THIS_ID,
757 .sg_tablesize = LIBATA_MAX_PRD,
758 .max_sectors = ATA_MAX_SECTORS,
759 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
760 .emulated = ATA_SHT_EMULATED,
761 .use_clustering = ATA_SHT_USE_CLUSTERING,
762 .proc_name = DRV_NAME,
763 .dma_boundary = ATA_DMA_BOUNDARY,
764 .slave_configure = ata_scsi_slave_config,
765 .bios_param = ata_std_bios_param,
766};
767
768/*
769 * Configuration for HPT370
770 */
771
772static struct ata_port_operations hpt370_port_ops = {
773 .port_disable = ata_port_disable,
774 .set_piomode = hpt370_set_piomode,
775 .set_dmamode = hpt370_set_dmamode,
776 .mode_filter = hpt370_filter,
777
778 .tf_load = ata_tf_load,
779 .tf_read = ata_tf_read,
780 .check_status = ata_check_status,
781 .exec_command = ata_exec_command,
782 .dev_select = ata_std_dev_select,
783
784 .freeze = ata_bmdma_freeze,
785 .thaw = ata_bmdma_thaw,
786 .error_handler = hpt37x_error_handler,
787 .post_internal_cmd = ata_bmdma_post_internal_cmd,
788
789 .bmdma_setup = ata_bmdma_setup,
790 .bmdma_start = hpt370_bmdma_start,
791 .bmdma_stop = hpt370_bmdma_stop,
792 .bmdma_status = ata_bmdma_status,
793
794 .qc_prep = ata_qc_prep,
795 .qc_issue = ata_qc_issue_prot,
796 .eng_timeout = ata_eng_timeout,
797 .data_xfer = ata_pio_data_xfer,
798
799 .irq_handler = ata_interrupt,
800 .irq_clear = ata_bmdma_irq_clear,
801
802 .port_start = ata_port_start,
803 .port_stop = ata_port_stop,
804 .host_stop = ata_host_stop
805};
806
807/*
808 * Configuration for HPT370A. Close to the 370 but with fewer mode filters
809 */
810
811static struct ata_port_operations hpt370a_port_ops = {
812 .port_disable = ata_port_disable,
813 .set_piomode = hpt370_set_piomode,
814 .set_dmamode = hpt370_set_dmamode,
815 .mode_filter = hpt370a_filter,
816
817 .tf_load = ata_tf_load,
818 .tf_read = ata_tf_read,
819 .check_status = ata_check_status,
820 .exec_command = ata_exec_command,
821 .dev_select = ata_std_dev_select,
822
823 .freeze = ata_bmdma_freeze,
824 .thaw = ata_bmdma_thaw,
825 .error_handler = hpt37x_error_handler,
826 .post_internal_cmd = ata_bmdma_post_internal_cmd,
827
828 .bmdma_setup = ata_bmdma_setup,
829 .bmdma_start = hpt370_bmdma_start,
830 .bmdma_stop = hpt370_bmdma_stop,
831 .bmdma_status = ata_bmdma_status,
832
833 .qc_prep = ata_qc_prep,
834 .qc_issue = ata_qc_issue_prot,
835 .eng_timeout = ata_eng_timeout,
836 .data_xfer = ata_pio_data_xfer,
837
838 .irq_handler = ata_interrupt,
839 .irq_clear = ata_bmdma_irq_clear,
840
841 .port_start = ata_port_start,
842 .port_stop = ata_port_stop,
843 .host_stop = ata_host_stop
844};
845
846/*
847 * Configuration for HPT372, HPT371, HPT302. Slightly different PIO
848 * and DMA mode setting functionality.
849 */
850
851static struct ata_port_operations hpt372_port_ops = {
852 .port_disable = ata_port_disable,
853 .set_piomode = hpt372_set_piomode,
854 .set_dmamode = hpt372_set_dmamode,
855 .mode_filter = ata_pci_default_filter,
856
857 .tf_load = ata_tf_load,
858 .tf_read = ata_tf_read,
859 .check_status = ata_check_status,
860 .exec_command = ata_exec_command,
861 .dev_select = ata_std_dev_select,
862
863 .freeze = ata_bmdma_freeze,
864 .thaw = ata_bmdma_thaw,
865 .error_handler = hpt37x_error_handler,
866 .post_internal_cmd = ata_bmdma_post_internal_cmd,
867
868 .bmdma_setup = ata_bmdma_setup,
869 .bmdma_start = ata_bmdma_start,
870 .bmdma_stop = hpt37x_bmdma_stop,
871 .bmdma_status = ata_bmdma_status,
872
873 .qc_prep = ata_qc_prep,
874 .qc_issue = ata_qc_issue_prot,
875 .eng_timeout = ata_eng_timeout,
876 .data_xfer = ata_pio_data_xfer,
877
878 .irq_handler = ata_interrupt,
879 .irq_clear = ata_bmdma_irq_clear,
880
881 .port_start = ata_port_start,
882 .port_stop = ata_port_stop,
883 .host_stop = ata_host_stop
884};
885
886/*
887 * Configuration for HPT374. Mode setting works like 372 and friends
888 * but we have a different cable detection procedure.
889 */
890
891static struct ata_port_operations hpt374_port_ops = {
892 .port_disable = ata_port_disable,
893 .set_piomode = hpt372_set_piomode,
894 .set_dmamode = hpt372_set_dmamode,
895 .mode_filter = ata_pci_default_filter,
896
897 .tf_load = ata_tf_load,
898 .tf_read = ata_tf_read,
899 .check_status = ata_check_status,
900 .exec_command = ata_exec_command,
901 .dev_select = ata_std_dev_select,
902
903 .freeze = ata_bmdma_freeze,
904 .thaw = ata_bmdma_thaw,
905 .error_handler = hpt374_error_handler,
906 .post_internal_cmd = ata_bmdma_post_internal_cmd,
907
908 .bmdma_setup = ata_bmdma_setup,
909 .bmdma_start = ata_bmdma_start,
910 .bmdma_stop = hpt37x_bmdma_stop,
911 .bmdma_status = ata_bmdma_status,
912
913 .qc_prep = ata_qc_prep,
914 .qc_issue = ata_qc_issue_prot,
915 .eng_timeout = ata_eng_timeout,
916 .data_xfer = ata_pio_data_xfer,
917
918 .irq_handler = ata_interrupt,
919 .irq_clear = ata_bmdma_irq_clear,
920
921 .port_start = ata_port_start,
922 .port_stop = ata_port_stop,
923 .host_stop = ata_host_stop
924};
925
926/**
927 * hpt37x_clock_slot - Turn a frequency reading into a clock slot
928 * @freq: Reported frequency timing
929 * @base: Base timing
930 *
931 * Turn the timing data into a clock slot (0 for 33MHz, 1 for 40MHz, 2 for
932 * 50MHz and 3 for 66MHz).
933 */
934
935static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
936{
937 unsigned int f = (base * freq) / 192; /* Mhz */
938 if (f < 40)
939 return 0; /* 33Mhz slot */
940 if (f < 45)
941 return 1; /* 40Mhz slot */
942 if (f < 55)
943 return 2; /* 50Mhz slot */
944	return 3;	/* 66MHz slot */
945}
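/*
 * Worked example (illustrative numbers only, not taken from the source):
 * the HPT370 table uses a base of 48.  A raw frequency count of 133 gives
 * f = (48 * 133) / 192 = 33, which is below 40 and selects slot 0 (the
 * 33MHz timing table); a count of 265 gives f = 66 and selects slot 3.
 */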
946
947/**
948 * hpt37x_calibrate_dpll - Calibrate the DPLL loop
949 * @dev: PCI device
950 *
951 * Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
952 * succeeds
953 */
954
955static int hpt37x_calibrate_dpll(struct pci_dev *dev)
956{
957 u8 reg5b;
958 u32 reg5c;
959 int tries;
960
961 for(tries = 0; tries < 0x5000; tries++) {
962 udelay(50);
963 pci_read_config_byte(dev, 0x5b, &reg5b);
964 if (reg5b & 0x80) {
965 /* See if it stays set */
966 for(tries = 0; tries < 0x1000; tries ++) {
967 pci_read_config_byte(dev, 0x5b, &reg5b);
968 /* Failed ? */
969 if ((reg5b & 0x80) == 0)
970 return 0;
971 }
972 /* Turn off tuning, we have the DPLL set */
973 pci_read_config_dword(dev, 0x5c, &reg5c);
974 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
975 return 1;
976 }
977 }
978 /* Never went stable */
979 return 0;
980}
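/*
 * Worked example (illustrative only): hpt37x_init_one() below programs a
 * target window into register 0x5C before retrying this calibration.  For
 * a 33MHz slot on a chip with a base of 48, f_low = (33 * 48) / 192 = 8
 * and f_high = f_low + 2 = 10, so the dword written to 0x5C is
 * (10 << 16) | 8 = 0x000A0008.
 */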
981/**
982 * hpt37x_init_one - Initialise an HPT37X/302
983 * @dev: PCI device
984 * @id: Entry in match table
985 *
986 * Initialise an HPT37x device. There are some interesting complications
987 * here. Firstly the chip may report 366 and be one of several variants.
988 * Secondly, all the timings depend on the clock for the chip, which we must
989 * detect and look up.
990 *
991 * These are the known chip mappings. It may be missing a couple of later
992 * releases.
993 *
994 * Chip version PCI Rev Notes
995 * HPT366 4 (HPT366) 0 Other driver
996 * HPT366 4 (HPT366) 1 Other driver
997 * HPT368 4 (HPT366) 2 Other driver
998 * HPT370 4 (HPT366) 3 UDMA100
999 * HPT370A 4 (HPT366) 4 UDMA100
1000 * HPT372 4 (HPT366) 5 UDMA133 (1)
1001 * HPT372N 4 (HPT366) 6 Other driver
1002 * HPT372A 5 (HPT372) 1 UDMA133 (1)
1003 * HPT372N 5 (HPT372) 2 Other driver
1004 * HPT302 6 (HPT302) 1 UDMA133
1005 * HPT302N 6 (HPT302) 2 Other driver
1006 * HPT371 7 (HPT371) * UDMA133
1007 * HPT374 8 (HPT374) * UDMA133 4 channel
1008 * HPT372N 9 (HPT372N) * Other driver
1009 *
1010 * (1) UDMA133 support depends on the bus clock
1011 */
1012
1013static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1014{
1015 /* HPT370 - UDMA100 */
1016 static struct ata_port_info info_hpt370 = {
1017 .sht = &hpt37x_sht,
1018 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1019 .pio_mask = 0x1f,
1020 .mwdma_mask = 0x07,
1021 .udma_mask = 0x3f,
1022 .port_ops = &hpt370_port_ops
1023 };
1024 /* HPT370A - UDMA100 */
1025 static struct ata_port_info info_hpt370a = {
1026 .sht = &hpt37x_sht,
1027 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1028 .pio_mask = 0x1f,
1029 .mwdma_mask = 0x07,
1030 .udma_mask = 0x3f,
1031 .port_ops = &hpt370a_port_ops
1032 };
1033 /* HPT371, 372 and friends - UDMA133 */
1034 static struct ata_port_info info_hpt372 = {
1035 .sht = &hpt37x_sht,
1036 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1037 .pio_mask = 0x1f,
1038 .mwdma_mask = 0x07,
1039 .udma_mask = 0x7f,
1040 .port_ops = &hpt372_port_ops
1041 };
1042 /* HPT371, 372 and friends - UDMA100 at 50MHz clock */
1043 static struct ata_port_info info_hpt372_50 = {
1044 .sht = &hpt37x_sht,
1045 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1046 .pio_mask = 0x1f,
1047 .mwdma_mask = 0x07,
1048 .udma_mask = 0x3f,
1049 .port_ops = &hpt372_port_ops
1050 };
1051 /* HPT374 - UDMA133 */
1052 static struct ata_port_info info_hpt374 = {
1053 .sht = &hpt37x_sht,
1054 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
1055 .pio_mask = 0x1f,
1056 .mwdma_mask = 0x07,
1057 .udma_mask = 0x7f,
1058 .port_ops = &hpt374_port_ops
1059 };
1060
1061 static const int MHz[4] = { 33, 40, 50, 66 };
1062
1063 struct ata_port_info *port_info[2];
1064 struct ata_port_info *port;
1065
1066 u8 irqmask;
1067 u32 class_rev;
1068 u32 freq;
1069
1070 const struct hpt_chip *chip_table;
1071 int clock_slot;
1072
1073 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
1074 class_rev &= 0xFF;
1075
1076 if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
1077 /* May be a later chip in disguise. Check */
1078 /* Older chips are in the HPT366 driver. Ignore them */
1079 if (class_rev < 3)
1080 return -ENODEV;
1081 /* N series chips have their own driver. Ignore */
1082 if (class_rev == 6)
1083 return -ENODEV;
1084
1085 switch(class_rev) {
1086 case 3:
1087 port = &info_hpt370;
1088 chip_table = &hpt370;
1089 break;
1090 case 4:
1091 port = &info_hpt370a;
1092 chip_table = &hpt370a;
1093 break;
1094 case 5:
1095 port = &info_hpt372;
1096 chip_table = &hpt372;
1097 break;
1098 default:
1099 printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev);
1100 return -ENODEV;
1101 }
1102 } else {
1103 switch(dev->device) {
1104 case PCI_DEVICE_ID_TTI_HPT372:
1105 /* 372N if rev >= 2*/
1106 if (class_rev >= 2)
1107 return -ENODEV;
1108 port = &info_hpt372;
1109 chip_table = &hpt372a;
1110 break;
1111 case PCI_DEVICE_ID_TTI_HPT302:
1112 /* 302N if rev > 1 */
1113 if (class_rev > 1)
1114 return -ENODEV;
1115 port = &info_hpt372;
1116 /* Check this */
1117 chip_table = &hpt302;
1118 break;
1119 case PCI_DEVICE_ID_TTI_HPT371:
1120 port = &info_hpt372;
1121 chip_table = &hpt371;
1122 break;
1123 case PCI_DEVICE_ID_TTI_HPT374:
1124 chip_table = &hpt374;
1125 port = &info_hpt374;
1126 break;
1127 default:
1128 printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
1129 return -ENODEV;
1130 }
1131 }
1132 /* Ok so this is a chip we support */
1133
1134 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
1135 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
1136 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
1137 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1138
1139 pci_read_config_byte(dev, 0x5A, &irqmask);
1140 irqmask &= ~0x10;
1141 pci_write_config_byte(dev, 0x5a, irqmask);
1142
1143 /*
1144	 * Default to PCI clock. Make sure MA15/16 are set to output
1145	 * to prevent drives having problems with 40-pin cables. Needed
1146	 * for some drives such as the IBM-DTLA which will not enter ready
1147	 * state on reset when PDIAG is an input.
1148 */
1149
1150 pci_write_config_byte(dev, 0x5b, 0x23);
1151
1152 pci_read_config_dword(dev, 0x70, &freq);
1153 if ((freq >> 12) != 0xABCDE) {
1154 int i;
1155 u8 sr;
1156 u32 total = 0;
1157
1158 printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
1159
1160 /* This is the process the HPT371 BIOS is reported to use */
1161 for(i = 0; i < 128; i++) {
1162 pci_read_config_byte(dev, 0x78, &sr);
1163 total += sr;
1164 udelay(15);
1165 }
1166 freq = total / 128;
1167 }
1168 freq &= 0x1FF;
1169
1170 /*
1171 * Turn the frequency check into a band and then find a timing
1172 * table to match it.
1173 */
1174
1175 clock_slot = hpt37x_clock_slot(freq, chip_table->base);
1176 if (chip_table->clocks[clock_slot] == NULL) {
1177 /*
1178 * We need to try PLL mode instead
1179 */
1180 unsigned int f_low = (MHz[clock_slot] * chip_table->base) / 192;
1181 unsigned int f_high = f_low + 2;
1182 int adjust;
1183
1184 for(adjust = 0; adjust < 8; adjust++) {
1185 if (hpt37x_calibrate_dpll(dev))
1186 break;
1187 /* See if it'll settle at a fractionally different clock */
1188 if ((adjust & 3) == 3) {
1189 f_low --;
1190 f_high ++;
1191 }
1192 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
1193 }
1194 if (adjust == 8) {
1195 printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n");
1196 return -ENODEV;
1197 }
1198 /* Check if this works for all cases */
1199 port->private_data = (void *)hpt370_timings_66;
1200
1201 printk(KERN_INFO "hpt37x: Bus clock %dMHz, using DPLL.\n", MHz[clock_slot]);
1202 } else {
1203 port->private_data = (void *)chip_table->clocks[clock_slot];
1204 /*
1205 * Perform a final fixup. The 371 and 372 clock determines
1206 * if UDMA133 is available.
1207 */
1208
1209 if (clock_slot == 2 && chip_table == &hpt372) { /* 50Mhz */
1210 printk(KERN_WARNING "pata_hpt37x: No UDMA133 support available with 50MHz bus clock.\n");
1211 if (port == &info_hpt372)
1212 port = &info_hpt372_50;
1213 else BUG();
1214 }
1215 printk(KERN_INFO "hpt37x: %s: Bus clock %dMHz.\n", chip_table->name, MHz[clock_slot]);
1216 }
1217 port_info[0] = port_info[1] = port;
1218 /* Now kick off ATA set up */
1219 return ata_pci_init_one(dev, port_info, 2);
1220}
1221
1222static struct pci_device_id hpt37x[] = {
1223 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
1224 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371), },
1225 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), },
1226 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374), },
1227 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), },
1228 { 0, },
1229};
1230
1231static struct pci_driver hpt37x_pci_driver = {
1232 .name = DRV_NAME,
1233 .id_table = hpt37x,
1234 .probe = hpt37x_init_one,
1235 .remove = ata_pci_remove_one
1236};
1237
1238static int __init hpt37x_init(void)
1239{
1240 return pci_register_driver(&hpt37x_pci_driver);
1241}
1242
1243
1244static void __exit hpt37x_exit(void)
1245{
1246 pci_unregister_driver(&hpt37x_pci_driver);
1247}
1248
1249
1250MODULE_AUTHOR("Alan Cox");
1251MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
1252MODULE_LICENSE("GPL");
1253MODULE_DEVICE_TABLE(pci, hpt37x);
1254MODULE_VERSION(DRV_VERSION);
1255
1256module_init(hpt37x_init);
1257module_exit(hpt37x_exit);
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
new file mode 100644
index 000000000000..06c8db079b91
--- /dev/null
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -0,0 +1,597 @@
1/*
2 * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
3 *
4 * This driver is heavily based upon:
5 *
6 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
7 *
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc
11 *
12 *
13 * TODO
14 * 371N
15 * Work out best PLL policy
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26
27#define DRV_NAME "pata_hpt3x2n"
28#define DRV_VERSION "0.3"
29
30enum {
31 HPT_PCI_FAST = (1 << 31),
32 PCI66 = (1 << 1),
33 USE_DPLL = (1 << 0)
34};
35
36struct hpt_clock {
37 u8 xfer_speed;
38 u32 timing;
39};
40
41struct hpt_chip {
42 const char *name;
43 struct hpt_clock *clocks[3];
44};
45
46/* key for bus clock timings
47 * bit
48 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
49 * DMA. cycles = value + 1
50 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
51 * DMA. cycles = value + 1
52 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
53 * register access.
54 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
55 * register access.
56 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
57 * during task file register access.
58 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
59 * xfer.
60 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
61 * register access.
62 * 28 UDMA enable
63 * 29 DMA enable
64 * 30 PIO_MST enable. if set, the chip is in bus master mode during
65 * PIO.
66 * 31 FIFO enable.
67 */
68
69/* 66MHz DPLL clocks */
70
71static struct hpt_clock hpt3x2n_clocks[] = {
72 { XFER_UDMA_7, 0x1c869c62 },
73 { XFER_UDMA_6, 0x1c869c62 },
74 { XFER_UDMA_5, 0x1c8a9c62 },
75 { XFER_UDMA_4, 0x1c8a9c62 },
76 { XFER_UDMA_3, 0x1c8e9c62 },
77 { XFER_UDMA_2, 0x1c929c62 },
78 { XFER_UDMA_1, 0x1c9a9c62 },
79 { XFER_UDMA_0, 0x1c829c62 },
80
81 { XFER_MW_DMA_2, 0x2c829c62 },
82 { XFER_MW_DMA_1, 0x2c829c66 },
83 { XFER_MW_DMA_0, 0x2c829d2c },
84
85 { XFER_PIO_4, 0x0c829c62 },
86 { XFER_PIO_3, 0x0c829c84 },
87 { XFER_PIO_2, 0x0c829ca6 },
88 { XFER_PIO_1, 0x0d029d26 },
89 { XFER_PIO_0, 0x0d029d5e },
90 { 0, 0x0d029d5e }
91};
92
93/**
94 * hpt3x2n_find_mode - find the timing data for a transfer mode
95 * @ap: ATA port
96 * @speed: transfer mode
97 *
98 * Return the 32bit register programming information for this channel
99 * that matches the speed provided. For the moment the clocks table
100 * is hard coded but easy to change. This will be needed if we use
101 * different DPLLs
102 * different DPLLs.
103
104static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
105{
106 struct hpt_clock *clocks = hpt3x2n_clocks;
107
108 while(clocks->xfer_speed) {
109 if (clocks->xfer_speed == speed)
110 return clocks->timing;
111 clocks++;
112 }
113 BUG();
114 return 0xffffffffU; /* silence compiler warning */
115}
116
117/**
118 * hpt3x2n_pre_reset - reset the hpt3x2n bus
119 * @ap: ATA port to reset
120 *
121 * Perform the initial reset handling for the 3x2n series controllers.
122 * Reset the hardware and state machine, obtain the cable type.
123 */
124
125static int hpt3xn_pre_reset(struct ata_port *ap)
126{
127 u8 scr2, ata66;
128 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
129
130 pci_read_config_byte(pdev, 0x5B, &scr2);
131 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
132 /* Cable register now active */
133 pci_read_config_byte(pdev, 0x5A, &ata66);
134 /* Restore state */
135 pci_write_config_byte(pdev, 0x5B, scr2);
136
137 if (ata66 & (1 << ap->port_no))
138 ap->cbl = ATA_CBL_PATA40;
139 else
140 ap->cbl = ATA_CBL_PATA80;
141
142 /* Reset the state machine */
143 pci_write_config_byte(pdev, 0x50, 0x37);
144 pci_write_config_byte(pdev, 0x54, 0x37);
145 udelay(100);
146
147 return ata_std_prereset(ap);
148}
149
150/**
151 * hpt3x2n_error_handler - probe the hpt3x2n bus
152 * @ap: ATA port to reset
153 *
154 * Perform the probe reset handling for the 3x2N
155 */
156
157static void hpt3x2n_error_handler(struct ata_port *ap)
158{
159 ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
160}
161
162/**
163 * hpt3x2n_set_piomode - PIO setup
164 * @ap: ATA interface
165 * @adev: device on the interface
166 *
167 * Perform PIO mode setup.
168 */
169
170static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
171{
172 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
173 u32 addr1, addr2;
174 u32 reg;
175 u32 mode;
176 u8 fast;
177
178 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
179 addr2 = 0x51 + 4 * ap->port_no;
180
181 /* Fast interrupt prediction disable, hold off interrupt disable */
182 pci_read_config_byte(pdev, addr2, &fast);
183 fast &= ~0x07;
184 pci_write_config_byte(pdev, addr2, fast);
185
186 pci_read_config_dword(pdev, addr1, &reg);
187 mode = hpt3x2n_find_mode(ap, adev->pio_mode);
188	mode &= ~0x80000000;	/* No FIFO in PIO */
189 mode &= ~0x30070000; /* Leave config bits alone */
190 reg &= 0x30070000; /* Strip timing bits */
191 pci_write_config_dword(pdev, addr1, reg | mode);
192}
193
194/**
195 * hpt3x2n_set_dmamode - DMA timing setup
196 * @ap: ATA interface
197 * @adev: Device being configured
198 *
199 * Set up the channel for MWDMA or UDMA modes. Much the same as with
200 * PIO, load the mode number and then set MWDMA or UDMA flag.
201 */
202
203static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
204{
205 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
206 u32 addr1, addr2;
207 u32 reg;
208 u32 mode;
209 u8 fast;
210
211 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
212 addr2 = 0x51 + 4 * ap->port_no;
213
214 /* Fast interrupt prediction disable, hold off interrupt disable */
215 pci_read_config_byte(pdev, addr2, &fast);
216 fast &= ~0x07;
217 pci_write_config_byte(pdev, addr2, fast);
218
219 pci_read_config_dword(pdev, addr1, &reg);
220 mode = hpt3x2n_find_mode(ap, adev->dma_mode);
221	mode &= ~0xC0000000;	/* Leave config bits alone */
222	mode |= 0x80000000;	/* FIFO in MWDMA or UDMA */
223 reg &= 0xC0000000; /* Strip timing bits */
224 pci_write_config_dword(pdev, addr1, reg | mode);
225}
226
227/**
228 * hpt3x2n_bmdma_stop - DMA engine stop
229 * @qc: ATA command
230 *
231 * Clean up after the HPT3x2n and later DMA engine
232 */
233
234static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
235{
236 struct ata_port *ap = qc->ap;
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238 int mscreg = 0x50 + 2 * ap->port_no;
239 u8 bwsr_stat, msc_stat;
240
241 pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
242 pci_read_config_byte(pdev, mscreg, &msc_stat);
243 if (bwsr_stat & (1 << ap->port_no))
244 pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
245 ata_bmdma_stop(qc);
246}
247
248/**
249 * hpt3x2n_set_clock - clock control
250 * @ap: ATA port
251 * @source: 0x21 or 0x23 for PLL or PCI sourced clock
252 *
253 * Switch the ATA bus clock between the PLL and PCI clock sources
254 * while correctly isolating the bus and resetting internal logic
255 *
256 * We must use the DPLL for
257 * - writing
258 * - second channel UDMA7 (SATA ports) or higher
259 * - 66MHz PCI
260 *
261 * or we will underclock the device and get reduced performance.
262 */
263
264static void hpt3x2n_set_clock(struct ata_port *ap, int source)
265{
266 unsigned long bmdma = ap->ioaddr.bmdma_addr;
267
268 /* Tristate the bus */
269 outb(0x80, bmdma+0x73);
270 outb(0x80, bmdma+0x77);
271
272 /* Switch clock and reset channels */
273 outb(source, bmdma+0x7B);
274 outb(0xC0, bmdma+0x79);
275
276 /* Reset state machines */
277 outb(0x37, bmdma+0x70);
278 outb(0x37, bmdma+0x74);
279
280 /* Complete reset */
281 outb(0x00, bmdma+0x79);
282
283 /* Reconnect channels to bus */
284 outb(0x00, bmdma+0x73);
285 outb(0x00, bmdma+0x77);
286}
287
288/* Check if our partner interface is busy */
289
290static int hpt3x2n_pair_idle(struct ata_port *ap)
291{
292 struct ata_host *host = ap->host;
293 struct ata_port *pair = host->ports[ap->port_no ^ 1];
294
295 if (pair->hsm_task_state == HSM_ST_IDLE)
296 return 1;
297 return 0;
298}
299
300static int hpt3x2n_use_dpll(struct ata_port *ap, int reading)
301{
302 long flags = (long)ap->host->private_data;
303 /* See if we should use the DPLL */
304 if (reading == 0)
305 return USE_DPLL; /* Needed for write */
306 if (flags & PCI66)
307 return USE_DPLL; /* Needed at 66Mhz */
308 return 0;
309}
310
311static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc)
312{
313 struct ata_taskfile *tf = &qc->tf;
314 struct ata_port *ap = qc->ap;
315 int flags = (long)ap->host->private_data;
316
317 if (hpt3x2n_pair_idle(ap)) {
318 int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
319 if ((flags & USE_DPLL) != dpll) {
320 if (dpll == 1)
321 hpt3x2n_set_clock(ap, 0x21);
322 else
323 hpt3x2n_set_clock(ap, 0x23);
324 }
325 }
326 return ata_qc_issue_prot(qc);
327}
328
329static struct scsi_host_template hpt3x2n_sht = {
330 .module = THIS_MODULE,
331 .name = DRV_NAME,
332 .ioctl = ata_scsi_ioctl,
333 .queuecommand = ata_scsi_queuecmd,
334 .can_queue = ATA_DEF_QUEUE,
335 .this_id = ATA_SHT_THIS_ID,
336 .sg_tablesize = LIBATA_MAX_PRD,
337 .max_sectors = ATA_MAX_SECTORS,
338 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
339 .emulated = ATA_SHT_EMULATED,
340 .use_clustering = ATA_SHT_USE_CLUSTERING,
341 .proc_name = DRV_NAME,
342 .dma_boundary = ATA_DMA_BOUNDARY,
343 .slave_configure = ata_scsi_slave_config,
344 .bios_param = ata_std_bios_param,
345};
346
347/*
348 * Configuration for HPT3x2n.
349 */
350
351static struct ata_port_operations hpt3x2n_port_ops = {
352 .port_disable = ata_port_disable,
353 .set_piomode = hpt3x2n_set_piomode,
354 .set_dmamode = hpt3x2n_set_dmamode,
355 .mode_filter = ata_pci_default_filter,
356
357 .tf_load = ata_tf_load,
358 .tf_read = ata_tf_read,
359 .check_status = ata_check_status,
360 .exec_command = ata_exec_command,
361 .dev_select = ata_std_dev_select,
362
363 .freeze = ata_bmdma_freeze,
364 .thaw = ata_bmdma_thaw,
365 .error_handler = hpt3x2n_error_handler,
366 .post_internal_cmd = ata_bmdma_post_internal_cmd,
367
368 .bmdma_setup = ata_bmdma_setup,
369 .bmdma_start = ata_bmdma_start,
370 .bmdma_stop = hpt3x2n_bmdma_stop,
371 .bmdma_status = ata_bmdma_status,
372
373 .qc_prep = ata_qc_prep,
374 .qc_issue = hpt3x2n_qc_issue_prot,
375 .eng_timeout = ata_eng_timeout,
376 .data_xfer = ata_pio_data_xfer,
377
378 .irq_handler = ata_interrupt,
379 .irq_clear = ata_bmdma_irq_clear,
380
381 .port_start = ata_port_start,
382 .port_stop = ata_port_stop,
383 .host_stop = ata_host_stop
384};
385
386/**
387 * hpt3xn_calibrate_dpll - Calibrate the DPLL loop
388 * @dev: PCI device
389 *
390 * Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
391 * succeeds
392 */
393
394static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
395{
396 u8 reg5b;
397 u32 reg5c;
398 int tries;
399
400 for(tries = 0; tries < 0x5000; tries++) {
401 udelay(50);
402 pci_read_config_byte(dev, 0x5b, &reg5b);
403 if (reg5b & 0x80) {
404 /* See if it stays set */
405 for(tries = 0; tries < 0x1000; tries ++) {
406 pci_read_config_byte(dev, 0x5b, &reg5b);
407 /* Failed ? */
408 if ((reg5b & 0x80) == 0)
409 return 0;
410 }
411 /* Turn off tuning, we have the DPLL set */
412 pci_read_config_dword(dev, 0x5c, &reg5c);
413 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
414 return 1;
415 }
416 }
417 /* Never went stable */
418 return 0;
419}
420
421static int hpt3x2n_pci_clock(struct pci_dev *pdev)
422{
423 unsigned long freq;
424 u32 fcnt;
425
426 pci_read_config_dword(pdev, 0x70/*CHECKME*/, &fcnt);
427 if ((fcnt >> 12) != 0xABCDE) {
428 printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
429 return 33; /* Not BIOS set */
430 }
431 fcnt &= 0x1FF;
432
433 freq = (fcnt * 77) / 192;
434
435 /* Clamp to bands */
436 if (freq < 40)
437 return 33;
438 if (freq < 45)
439 return 40;
440 if (freq < 55)
441 return 50;
442 return 66;
443}
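/*
 * Worked example (illustrative counts, not from hardware): a raw count of
 * 82 gives freq = (82 * 77) / 192 = 32, which is below 40 and is reported
 * as a 33MHz PCI bus; a count of 165 gives freq = 66 and is reported as a
 * 66MHz bus.
 */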
444
445/**
446 * hpt3x2n_init_one - Initialise an HPT37X/302
447 * @dev: PCI device
448 * @id: Entry in match table
449 *
450 * Initialise an HPT3x2n device. There are some interesting complications
451 * here. Firstly the chip may report 366 and be one of several variants.
452 * Secondly, all the timings depend on the clock for the chip, which we must
453 * detect and look up.
454 *
455 * These are the known chip mappings. It may be missing a couple of later
456 * releases.
457 *
458 * Chip version PCI Rev Notes
459 * HPT372 4 (HPT366) 5 Other driver
460 * HPT372N 4 (HPT366) 6 UDMA133
461 * HPT372 5 (HPT372) 1 Other driver
462 * HPT372N 5 (HPT372) 2 UDMA133
463 * HPT302 6 (HPT302) * Other driver
464 * HPT302N 6 (HPT302) > 1 UDMA133
465 * HPT371 7 (HPT371) * Other driver
466 * HPT371N 7 (HPT371) > 1 UDMA133
467 * HPT374 8 (HPT374) * Other driver
468 * HPT372N 9 (HPT372N) * UDMA133
469 *
470 * (1) UDMA133 support depends on the bus clock
471 *
472 * Still to pin down: HPT371N.
473 */
474
475static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
476{
477 /* HPT372N and friends - UDMA133 */
478 static struct ata_port_info info = {
479 .sht = &hpt3x2n_sht,
480 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
481 .pio_mask = 0x1f,
482 .mwdma_mask = 0x07,
483 .udma_mask = 0x7f,
484 .port_ops = &hpt3x2n_port_ops
485 };
486 struct ata_port_info *port_info[2];
487 struct ata_port_info *port = &info;
488
489 u8 irqmask;
490 u32 class_rev;
491
492 unsigned int pci_mhz;
493 unsigned int f_low, f_high;
494 int adjust;
495
496 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
497 class_rev &= 0xFF;
498
499 switch(dev->device) {
500 case PCI_DEVICE_ID_TTI_HPT366:
501 if (class_rev < 6)
502 return -ENODEV;
503 break;
504 case PCI_DEVICE_ID_TTI_HPT372:
505 /* 372N if rev >= 1*/
506 if (class_rev == 0)
507 return -ENODEV;
508 break;
509 case PCI_DEVICE_ID_TTI_HPT302:
510 if (class_rev < 2)
511 return -ENODEV;
512 break;
513 case PCI_DEVICE_ID_TTI_HPT372N:
514 break;
515 default:
516 printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
517 return -ENODEV;
518 }
519
520 /* Ok so this is a chip we support */
521
522 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
523 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
524 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
525 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
526
527 pci_read_config_byte(dev, 0x5A, &irqmask);
528 irqmask &= ~0x10;
529 pci_write_config_byte(dev, 0x5a, irqmask);
530
531 /* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
532 50 for UDMA100. Right now we always use 66 */
533
534 pci_mhz = hpt3x2n_pci_clock(dev);
535
536 f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */
537 f_high = f_low + 2; /* Tolerance */
538
539 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
540 /* PLL clock */
541 pci_write_config_byte(dev, 0x5B, 0x21);
542
543 /* Unlike the 37x we don't try jiggling the frequency */
544 for(adjust = 0; adjust < 8; adjust++) {
545 if (hpt3xn_calibrate_dpll(dev))
546 break;
547 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
548 }
549 if (adjust == 8)
550 printk(KERN_WARNING "hpt3xn: DPLL did not stabilize.\n");
551
552 /* Set our private data up. We only need a few flags so we use
553 it directly */
554 port->private_data = NULL;
555 if (pci_mhz > 60)
556 port->private_data = (void *)PCI66;
557
558 /* Now kick off ATA set up */
559 port_info[0] = port_info[1] = port;
560 return ata_pci_init_one(dev, port_info, 2);
561}
562
563static struct pci_device_id hpt3x2n[] = {
564 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
565 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), },
566 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), },
567 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N), },
568 { 0, },
569};
570
571static struct pci_driver hpt3x2n_pci_driver = {
572 .name = DRV_NAME,
573 .id_table = hpt3x2n,
574 .probe = hpt3x2n_init_one,
575 .remove = ata_pci_remove_one
576};
577
578static int __init hpt3x2n_init(void)
579{
580 return pci_register_driver(&hpt3x2n_pci_driver);
581}
582
583
584static void __exit hpt3x2n_exit(void)
585{
586 pci_unregister_driver(&hpt3x2n_pci_driver);
587}
588
589
590MODULE_AUTHOR("Alan Cox");
591MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
592MODULE_LICENSE("GPL");
593MODULE_DEVICE_TABLE(pci, hpt3x2n);
594MODULE_VERSION(DRV_VERSION);
595
596module_init(hpt3x2n_init);
597module_exit(hpt3x2n_exit);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
new file mode 100644
index 000000000000..152770133ab1
--- /dev/null
+++ b/drivers/ata/pata_hpt3x3.c
@@ -0,0 +1,226 @@
1/*
2 * pata_hpt3x3 - HPT3x3 driver
3 * (c) Copyright 2005-2006 Red Hat
4 *
5 * Was pata_hpt34x but the naming was confusing as it supported the
6 * 343 and 363 so it has been renamed.
7 *
8 * Based on:
9 * linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002
10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
11 *
12 * May be copied or modified under the terms of the GNU General Public
13 * License
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/blkdev.h>
21#include <linux/delay.h>
22#include <scsi/scsi_host.h>
23#include <linux/libata.h>
24
25#define DRV_NAME "pata_hpt3x3"
26#define DRV_VERSION "0.4.1"
27
28static int hpt3x3_probe_init(struct ata_port *ap)
29{
30 ap->cbl = ATA_CBL_PATA40;
31 return ata_std_prereset(ap);
32}
33
34/**
35 * hpt3x3_error_handler - reset the hpt3x3 bus
36 * @ap: ATA port to reset
37 *
38 * Perform the housekeeping when doing an ATA bus reset. We just
39 * need to force the cable type.
40 */
41
42static void hpt3x3_error_handler(struct ata_port *ap)
43{
44 return ata_bmdma_drive_eh(ap, hpt3x3_probe_init, ata_std_softreset, NULL, ata_std_postreset);
45}
46
47/**
48 * hpt3x3_set_piomode - PIO setup
49 * @ap: ATA interface
50 * @adev: device on the interface
51 *
52 * Set our PIO requirements. This is fairly simple on the HPT3x3 as
53 * all we have to do is clear the MWDMA and UDMA bits then load the
54 * mode number.
55 */
56
57static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
58{
59 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
60 u32 r1, r2;
61 int dn = 2 * ap->port_no + adev->devno;
62
63 pci_read_config_dword(pdev, 0x44, &r1);
64 pci_read_config_dword(pdev, 0x48, &r2);
65 /* Load the PIO timing number */
66 r1 &= ~(7 << (3 * dn));
67 r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
68 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
69
70 pci_write_config_dword(pdev, 0x44, r1);
71 pci_write_config_dword(pdev, 0x48, r2);
72}
73
74/**
75 * hpt3x3_set_dmamode - DMA timing setup
76 * @ap: ATA interface
77 * @adev: Device being configured
78 *
79 * Set up the channel for MWDMA or UDMA modes. Much the same as with
80 * PIO, load the mode number and then set MWDMA or UDMA flag.
81 */
82
83static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
84{
85 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
86 u32 r1, r2;
87 int dn = 2 * ap->port_no + adev->devno;
88 int mode_num = adev->dma_mode & 0x0F;
89
90 pci_read_config_dword(pdev, 0x44, &r1);
91 pci_read_config_dword(pdev, 0x48, &r2);
92 /* Load the timing number */
93 r1 &= ~(7 << (3 * dn));
94 r1 |= (mode_num << (3 * dn));
95 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
96
97 if (adev->dma_mode >= XFER_UDMA_0)
98 r2 |= 0x01 << dn; /* Ultra mode */
99 else
100 r2 |= 0x10 << dn; /* MWDMA */
101
102 pci_write_config_dword(pdev, 0x44, r1);
103 pci_write_config_dword(pdev, 0x48, r2);
104}
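/*
 * Illustrative example (not part of the original code): for the slave
 * device on the secondary channel, dn = 2 * 1 + 1 = 3.  The timing number
 * then occupies bits 9:11 of register 0x44, while in register 0x48 bit 3
 * (0x01 << 3) is that device's UDMA enable and bit 7 (0x10 << 3) is its
 * MWDMA enable.
 */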
105
106static struct scsi_host_template hpt3x3_sht = {
107 .module = THIS_MODULE,
108 .name = DRV_NAME,
109 .ioctl = ata_scsi_ioctl,
110 .queuecommand = ata_scsi_queuecmd,
111 .can_queue = ATA_DEF_QUEUE,
112 .this_id = ATA_SHT_THIS_ID,
113 .sg_tablesize = LIBATA_MAX_PRD,
114 .max_sectors = ATA_MAX_SECTORS,
115 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
116 .emulated = ATA_SHT_EMULATED,
117 .use_clustering = ATA_SHT_USE_CLUSTERING,
118 .proc_name = DRV_NAME,
119 .dma_boundary = ATA_DMA_BOUNDARY,
120 .slave_configure = ata_scsi_slave_config,
121 .bios_param = ata_std_bios_param,
122};
123
124static struct ata_port_operations hpt3x3_port_ops = {
125 .port_disable = ata_port_disable,
126 .set_piomode = hpt3x3_set_piomode,
127 .set_dmamode = hpt3x3_set_dmamode,
128 .mode_filter = ata_pci_default_filter,
129
130 .tf_load = ata_tf_load,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = ata_exec_command,
134 .dev_select = ata_std_dev_select,
135
136 .freeze = ata_bmdma_freeze,
137 .thaw = ata_bmdma_thaw,
138 .error_handler = hpt3x3_error_handler,
139 .post_internal_cmd = ata_bmdma_post_internal_cmd,
140
141 .bmdma_setup = ata_bmdma_setup,
142 .bmdma_start = ata_bmdma_start,
143 .bmdma_stop = ata_bmdma_stop,
144 .bmdma_status = ata_bmdma_status,
145
146 .qc_prep = ata_qc_prep,
147 .qc_issue = ata_qc_issue_prot,
148 .eng_timeout = ata_eng_timeout,
149 .data_xfer = ata_pio_data_xfer,
150
151 .irq_handler = ata_interrupt,
152 .irq_clear = ata_bmdma_irq_clear,
153
154 .port_start = ata_port_start,
155 .port_stop = ata_port_stop,
156 .host_stop = ata_host_stop
157};
158
159/**
160 * hpt3x3_init_one - Initialise an HPT343/363
161 * @dev: PCI device
162 * @id: Entry in match table
163 *
164 * Perform basic initialisation. The chip has a quirk that it won't
165 * function unless it is at XX00. The old ATA driver touched this up
166 * but we leave it for pci quirks to do properly.
167 */
168
169static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
170{
171 static struct ata_port_info info = {
172 .sht = &hpt3x3_sht,
173 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
174 .pio_mask = 0x1f,
175 .mwdma_mask = 0x07,
176 .udma_mask = 0x07,
177 .port_ops = &hpt3x3_port_ops
178 };
179 static struct ata_port_info *port_info[2] = { &info, &info };
180 u16 cmd;
181
182 /* Initialize the board */
183 pci_write_config_word(dev, 0x80, 0x00);
184 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
185 pci_read_config_word(dev, PCI_COMMAND, &cmd);
186 if (cmd & PCI_COMMAND_MEMORY)
187 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
188 else
189 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
190
191 /* Now kick off ATA set up */
192 return ata_pci_init_one(dev, port_info, 2);
193}
194
195static struct pci_device_id hpt3x3[] = {
196 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT343), },
197 { 0, },
198};
199
200static struct pci_driver hpt3x3_pci_driver = {
201 .name = DRV_NAME,
202 .id_table = hpt3x3,
203 .probe = hpt3x3_init_one,
204 .remove = ata_pci_remove_one
205};
206
207static int __init hpt3x3_init(void)
208{
209 return pci_register_driver(&hpt3x3_pci_driver);
210}
211
212
213static void __exit hpt3x3_exit(void)
214{
215 pci_unregister_driver(&hpt3x3_pci_driver);
216}
217
218
219MODULE_AUTHOR("Alan Cox");
220MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
221MODULE_LICENSE("GPL");
222MODULE_DEVICE_TABLE(pci, hpt3x3);
223MODULE_VERSION(DRV_VERSION);
224
225module_init(hpt3x3_init);
226module_exit(hpt3x3_exit);
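
[Editor's note] The two set_*mode hooks above share one register packing scheme; the sketch below restates it as a single standalone helper purely for review clarity. The helper name is invented and this code is not part of the patch.

static void hpt3x3_pack_timing(u32 *r1, u32 *r2, int dn, u8 mode_num,
                               int is_udma, int is_mwdma)
{
        /* 0x44 holds a 3-bit mode number per drive, dn = 2 * channel + device */
        *r1 &= ~(7 << (3 * dn));
        *r1 |= mode_num << (3 * dn);
        /* 0x48: bit dn enables UDMA, bit dn + 4 enables MWDMA for that drive */
        *r2 &= ~(0x11 << dn);
        if (is_udma)
                *r2 |= 0x01 << dn;
        else if (is_mwdma)
                *r2 |= 0x10 << dn;
}

PIO-only setup corresponds to calling this with both flags clear, which is exactly what hpt3x3_set_piomode does above.
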
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
new file mode 100644
index 000000000000..73948c8b7270
--- /dev/null
+++ b/drivers/ata/pata_isapnp.c
@@ -0,0 +1,156 @@
1
2/*
3 * pata-isapnp.c - ISA PnP PATA controller driver.
4 * Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
5 *
6 * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/isapnp.h>
12#include <linux/init.h>
13#include <linux/blkdev.h>
14#include <linux/delay.h>
15#include <scsi/scsi_host.h>
16#include <linux/ata.h>
17#include <linux/libata.h>
18
19#define DRV_NAME "pata_isapnp"
20#define DRV_VERSION "0.1.5"
21
22static struct scsi_host_template isapnp_sht = {
23 .module = THIS_MODULE,
24 .name = DRV_NAME,
25 .ioctl = ata_scsi_ioctl,
26 .queuecommand = ata_scsi_queuecmd,
27 .can_queue = ATA_DEF_QUEUE,
28 .this_id = ATA_SHT_THIS_ID,
29 .sg_tablesize = LIBATA_MAX_PRD,
30 .max_sectors = ATA_MAX_SECTORS,
31 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
32 .emulated = ATA_SHT_EMULATED,
33 .use_clustering = ATA_SHT_USE_CLUSTERING,
34 .proc_name = DRV_NAME,
35 .dma_boundary = ATA_DMA_BOUNDARY,
36 .slave_configure = ata_scsi_slave_config,
37 .bios_param = ata_std_bios_param,
38};
39
40static struct ata_port_operations isapnp_port_ops = {
41 .port_disable = ata_port_disable,
42 .tf_load = ata_tf_load,
43 .tf_read = ata_tf_read,
44 .check_status = ata_check_status,
45 .exec_command = ata_exec_command,
46 .dev_select = ata_std_dev_select,
47
48 .freeze = ata_bmdma_freeze,
49 .thaw = ata_bmdma_thaw,
50 .error_handler = ata_bmdma_error_handler,
51 .post_internal_cmd = ata_bmdma_post_internal_cmd,
52
53 .qc_prep = ata_qc_prep,
54 .qc_issue = ata_qc_issue_prot,
55 .eng_timeout = ata_eng_timeout,
56 .data_xfer = ata_pio_data_xfer,
57
58 .irq_handler = ata_interrupt,
59 .irq_clear = ata_bmdma_irq_clear,
60
61 .port_start = ata_port_start,
62 .port_stop = ata_port_stop,
63 .host_stop = ata_host_stop
64};
65
66/**
67 * isapnp_init_one - attach an isapnp interface
68 * @idev: PnP device
69 * @dev_id: matching detect line
70 *
71 * Register an ISA bus IDE interface. Such interfaces are PIO 0 only and
72 * use a non-shared IRQ.
73 */
74
75static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
76{
77 struct ata_probe_ent ae;
78
79 if (pnp_port_valid(idev, 0) == 0)
80 return -ENODEV;
81
82 /* FIXME: Should select polled PIO here rather than fail */
83 if (pnp_irq_valid(idev, 0) == 0)
84 return -ENODEV;
85
86 memset(&ae, 0, sizeof(struct ata_probe_ent));
87 INIT_LIST_HEAD(&ae.node);
88 ae.dev = &idev->dev;
89 ae.port_ops = &isapnp_port_ops;
90 ae.sht = &isapnp_sht;
91 ae.n_ports = 1;
92 ae.pio_mask = 1; /* ISA so PIO 0 cycles */
93 ae.irq = pnp_irq(idev, 0);
94 ae.irq_flags = 0;
95 ae.port_flags = ATA_FLAG_SLAVE_POSS;
96 ae.port[0].cmd_addr = pnp_port_start(idev, 0);
97
98 if (pnp_port_valid(idev, 1)) {
99 ae.port[0].altstatus_addr = pnp_port_start(idev, 1);
100 ae.port[0].ctl_addr = pnp_port_start(idev, 1);
101 ae.port_flags |= ATA_FLAG_SRST;
102 }
103 ata_std_ports(&ae.port[0]);
104
105 if (ata_device_add(&ae) == 0)
106 return -ENODEV;
107 return 0;
108}
109
110/**
111 * isapnp_remove_one - unplug an isapnp interface
112 * @idev: PnP device
113 *
114 * Remove a previously configured PnP ATA port. Called only on module
115 * unload events as the core does not currently deal with ISAPnP docking.
116 */
117
118static void isapnp_remove_one(struct pnp_dev *idev)
119{
120 struct device *dev = &idev->dev;
121 struct ata_host *host = dev_get_drvdata(dev);
122
123 ata_host_remove(host);
124 dev_set_drvdata(dev, NULL);
125}
126
127static struct pnp_device_id isapnp_devices[] = {
128 /* Generic ESDI/IDE/ATA compatible hard disk controller */
129 {.id = "PNP0600", .driver_data = 0},
130 {.id = ""}
131};
132
133static struct pnp_driver isapnp_driver = {
134 .name = DRV_NAME,
135 .id_table = isapnp_devices,
136 .probe = isapnp_init_one,
137 .remove = isapnp_remove_one,
138};
139
140static int __init isapnp_init(void)
141{
142 return pnp_register_driver(&isapnp_driver);
143}
144
145static void __exit isapnp_exit(void)
146{
147 pnp_unregister_driver(&isapnp_driver);
148}
149
150MODULE_AUTHOR("Alan Cox");
151MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
152MODULE_LICENSE("GPL");
153MODULE_VERSION(DRV_VERSION);
154
155module_init(isapnp_init);
156module_exit(isapnp_exit);
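
[Editor's note] The probe above fills in only cmd_addr and ctl_addr and then calls ata_std_ports() to derive the remaining taskfile addresses. Roughly, that helper expands to the assignments sketched below; this is shown for context only and the libata core remains authoritative.

        ioaddr->data_addr    = ioaddr->cmd_addr + ATA_REG_DATA;
        ioaddr->error_addr   = ioaddr->cmd_addr + ATA_REG_ERR;
        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
        ioaddr->nsect_addr   = ioaddr->cmd_addr + ATA_REG_NSECT;
        ioaddr->lbal_addr    = ioaddr->cmd_addr + ATA_REG_LBAL;
        ioaddr->lbam_addr    = ioaddr->cmd_addr + ATA_REG_LBAM;
        ioaddr->lbah_addr    = ioaddr->cmd_addr + ATA_REG_LBAH;
        ioaddr->device_addr  = ioaddr->cmd_addr + ATA_REG_DEVICE;
        ioaddr->status_addr  = ioaddr->cmd_addr + ATA_REG_STATUS;
        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
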
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
new file mode 100644
index 000000000000..af39097d8081
--- /dev/null
+++ b/drivers/ata/pata_it821x.c
@@ -0,0 +1,847 @@
1/*
2 * ata-it821x.c - IT821x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * it821x.c
9 *
10 * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
11 *
12 * Copyright (C) 2004 Red Hat <alan@redhat.com>
13 *
14 * May be copied or modified under the terms of the GNU General Public License
15 * Based in part on the ITE vendor provided SCSI driver.
16 *
17 * Documentation available from
18 * http://www.ite.com.tw/pc/IT8212F_V04.pdf
19 * Some other documents are NDA.
20 *
21 * The ITE8212 isn't exactly a standard IDE controller. It has two
22 * modes. In pass through mode it is an IDE controller. In its smart
23 * mode it's actually quite a capable hardware RAID controller disguised
24 * as an IDE controller. Smart mode only understands DMA read/write and
25 * identify, none of the fancier commands apply. The IT8211 is identical
26 * in other respects but lacks the raid mode.
27 *
28 * Errata:
29 * o Rev 0x10 also requires master/slave hold the same DMA timings and
30 * cannot do ATAPI MWDMA.
31 * o The identify data for raid volumes lacks CHS info (technically ok)
32 * but also fails to set the LBA28 and other bits. We fix these in
33 * the IDE probe quirk code.
34 * o If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
35 * RAID then the controller firmware dies
36 * o Smart mode without RAID doesn't clear all the necessary identify
37 * bits to reduce the command set to the one used
38 *
39 * This has a few impacts on the driver
40 * - In pass through mode we do all the work you would expect
41 * - In smart mode the clocking set up is done by the controller generally
42 * but we must watch the other limits and filter.
43 * - There are a few extra vendor commands that actually talk to the
44 * controller but only work PIO with no IRQ.
45 *
46 * Vendor areas of the identify block in smart mode are used for the
47 * timing and policy set up. Each HDD in raid mode also has a serial
48 * block on the disk. The hardware extra commands are get/set chip status,
49 * rebuild, get rebuild status.
50 *
51 * In Linux the driver supports pass through mode as if the device was
52 * just another IDE controller. If the smart mode is running then
53 * volumes are managed by the controller firmware and each IDE "disk"
54 * is a raid volume. Even more cute - the controller can do automated
55 * hotplug and rebuild.
56 *
57 * The pass through controller itself is a little demented. It has a
58 * flaw that it has a single set of PIO/MWDMA timings per channel so
59 * non-UDMA devices restrict each other's performance. It also has a
60 * single clock source per channel so mixed UDMA100/133 performance
61 * isn't perfect and we have to pick a clock. Thankfully none of this
62 * matters in smart mode. ATAPI DMA is not currently supported.
63 *
64 * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
65 *
66 * TODO
67 * - ATAPI and other speed filtering
68 * - Command filter in smart mode
69 * - RAID configuration ioctls
70 */
71
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/pci.h>
75#include <linux/init.h>
76#include <linux/blkdev.h>
77#include <linux/delay.h>
78#include <scsi/scsi_host.h>
79#include <linux/libata.h>
80
81
82#define DRV_NAME "pata_it821x"
83#define DRV_VERSION "0.3.2"
84
85struct it821x_dev
86{
87 unsigned int smart:1, /* Are we in smart raid mode */
88 timing10:1; /* Rev 0x10 */
89 u8 clock_mode; /* 0, ATA_50 or ATA_66 */
90 u8 want[2][2]; /* Mode/Pri log for master slave */
91 /* We need these for switching the clock when DMA goes on/off
92 The high byte is the 66Mhz timing */
93 u16 pio[2]; /* Cached PIO values */
94 u16 mwdma[2]; /* Cached MWDMA values */
95 u16 udma[2]; /* Cached UDMA values (per drive) */
96 u16 last_device; /* Master or slave loaded ? */
97};
98
99#define ATA_66 0
100#define ATA_50 1
101#define ATA_ANY 2
102
103#define UDMA_OFF 0
104#define MWDMA_OFF 0
105
106/*
107 * We allow users to force the card into non raid mode without
108 * flashing the alternative BIOS. This is also necessary right now
109 * for embedded platforms that cannot run a PC BIOS but are using this
110 * device.
111 */
112
113static int it8212_noraid;
114
115/**
116 * it821x_pre_reset - probe
117 * @ap: ATA port
118 *
119 * Set the cable type
120 */
121
122static int it821x_pre_reset(struct ata_port *ap)
123{
124 ap->cbl = ATA_CBL_PATA80;
125 return ata_std_prereset(ap);
126}
127
128/**
129 * it821x_error_handler - probe/reset
130 * @ap: ATA port
131 *
132 * Set the cable type and trigger a probe
133 */
134
135static void it821x_error_handler(struct ata_port *ap)
136{
137 return ata_bmdma_drive_eh(ap, it821x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
138}
139
140/**
141 * it821x_program - program the PIO/MWDMA registers
142 * @ap: ATA port
143 * @adev: Device to program
144 * @timing: Timing value (66Mhz in top 8bits, 50 in the low 8)
145 *
146 * Program the PIO/MWDMA timing for this channel according to the
147 * current clock. These share the same register so are managed by
148 * the DMA start/stop sequence as with the old driver.
149 */
150
151static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
152{
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 struct it821x_dev *itdev = ap->private_data;
155 int channel = ap->port_no;
156 u8 conf;
157
158 /* Program PIO/MWDMA timing bits */
159 if (itdev->clock_mode == ATA_66)
160 conf = timing >> 8;
161 else
162 conf = timing & 0xFF;
163 pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
164}
165
166
167/**
168 * it821x_program_udma - program the UDMA registers
169 * @ap: ATA port
170 * @adev: ATA device to update
171 * @timing: Timing bits. Top 8 are for 66Mhz bottom for 50Mhz
172 *
173 * Program the UDMA timing for this drive according to the
174 * current clock. Handles the dual clocks and also knows about
175 * the errata on the 0x10 revision. The UDMA errata is partly handled
176 * here and partly in start_dma.
177 */
178
179static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
180{
181 struct it821x_dev *itdev = ap->private_data;
182 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
183 int channel = ap->port_no;
184 int unit = adev->devno;
185 u8 conf;
186
187 /* Program UDMA timing bits */
188 if (itdev->clock_mode == ATA_66)
189 conf = timing >> 8;
190 else
191 conf = timing & 0xFF;
192 if (itdev->timing10 == 0)
193 pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
194 else {
195 /* Early revision must be programmed for both together */
196 pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
197 pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
198 }
199}
200
201/**
202 * it821x_clock_strategy
203 * @ap: ATA interface
204 * @adev: ATA device being updated
205 *
206 * Select between the 50 and 66Mhz base clocks to get the best
207 * results for this interface.
208 */
209
210static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
211{
212 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
213 struct it821x_dev *itdev = ap->private_data;
214 u8 unit = adev->devno;
215 struct ata_device *pair = ata_dev_pair(adev);
216
217 int clock, altclock;
218 u8 v;
219 int sel = 0;
220
221 /* Look for the most wanted clocking */
222 if (itdev->want[0][0] > itdev->want[1][0]) {
223 clock = itdev->want[0][1];
224 altclock = itdev->want[1][1];
225 } else {
226 clock = itdev->want[1][1];
227 altclock = itdev->want[0][1];
228 }
229
230 /* Master doesn't care; does the slave? */
231 if (clock == ATA_ANY)
232 clock = altclock;
233
234 /* Nobody cares - keep the same clock */
235 if (clock == ATA_ANY)
236 return;
237 /* No change */
238 if (clock == itdev->clock_mode)
239 return;
240
241 /* Load this into the controller */
242 if (clock == ATA_66)
243 itdev->clock_mode = ATA_66;
244 else {
245 itdev->clock_mode = ATA_50;
246 sel = 1;
247 }
248 pci_read_config_byte(pdev, 0x50, &v);
249 v &= ~(1 << (1 + ap->port_no));
250 v |= sel << (1 + ap->port_no);
251 pci_write_config_byte(pdev, 0x50, v);
252
253 /*
254 * Reprogram the UDMA/PIO of the pair drive for the switch
255 * MWDMA will be dealt with by the dma switcher
256 */
257 if (pair && itdev->udma[1-unit] != UDMA_OFF) {
258 it821x_program_udma(ap, pair, itdev->udma[1-unit]);
259 it821x_program(ap, pair, itdev->pio[1-unit]);
260 }
261 /*
262 * Reprogram the UDMA/PIO of our drive for the switch.
263 * MWDMA will be dealt with by the dma switcher
264 */
265 if (itdev->udma[unit] != UDMA_OFF) {
266 it821x_program_udma(ap, adev, itdev->udma[unit]);
267 it821x_program(ap, adev, itdev->pio[unit]);
268 }
269}
270
271/**
272 * it821x_passthru_set_piomode - set PIO mode data
273 * @ap: ATA interface
274 * @adev: ATA device
275 *
276 * Configure for PIO mode. This is complicated as the register is
277 * shared by PIO and MWDMA and for both channels.
278 */
279
280static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
281{
282 /* Spec says 89, the reference driver uses 88 */
283 static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
284 static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
285
286 struct it821x_dev *itdev = ap->private_data;
287 int unit = adev->devno;
288 int mode_wanted = adev->pio_mode - XFER_PIO_0;
289
290 /* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */
291 itdev->want[unit][1] = pio_want[mode_wanted];
292 itdev->want[unit][0] = 1; /* PIO is lowest priority */
293 itdev->pio[unit] = pio[mode_wanted];
294 it821x_clock_strategy(ap, adev);
295 it821x_program(ap, adev, itdev->pio[unit]);
296}
297
298/**
299 * it821x_passthru_set_dmamode - set initial DMA mode data
300 * @ap: ATA interface
301 * @adev: ATA device
302 *
303 * Set up the DMA modes. The actions taken depend heavily on the mode
304 * to use. If UDMA is used as is hopefully the usual case then the
305 * timing register is private and we need only consider the clock. If
306 * we are using MWDMA then we have to manage the setting ourself as
307 * we switch devices and mode.
308 */
309
310static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
311{
312 static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
313 static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
314 static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
315 static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
316
317 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
318 struct it821x_dev *itdev = ap->private_data;
319 int channel = ap->port_no;
320 int unit = adev->devno;
321 u8 conf;
322
323 if (adev->dma_mode >= XFER_UDMA_0) {
324 int mode_wanted = adev->dma_mode - XFER_UDMA_0;
325
326 itdev->want[unit][1] = udma_want[mode_wanted];
327 itdev->want[unit][0] = 3; /* UDMA is high priority */
328 itdev->mwdma[unit] = MWDMA_OFF;
329 itdev->udma[unit] = udma[mode_wanted];
330 if (mode_wanted >= 5)
331 itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
332
333 /* UDMA on. Again revision 0x10 must do the pair */
334 pci_read_config_byte(pdev, 0x50, &conf);
335 if (itdev->timing10)
336 conf &= channel ? 0x9F: 0xE7;
337 else
338 conf &= ~ (1 << (3 + 2 * channel + unit));
339 pci_write_config_byte(pdev, 0x50, conf);
340 it821x_clock_strategy(ap, adev);
341 it821x_program_udma(ap, adev, itdev->udma[unit]);
342 } else {
343 int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
344
345 itdev->want[unit][1] = mwdma_want[mode_wanted];
346 itdev->want[unit][0] = 2; /* MWDMA is low priority */
347 itdev->mwdma[unit] = dma[mode_wanted];
348 itdev->udma[unit] = UDMA_OFF;
349
350 /* UDMA bits off - Revision 0x10 do them in pairs */
351 pci_read_config_byte(pdev, 0x50, &conf);
352 if (itdev->timing10)
353 conf |= channel ? 0x60: 0x18;
354 else
355 conf |= 1 << (3 + 2 * channel + unit);
356 pci_write_config_byte(pdev, 0x50, conf);
357 it821x_clock_strategy(ap, adev);
358 }
359}
360
361/**
362 * it821x_passthru_dma_start - DMA start callback
363 * @qc: Command in progress
364 *
365 * Usually drivers set the DMA timing at the point the set_dmamode call
366 * is made. IT821x however requires we load new timings on the
367 * transitions in some cases.
368 */
369
370static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
371{
372 struct ata_port *ap = qc->ap;
373 struct ata_device *adev = qc->dev;
374 struct it821x_dev *itdev = ap->private_data;
375 int unit = adev->devno;
376
377 if (itdev->mwdma[unit] != MWDMA_OFF)
378 it821x_program(ap, adev, itdev->mwdma[unit]);
379 else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
380 it821x_program_udma(ap, adev, itdev->udma[unit]);
381 ata_bmdma_start(qc);
382}
383
384/**
385 * it821x_passthru_dma_stop - DMA stop callback
386 * @qc: ATA command
387 *
388 * We loaded new timings in dma_start, as a result we need to restore
389 * the PIO timings in dma_stop so that the next command issue gets the
390 * right clock values.
391 */
392
393static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
394{
395 struct ata_port *ap = qc->ap;
396 struct ata_device *adev = qc->dev;
397 struct it821x_dev *itdev = ap->private_data;
398 int unit = adev->devno;
399
400 ata_bmdma_stop(qc);
401 if (itdev->mwdma[unit] != MWDMA_OFF)
402 it821x_program(ap, adev, itdev->pio[unit]);
403}
404
405
406/**
407 * it821x_passthru_dev_select - Select master/slave
408 * @ap: ATA port
409 * @device: Device number (not pointer)
410 *
411 * Device selection hook. If necessary perform clock switching
412 */
413
414static void it821x_passthru_dev_select(struct ata_port *ap,
415 unsigned int device)
416{
417 struct it821x_dev *itdev = ap->private_data;
418 if (itdev && device != itdev->last_device) {
419 struct ata_device *adev = &ap->device[device];
420 it821x_program(ap, adev, itdev->pio[adev->devno]);
421 itdev->last_device = device;
422 }
423 ata_std_dev_select(ap, device);
424}
425
426/**
427 * it821x_smart_qc_issue_prot - wrap qc issue prot
428 * @qc: command
429 *
430 * Wrap the command issue sequence for the IT821x. In smart mode we
431 * must filter the issued commands down to those the controller
432 * firmware can handle before the usual processing kicks off
433 */
434
435static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
436{
437 switch(qc->tf.command)
438 {
439 /* Commands the firmware supports */
440 case ATA_CMD_READ:
441 case ATA_CMD_READ_EXT:
442 case ATA_CMD_WRITE:
443 case ATA_CMD_WRITE_EXT:
444 case ATA_CMD_PIO_READ:
445 case ATA_CMD_PIO_READ_EXT:
446 case ATA_CMD_PIO_WRITE:
447 case ATA_CMD_PIO_WRITE_EXT:
448 case ATA_CMD_READ_MULTI:
449 case ATA_CMD_READ_MULTI_EXT:
450 case ATA_CMD_WRITE_MULTI:
451 case ATA_CMD_WRITE_MULTI_EXT:
452 case ATA_CMD_ID_ATA:
453 /* Arguably should just no-op this one */
454 case ATA_CMD_SET_FEATURES:
455 return ata_qc_issue_prot(qc);
456 }
457 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
458 return AC_ERR_INVALID;
459}
460
461/**
462 * it821x_passthru_qc_issue_prot - wrap qc issue prot
463 * @qc: command
464 *
465 * Wrap the command issue sequence for the IT821x. We need to
466 * perform our own device selection timing loads before the
467 * usual happenings kick off
468 */
469
470static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
471{
472 it821x_passthru_dev_select(qc->ap, qc->dev->devno);
473 return ata_qc_issue_prot(qc);
474}
475
476/**
477 * it821x_smart_set_mode - mode setting
478 * @ap: interface to set up
479 *
480 * Use a non standard set_mode function. We don't want to be tuned.
481 * The BIOS configured everything. Our job is not to fiddle. We
482 * read the DMA enabled bits from the controller's BMDMA register
483 * and respect them.
484 */
485
486static void it821x_smart_set_mode(struct ata_port *ap)
487{
488 int dma_enabled = 0;
489 int i;
490
491 /* Bits 5 and 6 indicate if DMA is active on master/slave */
492 /* It is possible that BMDMA isn't allocated */
493 if (ap->ioaddr.bmdma_addr)
494 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
495
496 for (i = 0; i < ATA_MAX_DEVICES; i++) {
497 struct ata_device *dev = &ap->device[i];
498 if (ata_dev_enabled(dev)) {
499 /* We don't really care */
500 dev->pio_mode = XFER_PIO_0;
501 dev->dma_mode = XFER_MW_DMA_0;
502 /* We do need the right mode information for DMA or PIO
503 and this comes from the current configuration flags */
504 if (dma_enabled & (1 << (5 + i))) {
505 dev->xfer_mode = XFER_MW_DMA_0;
506 dev->xfer_shift = ATA_SHIFT_MWDMA;
507 dev->flags &= ~ATA_DFLAG_PIO;
508 } else {
509 dev->xfer_mode = XFER_PIO_0;
510 dev->xfer_shift = ATA_SHIFT_PIO;
511 dev->flags |= ATA_DFLAG_PIO;
512 }
513 }
514 }
515}
516
517/**
518 * it821x_dev_config - Called each device identify
519 * @ap: ATA port
520 * @adev: Device that has just been identified
521 *
522 * Perform the initial setup needed for each device that is chip
523 * special. In our case we need to lock the sector count to avoid
524 * blowing the brains out of the firmware with large LBA48 requests
525 *
526 * FIXME: When FUA appears we need to block FUA too, and SMART as
527 * well; basically we need to filter commands for this chip.
528 */
529
530static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev)
531{
532 unsigned char model_num[40];
533 char *s;
534 unsigned int len;
535
536 /* This block ought to be a library routine as it is in several
537 drivers now */
538
539 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS,
540 sizeof(model_num));
541 s = &model_num[0];
542 len = strnlen(s, sizeof(model_num));
543
544 /* ATAPI specifies that empty space is blank-filled; remove blanks */
545 while ((len > 0) && (s[len - 1] == ' ')) {
546 len--;
547 s[len] = 0;
548 }
549
550 if (adev->max_sectors > 255)
551 adev->max_sectors = 255;
552
553 if (strstr(model_num, "Integrated Technology Express")) {
554 /* RAID mode */
555 printk(KERN_INFO "IT821x %sRAID%d volume",
556 adev->id[147]?"Bootable ":"",
557 adev->id[129]);
558 if (adev->id[129] != 1)
559 printk("(%dK stripe)", adev->id[146]);
560 printk(".\n");
561 }
562}
563
564
565/**
566 * it821x_check_atapi_dma - ATAPI DMA handler
567 * @qc: Command we are about to issue
568 *
569 * Decide if this ATAPI command can be issued by DMA on this
570 * controller. Return 0 if it can be.
571 */
572
573static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
574{
575 struct ata_port *ap = qc->ap;
576 struct it821x_dev *itdev = ap->private_data;
577
578 /* No ATAPI DMA in smart mode */
579 if (itdev->smart)
580 return -EOPNOTSUPP;
581 /* No ATAPI DMA on rev 10 */
582 if (itdev->timing10)
583 return -EOPNOTSUPP;
584 /* Cool */
585 return 0;
586}
587
588
589/**
590 * it821x_port_start - port setup
591 * @ap: ATA port being set up
592 *
593 * The it821x needs to maintain private data structures, which the
594 * standard PCI interface setup has no provision for. We instead set
595 * up the private data on the port
596 * start hook, and tear it down on port stop
597 */
598
599static int it821x_port_start(struct ata_port *ap)
600{
601 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
602 struct it821x_dev *itdev;
603 u8 conf;
604
605 int ret = ata_port_start(ap);
606 if (ret < 0)
607 return ret;
608
609 ap->private_data = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL);
610 if (ap->private_data == NULL) {
611 ata_port_stop(ap);
612 return -ENOMEM;
613 }
614
615 itdev = ap->private_data;
616 memset(itdev, 0, sizeof(struct it821x_dev));
617
618 pci_read_config_byte(pdev, 0x50, &conf);
619
620 if (conf & 1) {
621 itdev->smart = 1;
622 /* Long I/Os, although allowed in LBA48 space, cause the
623 onboard firmware to enter the twilight zone */
624 /* No ATAPI DMA in this mode either */
625 }
626 /* Pull the current clocks from 0x50 */
627 if (conf & (1 << (1 + ap->port_no)))
628 itdev->clock_mode = ATA_50;
629 else
630 itdev->clock_mode = ATA_66;
631
632 itdev->want[0][1] = ATA_ANY;
633 itdev->want[1][1] = ATA_ANY;
634 itdev->last_device = -1;
635
636 pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
637 if (conf == 0x10) {
638 itdev->timing10 = 1;
639 /* Need to disable ATAPI DMA for this case */
640 if (!itdev->smart)
641 printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
642 }
643
644 return 0;
645}
646
647/**
648 * it821x_port_stop - port shutdown
649 * @ap: ATA port being removed
650 *
651 * Release the private objects we added in it821x_port_start
652 */
653
654static void it821x_port_stop(struct ata_port *ap) {
655 kfree(ap->private_data);
656 ap->private_data = NULL; /* We want an OOPS if we reuse this
657 too late! */
658 ata_port_stop(ap);
659}
660
661static struct scsi_host_template it821x_sht = {
662 .module = THIS_MODULE,
663 .name = DRV_NAME,
664 .ioctl = ata_scsi_ioctl,
665 .queuecommand = ata_scsi_queuecmd,
666 .can_queue = ATA_DEF_QUEUE,
667 .this_id = ATA_SHT_THIS_ID,
668 .sg_tablesize = LIBATA_MAX_PRD,
669 /* 255 sectors to begin with. This is locked in smart mode but not
670 in pass through */
671 .max_sectors = 255,
672 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
673 .emulated = ATA_SHT_EMULATED,
674 .use_clustering = ATA_SHT_USE_CLUSTERING,
675 .proc_name = DRV_NAME,
676 .dma_boundary = ATA_DMA_BOUNDARY,
677 .slave_configure = ata_scsi_slave_config,
678 .bios_param = ata_std_bios_param,
679};
680
681static struct ata_port_operations it821x_smart_port_ops = {
682 .set_mode = it821x_smart_set_mode,
683 .port_disable = ata_port_disable,
684 .tf_load = ata_tf_load,
685 .tf_read = ata_tf_read,
686 .mode_filter = ata_pci_default_filter,
687
688 .check_status = ata_check_status,
689 .check_atapi_dma= it821x_check_atapi_dma,
690 .exec_command = ata_exec_command,
691 .dev_select = ata_std_dev_select,
692 .dev_config = it821x_dev_config,
693
694 .freeze = ata_bmdma_freeze,
695 .thaw = ata_bmdma_thaw,
696 .error_handler = it821x_error_handler,
697 .post_internal_cmd = ata_bmdma_post_internal_cmd,
698
699 .bmdma_setup = ata_bmdma_setup,
700 .bmdma_start = ata_bmdma_start,
701 .bmdma_stop = ata_bmdma_stop,
702 .bmdma_status = ata_bmdma_status,
703
704 .qc_prep = ata_qc_prep,
705 .qc_issue = it821x_smart_qc_issue_prot,
706 .eng_timeout = ata_eng_timeout,
707 .data_xfer = ata_pio_data_xfer,
708
709 .irq_handler = ata_interrupt,
710 .irq_clear = ata_bmdma_irq_clear,
711
712 .port_start = it821x_port_start,
713 .port_stop = it821x_port_stop,
714 .host_stop = ata_host_stop
715};
716
717static struct ata_port_operations it821x_passthru_port_ops = {
718 .port_disable = ata_port_disable,
719 .set_piomode = it821x_passthru_set_piomode,
720 .set_dmamode = it821x_passthru_set_dmamode,
721 .mode_filter = ata_pci_default_filter,
722
723 .tf_load = ata_tf_load,
724 .tf_read = ata_tf_read,
725 .check_status = ata_check_status,
726 .exec_command = ata_exec_command,
727 .check_atapi_dma= it821x_check_atapi_dma,
728 .dev_select = it821x_passthru_dev_select,
729
730 .freeze = ata_bmdma_freeze,
731 .thaw = ata_bmdma_thaw,
732 .error_handler = it821x_error_handler,
733 .post_internal_cmd = ata_bmdma_post_internal_cmd,
734
735 .bmdma_setup = ata_bmdma_setup,
736 .bmdma_start = it821x_passthru_bmdma_start,
737 .bmdma_stop = it821x_passthru_bmdma_stop,
738 .bmdma_status = ata_bmdma_status,
739
740 .qc_prep = ata_qc_prep,
741 .qc_issue = it821x_passthru_qc_issue_prot,
742 .eng_timeout = ata_eng_timeout,
743 .data_xfer = ata_pio_data_xfer,
744
745 .irq_clear = ata_bmdma_irq_clear,
746 .irq_handler = ata_interrupt,
747
748 .port_start = it821x_port_start,
749 .port_stop = it821x_port_stop,
750 .host_stop = ata_host_stop
751};
752
753static void __devinit it821x_disable_raid(struct pci_dev *pdev)
754{
755 /* Reset local CPU, and set BIOS not ready */
756 pci_write_config_byte(pdev, 0x5E, 0x01);
757
758 /* Set to bypass mode, and reset PCI bus */
759 pci_write_config_byte(pdev, 0x50, 0x00);
760 pci_write_config_word(pdev, PCI_COMMAND,
761 PCI_COMMAND_PARITY | PCI_COMMAND_IO |
762 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
763 pci_write_config_word(pdev, 0x40, 0xA0F3);
764
765 pci_write_config_dword(pdev,0x4C, 0x02040204);
766 pci_write_config_byte(pdev, 0x42, 0x36);
767 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
768}
769
770
771static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
772{
773 u8 conf;
774
775 static struct ata_port_info info_smart = {
776 .sht = &it821x_sht,
777 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
778 .pio_mask = 0x1f,
779 .mwdma_mask = 0x07,
780 .port_ops = &it821x_smart_port_ops
781 };
782 static struct ata_port_info info_passthru = {
783 .sht = &it821x_sht,
784 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
785 .pio_mask = 0x1f,
786 .mwdma_mask = 0x07,
787 .udma_mask = 0x7f,
788 .port_ops = &it821x_passthru_port_ops
789 };
790 static struct ata_port_info *port_info[2];
791
792 static char *mode[2] = { "pass through", "smart" };
793
794 /* Force the card into bypass mode if so requested */
795 if (it8212_noraid) {
796 printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
797 it821x_disable_raid(pdev);
798 }
799 pci_read_config_byte(pdev, 0x50, &conf);
800 conf &= 1;
801
802 printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]);
803 if (conf == 0)
804 port_info[0] = port_info[1] = &info_passthru;
805 else
806 port_info[0] = port_info[1] = &info_smart;
807
808 return ata_pci_init_one(pdev, port_info, 2);
809}
810
811static struct pci_device_id it821x[] = {
812 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211), },
813 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212), },
814 { 0, },
815};
816
817static struct pci_driver it821x_pci_driver = {
818 .name = DRV_NAME,
819 .id_table = it821x,
820 .probe = it821x_init_one,
821 .remove = ata_pci_remove_one
822};
823
824static int __init it821x_init(void)
825{
826 return pci_register_driver(&it821x_pci_driver);
827}
828
829
830static void __exit it821x_exit(void)
831{
832 pci_unregister_driver(&it821x_pci_driver);
833}
834
835
836MODULE_AUTHOR("Alan Cox");
837MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
838MODULE_LICENSE("GPL");
839MODULE_DEVICE_TABLE(pci, it821x);
840MODULE_VERSION(DRV_VERSION);
841
842
843module_param_named(noraid, it8212_noraid, int, S_IRUGO);
844MODULE_PARM_DESC(noraid, "Force card into bypass mode");
845
846module_init(it821x_init);
847module_exit(it821x_exit);
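
[Editor's note] A short worked example of the clock arbitration above, restating the driver's own logic; nothing here is part of the patch. Each cached timing word holds the 66MHz value in its high byte and the 50MHz value in its low byte, and want[unit] holds { priority, preferred clock } with priority 1 for PIO, 2 for MWDMA and 3 for UDMA.

        /*
         * Example: master tuned to UDMA4, slave to PIO4.
         *   want[0] = { 3, ATA_66 }   - UDMA, prefers the 66MHz clock
         *   want[1] = { 1, ATA_ANY }  - PIO, no preference
         * The higher priority entry wins, so clock_mode becomes ATA_66,
         * both drives' UDMA/PIO timings are reloaded, and each cached
         * timing word is then consumed as:
         */
        conf = (itdev->clock_mode == ATA_66) ? (timing >> 8) : (timing & 0xFF);
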
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
new file mode 100644
index 000000000000..6832a643a9eb
--- /dev/null
+++ b/drivers/ata/pata_jmicron.c
@@ -0,0 +1,266 @@
1/*
2 * pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
3 * PATA port of the controller. The SATA ports are
4 * driven by AHCI in the usual configuration although
5 * this driver can handle other setups if we need it.
6 *
7 * (c) 2006 Red Hat <alan@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/init.h>
14#include <linux/blkdev.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <scsi/scsi_host.h>
18#include <linux/libata.h>
19#include <linux/ata.h>
20
21#define DRV_NAME "pata_jmicron"
22#define DRV_VERSION "0.1.2"
23
24typedef enum {
25 PORT_PATA0 = 0,
26 PORT_PATA1 = 1,
27 PORT_SATA = 2,
28} port_type;
29
30/**
31 * jmicron_pre_reset - check for 40/80 pin
32 * @ap: Port
33 *
34 * Perform the PATA port setup we need.
35 *
36 * On the Jmicron 361/363 there is a single PATA port that can be mapped
37 * either as primary or secondary (or neither). We don't do any policy
38 * and setup here. We assume that has been done by init_one and the
39 * BIOS.
40 */
41
42static int jmicron_pre_reset(struct ata_port *ap)
43{
44 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
45 u32 control;
46 u32 control5;
47 int port_mask = 1<< (4 * ap->port_no);
48 int port = ap->port_no;
49 port_type port_map[2];
50
51 /* Check if our port is enabled */
52 pci_read_config_dword(pdev, 0x40, &control);
53 if ((control & port_mask) == 0)
54 return 0;
55
56 /* There are two basic mappings. One has the two SATA ports merged
57 as master/slave and the secondary as PATA, the other maps SATA
58 on both channels */
59 if (control & (1 << 23)) {
60 port_map[0] = PORT_SATA;
61 port_map[1] = PORT_PATA0;
62 } else {
63 port_map[0] = PORT_SATA;
64 port_map[1] = PORT_SATA;
65 }
66
67 /* The 365/366 may have this bit set to map the second PATA port
68 as the internal primary channel */
69 pci_read_config_dword(pdev, 0x80, &control5);
70 if (control5 & (1<<24))
71 port_map[0] = PORT_PATA1;
72
73 /* The two ports may then be logically swapped by the firmware */
74 if (control & (1 << 22))
75 port = port ^ 1;
76
77 /*
78 * Now we know which physical port we are talking about we can
79 * actually do our cable checking etc. Thankfully we don't need
80 * to do the plumbing for other cases.
81 */
82 switch (port_map[port])
83 {
84 case PORT_PATA0:
85 if (control & (1 << 5))
86 return 0;
87 if (control & (1 << 3)) /* 40/80 pin primary */
88 ap->cbl = ATA_CBL_PATA40;
89 else
90 ap->cbl = ATA_CBL_PATA80;
91 break;
92 case PORT_PATA1:
93 /* Bit 21 is set if the port is enabled */
94 if ((control5 & (1 << 21)) == 0)
95 return 0;
96 if (control5 & (1 << 19)) /* 40/80 pin secondary */
97 ap->cbl = ATA_CBL_PATA40;
98 else
99 ap->cbl = ATA_CBL_PATA80;
100 break;
101 case PORT_SATA:
102 ap->cbl = ATA_CBL_SATA;
103 break;
104 }
105 return ata_std_prereset(ap);
106}
107
108/**
109 * jmicron_error_handler - setup and error handling
110 * @ap: Port to handle
111 *
112 * LOCKING:
113 * None (inherited from caller).
114 */
115
116static void jmicron_error_handler(struct ata_port *ap)
117{
118 return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
119}
120
121/* No PIO or DMA methods needed for this device */
122
123static struct scsi_host_template jmicron_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 /* Special handling needed if you have sector or LBA48 limits */
132 .max_sectors = ATA_MAX_SECTORS,
133 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
134 .emulated = ATA_SHT_EMULATED,
135 .use_clustering = ATA_SHT_USE_CLUSTERING,
136 .proc_name = DRV_NAME,
137 .dma_boundary = ATA_DMA_BOUNDARY,
138 .slave_configure = ata_scsi_slave_config,
139 /* Use standard CHS mapping rules */
140 .bios_param = ata_std_bios_param,
141};
142
143static const struct ata_port_operations jmicron_ops = {
144 .port_disable = ata_port_disable,
145
146 /* Task file is PCI ATA format, use helpers */
147 .tf_load = ata_tf_load,
148 .tf_read = ata_tf_read,
149 .check_status = ata_check_status,
150 .exec_command = ata_exec_command,
151 .dev_select = ata_std_dev_select,
152
153 .freeze = ata_bmdma_freeze,
154 .thaw = ata_bmdma_thaw,
155 .error_handler = jmicron_error_handler,
156 .post_internal_cmd = ata_bmdma_post_internal_cmd,
157
158 /* BMDMA handling is PCI ATA format, use helpers */
159 .bmdma_setup = ata_bmdma_setup,
160 .bmdma_start = ata_bmdma_start,
161 .bmdma_stop = ata_bmdma_stop,
162 .bmdma_status = ata_bmdma_status,
163 .qc_prep = ata_qc_prep,
164 .qc_issue = ata_qc_issue_prot,
165 .data_xfer = ata_pio_data_xfer,
166
167 /* Timeout handling. Special recovery hooks here */
168 .eng_timeout = ata_eng_timeout,
169 .irq_handler = ata_interrupt,
170 .irq_clear = ata_bmdma_irq_clear,
171
172 /* Generic PATA PCI ATA helpers */
173 .port_start = ata_port_start,
174 .port_stop = ata_port_stop,
175 .host_stop = ata_host_stop,
176};
177
178
179/**
180 * jmicron_init_one - Register Jmicron ATA PCI device with kernel services
181 * @pdev: PCI device to register
182 * @ent: Entry in jmicron_pci_tbl matching with @pdev
183 *
184 * Called from kernel PCI layer.
185 *
186 * LOCKING:
187 * Inherited from PCI layer (may sleep).
188 *
189 * RETURNS:
190 * Zero on success, or -ERRNO value.
191 */
192
193static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
194{
195 static struct ata_port_info info = {
196 .sht = &jmicron_sht,
197 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
198
199 .pio_mask = 0x1f,
200 .mwdma_mask = 0x07,
201 .udma_mask = 0x3f,
202
203 .port_ops = &jmicron_ops,
204 };
205 struct ata_port_info *port_info[2] = { &info, &info };
206
207 u32 reg;
208
209 if (id->driver_data != 368) {
210 /* Put the controller into AHCI mode in case the AHCI driver
211 has not yet been loaded. This can be done with either
212 function present */
213
214 /* FIXME: We may want a way to override this in future */
215 pci_write_config_byte(pdev, 0x41, 0xa1);
216 }
217
218 /* PATA controller is fn 1, AHCI is fn 0 */
219 if (PCI_FUNC(pdev->devfn) != 1)
220 return -ENODEV;
221
222 if ( id->driver_data == 365 || id->driver_data == 366) {
223 /* The 365/66 have two PATA channels, redirect the second */
224 pci_read_config_dword(pdev, 0x80, &reg);
225 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
226 pci_write_config_dword(pdev, 0x80, reg);
227 }
228
229 return ata_pci_init_one(pdev, port_info, 2);
230}
231
232static const struct pci_device_id jmicron_pci_tbl[] = {
233 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
234 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
235 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365},
236 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366},
237 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368},
238 { } /* terminate list */
239};
240
241static struct pci_driver jmicron_pci_driver = {
242 .name = DRV_NAME,
243 .id_table = jmicron_pci_tbl,
244 .probe = jmicron_init_one,
245 .remove = ata_pci_remove_one,
246};
247
248static int __init jmicron_init(void)
249{
250 return pci_register_driver(&jmicron_pci_driver);
251}
252
253static void __exit jmicron_exit(void)
254{
255 pci_unregister_driver(&jmicron_pci_driver);
256}
257
258module_init(jmicron_init);
259module_exit(jmicron_exit);
260
261MODULE_AUTHOR("Alan Cox");
262MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
263MODULE_LICENSE("GPL");
264MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
265MODULE_VERSION(DRV_VERSION);
266
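
[Editor's note] For review convenience, the configuration bits consulted by jmicron_pre_reset above can be summarized as follows. This table is derived from the code itself, not from chip documentation.

        /*
         * PCI config dword 0x40:
         *   bit 0 / bit 4 - primary / secondary channel enabled
         *   bit 3         - PATA0 on a 40-wire cable (else 80-wire)
         *   bit 5         - PATA0 port disabled
         *   bit 22        - the two channels are logically swapped
         *   bit 23        - PATA mapped on the secondary channel
         *                   (clear: both channels carry SATA)
         * PCI config dword 0x80:
         *   bit 19        - PATA1 on a 40-wire cable (else 80-wire)
         *   bit 21        - PATA1 port enabled
         *   bit 24        - second PATA port mapped as the primary
         *                   channel (seen on the 365/366)
         */
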
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
new file mode 100644
index 000000000000..ad37c220bb2c
--- /dev/null
+++ b/drivers/ata/pata_legacy.c
@@ -0,0 +1,949 @@
1/*
2 * pata-legacy.c - Legacy port PATA/SATA controller driver.
3 * Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * An ATA driver for the legacy ATA ports.
20 *
21 * Data Sources:
22 * Opti 82C465/82C611 support: Data sheets at opti-inc.com
23 * HT6560 series:
24 * Promise 20230/20620:
25 * http://www.ryston.cz/petr/vlb/pdc20230b.html
26 * http://www.ryston.cz/petr/vlb/pdc20230c.html
27 * http://www.ryston.cz/petr/vlb/pdc20630.html
28 *
29 * Unsupported but docs exist:
30 * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
31 * Winbond W83759A
32 *
33 * This driver handles legacy (that is "ISA/VLB side") IDE ports found
34 * on PC class systems. There are three hybrid devices that are exceptions:
35 * the Cyrix 5510/5520, where a pre-SFF ATA device is on the bridge, and
36 * the MPIIX, where the tuning is PCI side but the IDE is "ISA side".
37 *
38 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
39 * opti82c465mv/promise 20230c/20630
40 *
41 * Use the autospeed and pio_mask options with:
42 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
43 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
44 * Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
45 * Winbond W83759A, Promise PDC20230-B
46 *
47 * For now use autospeed and pio_mask as above with the W83759A. This may
48 * change.
49 *
50 * TODO
51 * Merge existing pata_qdi driver
52 *
53 */
54
55#include <linux/kernel.h>
56#include <linux/module.h>
57#include <linux/pci.h>
58#include <linux/init.h>
59#include <linux/blkdev.h>
60#include <linux/delay.h>
61#include <scsi/scsi_host.h>
62#include <linux/ata.h>
63#include <linux/libata.h>
64#include <linux/platform_device.h>
65
66#define DRV_NAME "pata_legacy"
67#define DRV_VERSION "0.5.3"
68
69#define NR_HOST 6
70
71static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
72static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
73
74struct legacy_data {
75 unsigned long timing;
76 u8 clock[2];
77 u8 last;
78 int fast;
79 struct platform_device *platform_dev;
80
81};
82
83static struct legacy_data legacy_data[NR_HOST];
84static struct ata_host *legacy_host[NR_HOST];
85static int nr_legacy_host;
86
87
88static int probe_all; /* Set to check all ISA port ranges */
89static int ht6560a; /* HT 6560A on primary 1, secondary 2, both 3 */
90static int ht6560b; /* HT 6560B on primary 1, secondary 2, both 3 */
91static int opti82c611a; /* Opti82c611A on primary 1, secondary 2, both 3 */
92static int opti82c46x; /* Opti 82c465MV present (pri/sec autodetect) */
93static int autospeed; /* Chip present which snoops speed changes */
94static int pio_mask = 0x1F; /* PIO range for autospeed devices */
95
96/**
97 * legacy_set_mode - mode setting
98 * @ap: IDE interface
99 *
100 * Use a non standard set_mode function. We don't want to be tuned.
101 *
102 * The BIOS configured everything. Our job is not to fiddle. Just use
103 * whatever PIO the hardware is using and leave it at that. When we
104 * get some kind of nice user driven API for control then we can
105 * expand on this as per hdparm in the base kernel.
106 */
107
108static void legacy_set_mode(struct ata_port *ap)
109{
110 int i;
111
112 for (i = 0; i < ATA_MAX_DEVICES; i++) {
113 struct ata_device *dev = &ap->device[i];
114 if (ata_dev_enabled(dev)) {
115 dev->pio_mode = XFER_PIO_0;
116 dev->xfer_mode = XFER_PIO_0;
117 dev->xfer_shift = ATA_SHIFT_PIO;
118 dev->flags |= ATA_DFLAG_PIO;
119 }
120 }
121}
122
123static struct scsi_host_template legacy_sht = {
124 .module = THIS_MODULE,
125 .name = DRV_NAME,
126 .ioctl = ata_scsi_ioctl,
127 .queuecommand = ata_scsi_queuecmd,
128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING,
135 .proc_name = DRV_NAME,
136 .dma_boundary = ATA_DMA_BOUNDARY,
137 .slave_configure = ata_scsi_slave_config,
138 .bios_param = ata_std_bios_param,
139};
140
141/*
142 * These ops are used if the user indicates the hardware
143 * snoops the commands to decide on the mode and handles the
144 * mode selection "magically" itself. Several legacy controllers
145 * do this. The mode range can be set if it is not 0x1F by setting
146 * pio_mask as well.
147 */
148
149static struct ata_port_operations simple_port_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .exec_command = ata_exec_command,
155 .dev_select = ata_std_dev_select,
156
157 .freeze = ata_bmdma_freeze,
158 .thaw = ata_bmdma_thaw,
159 .error_handler = ata_bmdma_error_handler,
160 .post_internal_cmd = ata_bmdma_post_internal_cmd,
161
162 .qc_prep = ata_qc_prep,
163 .qc_issue = ata_qc_issue_prot,
164 .eng_timeout = ata_eng_timeout,
165 .data_xfer = ata_pio_data_xfer_noirq,
166
167 .irq_handler = ata_interrupt,
168 .irq_clear = ata_bmdma_irq_clear,
169
170 .port_start = ata_port_start,
171 .port_stop = ata_port_stop,
172 .host_stop = ata_host_stop
173};
174
175static struct ata_port_operations legacy_port_ops = {
176 .set_mode = legacy_set_mode,
177
178 .port_disable = ata_port_disable,
179 .tf_load = ata_tf_load,
180 .tf_read = ata_tf_read,
181 .check_status = ata_check_status,
182 .exec_command = ata_exec_command,
183 .dev_select = ata_std_dev_select,
184
185 .error_handler = ata_bmdma_error_handler,
186
187 .qc_prep = ata_qc_prep,
188 .qc_issue = ata_qc_issue_prot,
189 .eng_timeout = ata_eng_timeout,
190 .data_xfer = ata_pio_data_xfer_noirq,
191
192 .irq_handler = ata_interrupt,
193 .irq_clear = ata_bmdma_irq_clear,
194
195 .port_start = ata_port_start,
196 .port_stop = ata_port_stop,
197 .host_stop = ata_host_stop
198};
199
200/*
201 * Promise 20230C and 20620 support
202 *
203 * This controller supports PIO0 to PIO2. We set PIO timings conservatively to
204 * allow for 50MHz Vesa Local Bus. The 20620 DMA support is weird, being DMA to
205 * the controller and PIO'd to the host, and is not supported.
206 */
207
208static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
209{
210 int tries = 5;
211 int pio = adev->pio_mode - XFER_PIO_0;
212 u8 rt;
213 unsigned long flags;
214
215 /* Safe as UP only. Force I/Os to occur together */
216
217 local_irq_save(flags);
218
219 /* Unlock the control interface */
220 do
221 {
222 inb(0x1F5);
223 outb(inb(0x1F2) | 0x80, 0x1F2);
224 inb(0x1F2);
225 inb(0x3F6);
226 inb(0x3F6);
227 inb(0x1F2);
228 inb(0x1F2);
229 }
230 while((inb(0x1F2) & 0x80) && --tries);
231
232 local_irq_restore(flags);
233
234 outb(inb(0x1F4) & 0x07, 0x1F4);
235
236 rt = inb(0x1F3);
237 rt &= 0x07 << (3 * adev->devno);
238 if (pio)
239 rt |= (1 + 3 * pio) << (3 * adev->devno);
240
241 udelay(100);
242 outb(inb(0x1F2) | 0x01, 0x1F2);
243 udelay(100);
244 inb(0x1F5);
245
246}
247
248static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
249{
250 struct ata_port *ap = adev->ap;
251 int slop = buflen & 3;
252 unsigned long flags;
253
254 if (ata_id_has_dword_io(adev->id)) {
255 local_irq_save(flags);
256
257 /* Perform the 32bit I/O synchronization sequence */
258 inb(ap->ioaddr.nsect_addr);
259 inb(ap->ioaddr.nsect_addr);
260 inb(ap->ioaddr.nsect_addr);
261
262 /* Now the data */
263
264 if (write_data)
265 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
266 else
267 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
268
269 if (unlikely(slop)) {
270 u32 pad;
271 if (write_data) {
272 memcpy(&pad, buf + buflen - slop, slop);
273 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
274 } else {
275 pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
276 memcpy(buf + buflen - slop, &pad, slop);
277 }
278 }
279 local_irq_restore(flags);
280 }
281 else
282 ata_pio_data_xfer_noirq(adev, buf, buflen, write_data);
283}
284
285static struct ata_port_operations pdc20230_port_ops = {
286 .set_piomode = pdc20230_set_piomode,
287
288 .port_disable = ata_port_disable,
289 .tf_load = ata_tf_load,
290 .tf_read = ata_tf_read,
291 .check_status = ata_check_status,
292 .exec_command = ata_exec_command,
293 .dev_select = ata_std_dev_select,
294
295 .error_handler = ata_bmdma_error_handler,
296
297 .qc_prep = ata_qc_prep,
298 .qc_issue = ata_qc_issue_prot,
299 .eng_timeout = ata_eng_timeout,
300 .data_xfer = pdc_data_xfer_vlb,
301
302 .irq_handler = ata_interrupt,
303 .irq_clear = ata_bmdma_irq_clear,
304
305 .port_start = ata_port_start,
306 .port_stop = ata_port_stop,
307 .host_stop = ata_host_stop
308};
309
310/*
311 * Holtek 6560A support
312 *
313 * This controller supports PIO0 to PIO2 (no IORDY even though higher timings
314 * can be loaded).
315 */
316
317static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
318{
319 u8 active, recover;
320 struct ata_timing t;
321
322 /* Get the timing data in cycles. For now play safe at 50Mhz */
323 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
324
325 active = FIT(t.active, 2, 15);
326 recover = FIT(t.recover, 4, 15);
327
328 inb(0x3E6);
329 inb(0x3E6);
330 inb(0x3E6);
331 inb(0x3E6);
332
333 outb(recover << 4 | active, ap->ioaddr.device_addr);
334 inb(ap->ioaddr.status_addr);
335}
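
/*
 * [Editor's note, not part of the patch] A worked example of the timing
 * quantization used above: with the 20000ps (50MHz, 20ns) clock assumed
 * by the call to ata_timing_compute(), a 100ns active pulse quantizes to
 * 5 clocks and a 70ns recovery rounds up to 4 clocks; FIT() then clamps
 * the results into the 2-15 and 4-15 ranges the chip can encode before
 * they are packed as recover << 4 | active.
 */
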
336
337static struct ata_port_operations ht6560a_port_ops = {
338 .set_piomode = ht6560a_set_piomode,
339
340 .port_disable = ata_port_disable,
341 .tf_load = ata_tf_load,
342 .tf_read = ata_tf_read,
343 .check_status = ata_check_status,
344 .exec_command = ata_exec_command,
345 .dev_select = ata_std_dev_select,
346
347 .error_handler = ata_bmdma_error_handler,
348
349 .qc_prep = ata_qc_prep,
350 .qc_issue = ata_qc_issue_prot,
351 .eng_timeout = ata_eng_timeout,
352 .data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */
353
354 .irq_handler = ata_interrupt,
355 .irq_clear = ata_bmdma_irq_clear,
356
357 .port_start = ata_port_start,
358 .port_stop = ata_port_stop,
359 .host_stop = ata_host_stop
360};
361
362/*
363 * Holtek 6560B support
364 *
365 * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
366 * unless we see an ATAPI device in which case we force it off.
367 *
368 * FIXME: need to implement 2nd channel support.
369 */
370
371static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
372{
373 u8 active, recover;
374 struct ata_timing t;
375
376 /* Get the timing data in cycles. For now play safe at 50Mhz */
377 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
378
379 active = FIT(t.active, 2, 15);
380 recover = FIT(t.recover, 2, 16);
381 recover &= 0x0F;
382
383 inb(0x3E6);
384 inb(0x3E6);
385 inb(0x3E6);
386 inb(0x3E6);
387
388 outb(recover << 4 | active, ap->ioaddr.device_addr);
389
390 if (adev->class != ATA_DEV_ATA) {
391 u8 rconf = inb(0x3E6);
392 if (rconf & 0x24) {
393 rconf &= ~ 0x24;
394 outb(rconf, 0x3E6);
395 }
396 }
397 inb(ap->ioaddr.status_addr);
398}
399
400static struct ata_port_operations ht6560b_port_ops = {
401 .set_piomode = ht6560b_set_piomode,
402
403 .port_disable = ata_port_disable,
404 .tf_load = ata_tf_load,
405 .tf_read = ata_tf_read,
406 .check_status = ata_check_status,
407 .exec_command = ata_exec_command,
408 .dev_select = ata_std_dev_select,
409
410 .error_handler = ata_bmdma_error_handler,
411
412 .qc_prep = ata_qc_prep,
413 .qc_issue = ata_qc_issue_prot,
414 .eng_timeout = ata_eng_timeout,
415 .data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */
416
417 .irq_handler = ata_interrupt,
418 .irq_clear = ata_bmdma_irq_clear,
419
420 .port_start = ata_port_start,
421 .port_stop = ata_port_stop,
422 .host_stop = ata_host_stop
423};
424
425/*
426 * Opti core chipset helpers
427 */
428
429/**
430 * opti_syscfg - read OPTI chipset configuration
431 * @reg: Configuration register to read
432 *
433 * Returns the value of an OPTI system board configuration register.
434 */
435
436static u8 opti_syscfg(u8 reg)
437{
438 unsigned long flags;
439 u8 r;
440
441 /* Uniprocessor chipset and must force cycles adjacent */
442 local_irq_save(flags);
443 outb(reg, 0x22);
444 r = inb(0x24);
445 local_irq_restore(flags);
446 return r;
447}
448
449/*
450 * Opti 82C611A
451 *
452 * This controller supports PIO0 to PIO3.
453 */
454
455static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
456{
457 u8 active, recover, setup;
458 struct ata_timing t;
459 struct ata_device *pair = ata_dev_pair(adev);
460 int clock;
461 int khz[4] = { 50000, 40000, 33000, 25000 };
462 u8 rc;
463
464 /* Enter configuration mode */
465 inw(ap->ioaddr.error_addr);
466 inw(ap->ioaddr.error_addr);
467 outb(3, ap->ioaddr.nsect_addr);
468
469 /* Read VLB clock strapping */
470 clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03];
471
472 /* Get the timing data in cycles */
473 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
474
475 /* Setup timing is shared */
476 if (pair) {
477 struct ata_timing tp;
478 ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
479
480 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
481 }
482
483 active = FIT(t.active, 2, 17) - 2;
484 recover = FIT(t.recover, 1, 16) - 1;
485 setup = FIT(t.setup, 1, 4) - 1;
486
487 /* Select the right timing bank for write timing */
488 rc = inb(ap->ioaddr.lbal_addr);
489 rc &= 0x7F;
490 rc |= (adev->devno << 7);
491 outb(rc, ap->ioaddr.lbal_addr);
492
493 /* Write the timings */
494 outb(active << 4 | recover, ap->ioaddr.error_addr);
495
496 /* Select the right bank for read timings, also
497 load the shared timings for address */
498 rc = inb(ap->ioaddr.device_addr);
499 rc &= 0xC0;
500 rc |= adev->devno; /* Index select */
501 rc |= (setup << 4) | 0x04;
502 outb(rc, ap->ioaddr.device_addr);
503
504 /* Load the read timings */
505 outb(active << 4 | recover, ap->ioaddr.data_addr);
506
507 /* Ensure the timing register mode is right */
508 rc = inb (ap->ioaddr.lbal_addr);
509 rc &= 0x73;
510 rc |= 0x84;
511 outb(rc, ap->ioaddr.lbal_addr);
512
513 /* Exit command mode */
514 outb(0x83, ap->ioaddr.nsect_addr);
515}
516
517
518static struct ata_port_operations opti82c611a_port_ops = {
519 .set_piomode = opti82c611a_set_piomode,
520
521 .port_disable = ata_port_disable,
522 .tf_load = ata_tf_load,
523 .tf_read = ata_tf_read,
524 .check_status = ata_check_status,
525 .exec_command = ata_exec_command,
526 .dev_select = ata_std_dev_select,
527
528 .error_handler = ata_bmdma_error_handler,
529
530 .qc_prep = ata_qc_prep,
531 .qc_issue = ata_qc_issue_prot,
532 .eng_timeout = ata_eng_timeout,
533 .data_xfer = ata_pio_data_xfer,
534
535 .irq_handler = ata_interrupt,
536 .irq_clear = ata_bmdma_irq_clear,
537
538 .port_start = ata_port_start,
539 .port_stop = ata_port_stop,
540 .host_stop = ata_host_stop
541};
542
543/*
544 * Opti 82C465MV
545 *
546 * This controller supports PIO0 to PIO3. Unlike the 611A the MVB
547 * version is dual channel but doesn't have a lot of unique registers.
548 */
549
550static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
551{
552 u8 active, recover, setup;
553 struct ata_timing t;
554 struct ata_device *pair = ata_dev_pair(adev);
555 int clock;
556 int khz[4] = { 50000, 40000, 33000, 25000 };
557 u8 rc;
558 u8 sysclk;
559
560 /* Get the clock */
561 sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */
562
563 /* Enter configuration mode */
564 inw(ap->ioaddr.error_addr);
565 inw(ap->ioaddr.error_addr);
566 outb(3, ap->ioaddr.nsect_addr);
567
568 /* Read VLB clock strapping */
569 clock = 1000000000 / khz[sysclk];
570
571 /* Get the timing data in cycles */
572 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
573
574 /* Setup timing is shared */
575 if (pair) {
576 struct ata_timing tp;
577 ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
578
579 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
580 }
581
582 active = FIT(t.active, 2, 17) - 2;
583 recover = FIT(t.recover, 1, 16) - 1;
584 setup = FIT(t.setup, 1, 4) - 1;
585
586 /* Select the right timing bank for write timing */
587 rc = inb(ap->ioaddr.lbal_addr);
588 rc &= 0x7F;
589 rc |= (adev->devno << 7);
590 outb(rc, ap->ioaddr.lbal_addr);
591
592 /* Write the timings */
593 outb(active << 4 | recover, ap->ioaddr.error_addr);
594
595 /* Select the right bank for read timings, also
596 load the shared timings for address */
597 rc = inb(ap->ioaddr.device_addr);
598 rc &= 0xC0;
599 rc |= adev->devno; /* Index select */
600 rc |= (setup << 4) | 0x04;
601 outb(rc, ap->ioaddr.device_addr);
602
603 /* Load the read timings */
604 outb(active << 4 | recover, ap->ioaddr.data_addr);
605
606 /* Ensure the timing register mode is right */
607 rc = inb (ap->ioaddr.lbal_addr);
608 rc &= 0x73;
609 rc |= 0x84;
610 outb(rc, ap->ioaddr.lbal_addr);
611
612 /* Exit command mode */
613 outb(0x83, ap->ioaddr.nsect_addr);
614
615 /* We need to know this for quad device on the MVB */
616 ap->host->private_data = ap;
617}
618
619/**
 620 * opti82c46x_qc_issue_prot - command issue
621 * @qc: command pending
622 *
623 * Called when the libata layer is about to issue a command. We wrap
624 * this interface so that we can load the correct ATA timings. The
625 * MVB has a single set of timing registers and these are shared
626 * across channels. As there are two registers we really ought to
627 * track the last two used values as a sort of register window. For
628 * now we just reload on a channel switch. On the single channel
629 * setup this condition never fires so we do nothing extra.
630 *
631 * FIXME: dual channel needs ->serialize support
632 */
633
634static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
635{
636 struct ata_port *ap = qc->ap;
637 struct ata_device *adev = qc->dev;
638
639 /* If timings are set and for the wrong channel (2nd test is
640 due to a libata shortcoming and will eventually go I hope) */
641 if (ap->host->private_data != ap->host
642 && ap->host->private_data != NULL)
643 opti82c46x_set_piomode(ap, adev);
644
645 return ata_qc_issue_prot(qc);
646}
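The wrapper above follows a pattern several drivers in this series use (mpiix, ns87410 and oldpiix do much the same): remember which device or channel the shared timing registers were last loaded for and reprogram lazily at command issue. A minimal stand-alone sketch of that pattern, with hypothetical names, not driver code:

	#include <stdio.h>

	/* Hypothetical device handle; illustration only. */
	struct dev { int id; };

	/* Device the single shared set of timing registers currently serves. */
	static struct dev *timing_owner;

	static void load_timings(struct dev *d)
	{
		/* ...reprogram the shared timing registers for d here... */
		timing_owner = d;
		printf("timings reloaded for device %d\n", d->id);
	}

	/* Called on every command issue: reprogram only when ownership changes. */
	static void issue_cmd(struct dev *d)
	{
		if (timing_owner != d)
			load_timings(d);
		/* ...then hand the command to the normal issue path... */
	}

	int main(void)
	{
		struct dev a = { 0 }, b = { 1 };

		issue_cmd(&a);	/* reload */
		issue_cmd(&a);	/* no reload */
		issue_cmd(&b);	/* reload */
		return 0;
	}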
647
648static struct ata_port_operations opti82c46x_port_ops = {
649 .set_piomode = opti82c46x_set_piomode,
650
651 .port_disable = ata_port_disable,
652 .tf_load = ata_tf_load,
653 .tf_read = ata_tf_read,
654 .check_status = ata_check_status,
655 .exec_command = ata_exec_command,
656 .dev_select = ata_std_dev_select,
657
658 .error_handler = ata_bmdma_error_handler,
659
660 .qc_prep = ata_qc_prep,
661 .qc_issue = opti82c46x_qc_issue_prot,
662 .eng_timeout = ata_eng_timeout,
663 .data_xfer = ata_pio_data_xfer,
664
665 .irq_handler = ata_interrupt,
666 .irq_clear = ata_bmdma_irq_clear,
667
668 .port_start = ata_port_start,
669 .port_stop = ata_port_stop,
670 .host_stop = ata_host_stop
671};
672
673
674/**
675 * legacy_init_one - attach a legacy interface
676 * @port: port number
677 * @io: I/O port start
678 * @ctrl: control port
679 * @irq: interrupt line
680 *
 681 * Register an ISA bus IDE interface. Such interfaces are PIO-only and we
 682 * assume they do not support IRQ sharing.
683 */
684
685static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
686{
687 struct legacy_data *ld = &legacy_data[nr_legacy_host];
688 struct ata_probe_ent ae;
689 struct platform_device *pdev;
690 int ret = -EBUSY;
691 struct ata_port_operations *ops = &legacy_port_ops;
692 int pio_modes = pio_mask;
693 u32 mask = (1 << port);
694
695 if (request_region(io, 8, "pata_legacy") == NULL)
696 return -EBUSY;
697 if (request_region(ctrl, 1, "pata_legacy") == NULL)
698 goto fail_io;
699
700 pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
701 if (pdev == NULL)
702 goto fail_dev;
703
704 if (ht6560a & mask) {
705 ops = &ht6560a_port_ops;
706 pio_modes = 0x07;
707 }
708 if (ht6560b & mask) {
709 ops = &ht6560b_port_ops;
710 pio_modes = 0x1F;
711 }
712 if (opti82c611a & mask) {
713 ops = &opti82c611a_port_ops;
714 pio_modes = 0x0F;
715 }
716 if (opti82c46x & mask) {
717 ops = &opti82c46x_port_ops;
718 pio_modes = 0x0F;
719 }
720
721 /* Probe for automatically detectable controllers */
722
723 if (io == 0x1F0 && ops == &legacy_port_ops) {
724 unsigned long flags;
725
726 local_irq_save(flags);
727
728 /* Probes */
729 inb(0x1F5);
730 outb(inb(0x1F2) | 0x80, 0x1F2);
731 inb(0x1F2);
732 inb(0x3F6);
733 inb(0x3F6);
734 inb(0x1F2);
735 inb(0x1F2);
736
737 if ((inb(0x1F2) & 0x80) == 0) {
738 /* PDC20230c or 20630 ? */
739 printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
740 pio_modes = 0x07;
741 ops = &pdc20230_port_ops;
742 udelay(100);
743 inb(0x1F5);
744 } else {
745 outb(0x55, 0x1F2);
746 inb(0x1F2);
747 inb(0x1F2);
748 if (inb(0x1F2) == 0x00) {
749 printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
750 }
751 }
752 local_irq_restore(flags);
753 }
754
755
756 /* Chip does mode setting by command snooping */
757 if (ops == &legacy_port_ops && (autospeed & mask))
758 ops = &simple_port_ops;
759 memset(&ae, 0, sizeof(struct ata_probe_ent));
760 INIT_LIST_HEAD(&ae.node);
761 ae.dev = &pdev->dev;
762 ae.port_ops = ops;
763 ae.sht = &legacy_sht;
764 ae.n_ports = 1;
765 ae.pio_mask = pio_modes;
766 ae.irq = irq;
767 ae.irq_flags = 0;
768 ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
769 ae.port[0].cmd_addr = io;
770 ae.port[0].altstatus_addr = ctrl;
771 ae.port[0].ctl_addr = ctrl;
772 ata_std_ports(&ae.port[0]);
773 ae.private_data = ld;
774
775 ret = ata_device_add(&ae);
776 if (ret == 0) {
777 ret = -ENODEV;
778 goto fail;
779 }
780 legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
781 ld->platform_dev = pdev;
782 return 0;
783
784fail:
785 platform_device_unregister(pdev);
786fail_dev:
787 release_region(ctrl, 1);
788fail_io:
789 release_region(io, 8);
790 return ret;
791}
792
793/**
794 * legacy_check_special_cases - ATA special cases
795 * @p: PCI device to check
 796 * @primary: set this if we find an ATA primary interface
 797 * @secondary: set this if we find an ATA secondary interface
798 *
799 * A small number of vendors implemented early PCI ATA interfaces on bridge logic
800 * without the ATA interface being PCI visible. Where we have a matching PCI driver
801 * we must skip the relevant device here. If we don't know about it then the legacy
802 * driver is the right driver anyway.
803 */
804
805static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
806{
807 /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
808 if (p->vendor == 0x1078 && p->device == 0x0000) {
809 *primary = *secondary = 1;
810 return;
811 }
812 /* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
813 if (p->vendor == 0x1078 && p->device == 0x0002) {
814 *primary = *secondary = 1;
815 return;
816 }
817 /* Intel MPIIX - PIO ATA on non PCI side of bridge */
818 if (p->vendor == 0x8086 && p->device == 0x1234) {
819 u16 r;
820 pci_read_config_word(p, 0x6C, &r);
821 if (r & 0x8000) { /* ATA port enabled */
822 if (r & 0x4000)
823 *secondary = 1;
824 else
825 *primary = 1;
826 }
827 return;
828 }
829}
830
831
832/**
833 * legacy_init - attach legacy interfaces
834 *
835 * Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
 836 * Right now we do not scan the ide0 and ide1 addresses but should do so
 837 * for non-PCI systems or systems with no PCI IDE legacy mode devices.
838 * If you fix that note there are special cases to consider like VLB
839 * drivers and CS5510/20.
840 */
841
842static __init int legacy_init(void)
843{
844 int i;
845 int ct = 0;
846 int primary = 0;
847 int secondary = 0;
848 int last_port = NR_HOST;
849
850 struct pci_dev *p = NULL;
851
852 for_each_pci_dev(p) {
853 int r;
854 /* Check for any overlap of the system ATA mappings. Native mode controllers
855 stuck on these addresses or some devices in 'raid' mode won't be found by
856 the storage class test */
857 for (r = 0; r < 6; r++) {
858 if (pci_resource_start(p, r) == 0x1f0)
859 primary = 1;
860 if (pci_resource_start(p, r) == 0x170)
861 secondary = 1;
862 }
863 /* Check for special cases */
864 legacy_check_special_cases(p, &primary, &secondary);
865
866 /* If PCI bus is present then don't probe for tertiary legacy ports */
867 if (probe_all == 0)
868 last_port = 2;
869 }
870
871 /* If an OPTI 82C46X is present find out where the channels are */
872 if (opti82c46x) {
873 static const char *optis[4] = {
874 "3/463MV", "5MV",
875 "5MVA", "5MVB"
876 };
877 u8 chans = 1;
878 u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
879
880 opti82c46x = 3; /* Assume master and slave first */
881 printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
882 if (ctrl == 3)
883 chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
884 ctrl = opti_syscfg(0xAC);
885 /* Check enabled and this port is the 465MV port. On the
886 MVB we may have two channels */
887 if (ctrl & 8) {
888 if (ctrl & 4)
889 opti82c46x = 2; /* Slave */
890 else
891 opti82c46x = 1; /* Master */
892 if (chans == 2)
893 opti82c46x = 3; /* Master and Slave */
894 } /* Slave only */
895 else if (chans == 1)
896 opti82c46x = 1;
897 }
898
899 for (i = 0; i < last_port; i++) {
900 /* Skip primary if we have seen a PCI one */
901 if (i == 0 && primary == 1)
902 continue;
903 /* Skip secondary if we have seen a PCI one */
904 if (i == 1 && secondary == 1)
905 continue;
906 if (legacy_init_one(i, legacy_port[i],
907 legacy_port[i] + 0x0206,
908 legacy_irq[i]) == 0)
909 ct++;
910 }
911 if (ct != 0)
912 return 0;
913 return -ENODEV;
914}
915
916static __exit void legacy_exit(void)
917{
918 int i;
919
920 for (i = 0; i < nr_legacy_host; i++) {
921 struct legacy_data *ld = &legacy_data[i];
 922		struct ata_port *ap = legacy_host[i]->ports[0];
923 unsigned long io = ap->ioaddr.cmd_addr;
924 unsigned long ctrl = ap->ioaddr.ctl_addr;
925 ata_host_remove(legacy_host[i]);
926 platform_device_unregister(ld->platform_dev);
927 if (ld->timing)
928 release_region(ld->timing, 2);
929 release_region(io, 8);
930 release_region(ctrl, 1);
931 }
932}
933
934MODULE_AUTHOR("Alan Cox");
935MODULE_DESCRIPTION("low-level driver for legacy ATA");
936MODULE_LICENSE("GPL");
937MODULE_VERSION(DRV_VERSION);
938
939module_param(probe_all, int, 0);
940module_param(autospeed, int, 0);
941module_param(ht6560a, int, 0);
942module_param(ht6560b, int, 0);
943module_param(opti82c611a, int, 0);
944module_param(opti82c46x, int, 0);
945module_param(pio_mask, int, 0);
946
947module_init(legacy_init);
948module_exit(legacy_exit);
949
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
new file mode 100644
index 000000000000..1958c4ed09a8
--- /dev/null
+++ b/drivers/ata/pata_mpiix.c
@@ -0,0 +1,313 @@
1/*
2 * pata_mpiix.c - Intel MPIIX PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * The MPIIX is different enough to the PIIX4 and friends that we give it
7 * a separate driver. The old ide/pci code handles this by just not tuning
8 * MPIIX at all.
9 *
10 * The MPIIX also differs in another important way from the majority of PIIX
11 * devices. The chip is a bridge (pardon the pun) between the old world of
12 * ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
13 * IDE controller is not decoded in PCI space and the chip does not claim to
14 * be IDE class PCI. This requires slightly non-standard probe logic compared
15 * with PCI IDE and also that we do not disable the device when our driver is
16 * unloaded (as it has many other functions).
17 *
 18 * The driver consciously keeps this logic internally to avoid pushing quirky
19 * PATA history into the clean libata layer.
20 *
 21 * ThinkPad specific note: If you boot an MPIIX-based ThinkPad with a PCMCIA
 22 * hard disk present this driver will not detect it. This is not a bug. In this
 23 * configuration the secondary port of the MPIIX is disabled and the addresses
 24 * are decoded by the PCMCIA bridge, and are therefore left to a generic IDE
 25 * driver to operate.
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <scsi/scsi_host.h>
35#include <linux/libata.h>
36
37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.1"
39
40enum {
41 IDETIM = 0x6C, /* IDE control register */
42 IORDY = (1 << 1),
43 PPE = (1 << 2),
44 FTIM = (1 << 0),
45 ENABLED = (1 << 15),
46 SECONDARY = (1 << 14)
47};
48
49static int mpiix_pre_reset(struct ata_port *ap)
50{
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 static const struct pci_bits mpiix_enable_bits[] = {
53 { 0x6D, 1, 0x80, 0x80 },
54 { 0x6F, 1, 0x80, 0x80 }
55 };
56
57 if (!pci_test_config_bits(pdev, &mpiix_enable_bits[ap->port_no])) {
58 ata_port_disable(ap);
59 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
60 return 0;
61 }
62 ap->cbl = ATA_CBL_PATA40;
63 return ata_std_prereset(ap);
64}
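The pci_bits tables used by these pre_reset hooks describe an enable test roughly as { config offset, width in bytes, mask, expected value }, which pci_test_config_bits() evaluates against PCI configuration space. A rough user-space sketch of the single-byte case used above, assuming the configuration byte has already been read; illustrative only:

	#include <stdint.h>
	#include <stdbool.h>

	/* Illustrative only: emulate the single-byte form of an enable-bit
	 * test, given the byte already read from PCI config space at the
	 * table's offset. */
	struct cfg_bits {
		unsigned int reg;	/* config space offset */
		unsigned int width;	/* bytes to read (1 here) */
		uint32_t mask;		/* bits to inspect */
		uint32_t val;		/* value meaning "port enabled" */
	};

	static bool port_enabled(const struct cfg_bits *b, uint8_t cfg_byte)
	{
		return (cfg_byte & b->mask) == b->val;
	}

	int main(void)
	{
		/* MPIIX primary port: offset 0x6D, bit 7 set (from the table above). */
		struct cfg_bits primary = { 0x6D, 1, 0x80, 0x80 };
		uint8_t cfg = 0x80;		/* hypothetical config byte */

		return port_enabled(&primary, cfg) ? 0 : 1;
	}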
65
66/**
67 * mpiix_error_handler - probe reset
68 * @ap: ATA port
69 *
70 * Perform the ATA probe and bus reset sequence plus specific handling
71 * for this hardware. The MPIIX has the enable bits in a different place
72 * to PIIX4 and friends. As a pure PIO device it has no cable detect
73 */
74
75static void mpiix_error_handler(struct ata_port *ap)
76{
77 ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
78}
79
80/**
81 * mpiix_set_piomode - set initial PIO mode data
82 * @ap: ATA interface
83 * @adev: ATA device
84 *
85 * Called to do the PIO mode setup. The MPIIX allows us to program the
86 * IORDY sample point (2-5 clocks), recovery 1-4 clocks and whether
87 * prefetching or iordy are used.
88 *
89 * This would get very ugly because we can only program timing for one
90 * device at a time, the other gets PIO0. Fortunately libata calls
91 * our qc_issue_prot command before a command is issued so we can
92 * flip the timings back and forth to reduce the pain.
93 */
94
95static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
96{
97 int control = 0;
98 int pio = adev->pio_mode - XFER_PIO_0;
99 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
100 u16 idetim;
101 static const /* ISP RTC */
102 u8 timings[][2] = { { 0, 0 },
103 { 0, 0 },
104 { 1, 0 },
105 { 2, 1 },
106 { 2, 3 }, };
107
108 pci_read_config_word(pdev, IDETIM, &idetim);
109 /* Mask the IORDY/TIME/PPE0 bank for this device */
110 if (adev->class == ATA_DEV_ATA)
111 control |= PPE; /* PPE enable for disk */
112 if (ata_pio_need_iordy(adev))
113 control |= IORDY; /* IORDY */
114 if (pio > 0)
115 control |= FTIM; /* This drive is on the fast timing bank */
116
117 /* Mask out timing and clear both TIME bank selects */
118 idetim &= 0xCCEE;
119 idetim &= ~(0x07 << (2 * adev->devno));
120 idetim |= (control << (2 * adev->devno));
121
122 idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
123 pci_write_config_word(pdev, IDETIM, idetim);
124
125 /* We use ap->private_data as a pointer to the device currently
126 loaded for timing */
127 ap->private_data = adev;
128}
129
130/**
131 * mpiix_qc_issue_prot - command issue
132 * @qc: command pending
133 *
134 * Called when the libata layer is about to issue a command. We wrap
135 * this interface so that we can load the correct ATA timings if
 136 * necessary. Our logic also clears TIME0/TIME1 for the other device so
137 * that, even if we get this wrong, cycles to the other device will
138 * be made PIO0.
139 */
140
141static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc)
142{
143 struct ata_port *ap = qc->ap;
144 struct ata_device *adev = qc->dev;
145
146 /* If modes have been configured and the channel data is not loaded
147 then load it. We have to check if pio_mode is set as the core code
148 does not set adev->pio_mode to XFER_PIO_0 while probing as would be
149 logical */
150
151 if (adev->pio_mode && adev != ap->private_data)
152 mpiix_set_piomode(ap, adev);
153
154 return ata_qc_issue_prot(qc);
155}
156
157static struct scsi_host_template mpiix_sht = {
158 .module = THIS_MODULE,
159 .name = DRV_NAME,
160 .ioctl = ata_scsi_ioctl,
161 .queuecommand = ata_scsi_queuecmd,
162 .can_queue = ATA_DEF_QUEUE,
163 .this_id = ATA_SHT_THIS_ID,
164 .sg_tablesize = LIBATA_MAX_PRD,
165 .max_sectors = ATA_MAX_SECTORS,
166 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
167 .emulated = ATA_SHT_EMULATED,
168 .use_clustering = ATA_SHT_USE_CLUSTERING,
169 .proc_name = DRV_NAME,
170 .dma_boundary = ATA_DMA_BOUNDARY,
171 .slave_configure = ata_scsi_slave_config,
172 .bios_param = ata_std_bios_param,
173};
174
175static struct ata_port_operations mpiix_port_ops = {
176 .port_disable = ata_port_disable,
177 .set_piomode = mpiix_set_piomode,
178
179 .tf_load = ata_tf_load,
180 .tf_read = ata_tf_read,
181 .check_status = ata_check_status,
182 .exec_command = ata_exec_command,
183 .dev_select = ata_std_dev_select,
184
185 .freeze = ata_bmdma_freeze,
186 .thaw = ata_bmdma_thaw,
187 .error_handler = mpiix_error_handler,
188 .post_internal_cmd = ata_bmdma_post_internal_cmd,
189
190 .qc_prep = ata_qc_prep,
191 .qc_issue = mpiix_qc_issue_prot,
192 .data_xfer = ata_pio_data_xfer,
193
194 .irq_handler = ata_interrupt,
195 .irq_clear = ata_bmdma_irq_clear,
196
197 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_host_stop
200};
201
202static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
203{
204 /* Single threaded by the PCI probe logic */
205 static struct ata_probe_ent probe[2];
206 static int printed_version;
207 u16 idetim;
208 int enabled;
209
210 if (!printed_version++)
211 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
212
213 /* MPIIX has many functions which can be turned on or off according
214 to other devices present. Make sure IDE is enabled before we try
215 and use it */
216
217 pci_read_config_word(dev, IDETIM, &idetim);
218 if (!(idetim & ENABLED))
219 return -ENODEV;
220
 221	/* We do our own plumbing to avoid leaking special cases for whacko
 222	   ancient hardware into the core code. There are two issues to
 223	   worry about. #1 The chip is a bridge, so in legacy mode with no
 224	   BARs set it fools the standard setup. #2 If you pci_disable_device()
 225	   the MPIIX your box goes castors up */
226
227 INIT_LIST_HEAD(&probe[0].node);
228 probe[0].dev = pci_dev_to_dev(dev);
229 probe[0].port_ops = &mpiix_port_ops;
230 probe[0].sht = &mpiix_sht;
231 probe[0].pio_mask = 0x1F;
232 probe[0].irq = 14;
233 probe[0].irq_flags = SA_SHIRQ;
234 probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
235 probe[0].n_ports = 1;
236 probe[0].port[0].cmd_addr = 0x1F0;
237 probe[0].port[0].ctl_addr = 0x3F6;
238 probe[0].port[0].altstatus_addr = 0x3F6;
239
240 /* The secondary lurks at different addresses but is otherwise
241 the same beastie */
242
243 INIT_LIST_HEAD(&probe[1].node);
244 probe[1] = probe[0];
245 probe[1].irq = 15;
246 probe[1].port[0].cmd_addr = 0x170;
247 probe[1].port[0].ctl_addr = 0x376;
248 probe[1].port[0].altstatus_addr = 0x376;
249
250 /* Let libata fill in the port details */
251 ata_std_ports(&probe[0].port[0]);
252 ata_std_ports(&probe[1].port[0]);
253
254 /* Now add the port that is active */
255 enabled = (idetim & SECONDARY) ? 1 : 0;
256
257 if (ata_device_add(&probe[enabled]))
258 return 0;
259 return -ENODEV;
260}
261
262/**
263 * mpiix_remove_one - device unload
264 * @pdev: PCI device being removed
265 *
266 * Handle an unplug/unload event for a PCI device. Unload the
267 * PCI driver but do not use the default handler as we *MUST NOT*
268 * disable the device as it has other functions.
269 */
270
271static void __devexit mpiix_remove_one(struct pci_dev *pdev)
272{
273 struct device *dev = pci_dev_to_dev(pdev);
274 struct ata_host *host = dev_get_drvdata(dev);
275
276 ata_host_remove(host);
277 dev_set_drvdata(dev, NULL);
278}
279
280
281
282static const struct pci_device_id mpiix[] = {
283 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
284 { 0, },
285};
286
287static struct pci_driver mpiix_pci_driver = {
288 .name = DRV_NAME,
289 .id_table = mpiix,
290 .probe = mpiix_init_one,
291 .remove = mpiix_remove_one
292};
293
294static int __init mpiix_init(void)
295{
296 return pci_register_driver(&mpiix_pci_driver);
297}
298
299
300static void __exit mpiix_exit(void)
301{
302 pci_unregister_driver(&mpiix_pci_driver);
303}
304
305
306MODULE_AUTHOR("Alan Cox");
307MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
308MODULE_LICENSE("GPL");
309MODULE_DEVICE_TABLE(pci, mpiix);
310MODULE_VERSION(DRV_VERSION);
311
312module_init(mpiix_init);
313module_exit(mpiix_exit);
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
new file mode 100644
index 000000000000..16cb254cb973
--- /dev/null
+++ b/drivers/ata/pata_netcell.c
@@ -0,0 +1,175 @@
1/*
2 * pata_netcell.c - Netcell PATA driver
3 *
4 * (c) 2006 Red Hat <alan@redhat.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/pci.h>
10#include <linux/init.h>
11#include <linux/blkdev.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <scsi/scsi_host.h>
15#include <linux/libata.h>
16#include <linux/ata.h>
17
18#define DRV_NAME "pata_netcell"
19#define DRV_VERSION "0.1.5"
20
21/**
 22 * netcell_pre_reset - check for 40/80 pin
23 * @ap: Port
24 *
25 * Cables are handled by the RAID controller. Report 80 pin.
26 */
27
28static int netcell_pre_reset(struct ata_port *ap)
29{
30 ap->cbl = ATA_CBL_PATA80;
31 return ata_std_prereset(ap);
32}
33
34/**
 35 * netcell_error_handler - Probe specified port on PATA host controller
36 * @ap: Port to probe
37 *
38 * LOCKING:
39 * None (inherited from caller).
40 */
41
42static void netcell_error_handler(struct ata_port *ap)
43{
44 return ata_bmdma_drive_eh(ap, netcell_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
45}
46
47/* No PIO or DMA methods needed for this device */
48
49static struct scsi_host_template netcell_sht = {
50 .module = THIS_MODULE,
51 .name = DRV_NAME,
52 .ioctl = ata_scsi_ioctl,
53 .queuecommand = ata_scsi_queuecmd,
54 .can_queue = ATA_DEF_QUEUE,
55 .this_id = ATA_SHT_THIS_ID,
56 .sg_tablesize = LIBATA_MAX_PRD,
57 /* Special handling needed if you have sector or LBA48 limits */
58 .max_sectors = ATA_MAX_SECTORS,
59 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
60 .emulated = ATA_SHT_EMULATED,
61 .use_clustering = ATA_SHT_USE_CLUSTERING,
62 .proc_name = DRV_NAME,
63 .dma_boundary = ATA_DMA_BOUNDARY,
64 .slave_configure = ata_scsi_slave_config,
65 /* Use standard CHS mapping rules */
66 .bios_param = ata_std_bios_param,
67};
68
69static const struct ata_port_operations netcell_ops = {
70 .port_disable = ata_port_disable,
71
72 /* Task file is PCI ATA format, use helpers */
73 .tf_load = ata_tf_load,
74 .tf_read = ata_tf_read,
75 .check_status = ata_check_status,
76 .exec_command = ata_exec_command,
77 .dev_select = ata_std_dev_select,
78
79 .freeze = ata_bmdma_freeze,
80 .thaw = ata_bmdma_thaw,
81 .error_handler = netcell_error_handler,
82 .post_internal_cmd = ata_bmdma_post_internal_cmd,
83
84 /* BMDMA handling is PCI ATA format, use helpers */
85 .bmdma_setup = ata_bmdma_setup,
86 .bmdma_start = ata_bmdma_start,
87 .bmdma_stop = ata_bmdma_stop,
88 .bmdma_status = ata_bmdma_status,
89 .qc_prep = ata_qc_prep,
90 .qc_issue = ata_qc_issue_prot,
91 .data_xfer = ata_pio_data_xfer,
92
93 /* Timeout handling. Special recovery hooks here */
94 .eng_timeout = ata_eng_timeout,
95 .irq_handler = ata_interrupt,
96 .irq_clear = ata_bmdma_irq_clear,
97
98 /* Generic PATA PCI ATA helpers */
99 .port_start = ata_port_start,
100 .port_stop = ata_port_stop,
101 .host_stop = ata_host_stop,
102};
103
104
105/**
106 * netcell_init_one - Register Netcell ATA PCI device with kernel services
107 * @pdev: PCI device to register
108 * @ent: Entry in netcell_pci_tbl matching with @pdev
109 *
110 * Called from kernel PCI layer.
111 *
112 * LOCKING:
113 * Inherited from PCI layer (may sleep).
114 *
115 * RETURNS:
116 * Zero on success, or -ERRNO value.
117 */
118
119static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
120{
121 static int printed_version;
122 static struct ata_port_info info = {
123 .sht = &netcell_sht,
124 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
125 /* Actually we don't really care about these as the
126 firmware deals with it */
127 .pio_mask = 0x1f, /* pio0-4 */
128 .mwdma_mask = 0x07, /* mwdma0-2 */
 129		.udma_mask 	= 0x3f, /* udma0-5 */
130 .port_ops = &netcell_ops,
131 };
132 static struct ata_port_info *port_info[2] = { &info, &info };
133
134 if (!printed_version++)
135 dev_printk(KERN_DEBUG, &pdev->dev,
136 "version " DRV_VERSION "\n");
137
138 /* Any chip specific setup/optimisation/messages here */
139 ata_pci_clear_simplex(pdev);
140
141 /* And let the library code do the work */
142 return ata_pci_init_one(pdev, port_info, 2);
143}
144
145static const struct pci_device_id netcell_pci_tbl[] = {
146 { PCI_DEVICE(PCI_VENDOR_ID_NETCELL, PCI_DEVICE_ID_REVOLUTION), },
147 { } /* terminate list */
148};
149
150static struct pci_driver netcell_pci_driver = {
151 .name = DRV_NAME,
152 .id_table = netcell_pci_tbl,
153 .probe = netcell_init_one,
154 .remove = ata_pci_remove_one,
155};
156
157static int __init netcell_init(void)
158{
159 return pci_register_driver(&netcell_pci_driver);
160}
161
162static void __exit netcell_exit(void)
163{
164 pci_unregister_driver(&netcell_pci_driver);
165}
166
167module_init(netcell_init);
168module_exit(netcell_exit);
169
170MODULE_AUTHOR("Alan Cox");
171MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
172MODULE_LICENSE("GPL");
173MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
174MODULE_VERSION(DRV_VERSION);
175
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
new file mode 100644
index 000000000000..93d6646d2954
--- /dev/null
+++ b/drivers/ata/pata_ns87410.c
@@ -0,0 +1,236 @@
1/*
2 * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/blkdev.h>
26#include <linux/delay.h>
27#include <scsi/scsi_host.h>
28#include <linux/libata.h>
29
30#define DRV_NAME "pata_ns87410"
31#define DRV_VERSION "0.4.2"
32
33/**
34 * ns87410_pre_reset - probe begin
35 * @ap: ATA port
36 *
37 * Set up cable type and use generic probe init
38 */
39
40static int ns87410_pre_reset(struct ata_port *ap)
41{
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43 static const struct pci_bits ns87410_enable_bits[] = {
44 { 0x43, 1, 0x08, 0x08 },
45 { 0x47, 1, 0x08, 0x08 }
46 };
47
48 if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) {
49 ata_port_disable(ap);
50 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
51 return 0;
52 }
53 ap->cbl = ATA_CBL_PATA40;
54 return ata_std_prereset(ap);
55}
56
57/**
58 * ns87410_error_handler - probe reset
59 * @ap: ATA port
60 *
 61 * Perform the ATA probe and bus reset sequence plus specific handling
 62 * for this hardware. The 87410 keeps its port enable bits in its own
 63 * configuration registers. As a pure PIO device it has no cable detect
64 */
65
66static void ns87410_error_handler(struct ata_port *ap)
67{
68 ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
69}
70
71/**
72 * ns87410_set_piomode - set initial PIO mode data
73 * @ap: ATA interface
74 * @adev: ATA device
75 *
76 * Program timing data. This is kept per channel not per device,
77 * and only affects the data port.
78 */
79
80static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
81{
82 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
83 int port = 0x40 + 4 * ap->port_no;
84 u8 idetcr, idefr;
85 struct ata_timing at;
86
87 static const u8 activebits[15] = {
88 0, 1, 2, 3, 4,
89 5, 5, 6, 6, 6,
90 6, 7, 7, 7, 7
91 };
92
93 static const u8 recoverbits[12] = {
94 0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
95 };
96
97 pci_read_config_byte(pdev, port + 3, &idefr);
98
99 if (ata_pio_need_iordy(adev))
100 idefr |= 0x04; /* IORDY enable */
101 else
102 idefr &= ~0x04;
103
104 if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
105 dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
106 return;
107 }
108
109 at.active = FIT(at.active, 2, 16) - 2;
110 at.setup = FIT(at.setup, 1, 4) - 1;
111 at.recover = FIT(at.recover, 1, 12) - 1;
112
113 idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
114
115 pci_write_config_byte(pdev, port, idetcr);
116 pci_write_config_byte(pdev, port + 3, idefr);
117 /* We use ap->private_data as a pointer to the device currently
118 loaded for timing */
119 ap->private_data = adev;
120}
121
122/**
123 * ns87410_qc_issue_prot - command issue
124 * @qc: command pending
125 *
126 * Called when the libata layer is about to issue a command. We wrap
127 * this interface so that we can load the correct ATA timings if
 128 * necessary.
129 */
130
131static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
132{
133 struct ata_port *ap = qc->ap;
134 struct ata_device *adev = qc->dev;
135
136 /* If modes have been configured and the channel data is not loaded
137 then load it. We have to check if pio_mode is set as the core code
138 does not set adev->pio_mode to XFER_PIO_0 while probing as would be
139 logical */
140
141 if (adev->pio_mode && adev != ap->private_data)
142 ns87410_set_piomode(ap, adev);
143
144 return ata_qc_issue_prot(qc);
145}
146
147static struct scsi_host_template ns87410_sht = {
148 .module = THIS_MODULE,
149 .name = DRV_NAME,
150 .ioctl = ata_scsi_ioctl,
151 .queuecommand = ata_scsi_queuecmd,
152 .can_queue = ATA_DEF_QUEUE,
153 .this_id = ATA_SHT_THIS_ID,
154 .sg_tablesize = LIBATA_MAX_PRD,
155 .max_sectors = ATA_MAX_SECTORS,
156 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
157 .emulated = ATA_SHT_EMULATED,
158 .use_clustering = ATA_SHT_USE_CLUSTERING,
159 .proc_name = DRV_NAME,
160 .dma_boundary = ATA_DMA_BOUNDARY,
161 .slave_configure = ata_scsi_slave_config,
162 .bios_param = ata_std_bios_param,
163};
164
165static struct ata_port_operations ns87410_port_ops = {
166 .port_disable = ata_port_disable,
167 .set_piomode = ns87410_set_piomode,
168
169 .tf_load = ata_tf_load,
170 .tf_read = ata_tf_read,
171 .check_status = ata_check_status,
172 .exec_command = ata_exec_command,
173 .dev_select = ata_std_dev_select,
174
175 .freeze = ata_bmdma_freeze,
176 .thaw = ata_bmdma_thaw,
177 .error_handler = ns87410_error_handler,
178 .post_internal_cmd = ata_bmdma_post_internal_cmd,
179
180 .qc_prep = ata_qc_prep,
181 .qc_issue = ns87410_qc_issue_prot,
182 .eng_timeout = ata_eng_timeout,
183 .data_xfer = ata_pio_data_xfer,
184
185 .irq_handler = ata_interrupt,
186 .irq_clear = ata_bmdma_irq_clear,
187
188 .port_start = ata_port_start,
189 .port_stop = ata_port_stop,
190 .host_stop = ata_host_stop
191};
192
193static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
194{
195 static struct ata_port_info info = {
196 .sht = &ns87410_sht,
197 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
198 .pio_mask = 0x0F,
199 .port_ops = &ns87410_port_ops
200 };
201 static struct ata_port_info *port_info[2] = {&info, &info};
202 return ata_pci_init_one(dev, port_info, 2);
203}
204
205static const struct pci_device_id ns87410[] = {
206 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410), },
207 { 0, },
208};
209
210static struct pci_driver ns87410_pci_driver = {
211 .name = DRV_NAME,
212 .id_table = ns87410,
213 .probe = ns87410_init_one,
214 .remove = ata_pci_remove_one
215};
216
217static int __init ns87410_init(void)
218{
219 return pci_register_driver(&ns87410_pci_driver);
220}
221
222
223static void __exit ns87410_exit(void)
224{
225 pci_unregister_driver(&ns87410_pci_driver);
226}
227
228
229MODULE_AUTHOR("Alan Cox");
230MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
231MODULE_LICENSE("GPL");
232MODULE_DEVICE_TABLE(pci, ns87410);
233MODULE_VERSION(DRV_VERSION);
234
235module_init(ns87410_init);
236module_exit(ns87410_exit);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
new file mode 100644
index 000000000000..04c618a2664b
--- /dev/null
+++ b/drivers/ata/pata_oldpiix.c
@@ -0,0 +1,339 @@
1/*
 2 * pata_oldpiix.c - Intel early PIIX PATA controllers
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * Early PIIX differs significantly from the later PIIX as it lacks
9 * SITRE and the slave timing registers. This means that you have to
10 * set timing per channel, or be clever. Libata tells us whenever it
11 * does drive selection and we use this to reload the timings.
12 *
 13 * Because of these behaviour differences the early PIIX gets its own driver module.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/blkdev.h>
21#include <linux/delay.h>
22#include <linux/device.h>
23#include <scsi/scsi_host.h>
24#include <linux/libata.h>
25#include <linux/ata.h>
26
27#define DRV_NAME "pata_oldpiix"
28#define DRV_VERSION "0.5.1"
29
30/**
31 * oldpiix_pre_reset - probe begin
32 * @ap: ATA port
33 *
34 * Set up cable type and use generic probe init
35 */
36
37static int oldpiix_pre_reset(struct ata_port *ap)
38{
39 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
40 static const struct pci_bits oldpiix_enable_bits[] = {
41 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
42 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
43 };
44
45 if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) {
46 ata_port_disable(ap);
47 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
48 return 0;
49 }
50 ap->cbl = ATA_CBL_PATA40;
51 return ata_std_prereset(ap);
52}
53
54/**
55 * oldpiix_pata_error_handler - Probe specified port on PATA host controller
56 * @ap: Port to probe
 57 *
58 *
59 * LOCKING:
60 * None (inherited from caller).
61 */
62
63static void oldpiix_pata_error_handler(struct ata_port *ap)
64{
65 ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
66}
67
68/**
69 * oldpiix_set_piomode - Initialize host controller PATA PIO timings
70 * @ap: Port whose timings we are configuring
 71 * @adev: Device to configure
72 *
73 * Set PIO mode for device, in host controller PCI config space.
74 *
75 * LOCKING:
76 * None (inherited from caller).
77 */
78
79static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
80{
81 unsigned int pio = adev->pio_mode - XFER_PIO_0;
82 struct pci_dev *dev = to_pci_dev(ap->host->dev);
83 unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
84 u16 idetm_data;
85 int control = 0;
86
87 /*
 88	 * See Intel Document 298600-004 for the timing programming rules
89 * for PIIX/ICH. Note that the early PIIX does not have the slave
90 * timing port at 0x44.
91 */
92
93 static const /* ISP RTC */
94 u8 timings[][2] = { { 0, 0 },
95 { 0, 0 },
96 { 1, 0 },
97 { 2, 1 },
98 { 2, 3 }, };
99
100 if (pio > 2)
101 control |= 1; /* TIME1 enable */
102 if (ata_pio_need_iordy(adev))
103 control |= 2; /* IE IORDY */
104
105 /* Intel specifies that the PPE functionality is for disk only */
106 if (adev->class == ATA_DEV_ATA)
107 control |= 4; /* PPE enable */
108
109 pci_read_config_word(dev, idetm_port, &idetm_data);
110
111 /* Enable PPE, IE and TIME as appropriate. Clear the other
112 drive timing bits */
113 if (adev->devno == 0) {
114 idetm_data &= 0xCCE0;
115 idetm_data |= control;
116 } else {
117 idetm_data &= 0xCC0E;
118 idetm_data |= (control << 4);
119 }
120 idetm_data |= (timings[pio][0] << 12) |
121 (timings[pio][1] << 8);
122 pci_write_config_word(dev, idetm_port, idetm_data);
123
124 /* Track which port is configured */
125 ap->private_data = adev;
126}
127
128/**
129 * oldpiix_set_dmamode - Initialize host controller PATA DMA timings
130 * @ap: Port whose timings we are configuring
131 * @adev: Device to program
 132 *
133 *
134 * Set MWDMA mode for device, in host controller PCI config space.
135 *
136 * LOCKING:
137 * None (inherited from caller).
138 */
139
140static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
141{
142 struct pci_dev *dev = to_pci_dev(ap->host->dev);
143 u8 idetm_port = ap->port_no ? 0x42 : 0x40;
144 u16 idetm_data;
145
146 static const /* ISP RTC */
147 u8 timings[][2] = { { 0, 0 },
148 { 0, 0 },
149 { 1, 0 },
150 { 2, 1 },
151 { 2, 3 }, };
152
153 /*
154 * MWDMA is driven by the PIO timings. We must also enable
155 * IORDY unconditionally along with TIME1. PPE has already
156 * been set when the PIO timing was set.
157 */
158
159 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
160 unsigned int control;
161 const unsigned int needed_pio[3] = {
162 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
163 };
164 int pio = needed_pio[mwdma] - XFER_PIO_0;
165
166 pci_read_config_word(dev, idetm_port, &idetm_data);
167
168 control = 3; /* IORDY|TIME0 */
169 /* Intel specifies that the PPE functionality is for disk only */
170 if (adev->class == ATA_DEV_ATA)
171 control |= 4; /* PPE enable */
172
173 /* If the drive MWDMA is faster than it can do PIO then
174 we must force PIO into PIO0 */
175
176 if (adev->pio_mode < needed_pio[mwdma])
177 /* Enable DMA timing only */
178 control |= 8; /* PIO cycles in PIO0 */
179
180 /* Mask out the relevant control and timing bits we will load. Also
181 clear the other drive TIME register as a precaution */
182 if (adev->devno == 0) {
183 idetm_data &= 0xCCE0;
184 idetm_data |= control;
185 } else {
186 idetm_data &= 0xCC0E;
187 idetm_data |= (control << 4);
188 }
189 idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
190 pci_write_config_word(dev, idetm_port, idetm_data);
191
192 /* Track which port is configured */
193 ap->private_data = adev;
194}
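To condense the decision made above: each MWDMA mode borrows the cycle timing of one particular PIO mode, and if the drive's own PIO mode is slower than that requirement, PIO cycles are forced back to PIO0 while DMA keeps the faster timing. A stand-alone sketch of that mapping, mirroring needed_pio[] above; illustrative only, not driver code:

	#include <stdio.h>

	/* Illustrative: which PIO timing each MWDMA mode reuses on early
	 * PIIX, and whether PIO cycles must be forced to PIO0 because the
	 * drive's own PIO mode is slower than that requirement. */
	static const int needed_pio[3] = { 0, 3, 4 };	/* MWDMA0->PIO0, 1->PIO3, 2->PIO4 */

	int main(void)
	{
		int mwdma = 2;		/* hypothetical drive: MWDMA2 capable ... */
		int drive_pio = 3;	/* ... but only PIO3 capable */

		int timing_pio = needed_pio[mwdma];
		int force_pio0 = drive_pio < timing_pio;

		printf("program PIO%d timings, force PIO0 cycles: %s\n",
		       timing_pio, force_pio0 ? "yes" : "no");
		return 0;
	}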
195
196/**
197 * oldpiix_qc_issue_prot - command issue
198 * @qc: command pending
199 *
200 * Called when the libata layer is about to issue a command. We wrap
201 * this interface so that we can load the correct ATA timings if
 202 * necessary. Our logic also clears TIME0/TIME1 for the other device so
203 * that, even if we get this wrong, cycles to the other device will
204 * be made PIO0.
205 */
206
207static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
208{
209 struct ata_port *ap = qc->ap;
210 struct ata_device *adev = qc->dev;
211
212 if (adev != ap->private_data) {
213 if (adev->dma_mode)
214 oldpiix_set_dmamode(ap, adev);
215 else if (adev->pio_mode)
216 oldpiix_set_piomode(ap, adev);
217 }
218 return ata_qc_issue_prot(qc);
219}
220
221
222static struct scsi_host_template oldpiix_sht = {
223 .module = THIS_MODULE,
224 .name = DRV_NAME,
225 .ioctl = ata_scsi_ioctl,
226 .queuecommand = ata_scsi_queuecmd,
227 .can_queue = ATA_DEF_QUEUE,
228 .this_id = ATA_SHT_THIS_ID,
229 .sg_tablesize = LIBATA_MAX_PRD,
230 .max_sectors = ATA_MAX_SECTORS,
231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
232 .emulated = ATA_SHT_EMULATED,
233 .use_clustering = ATA_SHT_USE_CLUSTERING,
234 .proc_name = DRV_NAME,
235 .dma_boundary = ATA_DMA_BOUNDARY,
236 .slave_configure = ata_scsi_slave_config,
237 .bios_param = ata_std_bios_param,
238};
239
240static const struct ata_port_operations oldpiix_pata_ops = {
241 .port_disable = ata_port_disable,
242 .set_piomode = oldpiix_set_piomode,
243 .set_dmamode = oldpiix_set_dmamode,
244 .mode_filter = ata_pci_default_filter,
245
246 .tf_load = ata_tf_load,
247 .tf_read = ata_tf_read,
248 .check_status = ata_check_status,
249 .exec_command = ata_exec_command,
250 .dev_select = ata_std_dev_select,
251
252 .freeze = ata_bmdma_freeze,
253 .thaw = ata_bmdma_thaw,
254 .error_handler = oldpiix_pata_error_handler,
255 .post_internal_cmd = ata_bmdma_post_internal_cmd,
256
257 .bmdma_setup = ata_bmdma_setup,
258 .bmdma_start = ata_bmdma_start,
259 .bmdma_stop = ata_bmdma_stop,
260 .bmdma_status = ata_bmdma_status,
261 .qc_prep = ata_qc_prep,
262 .qc_issue = oldpiix_qc_issue_prot,
263 .data_xfer = ata_pio_data_xfer,
264
265 .irq_handler = ata_interrupt,
266 .irq_clear = ata_bmdma_irq_clear,
267
268 .port_start = ata_port_start,
269 .port_stop = ata_port_stop,
270 .host_stop = ata_host_stop,
271};
272
273
274/**
275 * oldpiix_init_one - Register PIIX ATA PCI device with kernel services
276 * @pdev: PCI device to register
277 * @ent: Entry in oldpiix_pci_tbl matching with @pdev
278 *
279 * Called from kernel PCI layer. We probe for combined mode (sigh),
280 * and then hand over control to libata, for it to do the rest.
281 *
282 * LOCKING:
283 * Inherited from PCI layer (may sleep).
284 *
285 * RETURNS:
286 * Zero on success, or -ERRNO value.
287 */
288
289static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
290{
291 static int printed_version;
292 static struct ata_port_info info = {
293 .sht = &oldpiix_sht,
294 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
295 .pio_mask = 0x1f, /* pio0-4 */
 296		.mwdma_mask	= 0x07, /* mwdma0-2 */
297 .port_ops = &oldpiix_pata_ops,
298 };
299 static struct ata_port_info *port_info[2] = { &info, &info };
300
301 if (!printed_version++)
302 dev_printk(KERN_DEBUG, &pdev->dev,
303 "version " DRV_VERSION "\n");
304
305 return ata_pci_init_one(pdev, port_info, 2);
306}
307
308static const struct pci_device_id oldpiix_pci_tbl[] = {
309 { PCI_DEVICE(0x8086, 0x1230), },
310 { } /* terminate list */
311};
312
313static struct pci_driver oldpiix_pci_driver = {
314 .name = DRV_NAME,
315 .id_table = oldpiix_pci_tbl,
316 .probe = oldpiix_init_one,
317 .remove = ata_pci_remove_one,
318};
319
320static int __init oldpiix_init(void)
321{
322 return pci_register_driver(&oldpiix_pci_driver);
323}
324
325static void __exit oldpiix_exit(void)
326{
327 pci_unregister_driver(&oldpiix_pci_driver);
328}
329
330
331module_init(oldpiix_init);
332module_exit(oldpiix_exit);
333
334MODULE_AUTHOR("Alan Cox");
335MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
336MODULE_LICENSE("GPL");
337MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
338MODULE_VERSION(DRV_VERSION);
339
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
new file mode 100644
index 000000000000..c3d01325e0e2
--- /dev/null
+++ b/drivers/ata/pata_opti.c
@@ -0,0 +1,292 @@
1/*
 2 * pata_opti.c - Opti PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based on
7 * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
8 *
9 * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
10 *
11 * Authors:
12 * Jaromir Koutek <miri@punknet.cz>,
13 * Jan Harkes <jaharkes@cwi.nl>,
14 * Mark Lord <mlord@pobox.com>
15 * Some parts of code are from ali14xx.c and from rz1000.c.
16 *
17 * Also consulted the FreeBSD prototype driver by Kevin Day to try
18 * and resolve some confusions. Further documentation can be found in
19 * Ralf Brown's interrupt list
20 *
21 * If you have other variants of the Opti range (Viper/Vendetta) please
22 * try this driver with those PCI idents and report back. For the later
23 * chips see the pata_optidma driver
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35
36#define DRV_NAME "pata_opti"
37#define DRV_VERSION "0.2.4"
38
39enum {
40 READ_REG = 0, /* index of Read cycle timing register */
41 WRITE_REG = 1, /* index of Write cycle timing register */
42 CNTRL_REG = 3, /* index of Control register */
43 STRAP_REG = 5, /* index of Strap register */
44 MISC_REG = 6 /* index of Miscellaneous register */
45};
46
47/**
48 * opti_pre_reset - probe begin
49 * @ap: ATA port
50 *
51 * Set up cable type and use generic probe init
52 */
53
54static int opti_pre_reset(struct ata_port *ap)
55{
56 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
57 static const struct pci_bits opti_enable_bits[] = {
58 { 0x45, 1, 0x80, 0x00 },
59 { 0x40, 1, 0x08, 0x00 }
60 };
61
62 if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) {
63 ata_port_disable(ap);
64 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
65 return 0;
66 }
67 ap->cbl = ATA_CBL_PATA40;
68 return ata_std_prereset(ap);
69}
70
71/**
 72 * opti_error_handler - probe reset
73 * @ap: ATA port
74 *
75 * Perform the ATA probe and bus reset sequence plus specific handling
76 * for this hardware. The Opti needs little handling - we have no UDMA66
77 * capability that needs cable detection. All we must do is check the port
78 * is enabled.
79 */
80
81static void opti_error_handler(struct ata_port *ap)
82{
83 ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
84}
85
86/**
87 * opti_write_reg - control register setup
88 * @ap: ATA port
89 * @value: value
90 * @reg: control register number
91 *
92 * The Opti uses magic 'trapdoor' register accesses to do configuration
93 * rather than using PCI space as other controllers do. The double inw
94 * on the error register activates configuration mode. We can then write
95 * the control register
96 */
97
98static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
99{
100 unsigned long regio = ap->ioaddr.cmd_addr;
101
102 /* These 3 unlock the control register access */
103 inw(regio + 1);
104 inw(regio + 1);
105 outb(3, regio + 2);
106
107 /* Do the I/O */
108 outb(val, regio + reg);
109
110 /* Relock */
111 outb(0x83, regio + 2);
112}
113
114#if 0
115/**
116 * opti_read_reg - control register read
117 * @ap: ATA port
118 * @reg: control register number
119 *
120 * The Opti uses magic 'trapdoor' register accesses to do configuration
121 * rather than using PCI space as other controllers do. The double inw
122 * on the error register activates configuration mode. We can then read
123 * the control register
124 */
125
126static u8 opti_read_reg(struct ata_port *ap, int reg)
127{
128 unsigned long regio = ap->ioaddr.cmd_addr;
129 u8 ret;
130 inw(regio + 1);
131 inw(regio + 1);
132 outb(3, regio + 2);
133 ret = inb(regio + reg);
134 outb(0x83, regio + 2);
135}
136#endif
137
138/**
139 * opti_set_piomode - set initial PIO mode data
140 * @ap: ATA interface
141 * @adev: ATA device
142 *
143 * Called to do the PIO mode setup. Timing numbers are taken from
144 * the FreeBSD driver then pre computed to keep the code clean. There
145 * are two tables depending on the hardware clock speed.
146 */
147
148static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
149{
150 struct ata_device *pair = ata_dev_pair(adev);
151 int clock;
152 int pio = adev->pio_mode - XFER_PIO_0;
153 unsigned long regio = ap->ioaddr.cmd_addr;
154 u8 addr;
155
156 /* Address table precomputed with prefetch off and a DCLK of 2 */
157 static const u8 addr_timing[2][5] = {
158 { 0x30, 0x20, 0x20, 0x10, 0x10 },
159 { 0x20, 0x20, 0x10, 0x10, 0x10 }
160 };
161 static const u8 data_rec_timing[2][5] = {
162 { 0x6B, 0x56, 0x42, 0x32, 0x31 },
163 { 0x58, 0x44, 0x32, 0x22, 0x21 }
164 };
165
166 outb(0xff, regio + 5);
167 clock = inw(regio + 5) & 1;
168
169 /*
170 * As with many controllers the address setup time is shared
171 * and must suit both devices if present.
172 */
173
174 addr = addr_timing[clock][pio];
175 if (pair) {
176 /* Hardware constraint */
177 u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
178 if (pair_addr > addr)
179 addr = pair_addr;
180 }
181
182 /* Commence primary programming sequence */
183 opti_write_reg(ap, adev->devno, MISC_REG);
184 opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
185 opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
186 opti_write_reg(ap, addr, MISC_REG);
187
188 /* Programming sequence complete, override strapping */
189 opti_write_reg(ap, 0x85, CNTRL_REG);
190}
191
192static struct scsi_host_template opti_sht = {
193 .module = THIS_MODULE,
194 .name = DRV_NAME,
195 .ioctl = ata_scsi_ioctl,
196 .queuecommand = ata_scsi_queuecmd,
197 .can_queue = ATA_DEF_QUEUE,
198 .this_id = ATA_SHT_THIS_ID,
199 .sg_tablesize = LIBATA_MAX_PRD,
200 .max_sectors = ATA_MAX_SECTORS,
201 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
202 .emulated = ATA_SHT_EMULATED,
203 .use_clustering = ATA_SHT_USE_CLUSTERING,
204 .proc_name = DRV_NAME,
205 .dma_boundary = ATA_DMA_BOUNDARY,
206 .slave_configure = ata_scsi_slave_config,
207 .bios_param = ata_std_bios_param,
208};
209
210static struct ata_port_operations opti_port_ops = {
211 .port_disable = ata_port_disable,
212 .set_piomode = opti_set_piomode,
213/* .set_dmamode = opti_set_dmamode, */
214 .tf_load = ata_tf_load,
215 .tf_read = ata_tf_read,
216 .check_status = ata_check_status,
217 .exec_command = ata_exec_command,
218 .dev_select = ata_std_dev_select,
219
220 .freeze = ata_bmdma_freeze,
221 .thaw = ata_bmdma_thaw,
222 .error_handler = opti_error_handler,
223 .post_internal_cmd = ata_bmdma_post_internal_cmd,
224
225 .bmdma_setup = ata_bmdma_setup,
226 .bmdma_start = ata_bmdma_start,
227 .bmdma_stop = ata_bmdma_stop,
228 .bmdma_status = ata_bmdma_status,
229
230 .qc_prep = ata_qc_prep,
231 .qc_issue = ata_qc_issue_prot,
232 .eng_timeout = ata_eng_timeout,
233 .data_xfer = ata_pio_data_xfer,
234
235 .irq_handler = ata_interrupt,
236 .irq_clear = ata_bmdma_irq_clear,
237
238 .port_start = ata_port_start,
239 .port_stop = ata_port_stop,
240 .host_stop = ata_host_stop
241};
242
243static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
244{
245 static struct ata_port_info info = {
246 .sht = &opti_sht,
247 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
248 .pio_mask = 0x1f,
249 .port_ops = &opti_port_ops
250 };
251 static struct ata_port_info *port_info[2] = { &info, &info };
252 static int printed_version;
253
254 if (!printed_version++)
255 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
256
257 return ata_pci_init_one(dev, port_info, 2);
258}
259
260static const struct pci_device_id opti[] = {
261 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
262 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
263 { 0, },
264};
265
266static struct pci_driver opti_pci_driver = {
267 .name = DRV_NAME,
268 .id_table = opti,
269 .probe = opti_init_one,
270 .remove = ata_pci_remove_one
271};
272
273static int __init opti_init(void)
274{
275 return pci_register_driver(&opti_pci_driver);
276}
277
278
279static void __exit opti_exit(void)
280{
281 pci_unregister_driver(&opti_pci_driver);
282}
283
284
285MODULE_AUTHOR("Alan Cox");
286MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
287MODULE_LICENSE("GPL");
288MODULE_DEVICE_TABLE(pci, opti);
289MODULE_VERSION(DRV_VERSION);
290
291module_init(opti_init);
292module_exit(opti_exit);
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
new file mode 100644
index 000000000000..177a455f4251
--- /dev/null
+++ b/drivers/ata/pata_optidma.c
@@ -0,0 +1,547 @@
1/*
2 * pata_optidma.c - Opti DMA PATA for new ATA layer
3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * The Opti DMA controllers are related to the older PIO PCI controllers
 7 * and indeed the VLB ones. The main differences are that the timing
 8 * numbers are now based on PCI clocks rather than VLB ones, and that
 9 * MWDMA is supported.
10 *
11 * This driver should support Viper-N+, FireStar, FireStar Plus.
12 *
13 * These devices support virtual DMA for read (aka the CS5520). Later
14 * chips support UDMA33, but only if the rest of the board logic does,
15 * so you have to get this right. We don't support the virtual DMA
16 * but we do handle UDMA.
17 *
18 * Bits that are worth knowing
19 * Most control registers are shadowed into I/O registers
 20 *	0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25MHz
21 * Virtual DMA registers *move* between rev 0x02 and rev 0x10
22 * UDMA requires a 66MHz FSB
23 *
24 */
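As a small illustration of the clock strap mentioned above: bit 0 of the shadow register at 0x1F5 distinguishes the 33 MHz and 25 MHz bus clocks. The polarity is assumed here to match the pci_clock convention used below (0 = 33 MHz, 1 = 25 MHz), and how the byte is actually read back is chip specific (pata_opti samples it through its timing register window), so this sketch simply takes the byte as a parameter; illustrative only:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative decode of the 0x1F5 clock strap. The bit-0 polarity
	 * (0 = 33 MHz, 1 = 25 MHz) is an assumption taken from the pci_clock
	 * comment below, not from chip documentation. */
	static unsigned int bus_clock_khz(uint8_t strap_1f5)
	{
		return (strap_1f5 & 1) ? 25000 : 33000;
	}

	int main(void)
	{
		printf("%u kHz\n", bus_clock_khz(0x01));	/* hypothetical strap byte */
		return 0;
	}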
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34
35#define DRV_NAME "pata_optidma"
36#define DRV_VERSION "0.2.1"
37
38enum {
39 READ_REG = 0, /* index of Read cycle timing register */
40 WRITE_REG = 1, /* index of Write cycle timing register */
41 CNTRL_REG = 3, /* index of Control register */
42 STRAP_REG = 5, /* index of Strap register */
43 MISC_REG = 6 /* index of Miscellaneous register */
44};
45
46static int pci_clock; /* 0 = 33 1 = 25 */
47
48/**
49 * optidma_pre_reset - probe begin
50 * @ap: ATA port
51 *
52 * Set up cable type and use generic probe init
53 */
54
55static int optidma_pre_reset(struct ata_port *ap)
56{
57 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
58 static const struct pci_bits optidma_enable_bits = {
59 0x40, 1, 0x08, 0x00
60 };
61
62 if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) {
63 ata_port_disable(ap);
64 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
65 return 0;
66 }
67 ap->cbl = ATA_CBL_PATA40;
68 return ata_std_prereset(ap);
69}
70
71/**
72 * optidma_probe_reset - probe reset
73 * @ap: ATA port
74 *
75 * Perform the ATA probe and bus reset sequence plus specific handling
76 * for this hardware. The Opti needs little handling - we have no UDMA66
77 * capability that needs cable detection. All we must do is check the port
78 * is enabled.
79 */
80
81static void optidma_error_handler(struct ata_port *ap)
82{
83 ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
84}
85
86/**
87 * optidma_unlock - unlock control registers
88 * @ap: ATA port
89 *
90 * Unlock the control register block for this adapter. Registers must not
91 * be unlocked in a situation where libata might look at them.
92 */
93
94static void optidma_unlock(struct ata_port *ap)
95{
96 unsigned long regio = ap->ioaddr.cmd_addr;
97
98 /* These 3 unlock the control register access */
99 inw(regio + 1);
100 inw(regio + 1);
101 outb(3, regio + 2);
102}
103
104/**
105 * optidma_lock - issue temporary relock
106 * @ap: ATA port
107 *
108 * Re-lock the configuration register settings.
109 */
110
111static void optidma_lock(struct ata_port *ap)
112{
113 unsigned long regio = ap->ioaddr.cmd_addr;
114
115 /* Relock */
116 outb(0x83, regio + 2);
117}
118
119/**
120 * optidma_set_mode - set mode data
121 * @ap: ATA interface
122 * @adev: ATA device
123 * @mode: Mode to set
124 *
125 * Called to do the DMA or PIO mode setup. Timing numbers are all
126 * precomputed to keep the code clean. There are two tables depending
127 * on the hardware clock speed.
128 *
129 * WARNING: While we do this the IDE registers vanish. If we take an
130 * IRQ here we depend on the host set locking to avoid catastrophe.
131 */
132
133static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
134{
135 struct ata_device *pair = ata_dev_pair(adev);
136 int pio = adev->pio_mode - XFER_PIO_0;
137 int dma = adev->dma_mode - XFER_MW_DMA_0;
138 unsigned long regio = ap->ioaddr.cmd_addr;
139 u8 addr;
140
141 /* Address table precomputed with a DCLK of 2 */
142 static const u8 addr_timing[2][5] = {
143 { 0x30, 0x20, 0x20, 0x10, 0x10 },
144 { 0x20, 0x20, 0x10, 0x10, 0x10 }
145 };
146 static const u8 data_rec_timing[2][5] = {
147 { 0x59, 0x46, 0x30, 0x20, 0x20 },
148 { 0x46, 0x32, 0x20, 0x20, 0x10 }
149 };
150 static const u8 dma_data_rec_timing[2][3] = {
151 { 0x76, 0x20, 0x20 },
152 { 0x54, 0x20, 0x10 }
153 };
154
155 /* Switch from IDE to control mode */
156 optidma_unlock(ap);
157
158
159 /*
160 * As with many controllers the address setup time is shared
161 * and must suit both devices if present. FIXME: Check if we
162 * need to look at slowest of PIO/DMA mode of either device
163 */
164
165 if (mode >= XFER_MW_DMA_0)
166 addr = 0;
167 else
168 addr = addr_timing[pci_clock][pio];
169
170 if (pair) {
171 u8 pair_addr;
172 /* Hardware constraint */
173 if (pair->dma_mode)
174 pair_addr = 0;
175 else
176 pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
177 if (pair_addr > addr)
178 addr = pair_addr;
179 }
180
181 /* Commence primary programming sequence */
182 /* First we load the device number into the timing select */
183 outb(adev->devno, regio + MISC_REG);
184 /* Now we load the data timings into read data/write data */
185 if (mode < XFER_MW_DMA_0) {
186 outb(data_rec_timing[pci_clock][pio], regio + READ_REG);
187 outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
188 } else if (mode < XFER_UDMA_0) {
189 outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
190 outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
191 }
192 /* Finally we load the address setup into the misc register */
193 outb(addr | adev->devno, regio + MISC_REG);
194
195 /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
196 outb(0x85, regio + CNTRL_REG);
197
198 /* Switch back to IDE mode */
199 optidma_lock(ap);
200
201 /* Note: at this point our programming is incomplete. We are
202 not supposed to program PCI 0x43 "things we hacked onto the chip"
203 until we've done both sets of PIO/DMA timings */
204}
205
206/**
207 * optiplus_set_mode - DMA setup for Firestar Plus
208 * @ap: ATA port
209 * @adev: device
210 * @mode: desired mode
211 *
212 * The Firestar Plus has additional UDMA functionality for UDMA0-2 and
213 * requires that we do some additional work. Because the base work we must
214 * do is mostly shared, we wrap the Firestar setup functionality in this
215 * one function.
216 */
217
218static void optiplus_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
221 u8 udcfg;
222 u8 udslave;
223 int dev2 = 2 * adev->devno;
224 int unit = 2 * ap->port_no + adev->devno;
225 int udma = mode - XFER_UDMA_0;
226
227 pci_read_config_byte(pdev, 0x44, &udcfg);
228 if (mode <= XFER_UDMA_0) {
229 udcfg &= ~(1 << unit);
230 optidma_set_mode(ap, adev, adev->dma_mode);
231 } else {
232 udcfg |= (1 << unit);
233 if (ap->port_no) {
234 pci_read_config_byte(pdev, 0x45, &udslave);
235 udslave &= ~(0x03 << dev2);
236 udslave |= (udma << dev2);
237 pci_write_config_byte(pdev, 0x45, udslave);
238 } else {
239 udcfg &= ~(0x30 << dev2);
240			udcfg |= (udma << (4 + dev2));	/* timing bits live in the high nibble */
241 }
242 }
243 pci_write_config_byte(pdev, 0x44, udcfg);
244}
245
246/**
247 * optidma_set_pio_mode - PIO setup callback
248 * @ap: ATA port
249 * @adev: Device
250 *
251 * The libata core provides separate functions for handling PIO and
252 * DMA programming. The architecture of the Firestar makes it easier
253 * for us to have a common function so we provide wrappers
254 */
255
256static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
257{
258 optidma_set_mode(ap, adev, adev->pio_mode);
259}
260
261/**
262 * optidma_set_dma_mode - DMA setup callback
263 * @ap: ATA port
264 * @adev: Device
265 *
266 * The libata core provides separate functions for handling PIO and
267 * DMA programming. The architecture of the Firestar makes it easier
268 * for us to have a common function so we provide wrappers
269 */
270
271static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
272{
273 optidma_set_mode(ap, adev, adev->dma_mode);
274}
275
276/**
277 * optiplus_set_pio_mode - PIO setup callback
278 * @ap: ATA port
279 * @adev: Device
280 *
281 * The libata core provides separate functions for handling PIO and
282 * DMA programming. The architecture of the Firestar makes it easier
283 * for us to have a common function so we provide wrappers
284 */
285
286static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
287{
288 optiplus_set_mode(ap, adev, adev->pio_mode);
289}
290
291/**
292 * optiplus_set_dma_mode - DMA setup callback
293 * @ap: ATA port
294 * @adev: Device
295 *
296 * The libata core provides separate functions for handling PIO and
297 * DMA programming. The architecture of the Firestar makes it easier
298 * for us to have a common function so we provide wrappers
299 */
300
301static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
302{
303 optiplus_set_mode(ap, adev, adev->dma_mode);
304}
305
306/**
307 * optidma_make_bits43 - PCI setup helper
308 * @adev: ATA device
309 *
310 * Turn the ATA device setup into PCI configuration bits
311 * for register 0x43 and return the two bits needed.
312 */
313
314static u8 optidma_make_bits43(struct ata_device *adev)
315{
316 static const u8 bits43[5] = {
317 0, 0, 0, 1, 2
318 };
319 if (!ata_dev_enabled(adev))
320 return 0;
321 if (adev->dma_mode)
322 return adev->dma_mode - XFER_MW_DMA_0;
323 return bits43[adev->pio_mode - XFER_PIO_0];
324}
325
326/**
327 * optidma_post_set_mode - finalize PCI setup
328 * @ap: port to set up
329 *
330 * Finalise the configuration by writing the nibble of extra bits
331 * of data into the chip.
332 */
333
334static void optidma_post_set_mode(struct ata_port *ap)
335{
336 u8 r;
337 int nybble = 4 * ap->port_no;
338 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
339
340 pci_read_config_byte(pdev, 0x43, &r);
341
342	r &= ~(0x0F << nybble);
343	r |= (optidma_make_bits43(&ap->device[0]) +
344	     (optidma_make_bits43(&ap->device[1]) << 2)) << nybble;
345
346 pci_write_config_byte(pdev, 0x43, r);
347}
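(Editorial aside, not part of this patch: a worked example of the register 0x43 packing done above. Reading optidma_make_bits43() together with the shift-by-2 term, each port appears to own one nibble of 0x43, with the master's code in bits 0-1 and the slave's in bits 2-3 of that nibble; the sketch below is illustrative only.)

	/* Illustrative sketch: primary port (nybble = 0), master in MWDMA2,
	 * slave in PIO3.  optidma_make_bits43() gives 2 for MWDMA2 and
	 * bits43[3] = 1 for PIO3.
	 */
	u8 master_bits = 2;				/* MWDMA2 */
	u8 slave_bits = 1;				/* PIO3 -> bits43[3] */
	u8 nib = master_bits | (slave_bits << 2);	/* 0x06 */
	/* r then becomes (r & ~0x0F) | nib for the primary port's nibble */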
348
349static struct scsi_host_template optidma_sht = {
350 .module = THIS_MODULE,
351 .name = DRV_NAME,
352 .ioctl = ata_scsi_ioctl,
353 .queuecommand = ata_scsi_queuecmd,
354 .can_queue = ATA_DEF_QUEUE,
355 .this_id = ATA_SHT_THIS_ID,
356 .sg_tablesize = LIBATA_MAX_PRD,
357 .max_sectors = ATA_MAX_SECTORS,
358 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
359 .emulated = ATA_SHT_EMULATED,
360 .use_clustering = ATA_SHT_USE_CLUSTERING,
361 .proc_name = DRV_NAME,
362 .dma_boundary = ATA_DMA_BOUNDARY,
363 .slave_configure = ata_scsi_slave_config,
364 .bios_param = ata_std_bios_param,
365};
366
367static struct ata_port_operations optidma_port_ops = {
368 .port_disable = ata_port_disable,
369 .set_piomode = optidma_set_pio_mode,
370 .set_dmamode = optidma_set_dma_mode,
371
372 .tf_load = ata_tf_load,
373 .tf_read = ata_tf_read,
374 .check_status = ata_check_status,
375 .exec_command = ata_exec_command,
376 .dev_select = ata_std_dev_select,
377
378 .freeze = ata_bmdma_freeze,
379 .thaw = ata_bmdma_thaw,
380 .post_internal_cmd = ata_bmdma_post_internal_cmd,
381 .error_handler = optidma_error_handler,
382 .post_set_mode = optidma_post_set_mode,
383
384 .bmdma_setup = ata_bmdma_setup,
385 .bmdma_start = ata_bmdma_start,
386 .bmdma_stop = ata_bmdma_stop,
387 .bmdma_status = ata_bmdma_status,
388
389 .qc_prep = ata_qc_prep,
390 .qc_issue = ata_qc_issue_prot,
391 .eng_timeout = ata_eng_timeout,
392 .data_xfer = ata_pio_data_xfer,
393
394 .irq_handler = ata_interrupt,
395 .irq_clear = ata_bmdma_irq_clear,
396
397 .port_start = ata_port_start,
398 .port_stop = ata_port_stop,
399 .host_stop = ata_host_stop
400};
401
402static struct ata_port_operations optiplus_port_ops = {
403 .port_disable = ata_port_disable,
404 .set_piomode = optiplus_set_pio_mode,
405 .set_dmamode = optiplus_set_dma_mode,
406
407 .tf_load = ata_tf_load,
408 .tf_read = ata_tf_read,
409 .check_status = ata_check_status,
410 .exec_command = ata_exec_command,
411 .dev_select = ata_std_dev_select,
412
413 .freeze = ata_bmdma_freeze,
414 .thaw = ata_bmdma_thaw,
415 .post_internal_cmd = ata_bmdma_post_internal_cmd,
416 .error_handler = optidma_error_handler,
417 .post_set_mode = optidma_post_set_mode,
418
419 .bmdma_setup = ata_bmdma_setup,
420 .bmdma_start = ata_bmdma_start,
421 .bmdma_stop = ata_bmdma_stop,
422 .bmdma_status = ata_bmdma_status,
423
424 .qc_prep = ata_qc_prep,
425 .qc_issue = ata_qc_issue_prot,
426 .eng_timeout = ata_eng_timeout,
427 .data_xfer = ata_pio_data_xfer,
428
429 .irq_handler = ata_interrupt,
430 .irq_clear = ata_bmdma_irq_clear,
431
432 .port_start = ata_port_start,
433 .port_stop = ata_port_stop,
434 .host_stop = ata_host_stop
435};
436
437/**
438 * optiplus_with_udma - Look for UDMA capable setup
439 * @pdev: ATA controller
440 */
441
442static int optiplus_with_udma(struct pci_dev *pdev)
443{
444 u8 r;
445 int ret = 0;
446 int ioport = 0x22;
447 struct pci_dev *dev1;
448
449 /* Find function 1 */
450 dev1 = pci_get_device(0x1045, 0xC701, NULL);
451 if(dev1 == NULL)
452 return 0;
453
454 /* Rev must be >= 0x10 */
455 pci_read_config_byte(dev1, 0x08, &r);
456 if (r < 0x10)
457 goto done_nomsg;
458 /* Read the chipset system configuration to check our mode */
459 pci_read_config_byte(dev1, 0x5F, &r);
460 ioport |= (r << 8);
461 outb(0x10, ioport);
462 /* Must be 66Mhz sync */
463 if ((inb(ioport + 2) & 1) == 0)
464 goto done;
465
466 /* Check the ATA arbitration/timing is suitable */
467 pci_read_config_byte(pdev, 0x42, &r);
468 if ((r & 0x36) != 0x36)
469 goto done;
470 pci_read_config_byte(dev1, 0x52, &r);
471 if (r & 0x80) /* IDEDIR disabled */
472 ret = 1;
473done:
474	if (!ret) printk(KERN_WARNING "UDMA not supported in this configuration.\n");
475done_nomsg: /* Wrong chip revision */
476 pci_dev_put(dev1);
477 return ret;
478}
479
480static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
481{
482 static struct ata_port_info info_82c700 = {
483 .sht = &optidma_sht,
484 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
485 .pio_mask = 0x1f,
486 .mwdma_mask = 0x07,
487 .port_ops = &optidma_port_ops
488 };
489 static struct ata_port_info info_82c700_udma = {
490 .sht = &optidma_sht,
491 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
492 .pio_mask = 0x1f,
493 .mwdma_mask = 0x07,
494 .udma_mask = 0x07,
495 .port_ops = &optiplus_port_ops
496 };
497 static struct ata_port_info *port_info[2];
498 struct ata_port_info *info = &info_82c700;
499 static int printed_version;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
503
504 /* Fixed location chipset magic */
505 inw(0x1F1);
506 inw(0x1F1);
507 pci_clock = inb(0x1F5) & 1; /* 0 = 33Mhz, 1 = 25Mhz */
508
509 if (optiplus_with_udma(dev))
510 info = &info_82c700_udma;
511
512 port_info[0] = port_info[1] = info;
513 return ata_pci_init_one(dev, port_info, 2);
514}
515
516static const struct pci_device_id optidma[] = {
517 { PCI_DEVICE(0x1045, 0xD568), }, /* Opti 82C700 */
518 { 0, },
519};
520
521static struct pci_driver optidma_pci_driver = {
522 .name = DRV_NAME,
523 .id_table = optidma,
524 .probe = optidma_init_one,
525 .remove = ata_pci_remove_one
526};
527
528static int __init optidma_init(void)
529{
530 return pci_register_driver(&optidma_pci_driver);
531}
532
533
534static void __exit optidma_exit(void)
535{
536 pci_unregister_driver(&optidma_pci_driver);
537}
538
539
540MODULE_AUTHOR("Alan Cox");
541MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
542MODULE_LICENSE("GPL");
543MODULE_DEVICE_TABLE(pci, optidma);
544MODULE_VERSION(DRV_VERSION);
545
546module_init(optidma_init);
547module_exit(optidma_exit);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
new file mode 100644
index 000000000000..62b25cda409b
--- /dev/null
+++ b/drivers/ata/pata_pcmcia.c
@@ -0,0 +1,393 @@
1/*
2 * pata_pcmcia.c - PCMCIA PATA controller driver.
3 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
4 * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
5 * <openembedded@hrw.one.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Heavily based upon ide-cs.c
22 * The initial developer of the original code is David A. Hinds
23 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
24 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/ata.h>
34#include <linux/libata.h>
35
36#include <pcmcia/cs_types.h>
37#include <pcmcia/cs.h>
38#include <pcmcia/cistpl.h>
39#include <pcmcia/ds.h>
40#include <pcmcia/cisreg.h>
41#include <pcmcia/ciscode.h>
42
43
44#define DRV_NAME "pata_pcmcia"
45#define DRV_VERSION "0.2.9"
46
47/*
48 * Private data structure to glue stuff together
49 */
50
51struct ata_pcmcia_info {
52 struct pcmcia_device *pdev;
53 int ndev;
54 dev_node_t node;
55};
56
57static struct scsi_host_template pcmcia_sht = {
58 .module = THIS_MODULE,
59 .name = DRV_NAME,
60 .ioctl = ata_scsi_ioctl,
61 .queuecommand = ata_scsi_queuecmd,
62 .can_queue = ATA_DEF_QUEUE,
63 .this_id = ATA_SHT_THIS_ID,
64 .sg_tablesize = LIBATA_MAX_PRD,
65 .max_sectors = ATA_MAX_SECTORS,
66 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
67 .emulated = ATA_SHT_EMULATED,
68 .use_clustering = ATA_SHT_USE_CLUSTERING,
69 .proc_name = DRV_NAME,
70 .dma_boundary = ATA_DMA_BOUNDARY,
71 .slave_configure = ata_scsi_slave_config,
72 .bios_param = ata_std_bios_param,
73};
74
75static struct ata_port_operations pcmcia_port_ops = {
76 .port_disable = ata_port_disable,
77 .tf_load = ata_tf_load,
78 .tf_read = ata_tf_read,
79 .check_status = ata_check_status,
80 .exec_command = ata_exec_command,
81 .dev_select = ata_std_dev_select,
82
83 .freeze = ata_bmdma_freeze,
84 .thaw = ata_bmdma_thaw,
85 .error_handler = ata_bmdma_error_handler,
86 .post_internal_cmd = ata_bmdma_post_internal_cmd,
87
88 .qc_prep = ata_qc_prep,
89 .qc_issue = ata_qc_issue_prot,
90 .eng_timeout = ata_eng_timeout,
91 .data_xfer = ata_pio_data_xfer_noirq,
92
93 .irq_handler = ata_interrupt,
94 .irq_clear = ata_bmdma_irq_clear,
95
96 .port_start = ata_port_start,
97 .port_stop = ata_port_stop,
98 .host_stop = ata_host_stop
99};
100
101#define CS_CHECK(fn, ret) \
102do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
103
104/**
105 * pcmcia_init_one - attach a PCMCIA interface
106 * @pdev: pcmcia device
107 *
108 * Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
109 * shared IRQ.
110 */
111
112static int pcmcia_init_one(struct pcmcia_device *pdev)
113{
114 struct ata_probe_ent ae;
115 struct ata_pcmcia_info *info;
116 tuple_t tuple;
117 struct {
118 unsigned short buf[128];
119 cisparse_t parse;
120 config_info_t conf;
121 cistpl_cftable_entry_t dflt;
122 } *stk = NULL;
123 cistpl_cftable_entry_t *cfg;
124 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
125 unsigned long io_base, ctl_base;
126
127 info = kzalloc(sizeof(*info), GFP_KERNEL);
128 if (info == NULL)
129 return -ENOMEM;
130
131 /* Glue stuff together. FIXME: We may be able to get rid of info with care */
132 info->pdev = pdev;
133 pdev->priv = info;
134
135 /* Set up attributes in order to probe card and get resources */
136 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
137 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
138 pdev->io.IOAddrLines = 3;
139 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
140 pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
141 pdev->conf.Attributes = CONF_ENABLE_IRQ;
142 pdev->conf.IntType = INT_MEMORY_AND_IO;
143
144	/* Allocate resource probing structures */
145
146 stk = kzalloc(sizeof(*stk), GFP_KERNEL);
147 if (!stk)
148 goto out1;
149
150 cfg = &stk->parse.cftable_entry;
151
152 /* Tuples we are walking */
153 tuple.TupleData = (cisdata_t *)&stk->buf;
154 tuple.TupleOffset = 0;
155 tuple.TupleDataMax = 255;
156 tuple.Attributes = 0;
157 tuple.DesiredTuple = CISTPL_CONFIG;
158
159 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
160 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
161 CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
162 pdev->conf.ConfigBase = stk->parse.config.base;
163 pdev->conf.Present = stk->parse.config.rmask[0];
164
165 /* See if we have a manufacturer identifier. Use it to set is_kme for
166 vendor quirks */
167 tuple.DesiredTuple = CISTPL_MANFID;
168 if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse))
169 is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
170
171 /* Not sure if this is right... look up the current Vcc */
172 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
173/* link->conf.Vcc = stk->conf.Vcc; */
174
175 pass = io_base = ctl_base = 0;
176 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
177 tuple.Attributes = 0;
178 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
179
180 /* Now munch the resources looking for a suitable set */
181 while (1) {
182 if (pcmcia_get_tuple_data(pdev, &tuple) != 0)
183 goto next_entry;
184 if (pcmcia_parse_tuple(pdev, &tuple, &stk->parse) != 0)
185 goto next_entry;
186 /* Check for matching Vcc, unless we're desperate */
187 if (!pass) {
188 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
189 if (stk->conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
190 goto next_entry;
191 } else if (stk->dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
192 if (stk->conf.Vcc != stk->dflt.vcc.param[CISTPL_POWER_VNOM] / 10000)
193 goto next_entry;
194 }
195 }
196
197 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
198 pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
199 else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
200 pdev->conf.Vpp = stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
201
202 if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
203 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &stk->dflt.io;
204 pdev->conf.ConfigIndex = cfg->index;
205 pdev->io.BasePort1 = io->win[0].base;
206 pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
207 if (!(io->flags & CISTPL_IO_16BIT))
208 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
209 if (io->nwin == 2) {
210 pdev->io.NumPorts1 = 8;
211 pdev->io.BasePort2 = io->win[1].base;
212 pdev->io.NumPorts2 = (is_kme) ? 2 : 1;
213 if (pcmcia_request_io(pdev, &pdev->io) != 0)
214 goto next_entry;
215 io_base = pdev->io.BasePort1;
216 ctl_base = pdev->io.BasePort2;
217 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
218 pdev->io.NumPorts1 = io->win[0].len;
219 pdev->io.NumPorts2 = 0;
220 if (pcmcia_request_io(pdev, &pdev->io) != 0)
221 goto next_entry;
222 io_base = pdev->io.BasePort1;
223 ctl_base = pdev->io.BasePort1 + 0x0e;
224 } else goto next_entry;
225 /* If we've got this far, we're done */
226 break;
227 }
228next_entry:
229 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
230 memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
231 if (pass) {
232 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(pdev, &tuple));
233 } else if (pcmcia_get_next_tuple(pdev, &tuple) != 0) {
234 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
235 memset(&stk->dflt, 0, sizeof(stk->dflt));
236 pass++;
237 }
238 }
239
240 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
241 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
242
243 /* Success. Disable the IRQ nIEN line, do quirks */
244 outb(0x02, ctl_base);
245 if (is_kme)
246 outb(0x81, ctl_base + 0x01);
247
248 /* FIXME: Could be more ports at base + 0x10 but we only deal with
249 one right now */
250 if (pdev->io.NumPorts1 >= 0x20)
251 printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
252
253 /*
254 * Having done the PCMCIA plumbing the ATA side is relatively
255 * sane.
256 */
257
258 memset(&ae, 0, sizeof(struct ata_probe_ent));
259 INIT_LIST_HEAD(&ae.node);
260 ae.dev = &pdev->dev;
261 ae.port_ops = &pcmcia_port_ops;
262 ae.sht = &pcmcia_sht;
263 ae.n_ports = 1;
264 ae.pio_mask = 1; /* ISA so PIO 0 cycles */
265 ae.irq = pdev->irq.AssignedIRQ;
266 ae.irq_flags = SA_SHIRQ;
267 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
268 ae.port[0].cmd_addr = io_base;
269 ae.port[0].altstatus_addr = ctl_base;
270 ae.port[0].ctl_addr = ctl_base;
271 ata_std_ports(&ae.port[0]);
272
273 if (ata_device_add(&ae) == 0)
274 goto failed;
275
276 info->ndev = 1;
277 kfree(stk);
278 return 0;
279
280cs_failed:
281 cs_error(pdev, last_fn, last_ret);
282failed:
283 kfree(stk);
284 info->ndev = 0;
285 pcmcia_disable_device(pdev);
286out1:
287 kfree(info);
288 return ret;
289}
290
291/**
292 * pcmcia_remove_one - unplug a PCMCIA interface
293 * @pdev: pcmcia device
294 *
295 * A PCMCIA ATA device has been unplugged. Perform the needed
296 * cleanup. Also called on module unload for any active devices.
297 */
298
299static void pcmcia_remove_one(struct pcmcia_device *pdev)
300{
301 struct ata_pcmcia_info *info = pdev->priv;
302 struct device *dev = &pdev->dev;
303
304 if (info != NULL) {
305 /* If we have attached the device to the ATA layer, detach it */
306 if (info->ndev) {
307 struct ata_host *host = dev_get_drvdata(dev);
308 ata_host_remove(host);
309 dev_set_drvdata(dev, NULL);
310 }
311 info->ndev = 0;
312 pdev->priv = NULL;
313 }
314 pcmcia_disable_device(pdev);
315 kfree(info);
316}
317
318static struct pcmcia_device_id pcmcia_devices[] = {
319 PCMCIA_DEVICE_FUNC_ID(4),
320 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
321 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
322 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
323 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
324 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
325 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
326 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
327 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
328 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */
329 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
330 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
331 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
332 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
333 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
334 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
335 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
336 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
337 PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
338 PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
339 PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
340 PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
341 PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
342 PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
343 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
344 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
345 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
346 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
347 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
348 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
349 PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
350 PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
351 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
352 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
353 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
354 PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
355 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
356 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
357 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
358 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
359 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
360 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
361 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
362 PCMCIA_DEVICE_NULL,
363};
364
365MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
366
367static struct pcmcia_driver pcmcia_driver = {
368 .owner = THIS_MODULE,
369 .drv = {
370 .name = DRV_NAME,
371 },
372 .id_table = pcmcia_devices,
373 .probe = pcmcia_init_one,
374 .remove = pcmcia_remove_one,
375};
376
377static int __init pcmcia_init(void)
378{
379 return pcmcia_register_driver(&pcmcia_driver);
380}
381
382static void __exit pcmcia_exit(void)
383{
384 pcmcia_unregister_driver(&pcmcia_driver);
385}
386
387MODULE_AUTHOR("Alan Cox");
388MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
389MODULE_LICENSE("GPL");
390MODULE_VERSION(DRV_VERSION);
391
392module_init(pcmcia_init);
393module_exit(pcmcia_exit);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
new file mode 100644
index 000000000000..56b8c1ee2937
--- /dev/null
+++ b/drivers/ata/pata_pdc2027x.c
@@ -0,0 +1,869 @@
1/*
2 * Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Ported to libata by:
10 * Albert Lee <albertcc@tw.ibm.com> IBM Corporation
11 *
12 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
13 * Portions Copyright (C) 1999 Promise Technology, Inc.
14 *
15 * Author: Frank Tiernan (frankt@promise.com)
16 * Released under terms of General Public License
17 *
18 *
19 * libata documentation is available via 'make {ps|pdf}docs',
20 * as Documentation/DocBook/libata.*
21 *
22 * Hardware information only available under NDA.
23 *
24 */
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_cmnd.h>
35#include <linux/libata.h>
36#include <asm/io.h>
37
38#define DRV_NAME "pata_pdc2027x"
39#define DRV_VERSION "0.74-ac3"
40#undef PDC_DEBUG
41
42#ifdef PDC_DEBUG
43#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
44#else
45#define PDPRINTK(fmt, args...)
46#endif
47
48enum {
49 PDC_UDMA_100 = 0,
50 PDC_UDMA_133 = 1,
51
52 PDC_100_MHZ = 100000000,
53 PDC_133_MHZ = 133333333,
54
55 PDC_SYS_CTL = 0x1100,
56 PDC_ATA_CTL = 0x1104,
57 PDC_GLOBAL_CTL = 0x1108,
58 PDC_CTCR0 = 0x110C,
59 PDC_CTCR1 = 0x1110,
60 PDC_BYTE_COUNT = 0x1120,
61 PDC_PLL_CTL = 0x1202,
62};
63
64static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
65static void pdc2027x_remove_one(struct pci_dev *pdev);
66static void pdc2027x_error_handler(struct ata_port *ap);
67static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
68static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
69static void pdc2027x_post_set_mode(struct ata_port *ap);
70static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
71
72/*
73 * ATA Timing Tables based on 133MHz controller clock.
74 * These tables are only used when the controller runs on a 133MHz clock.
75 * If the controller runs on a 100MHz clock, the ASIC hardware will
76 * set the timing registers automatically when the "set features" command
77 * is issued to the device. However, if the controller clock is 133MHz,
78 * the following tables must be used.
79 */
80static struct pdc2027x_pio_timing {
81 u8 value0, value1, value2;
82} pdc2027x_pio_timing_tbl [] = {
83 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
84 { 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
85 { 0x23, 0x26, 0x64 }, /* PIO mode 2 */
86 { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
87 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
88};
89
90static struct pdc2027x_mdma_timing {
91 u8 value0, value1;
92} pdc2027x_mdma_timing_tbl [] = {
93 { 0xdf, 0x5f }, /* MDMA mode 0 */
94 { 0x6b, 0x27 }, /* MDMA mode 1 */
95 { 0x69, 0x25 }, /* MDMA mode 2 */
96};
97
98static struct pdc2027x_udma_timing {
99 u8 value0, value1, value2;
100} pdc2027x_udma_timing_tbl [] = {
101 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
102 { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
103 { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
104 { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
105 { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
106 { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
107 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
108};
109
110static const struct pci_device_id pdc2027x_pci_tbl[] = {
111 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20268, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
112 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20269, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
113 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
114 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20271, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
115 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20275, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
116 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20276, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
117 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20277, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
118 { } /* terminate list */
119};
120
121static struct pci_driver pdc2027x_pci_driver = {
122 .name = DRV_NAME,
123 .id_table = pdc2027x_pci_tbl,
124 .probe = pdc2027x_init_one,
125 .remove = __devexit_p(pdc2027x_remove_one),
126};
127
128static struct scsi_host_template pdc2027x_sht = {
129 .module = THIS_MODULE,
130 .name = DRV_NAME,
131 .ioctl = ata_scsi_ioctl,
132 .queuecommand = ata_scsi_queuecmd,
133 .can_queue = ATA_DEF_QUEUE,
134 .this_id = ATA_SHT_THIS_ID,
135 .sg_tablesize = LIBATA_MAX_PRD,
136 .max_sectors = ATA_MAX_SECTORS,
137 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
138 .emulated = ATA_SHT_EMULATED,
139 .use_clustering = ATA_SHT_USE_CLUSTERING,
140 .proc_name = DRV_NAME,
141 .dma_boundary = ATA_DMA_BOUNDARY,
142 .slave_configure = ata_scsi_slave_config,
143 .bios_param = ata_std_bios_param,
144};
145
146static struct ata_port_operations pdc2027x_pata100_ops = {
147 .port_disable = ata_port_disable,
148
149 .tf_load = ata_tf_load,
150 .tf_read = ata_tf_read,
151 .check_status = ata_check_status,
152 .exec_command = ata_exec_command,
153 .dev_select = ata_std_dev_select,
154
155 .check_atapi_dma = pdc2027x_check_atapi_dma,
156 .bmdma_setup = ata_bmdma_setup,
157 .bmdma_start = ata_bmdma_start,
158 .bmdma_stop = ata_bmdma_stop,
159 .bmdma_status = ata_bmdma_status,
160 .qc_prep = ata_qc_prep,
161 .qc_issue = ata_qc_issue_prot,
162 .data_xfer = ata_mmio_data_xfer,
163
164 .freeze = ata_bmdma_freeze,
165 .thaw = ata_bmdma_thaw,
166 .error_handler = pdc2027x_error_handler,
167 .post_internal_cmd = ata_bmdma_post_internal_cmd,
168
169 .irq_handler = ata_interrupt,
170 .irq_clear = ata_bmdma_irq_clear,
171
172 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_pci_host_stop,
175};
176
177static struct ata_port_operations pdc2027x_pata133_ops = {
178 .port_disable = ata_port_disable,
179 .set_piomode = pdc2027x_set_piomode,
180 .set_dmamode = pdc2027x_set_dmamode,
181 .post_set_mode = pdc2027x_post_set_mode,
182
183 .tf_load = ata_tf_load,
184 .tf_read = ata_tf_read,
185 .check_status = ata_check_status,
186 .exec_command = ata_exec_command,
187 .dev_select = ata_std_dev_select,
188
189 .check_atapi_dma = pdc2027x_check_atapi_dma,
190 .bmdma_setup = ata_bmdma_setup,
191 .bmdma_start = ata_bmdma_start,
192 .bmdma_stop = ata_bmdma_stop,
193 .bmdma_status = ata_bmdma_status,
194 .qc_prep = ata_qc_prep,
195 .qc_issue = ata_qc_issue_prot,
196 .data_xfer = ata_mmio_data_xfer,
197
198 .freeze = ata_bmdma_freeze,
199 .thaw = ata_bmdma_thaw,
200 .error_handler = pdc2027x_error_handler,
201 .post_internal_cmd = ata_bmdma_post_internal_cmd,
202
203 .irq_handler = ata_interrupt,
204 .irq_clear = ata_bmdma_irq_clear,
205
206 .port_start = ata_port_start,
207 .port_stop = ata_port_stop,
208 .host_stop = ata_pci_host_stop,
209};
210
211static struct ata_port_info pdc2027x_port_info[] = {
212 /* PDC_UDMA_100 */
213 {
214 .sht = &pdc2027x_sht,
215 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
216 ATA_FLAG_MMIO,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = ATA_UDMA5, /* udma0-5 */
220 .port_ops = &pdc2027x_pata100_ops,
221 },
222 /* PDC_UDMA_133 */
223 {
224 .sht = &pdc2027x_sht,
225 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
226 ATA_FLAG_MMIO,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = ATA_UDMA6, /* udma0-6 */
230 .port_ops = &pdc2027x_pata133_ops,
231 },
232};
233
234MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
235MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
236MODULE_LICENSE("GPL");
237MODULE_VERSION(DRV_VERSION);
238MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
239
240/**
241 * port_mmio - Get the MMIO address of PDC2027x extended registers
242 * @ap: Port
243 * @offset: offset from mmio base
244 */
245static inline void* port_mmio(struct ata_port *ap, unsigned int offset)
246{
247 return ap->host->mmio_base + ap->port_no * 0x100 + offset;
248}
249
250/**
251 * dev_mmio - Get the per-device MMIO address of PDC2027x extended registers
252 * @ap: Port
253 * @adev: device
254 * @offset: offset from mmio base
255 */
256static inline void* dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
257{
258 u8 adj = (adev->devno) ? 0x08 : 0x00;
259 return port_mmio(ap, offset) + adj;
260}
261
262/**
263 * pdc2027x_pata_cbl_detect - Probe host controller cable detect info
264 * @ap: Port for which cable detect info is desired
265 *
266 * Read 80c cable indicator from Promise extended register.
267 * This register is latched when the system is reset.
268 *
269 * LOCKING:
270 * None (inherited from caller).
271 */
272static void pdc2027x_cbl_detect(struct ata_port *ap)
273{
274 u32 cgcr;
275
276 /* check cable detect results */
277 cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL));
278 if (cgcr & (1 << 26))
279 goto cbl40;
280
281 PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
282
283 ap->cbl = ATA_CBL_PATA80;
284 return;
285
286cbl40:
287 printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
288 ap->cbl = ATA_CBL_PATA40;
289 ap->udma_mask &= ATA_UDMA_MASK_40C;
290}
291
292/**
293 * pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
294 * @ap: Port to check
295 */
296static inline int pdc2027x_port_enabled(struct ata_port *ap)
297{
298 return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
299}
300
301/**
302 * pdc2027x_prereset - prereset for PATA host controller
303 * @ap: Target port
304 *
305 * Probe init, including cable detection.
306 *
307 * LOCKING:
308 * None (inherited from caller).
309 */
310
311static int pdc2027x_prereset(struct ata_port *ap)
312{
313 /* Check whether port enabled */
314 if (!pdc2027x_port_enabled(ap)) {
315 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
316 return 0;
317 }
318 pdc2027x_cbl_detect(ap);
319 return ata_std_prereset(ap);
320}
321
322/**
323 * pdc2027x_error_handler - Perform reset on PATA port and classify
324 * @ap: Port to reset
325 *
326 * Reset PATA phy and classify attached devices.
327 *
328 * LOCKING:
329 * None (inherited from caller).
330 */
331
332static void pdc2027x_error_handler(struct ata_port *ap)
333{
334 ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
335}
336
337/**
338 * pdc2027x_set_piomode - Initialize host controller PATA PIO timings
339 * @ap: Port to configure
340 * @adev: Device to configure; the PIO mode (0 - 4) is taken
341 * from @adev->pio_mode
342 *
343 * Set PIO mode for device.
344 *
345 * LOCKING:
346 * None (inherited from caller).
347 */
348
349static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
350{
351 unsigned int pio = adev->pio_mode - XFER_PIO_0;
352 u32 ctcr0, ctcr1;
353
354 PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
355
356 /* Sanity check */
357 if (pio > 4) {
358 printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
359 return;
360
361 }
362
363 /* Set the PIO timing registers using value table for 133MHz */
364 PDPRINTK("Set pio regs... \n");
365
366 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
367 ctcr0 &= 0xffff0000;
368 ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
369 (pdc2027x_pio_timing_tbl[pio].value1 << 8);
370 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
371
372 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
373 ctcr1 &= 0x00ffffff;
374 ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
375 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
376
377 PDPRINTK("Set pio regs done\n");
378
379 PDPRINTK("Set to pio mode[%u] \n", pio);
380}
381
382/**
383 * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
384 * @ap: Port to configure
385 * @adev: Device to configure; the DMA mode (XFER_MW_DMA_0 to
386 * XFER_UDMA_6) is taken from @adev->dma_mode
387 *
388 * Set UDMA mode for device.
389 *
390 * LOCKING:
391 * None (inherited from caller).
392 */
393static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
394{
395 unsigned int dma_mode = adev->dma_mode;
396 u32 ctcr0, ctcr1;
397
398 if ((dma_mode >= XFER_UDMA_0) &&
399 (dma_mode <= XFER_UDMA_6)) {
400 /* Set the UDMA timing registers with value table for 133MHz */
401 unsigned int udma_mode = dma_mode & 0x07;
402
403 if (dma_mode == XFER_UDMA_2) {
404 /*
405 * Turn off tHOLD.
406 * If tHOLD is '1', the hardware will add half clock for data hold time.
407 * This code segment seems to have no effect: tHOLD will be overwritten below.
408 */
409 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
410 writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
411 }
412
413 PDPRINTK("Set udma regs... \n");
414
415 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
416 ctcr1 &= 0xff000000;
417 ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
418 (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
419 (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
420 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
421
422 PDPRINTK("Set udma regs done\n");
423
424 PDPRINTK("Set to udma mode[%u] \n", udma_mode);
425
426 } else if ((dma_mode >= XFER_MW_DMA_0) &&
427 (dma_mode <= XFER_MW_DMA_2)) {
428 /* Set the MDMA timing registers with value table for 133MHz */
429 unsigned int mdma_mode = dma_mode & 0x07;
430
431 PDPRINTK("Set mdma regs... \n");
432 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
433
434 ctcr0 &= 0x0000ffff;
435 ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
436 (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
437
438 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
439 PDPRINTK("Set mdma regs done\n");
440
441 PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
442 } else {
443 printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
444 }
445}
446
447/**
448 * pdc2027x_post_set_mode - Set the timing registers back to correct values.
449 * @ap: Port to configure
450 *
451 * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
452 * automatically. The values set by the hardware might be incorrect when the PLL runs at 133MHz.
453 * This function overwrites any possibly incorrect values set by the hardware with correct ones.
454 */
455static void pdc2027x_post_set_mode(struct ata_port *ap)
456{
457 int i;
458
459 for (i = 0; i < ATA_MAX_DEVICES; i++) {
460 struct ata_device *dev = &ap->device[i];
461
462 if (ata_dev_enabled(dev)) {
463
464 pdc2027x_set_piomode(ap, dev);
465
466 /*
467 * Enable prefetch if the device supports PIO only.
468 */
469 if (dev->xfer_shift == ATA_SHIFT_PIO) {
470 u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1));
471 ctcr1 |= (1 << 25);
472 writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
473
474 PDPRINTK("Turn on prefetch\n");
475 } else {
476 pdc2027x_set_dmamode(ap, dev);
477 }
478 }
479 }
480}
481
482/**
483 * pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
484 * @qc: Metadata associated with taskfile to check
485 *
486 * LOCKING:
487 * None (inherited from caller).
488 *
489 * RETURNS: 0 when ATAPI DMA can be used
490 * 1 otherwise
491 */
492static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
493{
494 struct scsi_cmnd *cmd = qc->scsicmd;
495 u8 *scsicmd = cmd->cmnd;
496 int rc = 1; /* atapi dma off by default */
497
498 /*
499 * This workaround is from Promise's GPL driver.
500 * If ATAPI DMA is used for commands not in the
501 * following white list, say MODE_SENSE and REQUEST_SENSE,
502 * pdc2027x might hit the irq lost problem.
503 */
504 switch (scsicmd[0]) {
505 case READ_10:
506 case WRITE_10:
507 case READ_12:
508 case WRITE_12:
509 case READ_6:
510 case WRITE_6:
511 case 0xad: /* READ_DVD_STRUCTURE */
512 case 0xbe: /* READ_CD */
513 /* ATAPI DMA is ok */
514 rc = 0;
515 break;
516 default:
517 ;
518 }
519
520 return rc;
521}
522
523/**
524 * pdc_read_counter - Read the 30-bit counter
525 * @probe_ent: for the port address
526 */
527
528static long pdc_read_counter(struct ata_probe_ent *probe_ent)
529{
530 long counter;
531 int retry = 1;
532 u32 bccrl, bccrh, bccrlv, bccrhv;
533
534retry:
535 bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
536 bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
537 rmb();
538
539 /* Read the counter values again for verification */
540 bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
541 bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
542 rmb();
543
544 counter = (bccrh << 15) | bccrl;
545
546 PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
547 PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
548
549 /*
550 * The 30-bit decreasing counter is read in 2 pieces.
551 * An incorrect value may be read when both bccrh and bccrl are changing.
552 * Ex. When 7900 decreases to 78FF, the wrong value 7800 might be read.
553 */
554 if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
555 retry--;
556 PDPRINTK("rereading counter\n");
557 goto retry;
558 }
559
560 return counter;
561}
562
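(Editorial aside, not part of this patch: the two 16-bit reads above are combined into one count by shifting the high half up by 15 bits and OR-ing in the low half. A quick illustration:)

	/* Illustrative only */
	u32 bccrh = 0x0003, bccrl = 0x1234;
	long counter = (bccrh << 15) | bccrl;	/* 0x19234 = 102964 */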
563/**
564 * pdc_adjust_pll - Program the PLL to produce the required output clock.
565 *
566 * @probe_ent: For the port address
567 * @pll_clock: The PLL input clock in Hz
568 * @board_idx: Selects the required output (100MHz or 133MHz)
569 */
570static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx)
571{
572
573 u16 pll_ctl;
574 long pll_clock_khz = pll_clock / 1000;
575 long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ;
576 long ratio = pout_required / pll_clock_khz;
577 int F, R;
578
579 /* Sanity check */
580 if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
581 printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
582 return;
583 }
584
585#ifdef PDC_DEBUG
586 PDPRINTK("pout_required is %ld\n", pout_required);
587
588 /* Show the current clock value of PLL control register
589 * (maybe already configured by the firmware)
590 */
591 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
592
593 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
594#endif
595
596 /*
597 * Calculate the ratio of F, R and OD
598 * POUT = (F + 2) / (( R + 2) * NO)
599 */
600 if (ratio < 8600L) { /* 8.6x */
601 /* Using NO = 0x01, R = 0x0D */
602 R = 0x0d;
603 } else if (ratio < 12900L) { /* 12.9x */
604 /* Using NO = 0x01, R = 0x08 */
605 R = 0x08;
606 } else if (ratio < 16100L) { /* 16.1x */
607 /* Using NO = 0x01, R = 0x06 */
608 R = 0x06;
609 } else if (ratio < 64000L) { /* 64x */
610 R = 0x00;
611 } else {
612 /* Invalid ratio */
613 printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
614 return;
615 }
616
617 F = (ratio * (R+2)) / 1000 - 2;
618
619 if (unlikely(F < 0 || F > 127)) {
620 /* Invalid F */
621 printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
622 return;
623 }
624
625 PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
626
627 pll_ctl = (R << 8) | F;
628
629 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
630
631 writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL);
632 readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */
633
634 /* Wait the PLL circuit to be stable */
635 mdelay(30);
636
637#ifdef PDC_DEBUG
638 /*
639 * Show the current clock value of PLL control register
640 * (maybe configured by the firmware)
641 */
642 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
643
644 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
645#endif
646
647 return;
648}
649
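(Editorial aside, not part of this patch: a worked example of the F/R selection above, assuming the 16.949MHz input clock mentioned below for a 33MHz PCI bus and a 133MHz target.)

	/* Illustrative only: pll_clock = 16949000 Hz, board_idx = PDC_UDMA_133
	 *   ratio   = 133333333 / 16949 = 7866          (about 7.87x, < 8600)
	 *   R       = 0x0d, F = (7866 * 15) / 1000 - 2 = 115
	 *   pll_ctl = (0x0d << 8) | 115 = 0x0d73
	 *   POUT    = 16.949MHz * (115 + 2) / ((13 + 2) * 1) = ~132.2MHz
	 */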
650/**
651 * pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
652 * @probe_ent: for the port address
653 * Ex. 16949000 on 33MHz PCI bus for pdc20275.
654 * Half of the PCI clock.
655 */
656static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
657{
658 u32 scr;
659 long start_count, end_count;
660 long pll_clock;
661
662 /* Read current counter value */
663 start_count = pdc_read_counter(probe_ent);
664
665 /* Start the test mode */
666 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
667 PDPRINTK("scr[%X]\n", scr);
668 writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
669 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
670
671 /* Let the counter run for 100 ms. */
672 mdelay(100);
673
674 /* Read the counter values again */
675 end_count = pdc_read_counter(probe_ent);
676
677 /* Stop the test mode */
678 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
679 PDPRINTK("scr[%X]\n", scr);
680 writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
681 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
682
683 /* calculate the input clock in Hz */
684 pll_clock = (start_count - end_count) * 10;
685
686 PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
687 PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
688
689 return pll_clock;
690}
691
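(Editorial aside, not part of this patch: the measurement above relies on the counter decrementing at the PLL input rate, so the drop over the 100ms test window, multiplied by 10, gives the rate in Hz.)

	/* Illustrative only: a drop of 1694900 counts in 100ms gives
	 * pll_clock = 1694900 * 10 = 16949000 Hz, i.e. half of the
	 * (nominally 33MHz) PCI clock.
	 */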
692/**
693 * pdc_hardware_init - Initialize the hardware.
694 * @pdev: instance of pci_dev found
695 * @pe: for the port address
696 * @board_idx: board type (PDC_UDMA_100 or PDC_UDMA_133)
697 */
698static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, unsigned int board_idx)
699{
700 long pll_clock;
701
702 /*
703 * Detect PLL input clock rate.
704 * On some systems the PCI bus runs at a non-standard clock rate,
705 * e.g. 25MHz or 40MHz, so we have to adjust the cycle_time.
706 * The pdc20275 controller employs a PLL circuit to help correct the timing register settings.
707 */
708 pll_clock = pdc_detect_pll_input_clock(pe);
709
710 if (pll_clock < 0) /* counter overflow? Try again. */
711 pll_clock = pdc_detect_pll_input_clock(pe);
712
713 dev_printk(KERN_INFO, &pdev->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
714
715 /* Adjust PLL control register */
716 pdc_adjust_pll(pe, pll_clock, board_idx);
717
718 return 0;
719}
720
721/**
722 * pdc_ata_setup_port - setup the mmio address
723 * @port: ata ioports to setup
724 * @base: base address
725 */
726static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
727{
728 port->cmd_addr =
729 port->data_addr = base;
730 port->feature_addr =
731 port->error_addr = base + 0x05;
732 port->nsect_addr = base + 0x0a;
733 port->lbal_addr = base + 0x0f;
734 port->lbam_addr = base + 0x10;
735 port->lbah_addr = base + 0x15;
736 port->device_addr = base + 0x1a;
737 port->command_addr =
738 port->status_addr = base + 0x1f;
739 port->altstatus_addr =
740 port->ctl_addr = base + 0x81a;
741}
742
743/**
744 * pdc2027x_init_one - PCI probe function
745 * Called when an instance of PCI adapter is inserted.
746 * This function checks whether the hardware is supported,
747 * initializes the hardware and registers an instance of ata_host with
748 * libata by filling in a struct ata_probe_ent and calling ata_device_add().
749 * (implements struct pci_driver.probe() )
750 *
751 * @pdev: instance of pci_dev found
752 * @ent: matching entry in the id_tbl[]
753 */
754static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
755{
756 static int printed_version;
757 unsigned int board_idx = (unsigned int) ent->driver_data;
758
759 struct ata_probe_ent *probe_ent = NULL;
760 unsigned long base;
761 void *mmio_base;
762 int rc;
763
764 if (!printed_version++)
765 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
766
767 rc = pci_enable_device(pdev);
768 if (rc)
769 return rc;
770
771 rc = pci_request_regions(pdev, DRV_NAME);
772 if (rc)
773 goto err_out;
774
775 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
776 if (rc)
777 goto err_out_regions;
778
779 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
780 if (rc)
781 goto err_out_regions;
782
783 /* Prepare the probe entry */
784 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
785 if (probe_ent == NULL) {
786 rc = -ENOMEM;
787 goto err_out_regions;
788 }
789
790 probe_ent->dev = pci_dev_to_dev(pdev);
791 INIT_LIST_HEAD(&probe_ent->node);
792
793 mmio_base = pci_iomap(pdev, 5, 0);
794 if (!mmio_base) {
795 rc = -ENOMEM;
796 goto err_out_free_ent;
797 }
798
799 base = (unsigned long) mmio_base;
800
801 probe_ent->sht = pdc2027x_port_info[board_idx].sht;
802 probe_ent->port_flags = pdc2027x_port_info[board_idx].flags;
803 probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask;
804 probe_ent->mwdma_mask = pdc2027x_port_info[board_idx].mwdma_mask;
805 probe_ent->udma_mask = pdc2027x_port_info[board_idx].udma_mask;
806 probe_ent->port_ops = pdc2027x_port_info[board_idx].port_ops;
807
808 probe_ent->irq = pdev->irq;
809 probe_ent->irq_flags = SA_SHIRQ;
810 probe_ent->mmio_base = mmio_base;
811
812 pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0);
813 probe_ent->port[0].bmdma_addr = base + 0x1000;
814 pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0);
815 probe_ent->port[1].bmdma_addr = base + 0x1008;
816
817 probe_ent->n_ports = 2;
818
819 pci_set_master(pdev);
820 //pci_enable_intx(pdev);
821
822 /* initialize adapter */
823 if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0)
824 goto err_out_free_ent;
825
826 ata_device_add(probe_ent);
827 kfree(probe_ent);
828
829 return 0;
830
831err_out_free_ent:
832 kfree(probe_ent);
833err_out_regions:
834 pci_release_regions(pdev);
835err_out:
836 pci_disable_device(pdev);
837 return rc;
838}
839
840/**
841 * pdc2027x_remove_one - Called to remove a single instance of the
842 * adapter.
843 *
844 * @pdev: The PCI device to remove.
845 * FIXME: module load/unload not working yet
846 */
847static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
848{
849 ata_pci_remove_one(pdev);
850}
851
852/**
853 * pdc2027x_init - Called after this module is loaded into the kernel.
854 */
855static int __init pdc2027x_init(void)
856{
857 return pci_module_init(&pdc2027x_pci_driver);
858}
859
860/**
861 * pdc2027x_exit - Called before this module unloaded from the kernel
862 */
863static void __exit pdc2027x_exit(void)
864{
865 pci_unregister_driver(&pdc2027x_pci_driver);
866}
867
868module_init(pdc2027x_init);
869module_exit(pdc2027x_exit);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
new file mode 100644
index 000000000000..48f43432764e
--- /dev/null
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -0,0 +1,423 @@
1/*
2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
7 *
8 * First cut with LBA48/ATAPI
9 *
10 * TODO:
11 * Channel interlock/reset on both required ?
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/init.h>
18#include <linux/blkdev.h>
19#include <linux/delay.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22
23#define DRV_NAME "pata_pdc202xx_old"
24#define DRV_VERSION "0.2.1"
25
26/**
27 * pdc2024x_pre_reset - probe begin
28 * @ap: ATA port
29 *
30 * Set up cable type and use generic probe init
31 */
32
33static int pdc2024x_pre_reset(struct ata_port *ap)
34{
35 ap->cbl = ATA_CBL_PATA40;
36 return ata_std_prereset(ap);
37}
38
39
40static void pdc2024x_error_handler(struct ata_port *ap)
41{
42 ata_bmdma_drive_eh(ap, pdc2024x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
43}
44
45
46static int pdc2026x_pre_reset(struct ata_port *ap)
47{
48 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
49 u16 cis;
50
51 pci_read_config_word(pdev, 0x50, &cis);
52 if (cis & (1 << (10 + ap->port_no)))
53 ap->cbl = ATA_CBL_PATA80;
54 else
55 ap->cbl = ATA_CBL_PATA40;
56
57 return ata_std_prereset(ap);
58}
59
60static void pdc2026x_error_handler(struct ata_port *ap)
61{
62 ata_bmdma_drive_eh(ap, pdc2026x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
63}
64
65/**
66 * pdc_configure_piomode - set chip PIO timing
67 * @ap: ATA interface
68 * @adev: ATA device
69 * @pio: PIO mode
70 *
71 * Called to do the PIO mode setup. Our timing registers are shared
72 * so a configure_dmamode call will undo any work we do here and vice
73 * versa
74 */
75
76static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
77{
78 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
80 static u16 pio_timing[5] = {
81 0x0913, 0x050C , 0x0308, 0x0206, 0x0104
82 };
83 u8 r_ap, r_bp;
84
85 pci_read_config_byte(pdev, port, &r_ap);
86 pci_read_config_byte(pdev, port + 1, &r_bp);
87 r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
88 r_bp &= ~0x07;
89 r_ap |= (pio_timing[pio] >> 8);
90 r_bp |= (pio_timing[pio] & 0xFF);
91
92 if (ata_pio_need_iordy(adev))
93 r_ap |= 0x20; /* IORDY enable */
94 if (adev->class == ATA_DEV_ATA)
95 r_ap |= 0x10; /* FIFO enable */
96 pci_write_config_byte(pdev, port, r_ap);
97 pci_write_config_byte(pdev, port + 1, r_bp);
98}
99
100/**
101 * pdc_set_piomode - set initial PIO mode data
102 * @ap: ATA interface
103 * @adev: ATA device
104 *
105 * Called to do the PIO mode setup. Our timing registers are shared
106 * but we want to set the PIO timing by default.
107 */
108
109static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
110{
111 pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
112}
113
114/**
115 * pdc_set_dmamode - set DMA mode in chip
116 * @ap: ATA interface
117 * @adev: ATA device
118 *
119 * Load DMA cycle times into the chip ready for a DMA transfer
120 * to occur.
121 */
122
123static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
124{
125 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
127 static u8 udma_timing[6][2] = {
128 { 0x60, 0x03 }, /* 33 Mhz Clock */
129 { 0x40, 0x02 },
130 { 0x20, 0x01 },
131 { 0x40, 0x02 }, /* 66 Mhz Clock */
132 { 0x20, 0x01 },
133 { 0x20, 0x01 }
134 };
135 u8 r_bp, r_cp;
136
137 pci_read_config_byte(pdev, port + 1, &r_bp);
138 pci_read_config_byte(pdev, port + 2, &r_cp);
139
140 r_bp &= ~0xF0;
141 r_cp &= ~0x0F;
142
143 if (adev->dma_mode >= XFER_UDMA_0) {
144 int speed = adev->dma_mode - XFER_UDMA_0;
145 r_bp |= udma_timing[speed][0];
146 r_cp |= udma_timing[speed][1];
147
148 } else {
149 int speed = adev->dma_mode - XFER_MW_DMA_0;
150 r_bp |= 0x60;
151 r_cp |= (5 - speed);
152 }
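	/* Worked example, derived from the expressions above: MWDMA2 takes
	   the else branch with speed = 2, so r_cp gains the value 3 and
	   r_bp the 0x60 MWDMA marker. */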
153 pci_write_config_byte(pdev, port + 1, r_bp);
154 pci_write_config_byte(pdev, port + 2, r_cp);
155
156}
157
158/**
159 * pdc2026x_bmdma_start - DMA engine begin
160 * @qc: ATA command
161 *
162 * In UDMA3 or higher we have to clock switch for the duration of the
163 * DMA transfer sequence.
164 */
165
166static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
167{
168 struct ata_port *ap = qc->ap;
169 struct ata_device *adev = qc->dev;
170 struct ata_taskfile *tf = &qc->tf;
171 int sel66 = ap->port_no ? 0x08: 0x02;
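	/* 0x02 selects the 66MHz clock for port 0 and 0x08 for port 1 in the
	   clock register at master + 0x11 used below (derived from how sel66
	   is applied in this function). */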
172
173 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
174 unsigned long clock = master + 0x11;
175 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
176
177 u32 len;
178
179 /* Check we keep host level locking here */
180 if (adev->dma_mode >= XFER_UDMA_2)
181 outb(inb(clock) | sel66, clock);
182 else
183 outb(inb(clock) & ~sel66, clock);
184
185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
186 and move to qc_issue ? */
187 pdc_set_dmamode(ap, qc->dev);
188
189 /* Cases the state machine will not complete correctly without help */
190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
191 {
192 if (tf->flags & ATA_TFLAG_LBA48)
193 len = qc->nsect * 512;
194 else
195 len = qc->nbytes;
196
197 if (tf->flags & ATA_TFLAG_WRITE)
198 len |= 0x06000000;
199 else
200 len |= 0x05000000;
201
202 outl(len, atapi_reg);
203 }
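	/* Worked example, derived from the code above: a 16 sector LBA48
	   write loads len = 16 * 512 = 0x2000, OR'd with the 0x06000000
	   write marker to give 0x06002000 in the per-port length register
	   at master + 0x20 + 4 * port_no. */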
204
205 /* Activate DMA */
206 ata_bmdma_start(qc);
207}
208
209/**
 210 *	pdc2026x_bmdma_stop		-	DMA engine stop
211 * @qc: ATA command
212 *
213 * After a DMA completes we need to put the clock back to 33MHz for
214 * PIO timings.
215 */
216
217static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
218{
219 struct ata_port *ap = qc->ap;
220 struct ata_device *adev = qc->dev;
221 struct ata_taskfile *tf = &qc->tf;
222
223 int sel66 = ap->port_no ? 0x08: 0x02;
224 /* The clock bits are in the same register for both channels */
225 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
226 unsigned long clock = master + 0x11;
227 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
228
229 /* Cases the state machine will not complete correctly */
230 if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
231 outl(0, atapi_reg);
232 outb(inb(clock) & ~sel66, clock);
233 }
234 /* Check we keep host level locking here */
235 /* Flip back to 33Mhz for PIO */
236 if (adev->dma_mode >= XFER_UDMA_2)
237 outb(inb(clock) & ~sel66, clock);
238
239 ata_bmdma_stop(qc);
240}
241
242/**
243 * pdc2026x_dev_config - device setup hook
244 * @ap: ATA port
245 * @adev: newly found device
246 *
 247 *	Perform chip specific early setup. We need to limit transfers to
 248 *	256 sectors (an 8-bit sector count) to avoid upsetting the state
 249 *	engine on the 2026x cards.
250 */
251
252static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
253{
254 adev->max_sectors = 256;
255}
256
257static struct scsi_host_template pdc_sht = {
258 .module = THIS_MODULE,
259 .name = DRV_NAME,
260 .ioctl = ata_scsi_ioctl,
261 .queuecommand = ata_scsi_queuecmd,
262 .can_queue = ATA_DEF_QUEUE,
263 .this_id = ATA_SHT_THIS_ID,
264 .sg_tablesize = LIBATA_MAX_PRD,
265 .max_sectors = ATA_MAX_SECTORS,
266 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
267 .emulated = ATA_SHT_EMULATED,
268 .use_clustering = ATA_SHT_USE_CLUSTERING,
269 .proc_name = DRV_NAME,
270 .dma_boundary = ATA_DMA_BOUNDARY,
271 .slave_configure = ata_scsi_slave_config,
272 .bios_param = ata_std_bios_param,
273};
274
275static struct ata_port_operations pdc2024x_port_ops = {
276 .port_disable = ata_port_disable,
277 .set_piomode = pdc_set_piomode,
278 .set_dmamode = pdc_set_dmamode,
279 .mode_filter = ata_pci_default_filter,
280 .tf_load = ata_tf_load,
281 .tf_read = ata_tf_read,
282 .check_status = ata_check_status,
283 .exec_command = ata_exec_command,
284 .dev_select = ata_std_dev_select,
285
286 .freeze = ata_bmdma_freeze,
287 .thaw = ata_bmdma_thaw,
288 .error_handler = pdc2024x_error_handler,
289 .post_internal_cmd = ata_bmdma_post_internal_cmd,
290
291 .bmdma_setup = ata_bmdma_setup,
292 .bmdma_start = ata_bmdma_start,
293 .bmdma_stop = ata_bmdma_stop,
294 .bmdma_status = ata_bmdma_status,
295
296 .qc_prep = ata_qc_prep,
297 .qc_issue = ata_qc_issue_prot,
298 .data_xfer = ata_pio_data_xfer,
299
300 .irq_handler = ata_interrupt,
301 .irq_clear = ata_bmdma_irq_clear,
302
303 .port_start = ata_port_start,
304 .port_stop = ata_port_stop,
305 .host_stop = ata_host_stop
306};
307
308static struct ata_port_operations pdc2026x_port_ops = {
309 .port_disable = ata_port_disable,
310 .set_piomode = pdc_set_piomode,
311 .set_dmamode = pdc_set_dmamode,
312 .mode_filter = ata_pci_default_filter,
313 .tf_load = ata_tf_load,
314 .tf_read = ata_tf_read,
315 .check_status = ata_check_status,
316 .exec_command = ata_exec_command,
317 .dev_select = ata_std_dev_select,
318 .dev_config = pdc2026x_dev_config,
319
320 .freeze = ata_bmdma_freeze,
321 .thaw = ata_bmdma_thaw,
322 .error_handler = pdc2026x_error_handler,
323 .post_internal_cmd = ata_bmdma_post_internal_cmd,
324
325 .bmdma_setup = ata_bmdma_setup,
326 .bmdma_start = pdc2026x_bmdma_start,
327 .bmdma_stop = pdc2026x_bmdma_stop,
328 .bmdma_status = ata_bmdma_status,
329
330 .qc_prep = ata_qc_prep,
331 .qc_issue = ata_qc_issue_prot,
332 .data_xfer = ata_pio_data_xfer,
333
334 .irq_handler = ata_interrupt,
335 .irq_clear = ata_bmdma_irq_clear,
336
337 .port_start = ata_port_start,
338 .port_stop = ata_port_stop,
339 .host_stop = ata_host_stop
340};
341
342static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
343{
344 static struct ata_port_info info[3] = {
345 {
346 .sht = &pdc_sht,
347 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
348 .pio_mask = 0x1f,
349 .mwdma_mask = 0x07,
350 .udma_mask = ATA_UDMA2,
351 .port_ops = &pdc2024x_port_ops
352 },
353 {
354 .sht = &pdc_sht,
355 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
356 .pio_mask = 0x1f,
357 .mwdma_mask = 0x07,
358 .udma_mask = ATA_UDMA4,
359 .port_ops = &pdc2026x_port_ops
360 },
361 {
362 .sht = &pdc_sht,
363 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
364 .pio_mask = 0x1f,
365 .mwdma_mask = 0x07,
366 .udma_mask = ATA_UDMA5,
367 .port_ops = &pdc2026x_port_ops
368 }
369
370 };
371 static struct ata_port_info *port_info[2];
372
373 port_info[0] = port_info[1] = &info[id->driver_data];
374
375 if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
376 struct pci_dev *bridge = dev->bus->self;
377 /* Don't grab anything behind a Promise I2O RAID */
378 if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
379 if( bridge->device == PCI_DEVICE_ID_INTEL_I960)
380 return -ENODEV;
381 if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
382 return -ENODEV;
383 }
384 }
385 return ata_pci_init_one(dev, port_info, 2);
386}
387
388static struct pci_device_id pdc[] = {
389 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0},
390 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1},
391 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1},
392 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2},
393 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2},
394 { 0, },
395};
396
397static struct pci_driver pdc_pci_driver = {
398 .name = DRV_NAME,
399 .id_table = pdc,
400 .probe = pdc_init_one,
401 .remove = ata_pci_remove_one
402};
403
404static int __init pdc_init(void)
405{
406 return pci_register_driver(&pdc_pci_driver);
407}
408
409
410static void __exit pdc_exit(void)
411{
412 pci_unregister_driver(&pdc_pci_driver);
413}
414
415
416MODULE_AUTHOR("Alan Cox");
417MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
418MODULE_LICENSE("GPL");
419MODULE_DEVICE_TABLE(pci, pdc);
420MODULE_VERSION(DRV_VERSION);
421
422module_init(pdc_init);
423module_exit(pdc_exit);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
new file mode 100644
index 000000000000..35cfdf0ac3f0
--- /dev/null
+++ b/drivers/ata/pata_qdi.c
@@ -0,0 +1,403 @@
1/*
2 * pata_qdi.c - QDI VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com>
4 *
 5 * This driver mostly exists as a proof of concept for non-PCI devices under
 6 * libata. While the QDI6580 was 'neat' in 1993, it is no longer terribly
7 * useful.
8 *
9 * Tuning code written from the documentation at
10 * http://www.ryston.cz/petr/vlb/qd6500.html
11 * http://www.ryston.cz/petr/vlb/qd6580.html
12 *
13 * Probe code based on drivers/ide/legacy/qd65xx.c
14 * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
15 * Samuel Thibault <samuel.thibault@fnac.net>
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/init.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <scsi/scsi_host.h>
25#include <linux/libata.h>
26#include <linux/platform_device.h>
27
28#define DRV_NAME "pata_qdi"
29#define DRV_VERSION "0.2.4"
30
 31#define NR_HOST 4	/* Two dual-channel 6580s */
32
33struct qdi_data {
34 unsigned long timing;
35 u8 clock[2];
36 u8 last;
37 int fast;
38 struct platform_device *platform_dev;
39
40};
41
42static struct ata_host *qdi_host[NR_HOST];
43static struct qdi_data qdi_data[NR_HOST];
44static int nr_qdi_host;
45
46#ifdef MODULE
47static int probe_qdi = 1;
48#else
49static int probe_qdi;
50#endif
51
52static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
53{
54 struct ata_timing t;
55 struct qdi_data *qdi = ap->host->private_data;
56 int active, recovery;
57 u8 timing;
58
59 /* Get the timing data in cycles */
60 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
61
62 if (qdi->fast) {
63 active = 8 - FIT(t.active, 1, 8);
64 recovery = 18 - FIT(t.recover, 3, 18);
65 } else {
66 active = 9 - FIT(t.active, 2, 9);
67 recovery = 15 - FIT(t.recover, 0, 15);
68 }
69 timing = (recovery << 4) | active | 0x08;
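	/* The timing byte packs recovery clocks into bits 4-7 and active
	   clocks into bits 0-2 with bit 3 always set; e.g. active = 3 and
	   recovery = 5 give (5 << 4) | 3 | 0x08 = 0x5B (derived from the
	   expression above, not from chip documentation). */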
70
71 qdi->clock[adev->devno] = timing;
72
73 outb(timing, qdi->timing);
74}
75
76static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
77{
78 struct ata_timing t;
79 struct qdi_data *qdi = ap->host->private_data;
80 int active, recovery;
81 u8 timing;
82
83 /* Get the timing data in cycles */
84 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
85
86 if (qdi->fast) {
87 active = 8 - FIT(t.active, 1, 8);
88 recovery = 18 - FIT(t.recover, 3, 18);
89 } else {
90 active = 9 - FIT(t.active, 2, 9);
91 recovery = 15 - FIT(t.recover, 0, 15);
92 }
93 timing = (recovery << 4) | active | 0x08;
94
95 qdi->clock[adev->devno] = timing;
96
97 outb(timing, qdi->timing);
98
99 /* Clear the FIFO */
100 if (adev->class != ATA_DEV_ATA)
101 outb(0x5F, (qdi->timing & 0xFFF0) + 3);
102}
103
104/**
105 * qdi_qc_issue_prot - command issue
106 * @qc: command pending
107 *
108 * Called when the libata layer is about to issue a command. We wrap
109 * this interface so that we can load the correct ATA timings.
110 */
111
112static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
113{
114 struct ata_port *ap = qc->ap;
115 struct ata_device *adev = qc->dev;
116 struct qdi_data *qdi = ap->host->private_data;
117
118 if (qdi->clock[adev->devno] != qdi->last) {
119 if (adev->pio_mode) {
120 qdi->last = qdi->clock[adev->devno];
121 outb(qdi->clock[adev->devno], qdi->timing);
122 }
123 }
124 return ata_qc_issue_prot(qc);
125}
126
127static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
128{
129 struct ata_port *ap = adev->ap;
130 int slop = buflen & 3;
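	/* 32bit transfers are used when the device advertises DWORD I/O;
	   slop is any 1-3 trailing bytes that do not fill a whole long word
	   and are handled separately below. */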
131
132 if (ata_id_has_dword_io(adev->id)) {
133 if (write_data)
134 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
135 else
136 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
137
138 if (unlikely(slop)) {
139 u32 pad;
140 if (write_data) {
141 memcpy(&pad, buf + buflen - slop, slop);
142 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
143 } else {
 144				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
145 memcpy(buf + buflen - slop, &pad, slop);
146 }
147 }
148 } else
149 ata_pio_data_xfer(adev, buf, buflen, write_data);
150}
151
152static struct scsi_host_template qdi_sht = {
153 .module = THIS_MODULE,
154 .name = DRV_NAME,
155 .ioctl = ata_scsi_ioctl,
156 .queuecommand = ata_scsi_queuecmd,
157 .can_queue = ATA_DEF_QUEUE,
158 .this_id = ATA_SHT_THIS_ID,
159 .sg_tablesize = LIBATA_MAX_PRD,
160 .max_sectors = ATA_MAX_SECTORS,
161 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
162 .emulated = ATA_SHT_EMULATED,
163 .use_clustering = ATA_SHT_USE_CLUSTERING,
164 .proc_name = DRV_NAME,
165 .dma_boundary = ATA_DMA_BOUNDARY,
166 .slave_configure = ata_scsi_slave_config,
167 .bios_param = ata_std_bios_param,
168};
169
170static struct ata_port_operations qdi6500_port_ops = {
171 .port_disable = ata_port_disable,
172 .set_piomode = qdi6500_set_piomode,
173
174 .tf_load = ata_tf_load,
175 .tf_read = ata_tf_read,
176 .check_status = ata_check_status,
177 .exec_command = ata_exec_command,
178 .dev_select = ata_std_dev_select,
179
180 .freeze = ata_bmdma_freeze,
181 .thaw = ata_bmdma_thaw,
182 .error_handler = ata_bmdma_error_handler,
183 .post_internal_cmd = ata_bmdma_post_internal_cmd,
184
185 .qc_prep = ata_qc_prep,
186 .qc_issue = qdi_qc_issue_prot,
187 .eng_timeout = ata_eng_timeout,
188 .data_xfer = qdi_data_xfer,
189
190 .irq_handler = ata_interrupt,
191 .irq_clear = ata_bmdma_irq_clear,
192
193 .port_start = ata_port_start,
194 .port_stop = ata_port_stop,
195 .host_stop = ata_host_stop
196};
197
198static struct ata_port_operations qdi6580_port_ops = {
199 .port_disable = ata_port_disable,
200 .set_piomode = qdi6580_set_piomode,
201
202 .tf_load = ata_tf_load,
203 .tf_read = ata_tf_read,
204 .check_status = ata_check_status,
205 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select,
207
208 .freeze = ata_bmdma_freeze,
209 .thaw = ata_bmdma_thaw,
210 .error_handler = ata_bmdma_error_handler,
211 .post_internal_cmd = ata_bmdma_post_internal_cmd,
212
213 .qc_prep = ata_qc_prep,
214 .qc_issue = qdi_qc_issue_prot,
215 .eng_timeout = ata_eng_timeout,
216 .data_xfer = qdi_data_xfer,
217
218 .irq_handler = ata_interrupt,
219 .irq_clear = ata_bmdma_irq_clear,
220
221 .port_start = ata_port_start,
222 .port_stop = ata_port_stop,
223 .host_stop = ata_host_stop
224};
225
226/**
 227 *	qdi_init_one		-	attach a qdi interface
 228 *	@type: chip type to report (6500 or 6580)
 229 *	@io: I/O port start
 230 *	@irq: interrupt line
 231 *	@fast: True if on a > 33MHz VLB
 232 *
 233 *	Register an ISA bus IDE interface. Such interfaces are PIO only and
 234 *	we assume they do not support IRQ sharing.
235 */
236
237static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
238{
239 struct ata_probe_ent ae;
240 struct platform_device *pdev;
241 int ret;
242
243 unsigned long ctrl = io + 0x206;
244
245 /*
246 * Fill in a probe structure first of all
247 */
248
249 pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
250 if (pdev == NULL)
251 return -ENOMEM;
252
253 memset(&ae, 0, sizeof(struct ata_probe_ent));
254 INIT_LIST_HEAD(&ae.node);
255 ae.dev = &pdev->dev;
256
257 if (type == 6580) {
258 ae.port_ops = &qdi6580_port_ops;
259 ae.pio_mask = 0x1F;
260 } else {
261 ae.port_ops = &qdi6500_port_ops;
262 ae.pio_mask = 0x07; /* Actually PIO3 !IORDY is possible */
263 }
264
265 ae.sht = &qdi_sht;
266 ae.n_ports = 1;
267 ae.irq = irq;
268 ae.irq_flags = 0;
269 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
270 ae.port[0].cmd_addr = io;
271 ae.port[0].altstatus_addr = ctrl;
272 ae.port[0].ctl_addr = ctrl;
273 ata_std_ports(&ae.port[0]);
274
275 /*
276 * Hook in a private data structure per channel
277 */
278 ae.private_data = &qdi_data[nr_qdi_host];
279
280 qdi_data[nr_qdi_host].timing = port;
281 qdi_data[nr_qdi_host].fast = fast;
282 qdi_data[nr_qdi_host].platform_dev = pdev;
283
284 printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
285 ret = ata_device_add(&ae);
286 if (ret == 0) {
287 platform_device_unregister(pdev);
288 return -ENODEV;
289 }
290
291 qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
292 return 0;
293}
294
295/**
296 * qdi_init - attach qdi interfaces
297 *
298 * Attach qdi IDE interfaces by scanning the ports it may occupy.
299 */
300
301static __init int qdi_init(void)
302{
303 unsigned long flags;
304 static const unsigned long qd_port[2] = { 0x30, 0xB0 };
305 static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
306 static const int ide_irq[2] = { 14, 15 };
307
308 int ct = 0;
309 int i;
310
311 if (probe_qdi == 0)
312 return -ENODEV;
313
314 /*
315 * Check each possible QD65xx base address
316 */
317
318 for (i = 0; i < 2; i++) {
319 unsigned long port = qd_port[i];
320 u8 r, res;
321
322
323 if (request_region(port, 2, "pata_qdi")) {
324 /* Check for a card */
325 local_irq_save(flags);
326 r = inb_p(port);
327 outb_p(0x19, port);
328 res = inb_p(port);
329 outb_p(r, port);
330 local_irq_restore(flags);
331
 332			/* Fail - the value reads straight back, so this is not a QDI configuration register */
333 if (res == 0x19)
334 {
335 release_region(port, 2);
336 continue;
337 }
338
339 /* Passes the presence test */
340 r = inb_p(port + 1); /* Check port agrees with port set */
341 if ((r & 2) >> 1 != i) {
342 release_region(port, 2);
343 continue;
344 }
345
346 /* Check card type */
347 if ((r & 0xF0) == 0xC0) {
348 /* QD6500: single channel */
349 if (r & 8) {
350 /* Disabled ? */
351 release_region(port, 2);
352 continue;
353 }
354 ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
355 }
356 if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
357 /* QD6580: dual channel */
358 if (!request_region(port + 2 , 2, "pata_qdi"))
359 {
360 release_region(port, 2);
361 continue;
362 }
363 res = inb(port + 3);
364 if (res & 1) {
365 /* Single channel mode */
366 ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
367 } else {
368 /* Dual channel mode */
369 ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04);
370 ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04);
371 }
372 }
373 }
374 }
375 if (ct != 0)
376 return 0;
377 return -ENODEV;
378}
379
380static __exit void qdi_exit(void)
381{
382 int i;
383
384 for (i = 0; i < nr_qdi_host; i++) {
385 ata_host_remove(qdi_host[i]);
 386		/* Free the control resource. The dual channel 6580 claims its
 387		 * resources as a pair of 2-byte regions, so no special casing is needed
388 */
389 release_region(qdi_data[i].timing, 2);
390 platform_device_unregister(qdi_data[i].platform_dev);
391 }
392}
393
394MODULE_AUTHOR("Alan Cox");
395MODULE_DESCRIPTION("low-level driver for qdi ATA");
396MODULE_LICENSE("GPL");
397MODULE_VERSION(DRV_VERSION);
398
399module_init(qdi_init);
400module_exit(qdi_exit);
401
402module_param(probe_qdi, int, 0);
403
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
new file mode 100644
index 000000000000..277f8411b521
--- /dev/null
+++ b/drivers/ata/pata_radisys.c
@@ -0,0 +1,335 @@
1/*
 2 * pata_radisys.c - Radisys R82600 PATA controller
3 *
4 * (C) 2006 Red Hat <alan@redhat.com>
5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 *
8 * A PIIX relative, this device has a single ATA channel and no
9 * slave timings, SITRE or PPE. In that sense it is a close relative
10 * of the original PIIX. It does however support UDMA 33/66 per channel
11 * although no other modes/timings. Also lacking is 32bit I/O on the ATA
12 * port.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include <scsi/scsi_host.h>
23#include <linux/libata.h>
24#include <linux/ata.h>
25
26#define DRV_NAME "pata_radisys"
27#define DRV_VERSION "0.4.1"
28
29/**
 30 *	radisys_pre_reset	-	probe begin
31 * @ap: ATA port
32 *
33 * Set up cable type and use generic probe init
34 */
35
36static int radisys_pre_reset(struct ata_port *ap)
37{
38 ap->cbl = ATA_CBL_PATA80;
39 return ata_std_prereset(ap);
40}
41
42
43/**
 44 *	radisys_pata_error_handler - Probe specified port on PATA host controller
 45 *	@ap: Port to probe
47 *
48 * LOCKING:
49 * None (inherited from caller).
50 */
51
52static void radisys_pata_error_handler(struct ata_port *ap)
53{
54 ata_bmdma_drive_eh(ap, radisys_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
55}
56
57/**
58 * radisys_set_piomode - Initialize host controller PATA PIO timings
59 * @ap: Port whose timings we are configuring
 60 *	@adev: Device to program
61 *
62 * Set PIO mode for device, in host controller PCI config space.
63 *
64 * LOCKING:
65 * None (inherited from caller).
66 */
67
68static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
69{
70 unsigned int pio = adev->pio_mode - XFER_PIO_0;
71 struct pci_dev *dev = to_pci_dev(ap->host->dev);
72 u16 idetm_data;
73 int control = 0;
74
75 /*
 76	 * See Intel Document 298600-004 for the timing programming rules
77 * for PIIX/ICH. Note that the early PIIX does not have the slave
78 * timing port at 0x44. The Radisys is a relative of the PIIX
79 * but not the same so be careful.
80 */
81
82 static const /* ISP RTC */
83 u8 timings[][2] = { { 0, 0 }, /* Check me */
84 { 0, 0 },
85 { 1, 1 },
86 { 2, 2 },
87 { 3, 3 }, };
88
89 if (pio > 0)
90 control |= 1; /* TIME1 enable */
91 if (ata_pio_need_iordy(adev))
92 control |= 2; /* IE IORDY */
93
94 pci_read_config_word(dev, 0x40, &idetm_data);
95
96 /* Enable IE and TIME as appropriate. Clear the other
97 drive timing bits */
98 idetm_data &= 0xCCCC;
99 idetm_data |= (control << (4 * adev->devno));
100 idetm_data |= (timings[pio][0] << 12) |
101 (timings[pio][1] << 8);
102 pci_write_config_word(dev, 0x40, idetm_data);
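	/* Worked example, derived from the masks above and assuming IORDY is
	   wanted: PIO4 on device 0 gives control = 3 and timings {3, 3}, so
	   0x3303 is OR'd into the preserved bits of idetm_data. */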
103
104 /* Track which port is configured */
105 ap->private_data = adev;
106}
107
108/**
109 * radisys_set_dmamode - Initialize host controller PATA DMA timings
110 * @ap: Port whose timings we are configuring
111 * @adev: Device to program
 113 *
 114 *	Set MWDMA/UDMA mode for device, in host controller PCI config space.
115 *
116 * LOCKING:
117 * None (inherited from caller).
118 */
119
120static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
121{
122 struct pci_dev *dev = to_pci_dev(ap->host->dev);
123 u16 idetm_data;
124 u8 udma_enable;
125
126 static const /* ISP RTC */
127 u8 timings[][2] = { { 0, 0 },
128 { 0, 0 },
129 { 1, 1 },
130 { 2, 2 },
131 { 3, 3 }, };
132
133 /*
134 * MWDMA is driven by the PIO timings. We must also enable
135 * IORDY unconditionally.
136 */
137
138 pci_read_config_word(dev, 0x40, &idetm_data);
139 pci_read_config_byte(dev, 0x48, &udma_enable);
140
141 if (adev->dma_mode < XFER_UDMA_0) {
142 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
143 const unsigned int needed_pio[3] = {
144 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
145 };
146 int pio = needed_pio[mwdma] - XFER_PIO_0;
147 int control = 3; /* IORDY|TIME0 */
148
149 /* If the drive MWDMA is faster than it can do PIO then
150 we must force PIO0 for PIO cycles. */
151
152 if (adev->pio_mode < needed_pio[mwdma])
153 control = 1;
154
155 /* Mask out the relevant control and timing bits we will load. Also
156 clear the other drive TIME register as a precaution */
157
158 idetm_data &= 0xCCCC;
159 idetm_data |= control << (4 * adev->devno);
160 idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
161
162 udma_enable &= ~(1 << adev->devno);
163 } else {
164 u8 udma_mode;
165
166 /* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
167
168 pci_read_config_byte(dev, 0x4A, &udma_mode);
169
170 if (adev->xfer_mode == XFER_UDMA_2)
171 udma_mode &= ~ (1 << adev->devno);
172 else /* UDMA 4 */
173 udma_mode |= (1 << adev->devno);
174
175 pci_write_config_byte(dev, 0x4A, udma_mode);
176
177 udma_enable |= (1 << adev->devno);
178 }
179 pci_write_config_word(dev, 0x40, idetm_data);
180 pci_write_config_byte(dev, 0x48, udma_enable);
181
182 /* Track which port is configured */
183 ap->private_data = adev;
184}
185
186/**
187 * radisys_qc_issue_prot - command issue
188 * @qc: command pending
189 *
190 * Called when the libata layer is about to issue a command. We wrap
191 * this interface so that we can load the correct ATA timings if
 192 *	necessary. Our logic also clears TIME0/TIME1 for the other device so
193 * that, even if we get this wrong, cycles to the other device will
194 * be made PIO0.
195 */
196
197static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
198{
199 struct ata_port *ap = qc->ap;
200 struct ata_device *adev = qc->dev;
201
202 if (adev != ap->private_data) {
203 /* UDMA timing is not shared */
204 if (adev->dma_mode < XFER_UDMA_0) {
205 if (adev->dma_mode)
206 radisys_set_dmamode(ap, adev);
207 else if (adev->pio_mode)
208 radisys_set_piomode(ap, adev);
209 }
210 }
211 return ata_qc_issue_prot(qc);
212}
213
214
215static struct scsi_host_template radisys_sht = {
216 .module = THIS_MODULE,
217 .name = DRV_NAME,
218 .ioctl = ata_scsi_ioctl,
219 .queuecommand = ata_scsi_queuecmd,
220 .can_queue = ATA_DEF_QUEUE,
221 .this_id = ATA_SHT_THIS_ID,
222 .sg_tablesize = LIBATA_MAX_PRD,
223 .max_sectors = ATA_MAX_SECTORS,
224 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
225 .emulated = ATA_SHT_EMULATED,
226 .use_clustering = ATA_SHT_USE_CLUSTERING,
227 .proc_name = DRV_NAME,
228 .dma_boundary = ATA_DMA_BOUNDARY,
229 .slave_configure = ata_scsi_slave_config,
230 .bios_param = ata_std_bios_param,
231};
232
233static const struct ata_port_operations radisys_pata_ops = {
234 .port_disable = ata_port_disable,
235 .set_piomode = radisys_set_piomode,
236 .set_dmamode = radisys_set_dmamode,
237 .mode_filter = ata_pci_default_filter,
238
239 .tf_load = ata_tf_load,
240 .tf_read = ata_tf_read,
241 .check_status = ata_check_status,
242 .exec_command = ata_exec_command,
243 .dev_select = ata_std_dev_select,
244
245 .freeze = ata_bmdma_freeze,
246 .thaw = ata_bmdma_thaw,
247 .error_handler = radisys_pata_error_handler,
248 .post_internal_cmd = ata_bmdma_post_internal_cmd,
249
250 .bmdma_setup = ata_bmdma_setup,
251 .bmdma_start = ata_bmdma_start,
252 .bmdma_stop = ata_bmdma_stop,
253 .bmdma_status = ata_bmdma_status,
254 .qc_prep = ata_qc_prep,
255 .qc_issue = radisys_qc_issue_prot,
256 .data_xfer = ata_pio_data_xfer,
257
258 .eng_timeout = ata_eng_timeout,
259
260 .irq_handler = ata_interrupt,
261 .irq_clear = ata_bmdma_irq_clear,
262
263 .port_start = ata_port_start,
264 .port_stop = ata_port_stop,
265 .host_stop = ata_host_stop,
266};
267
268
269/**
 270 *	radisys_init_one - Register Radisys ATA PCI device with kernel services
271 * @pdev: PCI device to register
272 * @ent: Entry in radisys_pci_tbl matching with @pdev
273 *
 274 *	Called from kernel PCI layer. We print our version banner and then
 275 *	hand over control to libata, for it to do the rest.
276 *
277 * LOCKING:
278 * Inherited from PCI layer (may sleep).
279 *
280 * RETURNS:
281 * Zero on success, or -ERRNO value.
282 */
283
284static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
285{
286 static int printed_version;
287 static struct ata_port_info info = {
288 .sht = &radisys_sht,
289 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
290 .pio_mask = 0x1f, /* pio0-4 */
 291		.mwdma_mask	= 0x07,	/* mwdma0-2 */
292 .udma_mask = 0x14, /* UDMA33/66 only */
293 .port_ops = &radisys_pata_ops,
294 };
295 static struct ata_port_info *port_info[2] = { &info, &info };
296
297 if (!printed_version++)
298 dev_printk(KERN_DEBUG, &pdev->dev,
299 "version " DRV_VERSION "\n");
300
301 return ata_pci_init_one(pdev, port_info, 2);
302}
303
304static const struct pci_device_id radisys_pci_tbl[] = {
305 { 0x1331, 0x8201, PCI_ANY_ID, PCI_ANY_ID, },
306 { } /* terminate list */
307};
308
309static struct pci_driver radisys_pci_driver = {
310 .name = DRV_NAME,
311 .id_table = radisys_pci_tbl,
312 .probe = radisys_init_one,
313 .remove = ata_pci_remove_one,
314};
315
316static int __init radisys_init(void)
317{
318 return pci_register_driver(&radisys_pci_driver);
319}
320
321static void __exit radisys_exit(void)
322{
323 pci_unregister_driver(&radisys_pci_driver);
324}
325
326
327module_init(radisys_init);
328module_exit(radisys_exit);
329
330MODULE_AUTHOR("Alan Cox");
331MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
332MODULE_LICENSE("GPL");
333MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
334MODULE_VERSION(DRV_VERSION);
335
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
new file mode 100644
index 000000000000..3c6d84fd4312
--- /dev/null
+++ b/drivers/ata/pata_rz1000.c
@@ -0,0 +1,205 @@
1/*
2 * RZ1000/1001 driver based upon
3 *
4 * linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003
5 * Copyright (C) 1995-1998 Linus Torvalds & author (see below)
6 * Principal Author: mlord@pobox.com (Mark Lord)
7 *
8 * See linux/MAINTAINERS for address of current maintainer.
9 *
10 * This file provides support for disabling the buggy read-ahead
11 * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/init.h>
18#include <linux/blkdev.h>
19#include <linux/delay.h>
20#include <scsi/scsi_host.h>
21#include <linux/libata.h>
22
23#define DRV_NAME "pata_rz1000"
24#define DRV_VERSION "0.2.2"
25
26
27/**
28 * rz1000_prereset - probe begin
29 * @ap: ATA port
30 *
31 * Set up cable type and use generics
32 */
33
34static int rz1000_prereset(struct ata_port *ap)
35{
36 ap->cbl = ATA_CBL_PATA40;
37 return ata_std_prereset(ap);
38}
39
40/**
41 * rz1000_error_handler - probe reset
42 * @ap: ATA port
43 *
44 * Perform the ATA standard reset sequence
45 */
46
47static void rz1000_error_handler(struct ata_port *ap)
48{
49 ata_bmdma_drive_eh(ap, rz1000_prereset, ata_std_softreset, NULL, ata_std_postreset);
50}
51
52/**
53 * rz1000_set_mode - mode setting function
54 * @ap: ATA interface
55 *
56 * Use a non standard set_mode function. We don't want to be tuned. We
57 * would prefer to be BIOS generic but for the fact our hardware is
58 * whacked out.
59 */
60
61static void rz1000_set_mode(struct ata_port *ap)
62{
63 int i;
64
65 for (i = 0; i < ATA_MAX_DEVICES; i++) {
66 struct ata_device *dev = &ap->device[i];
67 if (ata_dev_enabled(dev)) {
68 /* We don't really care */
69 dev->pio_mode = XFER_PIO_0;
70 dev->xfer_mode = XFER_PIO_0;
71 dev->xfer_shift = ATA_SHIFT_PIO;
72 dev->flags |= ATA_DFLAG_PIO;
73 }
74 }
75}
76
77
78static struct scsi_host_template rz1000_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .max_sectors = ATA_MAX_SECTORS,
87 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
88 .emulated = ATA_SHT_EMULATED,
89 .use_clustering = ATA_SHT_USE_CLUSTERING,
90 .proc_name = DRV_NAME,
91 .dma_boundary = ATA_DMA_BOUNDARY,
92 .slave_configure = ata_scsi_slave_config,
93 .bios_param = ata_std_bios_param,
94};
95
96static struct ata_port_operations rz1000_port_ops = {
97 .set_mode = rz1000_set_mode,
98
99 .port_disable = ata_port_disable,
100 .tf_load = ata_tf_load,
101 .tf_read = ata_tf_read,
102 .check_status = ata_check_status,
103 .exec_command = ata_exec_command,
104 .dev_select = ata_std_dev_select,
105
107
108 .bmdma_setup = ata_bmdma_setup,
109 .bmdma_start = ata_bmdma_start,
110 .bmdma_stop = ata_bmdma_stop,
111 .bmdma_status = ata_bmdma_status,
112
113 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot,
115 .eng_timeout = ata_eng_timeout,
116 .data_xfer = ata_pio_data_xfer,
117
118 .freeze = ata_bmdma_freeze,
119 .thaw = ata_bmdma_thaw,
120 .error_handler = rz1000_error_handler,
121 .post_internal_cmd = ata_bmdma_post_internal_cmd,
122
123 .irq_handler = ata_interrupt,
124 .irq_clear = ata_bmdma_irq_clear,
125
126 .port_start = ata_port_start,
127 .port_stop = ata_port_stop,
128 .host_stop = ata_host_stop
129};
130
131/**
132 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
133 * @pdev: PCI device to register
134 * @ent: Entry in rz1000_pci_tbl matching with @pdev
135 *
136 * Configure an RZ1000 interface. This doesn't require much special
137 * handling except that we *MUST* kill the chipset readahead or the
138 * user may experience data corruption.
139 */
140
141static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
142{
143 static int printed_version;
144 struct ata_port_info *port_info[2];
145 u16 reg;
146 static struct ata_port_info info = {
147 .sht = &rz1000_sht,
148 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
149 .pio_mask = 0x1f,
150 .port_ops = &rz1000_port_ops
151 };
152
153 if (!printed_version++)
154 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
155
156 /* Be exceptionally paranoid as we must be sure to apply the fix */
157 if (pci_read_config_word(pdev, 0x40, &reg) != 0)
158 goto fail;
159 reg &= 0xDFFF;
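	/* 0xDFFF clears bit 13 of the config word at 0x40; per the old IDE
	   rz1000 driver this file is based on, that bit controls the buggy
	   read-ahead feature we must disable. */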
160 if (pci_write_config_word(pdev, 0x40, reg) != 0)
161 goto fail;
162 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
163
164 port_info[0] = &info;
165 port_info[1] = &info;
166 return ata_pci_init_one(pdev, port_info, 2);
167fail:
 168	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
169 /* Not safe to use so skip */
170 return -ENODEV;
171}
172
173static struct pci_device_id pata_rz1000[] = {
174 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
175 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
176 { 0, },
177};
178
179static struct pci_driver rz1000_pci_driver = {
180 .name = DRV_NAME,
181 .id_table = pata_rz1000,
182 .probe = rz1000_init_one,
183 .remove = ata_pci_remove_one
184};
185
186
187static int __init rz1000_init(void)
188{
189 return pci_register_driver(&rz1000_pci_driver);
190}
191
192static void __exit rz1000_exit(void)
193{
194 pci_unregister_driver(&rz1000_pci_driver);
195}
196
197MODULE_AUTHOR("Alan Cox");
198MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
199MODULE_LICENSE("GPL");
200MODULE_DEVICE_TABLE(pci, pata_rz1000);
201MODULE_VERSION(DRV_VERSION);
202
203module_init(rz1000_init);
204module_exit(rz1000_exit);
205
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
new file mode 100644
index 000000000000..4166c1a8a9e8
--- /dev/null
+++ b/drivers/ata/pata_sc1200.c
@@ -0,0 +1,287 @@
1/*
2 * New ATA layer SC1200 driver Alan Cox <alan@redhat.com>
3 *
4 * TODO: Mode selection filtering
 5 * TODO: Can't enable the second channel until the ATA core can serialize the channels
6 * TODO: Needs custom DMA cleanup code
7 *
8 * Based very heavily on
9 *
10 * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
11 *
12 * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
13 * May be copied or modified under the terms of the GNU General Public License
14 *
15 * Development of this chipset driver was funded
16 * by the nice folks at National Semiconductor.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <scsi/scsi_host.h>
40#include <linux/libata.h>
41
42#define DRV_NAME "sc1200"
43#define DRV_VERSION "0.2.3"
44
45#define SC1200_REV_A 0x00
46#define SC1200_REV_B1 0x01
47#define SC1200_REV_B3 0x02
48#define SC1200_REV_C1 0x03
49#define SC1200_REV_D1 0x04
50
51/**
52 * sc1200_clock - PCI clock
53 *
54 * Return the PCI bus clocking for the SC1200 chipset configuration
 55 *	in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
56 */
57
58static int sc1200_clock(void)
59{
60 /* Magic registers that give us the chipset data */
61 u8 chip_id = inb(0x903C);
62 u8 silicon_rev = inb(0x903D);
63 u16 pci_clock;
64
65 if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
66 return 0; /* 33 MHz mode */
67
 68	/* Clock generator configuration at 0x901E: bits 8/9 are the PCI clocking;
 69	   0 or 3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
70
71 pci_clock = inw(0x901E);
72 pci_clock >>= 8;
73 pci_clock &= 0x03;
74 if (pci_clock == 3)
75 pci_clock = 0;
76 return pci_clock;
77}
78
79/**
80 * sc1200_set_piomode - PIO setup
81 * @ap: ATA interface
82 * @adev: device on the interface
83 *
84 * Set our PIO requirements. This is fairly simple on the SC1200
85 */
86
87static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
88{
89 static const u32 pio_timings[4][5] = {
90 {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
91 {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
92 {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
93 {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} // format1, 66Mhz
94 };
95
96 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
97 u32 format;
98 unsigned int reg = 0x40 + 0x10 * ap->port_no;
99 int mode = adev->pio_mode - XFER_PIO_0;
100
101 pci_read_config_dword(pdev, reg + 4, &format);
102 format >>= 31;
103 format += sc1200_clock();
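	/* Bit 31 of the existing timing word selects format0 vs format1 and
	   the clock index (0 = 33MHz, 1 = 48MHz, 2 = 66MHz) is added to it,
	   picking the matching row of pio_timings above; e.g. format1 on a
	   48MHz bus indexes row 2. */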
104 pci_write_config_dword(pdev, reg + 8 * adev->devno,
105 pio_timings[format][mode]);
106}
107
108/**
109 * sc1200_set_dmamode - DMA timing setup
110 * @ap: ATA interface
111 * @adev: Device being configured
112 *
 113 *	We cannot mix MWDMA and UDMA without reloading the timings on each
 114 *	switch between master and slave.
115 */
116
117static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
118{
119 static const u32 udma_timing[3][3] = {
120 { 0x00921250, 0x00911140, 0x00911030 },
121 { 0x00932470, 0x00922260, 0x00922140 },
122 { 0x009436A1, 0x00933481, 0x00923261 }
123 };
124
125 static const u32 mwdma_timing[3][3] = {
126 { 0x00077771, 0x00012121, 0x00002020 },
127 { 0x000BBBB2, 0x00024241, 0x00013131 },
128 { 0x000FFFF3, 0x00035352, 0x00015151 }
129 };
130
131 int clock = sc1200_clock();
132 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
133 unsigned int reg = 0x40 + 0x10 * ap->port_no;
134 int mode = adev->dma_mode;
135 u32 format;
136
137 if (mode >= XFER_UDMA_0)
138 format = udma_timing[clock][mode - XFER_UDMA_0];
139 else
140 format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
141
142 if (adev->devno == 0) {
143 u32 timings;
144
145 pci_read_config_dword(pdev, reg + 4, &timings);
146 timings &= 0x80000000UL;
147 timings |= format;
148 pci_write_config_dword(pdev, reg + 4, timings);
149 } else
150 pci_write_config_dword(pdev, reg + 12, format);
151}
152
153/**
154 * sc1200_qc_issue_prot - command issue
155 * @qc: command pending
156 *
157 * Called when the libata layer is about to issue a command. We wrap
158 * this interface so that we can load the correct ATA timings if
 159 *	necessary. Specifically we have a problem that there is only
160 * one MWDMA/UDMA bit.
161 */
162
163static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
164{
165 struct ata_port *ap = qc->ap;
166 struct ata_device *adev = qc->dev;
167 struct ata_device *prev = ap->private_data;
168
169 /* See if the DMA settings could be wrong */
170 if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
171 /* Maybe, but do the channels match MWDMA/UDMA ? */
172 if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
173 (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
174 /* Switch the mode bits */
175 sc1200_set_dmamode(ap, adev);
176 }
177
178 return ata_qc_issue_prot(qc);
179}
180
181static struct scsi_host_template sc1200_sht = {
182 .module = THIS_MODULE,
183 .name = DRV_NAME,
184 .ioctl = ata_scsi_ioctl,
185 .queuecommand = ata_scsi_queuecmd,
186 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING,
193 .proc_name = DRV_NAME,
194 .dma_boundary = ATA_DMA_BOUNDARY,
195 .slave_configure = ata_scsi_slave_config,
196 .bios_param = ata_std_bios_param,
197};
198
199static struct ata_port_operations sc1200_port_ops = {
200 .port_disable = ata_port_disable,
201 .set_piomode = sc1200_set_piomode,
202 .set_dmamode = sc1200_set_dmamode,
203 .mode_filter = ata_pci_default_filter,
204
205 .tf_load = ata_tf_load,
206 .tf_read = ata_tf_read,
207 .check_status = ata_check_status,
208 .exec_command = ata_exec_command,
209 .dev_select = ata_std_dev_select,
210
211 .error_handler = ata_bmdma_error_handler,
212
213 .bmdma_setup = ata_bmdma_setup,
214 .bmdma_start = ata_bmdma_start,
215 .bmdma_stop = ata_bmdma_stop,
216 .bmdma_status = ata_bmdma_status,
217
218 .qc_prep = ata_qc_prep,
219 .qc_issue = sc1200_qc_issue_prot,
220 .eng_timeout = ata_eng_timeout,
221 .data_xfer = ata_pio_data_xfer,
222
223 .irq_handler = ata_interrupt,
224 .irq_clear = ata_bmdma_irq_clear,
225
226 .port_start = ata_port_start,
227 .port_stop = ata_port_stop,
228 .host_stop = ata_host_stop
229};
230
231/**
232 * sc1200_init_one - Initialise an SC1200
233 * @dev: PCI device
234 * @id: Entry in match table
235 *
236 * Just throw the needed data at the libata helper and it does all
237 * our work.
238 */
239
240static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
241{
242 static struct ata_port_info info = {
243 .sht = &sc1200_sht,
244 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
245 .pio_mask = 0x1f,
246 .mwdma_mask = 0x07,
247 .udma_mask = 0x07,
248 .port_ops = &sc1200_port_ops
249 };
250 static struct ata_port_info *port_info[2] = { &info, &info };
251
252 /* Can't enable port 2 yet, see top comments */
253 return ata_pci_init_one(dev, port_info, 1);
254}
255
256static struct pci_device_id sc1200[] = {
257 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
258 { 0, },
259};
260
261static struct pci_driver sc1200_pci_driver = {
262 .name = DRV_NAME,
263 .id_table = sc1200,
264 .probe = sc1200_init_one,
265 .remove = ata_pci_remove_one
266};
267
268static int __init sc1200_init(void)
269{
270 return pci_register_driver(&sc1200_pci_driver);
271}
272
273
274static void __exit sc1200_exit(void)
275{
276 pci_unregister_driver(&sc1200_pci_driver);
277}
278
279
280MODULE_AUTHOR("Alan Cox, Mark Lord");
281MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
282MODULE_LICENSE("GPL");
283MODULE_DEVICE_TABLE(pci, sc1200);
284MODULE_VERSION(DRV_VERSION);
285
286module_init(sc1200_init);
287module_exit(sc1200_exit);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
new file mode 100644
index 000000000000..af456113c55d
--- /dev/null
+++ b/drivers/ata/pata_serverworks.c
@@ -0,0 +1,587 @@
1/*
 2 * pata_serverworks.c 	- Serverworks PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * serverworks.c
9 *
10 * Copyright (C) 1998-2000 Michel Aubry
11 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
12 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
13 * Portions copyright (c) 2001 Sun Microsystems
14 *
15 *
16 * RCC/ServerWorks IDE driver for Linux
17 *
18 * OSB4: `Open South Bridge' IDE Interface (fn 1)
19 * supports UDMA mode 2 (33 MB/s)
20 *
21 * CSB5: `Champion South Bridge' IDE Interface (fn 1)
22 * all revisions support UDMA mode 4 (66 MB/s)
23 * revision A2.0 and up support UDMA mode 5 (100 MB/s)
24 *
25 * *** The CSB5 does not provide ANY register ***
26 * *** to detect 80-conductor cable presence. ***
27 *
28 * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
29 *
30 * Documentation:
31 * Available under NDA only. Errata info very hard to get.
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <scsi/scsi_host.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.3.6"
45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
48
49/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
50 * can overrun their FIFOs when used with the CSB5 */
51
52static const char *csb_bad_ata100[] = {
53 "ST320011A",
54 "ST340016A",
55 "ST360021A",
56 "ST380021A",
57 NULL
58};
59
60/**
61 * dell_cable - Dell serverworks cable detection
62 * @ap: ATA port to do cable detect
63 *
 64 *	Dell hides the 40/80 pin select for their interfaces in the top two
65 * bits of the subsystem ID.
66 */
67
 68static int dell_cable(struct ata_port *ap)
{
69 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
70
71 if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
72 return ATA_CBL_PATA80;
73 return ATA_CBL_PATA40;
74}
75
76/**
77 * sun_cable - Sun Cobalt 'Alpine' cable detection
78 * @ap: ATA port to do cable select
79 *
 80 *	The Cobalt CSB5 IDE hides the 40/80 pin select in the top two bits of
 81 *	the subsystem ID, the same as Dell. We could use one function but we
 82 *	may need to extend the Dell one in the future.
83 */
84
 85static int sun_cable(struct ata_port *ap)
{
86 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
87
88 if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
89 return ATA_CBL_PATA80;
90 return ATA_CBL_PATA40;
91}
92
93/**
94 * osb4_cable - OSB4 cable detect
95 * @ap: ATA port to check
96 *
97 * The OSB4 isn't UDMA66 capable so this is easy
98 */
99
 100static int osb4_cable(struct ata_port *ap)
{
101 return ATA_CBL_PATA40;
102}
103
104/**
 105 *	csb_cable	-	CSB5/6 cable detect
106 * @ap: ATA port to check
107 *
108 * Serverworks default arrangement is to use the drive side detection
109 * only.
110 */
111
 112static int csb_cable(struct ata_port *ap)
{
113 return ATA_CBL_PATA80;
114}
115
116struct sv_cable_table {
117 int device;
118 int subvendor;
119 int (*cable_detect)(struct ata_port *ap);
120};
121
122/*
123 * Note that we don't copy the old serverworks code because the old
124 * code contains obvious mistakes
125 */
126
127static struct sv_cable_table cable_detect[] = {
128 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
129 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
130 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
131 { PCI_DEVICE_ID_SERVERWORKS_OSB4, PCI_ANY_ID, osb4_cable },
132 { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
133 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
134 { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
135 { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
136 { }
137};
138
139/**
140 * serverworks_pre_reset - cable detection
141 * @ap: ATA port
142 *
143 * Perform cable detection according to the device and subvendor
144 * identifications
145 */
146
 147static int serverworks_pre_reset(struct ata_port *ap)
{
148 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
149 struct sv_cable_table *cb = cable_detect;
150
151 while(cb->device) {
152 if (cb->device == pdev->device &&
153 (cb->subvendor == pdev->subsystem_vendor ||
154 cb->subvendor == PCI_ANY_ID)) {
155 ap->cbl = cb->cable_detect(ap);
156 return ata_std_prereset(ap);
157 }
158 cb++;
159 }
160
161 BUG();
162 return -1; /* kill compiler warning */
163}
164
165static void serverworks_error_handler(struct ata_port *ap)
166{
 167	ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
168}
169
170/**
171 * serverworks_is_csb - Check for CSB or OSB
172 * @pdev: PCI device to check
173 *
174 * Returns true if the device being checked is known to be a CSB
175 * series device.
176 */
177
178static u8 serverworks_is_csb(struct pci_dev *pdev)
179{
180 switch (pdev->device) {
181 case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
182 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
183 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
184 case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
185 return 1;
186 default:
187 break;
188 }
189 return 0;
190}
191
192/**
193 * serverworks_osb4_filter - mode selection filter
194 * @ap: ATA interface
195 * @adev: ATA device
196 *
197 * Filter the offered modes for the device to apply controller
 198 *	specific rules. The OSB4 must not be given UDMA for disks due to a
 199 *	FIFO bug we hit, so UDMA is masked out for ATA devices.
200 */
201
202static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
203{
204 if (adev->class == ATA_DEV_ATA)
205 mask &= ~ATA_MASK_UDMA;
206 return ata_pci_default_filter(ap, adev, mask);
207}
208
209
210/**
211 * serverworks_csb_filter - mode selection filter
212 * @ap: ATA interface
213 * @adev: ATA device
214 *
215 * Check the blacklist and disable UDMA5 if matched
216 */
217
218static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
219{
220 const char *p;
221 char model_num[40];
222 int len, i;
223
224 /* Disk, UDMA */
225 if (adev->class != ATA_DEV_ATA)
226 return ata_pci_default_filter(ap, adev, mask);
227
228 /* Actually do need to check */
229 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
 230	/* Precautionary - why not do this in the libata core? */
231
232 len = strlen(model_num);
233 while ((len > 0) && (model_num[len - 1] == ' ')) {
234 len--;
235 model_num[len] = 0;
236 }
237
238 for(i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
239 if (!strncmp(p, model_num, len))
240 mask &= ~(0x1F << ATA_SHIFT_UDMA);
241 }
242 return ata_pci_default_filter(ap, adev, mask);
243}
244
245
246/**
247 * serverworks_set_piomode - set initial PIO mode data
248 * @ap: ATA interface
249 * @adev: ATA device
250 *
251 * Program the OSB4/CSB5 timing registers for PIO. The PIO register
252 * load is done as a simple lookup.
253 */
254static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
255{
256 static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
257 int offset = 1 + (2 * ap->port_no) - adev->devno;
258 int devbits = (2 * ap->port_no + adev->devno) * 4;
259 u16 csb5_pio;
260 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
261 int pio = adev->pio_mode - XFER_PIO_0;
262
263 pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
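	/* Worked example, derived from the offset arithmetic above: the
	   secondary channel master (port 1, device 0) has offset = 3, so its
	   timing byte lives at config register 0x43, and devbits = 8 selects
	   the matching nibble in the CSB PIO mode word below. */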
264
265 /* The OSB4 just requires the timing but the CSB series want the
266 mode number as well */
267 if (serverworks_is_csb(pdev)) {
268 pci_read_config_word(pdev, 0x4A, &csb5_pio);
269 csb5_pio &= ~(0x0F << devbits);
 270		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
271 }
272}
273
274/**
275 * serverworks_set_dmamode - set initial DMA mode data
276 * @ap: ATA interface
277 * @adev: ATA device
278 *
279 * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
280 * chipset. The MWDMA mode values are pulled from a lookup table
281 * while the chipset uses mode number for UDMA.
282 */
283
284static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
285{
286 static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
287 int offset = 1 + 2 * ap->port_no - adev->devno;
288 int devbits = (2 * ap->port_no + adev->devno);
289 u8 ultra;
290 u8 ultra_cfg;
291 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
292
293 pci_read_config_byte(pdev, 0x54, &ultra_cfg);
294
295 if (adev->dma_mode >= XFER_UDMA_0) {
296 pci_write_config_byte(pdev, 0x44 + offset, 0x20);
297
298 pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
299 ultra &= ~(0x0F << (ap->port_no * 4));
300 ultra |= (adev->dma_mode - XFER_UDMA_0)
301 << (ap->port_no * 4);
302 pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
303
304 ultra_cfg |= (1 << devbits);
305 } else {
306 pci_write_config_byte(pdev, 0x44 + offset,
307 dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
308 ultra_cfg &= ~(1 << devbits);
309 }
310 pci_write_config_byte(pdev, 0x54, ultra_cfg);
311}
312
313static struct scsi_host_template serverworks_sht = {
314 .module = THIS_MODULE,
315 .name = DRV_NAME,
316 .ioctl = ata_scsi_ioctl,
317 .queuecommand = ata_scsi_queuecmd,
318 .can_queue = ATA_DEF_QUEUE,
319 .this_id = ATA_SHT_THIS_ID,
320 .sg_tablesize = LIBATA_MAX_PRD,
321 .max_sectors = ATA_MAX_SECTORS,
322 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
323 .emulated = ATA_SHT_EMULATED,
324 .use_clustering = ATA_SHT_USE_CLUSTERING,
325 .proc_name = DRV_NAME,
326 .dma_boundary = ATA_DMA_BOUNDARY,
327 .slave_configure = ata_scsi_slave_config,
328 .bios_param = ata_std_bios_param,
329};
330
331static struct ata_port_operations serverworks_osb4_port_ops = {
332 .port_disable = ata_port_disable,
333 .set_piomode = serverworks_set_piomode,
334 .set_dmamode = serverworks_set_dmamode,
335 .mode_filter = serverworks_osb4_filter,
336
337 .tf_load = ata_tf_load,
338 .tf_read = ata_tf_read,
339 .check_status = ata_check_status,
340 .exec_command = ata_exec_command,
341 .dev_select = ata_std_dev_select,
342
343 .freeze = ata_bmdma_freeze,
344 .thaw = ata_bmdma_thaw,
345 .error_handler = serverworks_error_handler,
346 .post_internal_cmd = ata_bmdma_post_internal_cmd,
347
348 .bmdma_setup = ata_bmdma_setup,
349 .bmdma_start = ata_bmdma_start,
350 .bmdma_stop = ata_bmdma_stop,
351 .bmdma_status = ata_bmdma_status,
352
353 .qc_prep = ata_qc_prep,
354 .qc_issue = ata_qc_issue_prot,
355 .eng_timeout = ata_eng_timeout,
356 .data_xfer = ata_pio_data_xfer,
357
 358	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
359 .port_start = ata_port_start,
360 .port_stop = ata_port_stop,
361 .host_stop = ata_host_stop
362};
363
364static struct ata_port_operations serverworks_csb_port_ops = {
365 .port_disable = ata_port_disable,
366 .set_piomode = serverworks_set_piomode,
367 .set_dmamode = serverworks_set_dmamode,
368 .mode_filter = serverworks_csb_filter,
369
370 .tf_load = ata_tf_load,
371 .tf_read = ata_tf_read,
372 .check_status = ata_check_status,
373 .exec_command = ata_exec_command,
374 .dev_select = ata_std_dev_select,
375
376 .freeze = ata_bmdma_freeze,
377 .thaw = ata_bmdma_thaw,
378 .error_handler = serverworks_error_handler,
379 .post_internal_cmd = ata_bmdma_post_internal_cmd,
380
381 .bmdma_setup = ata_bmdma_setup,
382 .bmdma_start = ata_bmdma_start,
383 .bmdma_stop = ata_bmdma_stop,
384 .bmdma_status = ata_bmdma_status,
385
386 .qc_prep = ata_qc_prep,
387 .qc_issue = ata_qc_issue_prot,
388 .eng_timeout = ata_eng_timeout,
389 .data_xfer = ata_pio_data_xfer,
390
 391	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
392 .port_start = ata_port_start,
393 .port_stop = ata_port_stop,
394 .host_stop = ata_host_stop
395};
396
397static int serverworks_fixup_osb4(struct pci_dev *pdev)
398{
399 u32 reg;
400 struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
401 PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
402 if (isa_dev) {
403 pci_read_config_dword(isa_dev, 0x64, &reg);
404 reg &= ~0x00002000; /* disable 600ns interrupt mask */
405 if (!(reg & 0x00004000))
406 printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
407 reg |= 0x00004000; /* enable UDMA/33 support */
408 pci_write_config_dword(isa_dev, 0x64, reg);
409 pci_dev_put(isa_dev);
410 return 0;
411 }
 412	printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
413 return -ENODEV;
414}
415
416static int serverworks_fixup_csb(struct pci_dev *pdev)
417{
418 u8 rev;
419 u8 btr;
420
421 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
422
423 /* Third Channel Test */
424 if (!(PCI_FUNC(pdev->devfn) & 1)) {
425 struct pci_dev * findev = NULL;
426 u32 reg4c = 0;
427 findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
428 PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
429 if (findev) {
430 pci_read_config_dword(findev, 0x4C, &reg4c);
431 reg4c &= ~0x000007FF;
432 reg4c |= 0x00000040;
433 reg4c |= 0x00000020;
434 pci_write_config_dword(findev, 0x4C, reg4c);
435 pci_dev_put(findev);
436 }
437 } else {
438 struct pci_dev * findev = NULL;
439 u8 reg41 = 0;
440
441 findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
442 PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
443 if (findev) {
444 pci_read_config_byte(findev, 0x41, &reg41);
445 reg41 &= ~0x40;
446 pci_write_config_byte(findev, 0x41, reg41);
447 pci_dev_put(findev);
448 }
449 }
450 /* setup the UDMA Control register
451 *
452 * 1. clear bit 6 to enable DMA
453 * 2. enable DMA modes with bits 0-1
454 * 00 : legacy
455 * 01 : udma2
456 * 10 : udma2/udma4
457 * 11 : udma2/udma4/udma5
458 */
459 pci_read_config_byte(pdev, 0x5A, &btr);
460 btr &= ~0x40;
461 if (!(PCI_FUNC(pdev->devfn) & 1))
462 btr |= 0x2;
463 else
464 btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
465 pci_write_config_byte(pdev, 0x5A, btr);
466
467 return btr;
468}
469
470static void serverworks_fixup_ht1000(struct pci_dev *pdev)
471{
472 u8 btr;
473 /* Setup HT1000 SouthBridge Controller - Single Channel Only */
474 pci_read_config_byte(pdev, 0x5A, &btr);
475 btr &= ~0x40;
476 btr |= 0x3;
477 pci_write_config_byte(pdev, 0x5A, btr);
478}
479
480
481static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
482{
483 int ports = 2;
484 static struct ata_port_info info[4] = {
485 { /* OSB4 */
486 .sht = &serverworks_sht,
487 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
488 .pio_mask = 0x1f,
489 .mwdma_mask = 0x07,
490 .udma_mask = 0x07,
491 .port_ops = &serverworks_osb4_port_ops
492 }, { /* OSB4 no UDMA */
493 .sht = &serverworks_sht,
494 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
495 .pio_mask = 0x1f,
496 .mwdma_mask = 0x07,
497 .udma_mask = 0x00,
498 .port_ops = &serverworks_osb4_port_ops
499 }, { /* CSB5 */
500 .sht = &serverworks_sht,
501 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
502 .pio_mask = 0x1f,
503 .mwdma_mask = 0x07,
504 .udma_mask = 0x1f,
505 .port_ops = &serverworks_csb_port_ops
506 }, { /* CSB5 - later revisions */
507 .sht = &serverworks_sht,
508 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
509 .pio_mask = 0x1f,
510 .mwdma_mask = 0x07,
511 .udma_mask = 0x3f,
512 .port_ops = &serverworks_csb_port_ops
513 }
514 };
515 static struct ata_port_info *port_info[2];
516 struct ata_port_info *devinfo = &info[id->driver_data];
517
518 /* Force master latency timer to 64 PCI clocks */
519 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
520
521 /* OSB4 : South Bridge and IDE */
522 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
523 /* Select non UDMA capable OSB4 if we can't do fixups */
524 if ( serverworks_fixup_osb4(pdev) < 0)
525 devinfo = &info[1];
526 }
527 /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
528 else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
529 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
530 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
531
532 /* If the returned btr value indicates the newer revision
533 then select the right info block */
534 if (serverworks_fixup_csb(pdev) == 3)
535 devinfo = &info[3];
536
537 /* Is this the 3rd channel CSB6 IDE ? */
538 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
539 ports = 1;
540 }
541 /* setup HT1000E */
542 else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
543 serverworks_fixup_ht1000(pdev);
544
545 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
546 ata_pci_clear_simplex(pdev);
547
548 port_info[0] = port_info[1] = devinfo;
549 return ata_pci_init_one(pdev, port_info, ports);
550}
551
552static struct pci_device_id serverworks[] = {
553 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
554 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
555 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
556 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
557 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
558 { 0, },
559};
560
561static struct pci_driver serverworks_pci_driver = {
562 .name = DRV_NAME,
563 .id_table = serverworks,
564 .probe = serverworks_init_one,
565 .remove = ata_pci_remove_one
566};
567
568static int __init serverworks_init(void)
569{
570 return pci_register_driver(&serverworks_pci_driver);
571}
572
573
574static void __exit serverworks_exit(void)
575{
576 pci_unregister_driver(&serverworks_pci_driver);
577}
578
579
580MODULE_AUTHOR("Alan Cox");
581MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
582MODULE_LICENSE("GPL");
583MODULE_DEVICE_TABLE(pci, serverworks);
584MODULE_VERSION(DRV_VERSION);
585
586module_init(serverworks_init);
587module_exit(serverworks_exit);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
new file mode 100644
index 000000000000..8f7db9638d0a
--- /dev/null
+++ b/drivers/ata/pata_sil680.c
@@ -0,0 +1,381 @@
1/*
2 * pata_sil680.c - SIL680 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
9 *
10 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
11 * Copyright (C) 2003 Red Hat <alan@redhat.com>
12 *
13 * May be copied or modified under the terms of the GNU General Public License
14 *
15 * Documentation publicly available.
16 *
17 * If you have strange problems with nVidia chipset systems please
18 * see the SI support documentation and update your system BIOS
19 * if necessary
20 *
21 * TODO
22 * If we know all our devices are LBA28 (or LBA28 sized) we could use
23 * the command fifo mode.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/blkdev.h>
31#include <linux/delay.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34
35#define DRV_NAME "pata_sil680"
36#define DRV_VERSION "0.3.2"
37
38/**
39 * sil680_selreg - return register base
40 * @ap: ATA port
41 * @r: config offset
42 *
43 * Turn a config register offset into the right address in either
44 * PCI space or MMIO space to access the control register in question
45 * Thankfully this is a configuration operation so isn't performance
46 * critical.
47 */
48
49static unsigned long sil680_selreg(struct ata_port *ap, int r)
50{
51 unsigned long base = 0xA0 + r;
52 base += (ap->port_no << 4);
53 return base;
54}
55
56/**
57 * sil680_seldev - return register base
58 * @ap: ATA port
59 * @r: config offset
60 *
61 * Turn a config register offset into the right address in either
62 * PCI space or MMIO space to access the control register in question
63 * including accounting for the unit shift.
64 */
65
66static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
67{
68 unsigned long base = 0xA0 + r;
69 base += (ap->port_no << 4);
70 base |= adev->devno ? 2 : 0;
71 return base;
72}
73
74
75/**
76 * sil680_cable_detect - cable detection
77 * @ap: ATA port
78 *
79 * Perform cable detection. The SIL680 stores this in PCI config
80 * space for us.
81 */
82
83static int sil680_cable_detect(struct ata_port *ap) {
84 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
85 unsigned long addr = sil680_selreg(ap, 0);
86 u8 ata66;
87 pci_read_config_byte(pdev, addr, &ata66);
88 if (ata66 & 1)
89 return ATA_CBL_PATA80;
90 else
91 return ATA_CBL_PATA40;
92}
93
94static int sil680_pre_reset(struct ata_port *ap)
95{
96 ap->cbl = sil680_cable_detect(ap);
97 return ata_std_prereset(ap);
98}
99
100/**
101 * sil680_bus_reset - reset the SIL680 bus
102 * @ap: ATA port to reset
103 *
104 * Perform the SIL680 housekeeping when doing an ATA bus reset
105 */
106
107static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes)
108{
109 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
110 unsigned long addr = sil680_selreg(ap, 0);
111 u8 reset;
112
113 pci_read_config_byte(pdev, addr, &reset);
114 pci_write_config_byte(pdev, addr, reset | 0x03);
115 udelay(25);
116 pci_write_config_byte(pdev, addr, reset);
117 return ata_std_softreset(ap, classes);
118}
119
120static void sil680_error_handler(struct ata_port *ap)
121{
122 ata_bmdma_drive_eh(ap, sil680_pre_reset, sil680_bus_reset, NULL, ata_std_postreset);
123}
124
125/**
126 * sil680_set_piomode - set initial PIO mode data
127 * @ap: ATA interface
128 * @adev: ATA device
129 *
130 * Program the SIL680 registers for PIO mode. Note that the task speed
131 * registers are shared between the devices so we must pick the lowest
132 * mode for command work.
133 */
134
135static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
136{
137 static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
138 static u16 speed_t[5] = { 0x328A, 0x1281, 0x1281, 0x10C3, 0x10C1 };
139
140 unsigned long tfaddr = sil680_selreg(ap, 0x02);
141 unsigned long addr = sil680_seldev(ap, adev, 0x04);
142 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
143 int pio = adev->pio_mode - XFER_PIO_0;
144 int lowest_pio = pio;
145 u16 reg;
146
147 struct ata_device *pair = ata_dev_pair(adev);
148
149 if (pair != NULL && adev->pio_mode > pair->pio_mode)
150 lowest_pio = pair->pio_mode - XFER_PIO_0;
151
152 pci_write_config_word(pdev, addr, speed_p[pio]);
153 pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
154
155 pci_read_config_word(pdev, tfaddr-2, &reg);
156 reg &= ~0x0200; /* Clear IORDY */
157 if (ata_pio_need_iordy(adev))
158 reg |= 0x0200; /* Enable IORDY */
159 pci_write_config_word(pdev, tfaddr-2, reg);
160}
161
162/**
163 * sil680_set_dmamode - set initial DMA mode data
164 * @ap: ATA interface
165 * @adev: ATA device
166 *
167 * Program the MWDMA/UDMA modes for the SIL680
168 * chipset. The MWDMA mode values are pulled from a lookup table
169 * while the chipset uses mode number for UDMA.
170 */
171
172static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
173{
174 static u8 ultra_table[2][7] = {
175 { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
176 { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133MHz */
177 };
178 static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
179
180 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
181 unsigned long ma = sil680_seldev(ap, adev, 0x08);
182 unsigned long ua = sil680_seldev(ap, adev, 0x0C);
183 unsigned long addr_mask = 0x80 + 4 * ap->port_no;
184 int port_shift = adev->devno * 4;
185 u8 scsc, mode;
186 u16 multi, ultra;
187
188 pci_read_config_byte(pdev, 0x8A, &scsc);
189 pci_read_config_byte(pdev, addr_mask, &mode);
190 pci_read_config_word(pdev, ma, &multi);
191 pci_read_config_word(pdev, ua, &ultra);
192
193 /* Mask timing bits */
194 ultra &= ~0x3F;
195 mode &= ~(0x03 << port_shift);
196
197 /* Extract scsc */
198 scsc = (scsc & 0x30) ? 1: 0;
199
200 if (adev->dma_mode >= XFER_UDMA_0) {
201 multi = 0x10C1;
202 ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
203 mode |= (0x03 << port_shift);
204 } else {
205 multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
206 mode |= (0x02 << port_shift);
207 }
208 pci_write_config_byte(pdev, addr_mask, mode);
209 pci_write_config_word(pdev, ma, multi);
210 pci_write_config_word(pdev, ua, ultra);
211}
212
213static struct scsi_host_template sil680_sht = {
214 .module = THIS_MODULE,
215 .name = DRV_NAME,
216 .ioctl = ata_scsi_ioctl,
217 .queuecommand = ata_scsi_queuecmd,
218 .can_queue = ATA_DEF_QUEUE,
219 .this_id = ATA_SHT_THIS_ID,
220 .sg_tablesize = LIBATA_MAX_PRD,
221 .max_sectors = ATA_MAX_SECTORS,
222 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
223 .emulated = ATA_SHT_EMULATED,
224 .use_clustering = ATA_SHT_USE_CLUSTERING,
225 .proc_name = DRV_NAME,
226 .dma_boundary = ATA_DMA_BOUNDARY,
227 .slave_configure = ata_scsi_slave_config,
228 .bios_param = ata_std_bios_param,
229};
230
231static struct ata_port_operations sil680_port_ops = {
232 .port_disable = ata_port_disable,
233 .set_piomode = sil680_set_piomode,
234 .set_dmamode = sil680_set_dmamode,
235 .mode_filter = ata_pci_default_filter,
236 .tf_load = ata_tf_load,
237 .tf_read = ata_tf_read,
238 .check_status = ata_check_status,
239 .exec_command = ata_exec_command,
240 .dev_select = ata_std_dev_select,
241
242 .freeze = ata_bmdma_freeze,
243 .thaw = ata_bmdma_thaw,
244 .error_handler = sil680_error_handler,
245 .post_internal_cmd = ata_bmdma_post_internal_cmd,
246
247 .bmdma_setup = ata_bmdma_setup,
248 .bmdma_start = ata_bmdma_start,
249 .bmdma_stop = ata_bmdma_stop,
250 .bmdma_status = ata_bmdma_status,
251
252 .qc_prep = ata_qc_prep,
253 .qc_issue = ata_qc_issue_prot,
254 .eng_timeout = ata_eng_timeout,
255 .data_xfer = ata_pio_data_xfer,
256
257 .irq_handler = ata_interrupt,
258 .irq_clear = ata_bmdma_irq_clear,
259
260 .port_start = ata_port_start,
261 .port_stop = ata_port_stop,
262 .host_stop = ata_host_stop
263};
264
265static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
266{
267 static struct ata_port_info info = {
268 .sht = &sil680_sht,
269 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
270 .pio_mask = 0x1f,
271 .mwdma_mask = 0x07,
272 .udma_mask = 0x7f,
273 .port_ops = &sil680_port_ops
274 };
275 static struct ata_port_info info_slow = {
276 .sht = &sil680_sht,
277 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
278 .pio_mask = 0x1f,
279 .mwdma_mask = 0x07,
280 .udma_mask = 0x3f,
281 .port_ops = &sil680_port_ops
282 };
283 static struct ata_port_info *port_info[2] = {&info, &info};
284 static int printed_version;
285 u32 class_rev = 0;
286 u8 tmpbyte = 0;
287
288 if (!printed_version++)
289 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
290
291 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
292 class_rev &= 0xff;
293 /* FIXME: double check */
294 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
295
296 pci_write_config_byte(pdev, 0x80, 0x00);
297 pci_write_config_byte(pdev, 0x84, 0x00);
298
299 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
300
301 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
302 tmpbyte & 1, tmpbyte & 0x30);
303
304 switch(tmpbyte & 0x30) {
305 case 0x00:
306 /* 133 clock attempt to force it on */
307 pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
308 break;
309 case 0x30:
310 /* if clocking is disabled */
311 /* 133 clock attempt to force it on */
312 pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
313 break;
314 case 0x10:
315 /* 133 already */
316 break;
317 case 0x20:
318 /* BIOS set PCI x2 clocking */
319 break;
320 }
321
322 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
323 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
324 tmpbyte & 1, tmpbyte & 0x30);
325 if ((tmpbyte & 0x30) == 0)
326 port_info[0] = port_info[1] = &info_slow;
327
328 pci_write_config_byte(pdev, 0xA1, 0x72);
329 pci_write_config_word(pdev, 0xA2, 0x328A);
330 pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
331 pci_write_config_dword(pdev, 0xA8, 0x43924392);
332 pci_write_config_dword(pdev, 0xAC, 0x40094009);
333 pci_write_config_byte(pdev, 0xB1, 0x72);
334 pci_write_config_word(pdev, 0xB2, 0x328A);
335 pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
336 pci_write_config_dword(pdev, 0xB8, 0x43924392);
337 pci_write_config_dword(pdev, 0xBC, 0x40094009);
338
339 switch(tmpbyte & 0x30) {
340 case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
341 case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
342 case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
343 /* This last case is _NOT_ ok */
344 case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
345 return -EIO;
346 }
347 return ata_pci_init_one(pdev, port_info, 2);
348}
349
350static const struct pci_device_id sil680[] = {
351 { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), },
352 { 0, },
353};
354
355static struct pci_driver sil680_pci_driver = {
356 .name = DRV_NAME,
357 .id_table = sil680,
358 .probe = sil680_init_one,
359 .remove = ata_pci_remove_one
360};
361
362static int __init sil680_init(void)
363{
364 return pci_register_driver(&sil680_pci_driver);
365}
366
367
368static void __exit sil680_exit(void)
369{
370 pci_unregister_driver(&sil680_pci_driver);
371}
372
373
374MODULE_AUTHOR("Alan Cox");
375 MODULE_DESCRIPTION("low-level driver for SIL680 PATA");
376MODULE_LICENSE("GPL");
377MODULE_DEVICE_TABLE(pci, sil680);
378MODULE_VERSION(DRV_VERSION);
379
380module_init(sil680_init);
381module_exit(sil680_exit);
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
new file mode 100644
index 000000000000..2e555168b431
--- /dev/null
+++ b/drivers/ata/pata_sis.c
@@ -0,0 +1,1034 @@
1/*
2 * pata_sis.c - SiS ATA driver
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * Based upon linux/drivers/ide/pci/sis5513.c
7 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
9 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
10 * SiS Taiwan : for direct support and hardware.
11 * Daniela Engert : for initial ATA100 advice and numerous others.
12 * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
13 * for checking code correctness, providing patches.
14 * Original tests and design on the SiS620 chipset.
15 * ATA100 tests and design on the SiS735 chipset.
16 * ATA16/33 support from specs
17 * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
18 *
19 *
20 * TODO
21 * Check MWDMA on drives that don't support MWDMA speed pio cycles ?
22 * More Testing
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34#include <linux/ata.h>
35
36#define DRV_NAME "pata_sis"
37#define DRV_VERSION "0.4.3"
38
39struct sis_chipset {
40 u16 device; /* PCI host ID */
41 struct ata_port_info *info; /* Info block */
42 /* Probably add family, cable detect type etc here to clean
43 up code later */
44};
45
46/**
47 * sis_port_base - return PCI configuration base for dev
48 * @adev: device
49 *
50 * Returns the base of the PCI configuration registers for this port
51 * number.
52 */
53
54static int sis_port_base(struct ata_device *adev)
55{
56 return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno);
57}
58
59/**
60 * sis_133_pre_reset - check for 40/80 pin
61 * @ap: Port
62 *
63 * Perform cable detection for the later UDMA133 capable
64 * SiS chipset.
65 */
66
67static int sis_133_pre_reset(struct ata_port *ap)
68{
69 static const struct pci_bits sis_enable_bits[] = {
70 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
71 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
72 };
73
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 u16 tmp;
76
77 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
78 ata_port_disable(ap);
79 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
80 return 0;
81 }
82 /* The top bit of this register is the cable detect bit */
83 pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
84 if (tmp & 0x8000)
85 ap->cbl = ATA_CBL_PATA40;
86 else
87 ap->cbl = ATA_CBL_PATA80;
88
89 return ata_std_prereset(ap);
90}
91
92/**
93 * sis_133_error_handler - Probe specified port on PATA host controller
94 * @ap: Port to probe
95 *
96 * LOCKING:
97 * None (inherited from caller).
98 */
99
100static void sis_133_error_handler(struct ata_port *ap)
101{
102 ata_bmdma_drive_eh(ap, sis_133_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
103}
104
105
106/**
107 * sis_66_pre_reset - check for 40/80 pin
108 * @ap: Port
109 *
110 * Perform cable detection on the UDMA66, UDMA100 and early UDMA133
111 * SiS IDE controllers.
112 */
113
114static int sis_66_pre_reset(struct ata_port *ap)
115{
116 static const struct pci_bits sis_enable_bits[] = {
117 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
118 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
119 };
120
121 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
122 u8 tmp;
123
124 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
125 ata_port_disable(ap);
126 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
127 return 0;
128 }
129 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */
130 pci_read_config_byte(pdev, 0x48, &tmp);
131 tmp >>= ap->port_no;
132 if (tmp & 0x10)
133 ap->cbl = ATA_CBL_PATA40;
134 else
135 ap->cbl = ATA_CBL_PATA80;
136
137 return ata_std_prereset(ap);
138}
139
140/**
141 * sis_66_error_handler - Probe specified port on PATA host controller
142 * @ap: Port to probe
143 *
144 *
145 * LOCKING:
146 * None (inherited from caller).
147 */
148
149static void sis_66_error_handler(struct ata_port *ap)
150{
151 ata_bmdma_drive_eh(ap, sis_66_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
152}
153
154/**
155 * sis_old_pre_reset - probe begin
156 * @ap: ATA port
157 *
158 * Set up cable type and use generic probe init
159 */
160
161static int sis_old_pre_reset(struct ata_port *ap)
162{
163 static const struct pci_bits sis_enable_bits[] = {
164 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
165 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
166 };
167
168 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
169
170 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
171 ata_port_disable(ap);
172 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
173 return 0;
174 }
175 ap->cbl = ATA_CBL_PATA40;
176 return ata_std_prereset(ap);
177}
178
179
180/**
181 * sis_old_error_handler - Probe specified port on PATA host controller
182 * @ap: Port to probe
183 *
184 * LOCKING:
185 * None (inherited from caller).
186 */
187
188static void sis_old_error_handler(struct ata_port *ap)
189{
190 ata_bmdma_drive_eh(ap, sis_old_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
191}
192
193/**
194 * sis_set_fifo - Set RWP fifo bits for this device
195 * @ap: Port
196 * @adev: Device
197 *
198 * SIS chipsets implement prefetch/postwrite bits for each device
199 * on both channels. This functionality is not ATAPI compatible and
200 * must be configured according to the class of device present
201 */
202
203static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
204{
205 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
206 u8 fifoctrl;
207 u8 mask = 0x11;
208
209 mask <<= (2 * ap->port_no);
210 mask <<= adev->devno;
211
212 /* This holds various bits including the FIFO control */
213 pci_read_config_byte(pdev, 0x4B, &fifoctrl);
214 fifoctrl &= ~mask;
215
216 /* Enable for ATA (disk) only */
217 if (adev->class == ATA_DEV_ATA)
218 fifoctrl |= mask;
219 pci_write_config_byte(pdev, 0x4B, fifoctrl);
220}
221
222/**
223 * sis_old_set_piomode - Initialize host controller PATA PIO timings
224 * @ap: Port whose timings we are configuring
225 * @adev: Device we are configuring for.
226 *
227 * Set PIO mode for device, in host controller PCI config space. This
228 * function handles PIO set up for all chips that are pre ATA100 and
229 * also early ATA100 devices.
230 *
231 * LOCKING:
232 * None (inherited from caller).
233 */
234
235static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
236{
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238 int port = sis_port_base(adev);
239 u8 t1, t2;
240 int speed = adev->pio_mode - XFER_PIO_0;
241
242 const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
243 const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
244
245 sis_set_fifo(ap, adev);
246
247 pci_read_config_byte(pdev, port, &t1);
248 pci_read_config_byte(pdev, port + 1, &t2);
249
250 t1 &= ~0x0F; /* Clear active/recovery timings */
251 t2 &= ~0x07;
252
253 t1 |= active[speed];
254 t2 |= recovery[speed];
255
256 pci_write_config_byte(pdev, port, t1);
257 pci_write_config_byte(pdev, port + 1, t2);
258}
259
260/**
261 * sis_100_set_piomode - Initialize host controller PATA PIO timings
262 * @ap: Port whose timings we are configuring
263 * @adev: Device we are configuring for.
264 *
265 * Set PIO mode for device, in host controller PCI config space. This
266 * function handles PIO set up for ATA100 devices and early ATA133.
267 *
268 * LOCKING:
269 * None (inherited from caller).
270 */
271
272static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
273{
274 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
275 int port = sis_port_base(adev);
276 int speed = adev->pio_mode - XFER_PIO_0;
277
278 const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
279
280 sis_set_fifo(ap, adev);
281
282 pci_write_config_byte(pdev, port, actrec[speed]);
283}
284
285/**
286 * sis_133_set_piomode - Initialize host controller PATA PIO timings
287 * @ap: Port whose timings we are configuring
288 * @adev: Device we are configuring for.
289 *
290 * Set PIO mode for device, in host controller PCI config space. This
291 * function handles PIO set up for the later ATA133 devices.
292 *
293 * LOCKING:
294 * None (inherited from caller).
295 */
296
297static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
298{
299 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
300 int port = 0x40;
301 u32 t1;
302 u32 reg54;
303 int speed = adev->pio_mode - XFER_PIO_0;
304
305 const u32 timing133[] = {
306 0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
307 0x0C266000,
308 0x04263000,
309 0x0C0A3000,
310 0x05093000
311 };
312 const u32 timing100[] = {
313 0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
314 0x091C4000,
315 0x031C2000,
316 0x09072000,
317 0x04062000
318 };
319
320 sis_set_fifo(ap, adev);
321
322 /* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
323 pci_read_config_dword(pdev, 0x54, &reg54);
324 if (reg54 & 0x40000000)
325 port = 0x70;
326 port += 8 * ap->port_no + 4 * adev->devno;
327
328 pci_read_config_dword(pdev, port, &t1);
329 t1 &= 0xC0C00FFF; /* Mask out timing */
330
331 if (t1 & 0x08) /* 100 or 133 ? */
332 t1 |= timing133[speed];
333 else
334 t1 |= timing100[speed];
335 pci_write_config_dword(pdev, port, t1);
336}
337
338/**
339 * sis_old_set_dmamode - Initialize host controller PATA DMA timings
340 * @ap: Port whose timings we are configuring
341 * @adev: Device to program
342 *
343 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
344 * Handles pre UDMA and UDMA33 devices. Supports MWDMA as well unlike
345 * the old ide/pci driver.
346 *
347 * LOCKING:
348 * None (inherited from caller).
349 */
350
351static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
352{
353 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
354 int speed = adev->dma_mode - XFER_MW_DMA_0;
355 int drive_pci = sis_port_base(adev);
356 u16 timing;
357
358 const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
359 const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
360
361 pci_read_config_word(pdev, drive_pci, &timing);
362
363 if (adev->dma_mode < XFER_UDMA_0) {
364 /* bits 3-0 hold recovery timing bits 8-10 active timing and
365 the higher bits are dependent on the device */
366 timing &= ~0x870F;
367 timing |= mwdma_bits[speed];
368 } else {
369 /* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
370 speed = adev->dma_mode - XFER_UDMA_0;
371 timing &= ~0x6000;
372 timing |= udma_bits[speed];
373 }
374 pci_write_config_word(pdev, drive_pci, timing);
375 }
376
377/**
378 * sis_66_set_dmamode - Initialize host controller PATA DMA timings
379 * @ap: Port whose timings we are configuring
380 * @adev: Device to program
381 *
382 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
383 * Handles UDMA66 and early UDMA100 devices. Supports MWDMA as well unlike
384 * the old ide/pci driver.
385 *
386 * LOCKING:
387 * None (inherited from caller).
388 */
389
390static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
391{
392 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
393 int speed = adev->dma_mode - XFER_MW_DMA_0;
394 int drive_pci = sis_port_base(adev);
395 u16 timing;
396
397 const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
398 const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
399
400 pci_read_config_word(pdev, drive_pci, &timing);
401
402 if (adev->dma_mode < XFER_UDMA_0) {
403 /* bits 3-0 hold recovery timing bits 8-10 active timing and
404 the higher bits are dependent on the device, bit 15 udma */
405 timing &= ~ 0x870F;
406 timing |= mwdma_bits[speed];
407 } else {
408 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
409 speed = adev->dma_mode - XFER_UDMA_0;
410 timing &= ~0x6000;
411 timing |= udma_bits[speed];
412 }
413 pci_write_config_word(pdev, drive_pci, timing);
414}
415
416/**
417 * sis_100_set_dmamode - Initialize host controller PATA DMA timings
418 * @ap: Port whose timings we are configuring
419 * @adev: Device to program
420 *
421 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
422 * Handles the UDMA100 capable chipsets.
423 *
424 * LOCKING:
425 * None (inherited from caller).
426 */
427
428static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
429{
430 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
431 int speed = adev->dma_mode - XFER_MW_DMA_0;
432 int drive_pci = sis_port_base(adev);
433 u16 timing;
434
435 const u16 udma_bits[] = { 0x8B00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100};
436
437 pci_read_config_word(pdev, drive_pci, &timing);
438
439 if (adev->dma_mode < XFER_UDMA_0) {
440 /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
441 } else {
442 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
443 speed = adev->dma_mode - XFER_UDMA_0;
444 timing &= ~0x0F00;
445 timing |= udma_bits[speed];
446 }
447 pci_write_config_word(pdev, drive_pci, timing);
448}
449
450/**
451 * sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
452 * @ap: Port whose timings we are configuring
453 * @adev: Device to program
454 *
455 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
456 * Handles early SiS 961 bridges. UDMA timing only; MWDMA setup
457 * is not yet implemented (no data sheet).
458 *
459 * LOCKING:
460 * None (inherited from caller).
461 */
462
463static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
464{
465 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
466 int speed = adev->dma_mode - XFER_MW_DMA_0;
467 int drive_pci = sis_port_base(adev);
468 u16 timing;
469
470 const u16 udma_bits[] = { 0x8F00, 0x8A00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100};
471
472 pci_read_config_word(pdev, drive_pci, &timing);
473
474 if (adev->dma_mode < XFER_UDMA_0) {
475 /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
476 } else {
477 /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
478 speed = adev->dma_mode - XFER_UDMA_0;
479 timing &= ~0x0F00;
480 timing |= udma_bits[speed];
481 }
482 pci_write_config_word(pdev, drive_pci, timing);
483}
484
485/**
486 * sis_133_set_dmamode - Initialize host controller PATA DMA timings
487 * @ap: Port whose timings we are configuring
488 * @adev: Device to program
489 *
490 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
491 * Handles the later ATA133 capable bridges. UDMA timing only;
492 * MWDMA setup is not yet implemented (no data sheet).
493 *
494 * LOCKING:
495 * None (inherited from caller).
496 */
497
498static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
499{
500 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
501 int speed = adev->dma_mode - XFER_MW_DMA_0;
502 int port = 0x40;
503 u32 t1;
504 u32 reg54;
505
506 /* bits 4-7: cycle time, bits 8-11: cvs time */
507 const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
508 const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
509
510 /* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
511 pci_read_config_dword(pdev, 0x54, &reg54);
512 if (reg54 & 0x40000000)
513 port = 0x70;
514 port += (8 * ap->port_no) + (4 * adev->devno);
515
516 pci_read_config_dword(pdev, port, &t1);
517
518 if (adev->dma_mode < XFER_UDMA_0) {
519 t1 &= ~0x00000004;
520 /* FIXME: need data sheet to add MWDMA here. Also lacking on
521 ide/pci driver */
522 } else {
523 speed = adev->dma_mode - XFER_UDMA_0;
524 /* if & 8 no UDMA133 - need info for ... */
525 t1 &= ~0x00000FF0;
526 t1 |= 0x00000004;
527 if (t1 & 0x08)
528 t1 |= timing_u133[speed];
529 else
530 t1 |= timing_u100[speed];
531 }
532 pci_write_config_dword(pdev, port, t1);
533}
534
535static struct scsi_host_template sis_sht = {
536 .module = THIS_MODULE,
537 .name = DRV_NAME,
538 .ioctl = ata_scsi_ioctl,
539 .queuecommand = ata_scsi_queuecmd,
540 .can_queue = ATA_DEF_QUEUE,
541 .this_id = ATA_SHT_THIS_ID,
542 .sg_tablesize = LIBATA_MAX_PRD,
543 .max_sectors = ATA_MAX_SECTORS,
544 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
545 .emulated = ATA_SHT_EMULATED,
546 .use_clustering = ATA_SHT_USE_CLUSTERING,
547 .proc_name = DRV_NAME,
548 .dma_boundary = ATA_DMA_BOUNDARY,
549 .slave_configure = ata_scsi_slave_config,
550 .bios_param = ata_std_bios_param,
551};
552
553static const struct ata_port_operations sis_133_ops = {
554 .port_disable = ata_port_disable,
555 .set_piomode = sis_133_set_piomode,
556 .set_dmamode = sis_133_set_dmamode,
557 .mode_filter = ata_pci_default_filter,
558
559 .tf_load = ata_tf_load,
560 .tf_read = ata_tf_read,
561 .check_status = ata_check_status,
562 .exec_command = ata_exec_command,
563 .dev_select = ata_std_dev_select,
564
565 .freeze = ata_bmdma_freeze,
566 .thaw = ata_bmdma_thaw,
567 .error_handler = sis_133_error_handler,
568 .post_internal_cmd = ata_bmdma_post_internal_cmd,
569
570 .bmdma_setup = ata_bmdma_setup,
571 .bmdma_start = ata_bmdma_start,
572 .bmdma_stop = ata_bmdma_stop,
573 .bmdma_status = ata_bmdma_status,
574 .qc_prep = ata_qc_prep,
575 .qc_issue = ata_qc_issue_prot,
576 .data_xfer = ata_pio_data_xfer,
577
578 .eng_timeout = ata_eng_timeout,
579
580 .irq_handler = ata_interrupt,
581 .irq_clear = ata_bmdma_irq_clear,
582
583 .port_start = ata_port_start,
584 .port_stop = ata_port_stop,
585 .host_stop = ata_host_stop,
586};
587
588static const struct ata_port_operations sis_133_early_ops = {
589 .port_disable = ata_port_disable,
590 .set_piomode = sis_100_set_piomode,
591 .set_dmamode = sis_133_early_set_dmamode,
592 .mode_filter = ata_pci_default_filter,
593
594 .tf_load = ata_tf_load,
595 .tf_read = ata_tf_read,
596 .check_status = ata_check_status,
597 .exec_command = ata_exec_command,
598 .dev_select = ata_std_dev_select,
599
600 .freeze = ata_bmdma_freeze,
601 .thaw = ata_bmdma_thaw,
602 .error_handler = sis_66_error_handler,
603 .post_internal_cmd = ata_bmdma_post_internal_cmd,
604
605 .bmdma_setup = ata_bmdma_setup,
606 .bmdma_start = ata_bmdma_start,
607 .bmdma_stop = ata_bmdma_stop,
608 .bmdma_status = ata_bmdma_status,
609 .qc_prep = ata_qc_prep,
610 .qc_issue = ata_qc_issue_prot,
611 .data_xfer = ata_pio_data_xfer,
612
613 .eng_timeout = ata_eng_timeout,
614
615 .irq_handler = ata_interrupt,
616 .irq_clear = ata_bmdma_irq_clear,
617
618 .port_start = ata_port_start,
619 .port_stop = ata_port_stop,
620 .host_stop = ata_host_stop,
621};
622
623static const struct ata_port_operations sis_100_ops = {
624 .port_disable = ata_port_disable,
625 .set_piomode = sis_100_set_piomode,
626 .set_dmamode = sis_100_set_dmamode,
627 .mode_filter = ata_pci_default_filter,
628
629 .tf_load = ata_tf_load,
630 .tf_read = ata_tf_read,
631 .check_status = ata_check_status,
632 .exec_command = ata_exec_command,
633 .dev_select = ata_std_dev_select,
634
635 .freeze = ata_bmdma_freeze,
636 .thaw = ata_bmdma_thaw,
637 .error_handler = sis_66_error_handler,
638 .post_internal_cmd = ata_bmdma_post_internal_cmd,
639
640
641 .bmdma_setup = ata_bmdma_setup,
642 .bmdma_start = ata_bmdma_start,
643 .bmdma_stop = ata_bmdma_stop,
644 .bmdma_status = ata_bmdma_status,
645 .qc_prep = ata_qc_prep,
646 .qc_issue = ata_qc_issue_prot,
647 .data_xfer = ata_pio_data_xfer,
648
649 .eng_timeout = ata_eng_timeout,
650
651 .irq_handler = ata_interrupt,
652 .irq_clear = ata_bmdma_irq_clear,
653
654 .port_start = ata_port_start,
655 .port_stop = ata_port_stop,
656 .host_stop = ata_host_stop,
657};
658
659static const struct ata_port_operations sis_66_ops = {
660 .port_disable = ata_port_disable,
661 .set_piomode = sis_old_set_piomode,
662 .set_dmamode = sis_66_set_dmamode,
663 .mode_filter = ata_pci_default_filter,
664
665 .tf_load = ata_tf_load,
666 .tf_read = ata_tf_read,
667 .check_status = ata_check_status,
668 .exec_command = ata_exec_command,
669 .dev_select = ata_std_dev_select,
670
671 .freeze = ata_bmdma_freeze,
672 .thaw = ata_bmdma_thaw,
673 .error_handler = sis_66_error_handler,
674 .post_internal_cmd = ata_bmdma_post_internal_cmd,
675
676 .bmdma_setup = ata_bmdma_setup,
677 .bmdma_start = ata_bmdma_start,
678 .bmdma_stop = ata_bmdma_stop,
679 .bmdma_status = ata_bmdma_status,
680 .qc_prep = ata_qc_prep,
681 .qc_issue = ata_qc_issue_prot,
682 .data_xfer = ata_pio_data_xfer,
683
684 .eng_timeout = ata_eng_timeout,
685
686 .irq_handler = ata_interrupt,
687 .irq_clear = ata_bmdma_irq_clear,
688
689 .port_start = ata_port_start,
690 .port_stop = ata_port_stop,
691 .host_stop = ata_host_stop,
692};
693
694static const struct ata_port_operations sis_old_ops = {
695 .port_disable = ata_port_disable,
696 .set_piomode = sis_old_set_piomode,
697 .set_dmamode = sis_old_set_dmamode,
698 .mode_filter = ata_pci_default_filter,
699
700 .tf_load = ata_tf_load,
701 .tf_read = ata_tf_read,
702 .check_status = ata_check_status,
703 .exec_command = ata_exec_command,
704 .dev_select = ata_std_dev_select,
705
706 .freeze = ata_bmdma_freeze,
707 .thaw = ata_bmdma_thaw,
708 .error_handler = sis_old_error_handler,
709 .post_internal_cmd = ata_bmdma_post_internal_cmd,
710
711 .bmdma_setup = ata_bmdma_setup,
712 .bmdma_start = ata_bmdma_start,
713 .bmdma_stop = ata_bmdma_stop,
714 .bmdma_status = ata_bmdma_status,
715 .qc_prep = ata_qc_prep,
716 .qc_issue = ata_qc_issue_prot,
717 .data_xfer = ata_pio_data_xfer,
718
719 .eng_timeout = ata_eng_timeout,
720
721 .irq_handler = ata_interrupt,
722 .irq_clear = ata_bmdma_irq_clear,
723
724 .port_start = ata_port_start,
725 .port_stop = ata_port_stop,
726 .host_stop = ata_host_stop,
727};
728
729static struct ata_port_info sis_info = {
730 .sht = &sis_sht,
731 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
732 .pio_mask = 0x1f, /* pio0-4 */
733 .mwdma_mask = 0x07,
734 .udma_mask = 0,
735 .port_ops = &sis_old_ops,
736};
737static struct ata_port_info sis_info33 = {
738 .sht = &sis_sht,
739 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
740 .pio_mask = 0x1f, /* pio0-4 */
741 .mwdma_mask = 0x07,
742 .udma_mask = ATA_UDMA2, /* UDMA 33 */
743 .port_ops = &sis_old_ops,
744};
745static struct ata_port_info sis_info66 = {
746 .sht = &sis_sht,
747 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
748 .pio_mask = 0x1f, /* pio0-4 */
749 .udma_mask = ATA_UDMA4, /* UDMA 66 */
750 .port_ops = &sis_66_ops,
751};
752static struct ata_port_info sis_info100 = {
753 .sht = &sis_sht,
754 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
755 .pio_mask = 0x1f, /* pio0-4 */
756 .udma_mask = ATA_UDMA5,
757 .port_ops = &sis_100_ops,
758};
759static struct ata_port_info sis_info100_early = {
760 .sht = &sis_sht,
761 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
762 .udma_mask = ATA_UDMA5,
763 .pio_mask = 0x1f, /* pio0-4 */
764 .port_ops = &sis_66_ops,
765};
766static struct ata_port_info sis_info133 = {
767 .sht = &sis_sht,
768 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
769 .pio_mask = 0x1f, /* pio0-4 */
770 .udma_mask = ATA_UDMA6,
771 .port_ops = &sis_133_ops,
772};
773static struct ata_port_info sis_info133_early = {
774 .sht = &sis_sht,
775 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
776 .pio_mask = 0x1f, /* pio0-4 */
777 .udma_mask = ATA_UDMA6,
778 .port_ops = &sis_133_early_ops,
779};
780
781
782static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
783{
784 u16 regw;
785 u8 reg;
786
787 if (sis->info == &sis_info133) {
788 pci_read_config_word(pdev, 0x50, &regw);
789 if (regw & 0x08)
790 pci_write_config_word(pdev, 0x50, regw & ~0x08);
791 pci_read_config_word(pdev, 0x52, &regw);
792 if (regw & 0x08)
793 pci_write_config_word(pdev, 0x52, regw & ~0x08);
794 return;
795 }
796
797 if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
798 /* Fix up latency */
799 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
800 /* Set compatibility bit */
801 pci_read_config_byte(pdev, 0x49, &reg);
802 if (!(reg & 0x01))
803 pci_write_config_byte(pdev, 0x49, reg | 0x01);
804 return;
805 }
806
807 if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
808 /* Fix up latency */
809 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
810 /* Set compatibility bit */
811 pci_read_config_byte(pdev, 0x52, &reg);
812 if (!(reg & 0x04))
813 pci_write_config_byte(pdev, 0x52, reg | 0x04);
814 return;
815 }
816
817 if (sis->info == &sis_info33) {
818 pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
819 if (( reg & 0x0F ) != 0x00)
820 pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
821 /* Fall through to ATA16 fixup below */
822 }
823
824 if (sis->info == &sis_info || sis->info == &sis_info33) {
825 /* force per drive recovery and active timings
826 needed on ATA_33 and below chips */
827 pci_read_config_byte(pdev, 0x52, &reg);
828 if (!(reg & 0x08))
829 pci_write_config_byte(pdev, 0x52, reg|0x08);
830 return;
831 }
832
833 BUG();
834}
835
836/**
837 * sis_init_one - Register SiS ATA PCI device with kernel services
838 * @pdev: PCI device to register
839 * @ent: Entry in sis_pci_tbl matching with @pdev
840 *
841 * Called from kernel PCI layer. We probe for combined mode (sigh),
842 * and then hand over control to libata, for it to do the rest.
843 *
844 * LOCKING:
845 * Inherited from PCI layer (may sleep).
846 *
847 * RETURNS:
848 * Zero on success, or -ERRNO value.
849 */
850
851static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
852{
853 static int printed_version;
854 static struct ata_port_info *port_info[2];
855 struct ata_port_info *port;
856 struct pci_dev *host = NULL;
857 struct sis_chipset *chipset = NULL;
858
859 static struct sis_chipset sis_chipsets[] = {
860
861 { 0x0968, &sis_info133 },
862 { 0x0966, &sis_info133 },
863 { 0x0965, &sis_info133 },
864 { 0x0745, &sis_info100 },
865 { 0x0735, &sis_info100 },
866 { 0x0733, &sis_info100 },
867 { 0x0635, &sis_info100 },
868 { 0x0633, &sis_info100 },
869
870 { 0x0730, &sis_info100_early }, /* 100 with ATA 66 layout */
871 { 0x0550, &sis_info100_early }, /* 100 with ATA 66 layout */
872
873 { 0x0640, &sis_info66 },
874 { 0x0630, &sis_info66 },
875 { 0x0620, &sis_info66 },
876 { 0x0540, &sis_info66 },
877 { 0x0530, &sis_info66 },
878
879 { 0x5600, &sis_info33 },
880 { 0x5598, &sis_info33 },
881 { 0x5597, &sis_info33 },
882 { 0x5591, &sis_info33 },
883 { 0x5582, &sis_info33 },
884 { 0x5581, &sis_info33 },
885
886 { 0x5596, &sis_info },
887 { 0x5571, &sis_info },
888 { 0x5517, &sis_info },
889 { 0x5511, &sis_info },
890
891 {0}
892 };
893 static struct sis_chipset sis133_early = {
894 0x0, &sis_info133_early
895 };
896 static struct sis_chipset sis133 = {
897 0x0, &sis_info133
898 };
899 static struct sis_chipset sis100_early = {
900 0x0, &sis_info100_early
901 };
902 static struct sis_chipset sis100 = {
903 0x0, &sis_info100
904 };
905
906 if (!printed_version++)
907 dev_printk(KERN_DEBUG, &pdev->dev,
908 "version " DRV_VERSION "\n");
909
910 /* We have to find the bridge first */
911
912 for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
913 host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
914 if (host != NULL) {
915 if (chipset->device == 0x630) { /* SIS630 */
916 u8 host_rev;
917 pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
918 if (host_rev >= 0x30) /* 630 ET */
919 chipset = &sis100_early;
920 }
921 break;
922 }
923 }
924
925 /* Look for concealed bridges */
926 if (host == NULL) {
927 /* Second check */
928 u32 idemisc;
929 u16 trueid;
930
931 /* Disable ID masking and register remapping then
932 see what the real ID is */
933
934 pci_read_config_dword(pdev, 0x54, &idemisc);
935 pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
936 pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
937 pci_write_config_dword(pdev, 0x54, idemisc);
938
939 switch(trueid) {
940 case 0x5518: /* SIS 962/963 */
941 chipset = &sis133;
942 if ((idemisc & 0x40000000) == 0) {
943 pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
944 printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
945 }
946 break;
947 case 0x0180: /* SIS 965/965L */
948 chipset = &sis133;
949 break;
950 case 0x1180: /* SIS 966/966L */
951 chipset = &sis133;
952 break;
953 }
954 }
955
956 /* Further check */
957 if (chipset == NULL) {
958 struct pci_dev *lpc_bridge;
959 u16 trueid;
960 u8 prefctl;
961 u8 idecfg;
962 u8 sbrev;
963
964 /* Try the second unmasking technique */
965 pci_read_config_byte(pdev, 0x4a, &idecfg);
966 pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
967 pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
968 pci_write_config_byte(pdev, 0x4a, idecfg);
969
970 switch(trueid) {
971 case 0x5517:
972 lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
973 if (lpc_bridge == NULL)
974 break;
975 pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
976 pci_read_config_byte(pdev, 0x49, &prefctl);
977 pci_dev_put(lpc_bridge);
978
979 if (sbrev == 0x10 && (prefctl & 0x80)) {
980 chipset = &sis133_early;
981 break;
982 }
983 chipset = &sis100;
984 break;
985 }
986 }
987 pci_dev_put(host);
988
989 /* No chipset info, no support */
990 if (chipset == NULL)
991 return -ENODEV;
992
993 port = chipset->info;
994 port->private_data = chipset;
995
996 sis_fixup(pdev, chipset);
997
998 port_info[0] = port_info[1] = port;
999 return ata_pci_init_one(pdev, port_info, 2);
1000}
1001
1002static const struct pci_device_id sis_pci_tbl[] = {
1003 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5513), }, /* SiS 5513 */
1004 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5518), }, /* SiS 5518 */
1005 { }
1006};
1007
1008static struct pci_driver sis_pci_driver = {
1009 .name = DRV_NAME,
1010 .id_table = sis_pci_tbl,
1011 .probe = sis_init_one,
1012 .remove = ata_pci_remove_one,
1013};
1014
1015static int __init sis_init(void)
1016{
1017 return pci_register_driver(&sis_pci_driver);
1018}
1019
1020static void __exit sis_exit(void)
1021{
1022 pci_unregister_driver(&sis_pci_driver);
1023}
1024
1025
1026module_init(sis_init);
1027module_exit(sis_exit);
1028
1029MODULE_AUTHOR("Alan Cox");
1030MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
1031MODULE_LICENSE("GPL");
1032MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
1033MODULE_VERSION(DRV_VERSION);
1034
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
new file mode 100644
index 000000000000..f8499786917a
--- /dev/null
+++ b/drivers/ata/pata_sl82c105.c
@@ -0,0 +1,388 @@
1/*
2 * pata_sl82c105.c - SL82C105 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Based in part on linux/drivers/ide/pci/sl82c105.c
7 * SL82C105/Winbond 553 IDE driver
8 *
9 * and in part on the documentation and errata sheet
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16#include <linux/blkdev.h>
17#include <linux/delay.h>
18#include <scsi/scsi_host.h>
19#include <linux/libata.h>
20
21#define DRV_NAME "pata_sl82c105"
22#define DRV_VERSION "0.2.2"
23
24enum {
25 /*
26 * SL82C105 PCI config register 0x40 bits.
27 */
28 CTRL_IDE_IRQB = (1 << 30),
29 CTRL_IDE_IRQA = (1 << 28),
30 CTRL_LEGIRQ = (1 << 11),
31 CTRL_P1F16 = (1 << 5),
32 CTRL_P1EN = (1 << 4),
33 CTRL_P0F16 = (1 << 1),
34 CTRL_P0EN = (1 << 0)
35};
36
37/**
38 * sl82c105_pre_reset - probe begin
39 * @ap: ATA port
40 *
41 * Set up cable type and use generic probe init
42 */
43
44static int sl82c105_pre_reset(struct ata_port *ap)
45{
46 static const struct pci_bits sl82c105_enable_bits[] = {
47 { 0x40, 1, 0x01, 0x01 },
48 { 0x40, 1, 0x10, 0x10 }
49 };
50 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
51
52 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) {
53 ata_port_disable(ap);
54 dev_printk(KERN_INFO, &pdev->dev, "port disabled. ignoring.\n");
55 return 0;
56 }
57 ap->cbl = ATA_CBL_PATA40;
58 return ata_std_prereset(ap);
59}
60
61
62static void sl82c105_error_handler(struct ata_port *ap)
63{
64 ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
65}
66
67
68/**
69 * sl82c105_configure_piomode - set chip PIO timing
70 * @ap: ATA interface
71 * @adev: ATA device
72 * @pio: PIO mode
73 *
74 * Called to do the PIO mode setup. Our timing registers are shared
75 * so a configure_dmamode call will undo any work we do here and vice
76 * versa
77 */
78
79static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
80{
81 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
82 static u16 pio_timing[5] = {
83 0x50D, 0x407, 0x304, 0x242, 0x240
84 };
85 u16 dummy;
86 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
87
88 pci_write_config_word(pdev, timing, pio_timing[pio]);
89 /* Can we lose this oddity of the old driver */
90 pci_read_config_word(pdev, timing, &dummy);
91}
92
93/**
94 * sl82c105_set_piomode - set initial PIO mode data
95 * @ap: ATA interface
96 * @adev: ATA device
97 *
98 * Called to do the PIO mode setup. Our timing registers are shared
99 * but we want to set the PIO timing by default.
100 */
101
102static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
103{
104 sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
105}
106
107/**
108 * sl82c105_configure_dmamode - set DMA mode in chip
109 * @ap: ATA interface
110 * @adev: ATA device
111 *
112 * Load DMA cycle times into the chip ready for a DMA transfer
113 * to occur.
114 */
115
116static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
117{
118 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
119 static u16 dma_timing[3] = {
120 0x707, 0x201, 0x200
121 };
122 u16 dummy;
123 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
124 int dma = adev->dma_mode - XFER_MW_DMA_0;
125
126 pci_write_config_word(pdev, timing, dma_timing[dma]);
127 /* Can we lose this oddity of the old driver */
128 pci_read_config_word(pdev, timing, &dummy);
129}
130
131/**
132 * sl82c105_set_dmamode - set initial DMA mode data
133 * @ap: ATA interface
134 * @adev: ATA device
135 *
136 * Called to do the DMA mode setup. This replaces the PIO timings
137 * for the device in question. Set appropriate PIO timings not DMA
138 * timings at this point.
139 */
140
141static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
142{
143 switch(adev->dma_mode) {
144 case XFER_MW_DMA_0:
145 sl82c105_configure_piomode(ap, adev, 1);
146 break;
147 case XFER_MW_DMA_1:
148 sl82c105_configure_piomode(ap, adev, 3);
149 break;
150 case XFER_MW_DMA_2:
151 sl82c105_configure_piomode(ap, adev, 3);
152 break;
153 default:
154 BUG();
155 }
156}
157
158/**
159 * sl82c105_reset_engine - Reset the DMA engine
160 * @ap: ATA interface
161 *
162 * The sl82c105 has some serious problems with the DMA engine
163 * when transfers don't run as expected or ATAPI is used. The
164 * recommended fix is to reset the engine before each use via a chip
165 * test register.
166 */
167
168static void sl82c105_reset_engine(struct ata_port *ap)
169{
170 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
171 u16 val;
172
173 pci_read_config_word(pdev, 0x7E, &val);
174 pci_write_config_word(pdev, 0x7E, val | 4);
175 pci_write_config_word(pdev, 0x7E, val & ~4);
176}
177
178/**
179 * sl82c105_bmdma_start - DMA engine begin
180 * @qc: ATA command
181 *
182 * Reset the DMA engine on each use as recommended by the errata
183 * document.
184 *
185 * FIXME: if we switch clock at BMDMA start/end we might get better
186 * PIO performance on DMA capable devices.
187 */
188
189static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
190{
191 struct ata_port *ap = qc->ap;
192
193 sl82c105_reset_engine(ap);
194
195 /* Set the clocks for DMA */
196 sl82c105_configure_dmamode(ap, qc->dev);
197 /* Activate DMA */
198 ata_bmdma_start(qc);
199}
200
201/**
202 * sl82c105_bmdma_end - DMA engine stop
203 * @qc: ATA command
204 *
205 * Reset the DMA engine on each use as recommended by the errata
206 * document.
207 *
208 * This function is also called to turn off DMA when a timeout occurs
209 * during DMA operation. In both cases we need to reset the engine,
210 * so no actual eng_timeout handler is required.
211 *
212 * We assume bmdma_stop is always called if bmdma_start was called. If
213 * not then we may need to wrap qc_issue.
214 */
215
216static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
217{
218 struct ata_port *ap = qc->ap;
219
220 ata_bmdma_stop(qc);
221 sl82c105_reset_engine(ap);
222
223 /* This will redo the initial setup of the DMA device to matching
224 PIO timings */
225 sl82c105_set_dmamode(ap, qc->dev);
226}
227
228static struct scsi_host_template sl82c105_sht = {
229 .module = THIS_MODULE,
230 .name = DRV_NAME,
231 .ioctl = ata_scsi_ioctl,
232 .queuecommand = ata_scsi_queuecmd,
233 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING,
240 .proc_name = DRV_NAME,
241 .dma_boundary = ATA_DMA_BOUNDARY,
242 .slave_configure = ata_scsi_slave_config,
243 .bios_param = ata_std_bios_param,
244};
245
246static struct ata_port_operations sl82c105_port_ops = {
247 .port_disable = ata_port_disable,
248 .set_piomode = sl82c105_set_piomode,
249 .set_dmamode = sl82c105_set_dmamode,
250 .mode_filter = ata_pci_default_filter,
251
252 .tf_load = ata_tf_load,
253 .tf_read = ata_tf_read,
254 .check_status = ata_check_status,
255 .exec_command = ata_exec_command,
256 .dev_select = ata_std_dev_select,
257
258 .error_handler = sl82c105_error_handler,
259
260 .bmdma_setup = ata_bmdma_setup,
261 .bmdma_start = sl82c105_bmdma_start,
262 .bmdma_stop = sl82c105_bmdma_stop,
263 .bmdma_status = ata_bmdma_status,
264
265 .qc_prep = ata_qc_prep,
266 .qc_issue = ata_qc_issue_prot,
267 .eng_timeout = ata_eng_timeout,
268 .data_xfer = ata_pio_data_xfer,
269
270 .irq_handler = ata_interrupt,
271 .irq_clear = ata_bmdma_irq_clear,
272
273 .port_start = ata_port_start,
274 .port_stop = ata_port_stop,
275 .host_stop = ata_host_stop
276};
277
278/**
279 * sl82c105_bridge_revision - find bridge version
280 * @pdev: PCI device for the ATA function
281 *
282 * Locates the PCI bridge associated with the ATA function and,
283 * provided it is a Winbond 553, reports the revision. If it cannot
284 * find a revision or the right device it returns -1
285 */
286
287static int sl82c105_bridge_revision(struct pci_dev *pdev)
288{
289 struct pci_dev *bridge;
290 u8 rev;
291
292 /*
293 * The bridge should be part of the same device, but function 0.
294 */
295 bridge = pci_get_slot(pdev->bus,
296 PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
297 if (!bridge)
298 return -1;
299
300 /*
301 * Make sure it is a Winbond 553 and is an ISA bridge.
302 */
303 if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
304 bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
305 bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
306 pci_dev_put(bridge);
307 return -1;
308 }
309 /*
310 * We need to find function 0's revision, not function 1
311 */
312 pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
313
314 pci_dev_put(bridge);
315 return rev;
316}
317
318
319static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
320{
321 static struct ata_port_info info_dma = {
322 .sht = &sl82c105_sht,
323 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
324 .pio_mask = 0x1f,
325 .mwdma_mask = 0x07,
326 .port_ops = &sl82c105_port_ops
327 };
328 static struct ata_port_info info_early = {
329 .sht = &sl82c105_sht,
330 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
331 .pio_mask = 0x1f,
332 .port_ops = &sl82c105_port_ops
333 };
334 static struct ata_port_info *port_info[2] = { &info_early, &info_early };
335 u32 val;
336 int rev;
337
338 rev = sl82c105_bridge_revision(dev);
339
340 if (rev == -1)
341 dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
342 else if (rev <= 5)
343 dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
344 else {
345 port_info[0] = &info_dma;
346 port_info[1] = &info_dma;
347 }
348
349 pci_read_config_dword(dev, 0x40, &val);
350 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
351 pci_write_config_dword(dev, 0x40, val);
352
353
354 return ata_pci_init_one(dev, port_info, 1); /* For now */
355}
356
357static struct pci_device_id sl82c105[] = {
358 { PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
359 { 0, },
360};
361
362static struct pci_driver sl82c105_pci_driver = {
363 .name = DRV_NAME,
364 .id_table = sl82c105,
365 .probe = sl82c105_init_one,
366 .remove = ata_pci_remove_one
367};
368
369static int __init sl82c105_init(void)
370{
371 return pci_register_driver(&sl82c105_pci_driver);
372}
373
374
375static void __exit sl82c105_exit(void)
376{
377 pci_unregister_driver(&sl82c105_pci_driver);
378}
379
380
381MODULE_AUTHOR("Alan Cox");
382MODULE_DESCRIPTION("low-level driver for Sl82c105");
383MODULE_LICENSE("GPL");
384MODULE_DEVICE_TABLE(pci, sl82c105);
385MODULE_VERSION(DRV_VERSION);
386
387module_init(sl82c105_init);
388module_exit(sl82c105_exit);
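The bridge-revision helper above relies on a common PCI idiom: the IDE function and its Winbond 83C553 ISA bridge sit in the same slot, so the driver asks for function 0 of its own devfn, sanity-checks vendor, device and class, and only then trusts the revision byte. A minimal sketch of that pattern follows, using only the PCI helpers already used above; the function name and parameters are illustrative, not part of the patch, and it assumes <linux/pci.h>.

static int sibling_bridge_revision(struct pci_dev *pdev,
				   unsigned short vendor, unsigned short device)
{
	struct pci_dev *bridge;
	u8 rev;

	/* Function 0 of the same bus/slot as the ATA function */
	bridge = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!bridge)
		return -1;

	/* Only accept the expected ISA bridge */
	if (bridge->vendor != vendor || bridge->device != device ||
	    bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
		pci_dev_put(bridge);	/* balance the reference from pci_get_slot() */
		return -1;
	}

	pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
	pci_dev_put(bridge);
	return rev;
}

The only non-obvious point is the reference counting: pci_get_slot() takes a reference on the bridge, so every exit path must drop it with pci_dev_put().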
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
new file mode 100644
index 000000000000..36f788728f3f
--- /dev/null
+++ b/drivers/ata/pata_triflex.c
@@ -0,0 +1,285 @@
1/*
2 * pata_triflex.c - Compaq PATA for new ATA layer
3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * based upon
7 *
8 * triflex.c
9 *
10 * IDE Chipset driver for the Compaq TriFlex IDE controller.
11 *
12 * Known to work with the Compaq Workstation 5x00 series.
13 *
14 * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
15 * Author: Torben Mathiasen <torben.mathiasen@hp.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 * Loosely based on the piix & svwks drivers.
31 *
32 * Documentation:
 33 *	Not publicly available.
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "pata_triflex"
46#define DRV_VERSION "0.2.5"
47
48/**
49 * triflex_probe_init - probe begin
50 * @ap: ATA port
51 *
52 * Set up cable type and use generic probe init
53 */
54
55static int triflex_probe_init(struct ata_port *ap)
56{
57 static const struct pci_bits triflex_enable_bits[] = {
58 { 0x80, 1, 0x01, 0x01 },
59 { 0x80, 1, 0x02, 0x02 }
60 };
61
62 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
63
64 if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) {
65 ata_port_disable(ap);
66 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
67 return 0;
68 }
69 ap->cbl = ATA_CBL_PATA40;
70 return ata_std_prereset(ap);
71}
72
73
74
75static void triflex_error_handler(struct ata_port *ap)
76{
77 ata_bmdma_drive_eh(ap, triflex_probe_init, ata_std_softreset, NULL, ata_std_postreset);
78}
79
80/**
81 * triflex_load_timing - timing configuration
82 * @ap: ATA interface
83 * @adev: Device on the bus
84 * @speed: speed to configure
85 *
 86 *	The Triflex has one set of timings per device per channel. This
 87 *	means we must do some switching. As the PIO and DMA timings don't
 88 *	match, we have to do some reloading, unlike PIIX devices where
 89 *	tuning tricks can avoid it.
90 */
91
92static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
93{
94 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
95 u32 timing = 0;
96 u32 triflex_timing, old_triflex_timing;
97 int channel_offset = ap->port_no ? 0x74: 0x70;
98 unsigned int is_slave = (adev->devno != 0);
99
100
101 pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
102 triflex_timing = old_triflex_timing;
103
104 switch(speed)
105 {
106 case XFER_MW_DMA_2:
107 timing = 0x0103;break;
108 case XFER_MW_DMA_1:
109 timing = 0x0203;break;
110 case XFER_MW_DMA_0:
111 timing = 0x0808;break;
112 case XFER_SW_DMA_2:
113 case XFER_SW_DMA_1:
114 case XFER_SW_DMA_0:
115 timing = 0x0F0F;break;
116 case XFER_PIO_4:
117 timing = 0x0202;break;
118 case XFER_PIO_3:
119 timing = 0x0204;break;
120 case XFER_PIO_2:
121 timing = 0x0404;break;
122 case XFER_PIO_1:
123 timing = 0x0508;break;
124 case XFER_PIO_0:
125 timing = 0x0808;break;
126 default:
127 BUG();
128 }
129 triflex_timing &= ~ (0xFFFF << (16 * is_slave));
130 triflex_timing |= (timing << (16 * is_slave));
131
132 if (triflex_timing != old_triflex_timing)
133 pci_write_config_dword(pdev, channel_offset, triflex_timing);
134}
135
136/**
137 * triflex_set_piomode - set initial PIO mode data
138 * @ap: ATA interface
139 * @adev: ATA device
140 *
141 * Use the timing loader to set up the PIO mode. We have to do this
142 * because DMA start/stop will only be called once DMA occurs. If there
143 * has been no DMA then the PIO timings are still needed.
144 */
145static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
146{
147 triflex_load_timing(ap, adev, adev->pio_mode);
148}
149
150/**
151 *	triflex_bmdma_start	-	DMA start callback
152 * @qc: Command in progress
153 *
154 *	Usually drivers set the DMA timing at the point the set_dmamode call
155 *	is made. The Triflex, however, requires that we load new timings on
156 *	the transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc).
157 * We load the DMA timings just before starting DMA and then restore
158 * the PIO timing when the DMA is finished.
159 */
160
161static void triflex_bmdma_start(struct ata_queued_cmd *qc)
162{
163 triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
164 ata_bmdma_start(qc);
165}
166
167/**
168 *	triflex_bmdma_stop	-	DMA stop callback
169 *	@qc: Command in progress
170 *
171 *	We loaded new timings in bmdma_start; as a result we need to
172 *	restore the PIO timings in bmdma_stop so that the next command
173 *	issue gets the right clock values.
174 *
175 */
176
177static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
178{
179 ata_bmdma_stop(qc);
180 triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
181}
182
183static struct scsi_host_template triflex_sht = {
184 .module = THIS_MODULE,
185 .name = DRV_NAME,
186 .ioctl = ata_scsi_ioctl,
187 .queuecommand = ata_scsi_queuecmd,
188 .can_queue = ATA_DEF_QUEUE,
189 .this_id = ATA_SHT_THIS_ID,
190 .sg_tablesize = LIBATA_MAX_PRD,
191 .max_sectors = ATA_MAX_SECTORS,
192 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
193 .emulated = ATA_SHT_EMULATED,
194 .use_clustering = ATA_SHT_USE_CLUSTERING,
195 .proc_name = DRV_NAME,
196 .dma_boundary = ATA_DMA_BOUNDARY,
197 .slave_configure = ata_scsi_slave_config,
198 .bios_param = ata_std_bios_param,
199};
200
201static struct ata_port_operations triflex_port_ops = {
202 .port_disable = ata_port_disable,
203 .set_piomode = triflex_set_piomode,
204 .mode_filter = ata_pci_default_filter,
205
206 .tf_load = ata_tf_load,
207 .tf_read = ata_tf_read,
208 .check_status = ata_check_status,
209 .exec_command = ata_exec_command,
210 .dev_select = ata_std_dev_select,
211
212 .freeze = ata_bmdma_freeze,
213 .thaw = ata_bmdma_thaw,
214 .error_handler = triflex_error_handler,
215 .post_internal_cmd = ata_bmdma_post_internal_cmd,
216
217 .bmdma_setup = ata_bmdma_setup,
218 .bmdma_start = triflex_bmdma_start,
219 .bmdma_stop = triflex_bmdma_stop,
220 .bmdma_status = ata_bmdma_status,
221
222 .qc_prep = ata_qc_prep,
223 .qc_issue = ata_qc_issue_prot,
224 .eng_timeout = ata_eng_timeout,
225 .data_xfer = ata_pio_data_xfer,
226
227 .irq_handler = ata_interrupt,
228 .irq_clear = ata_bmdma_irq_clear,
229
230 .port_start = ata_port_start,
231 .port_stop = ata_port_stop,
232 .host_stop = ata_host_stop
233};
234
235static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
236{
237 static struct ata_port_info info = {
238 .sht = &triflex_sht,
239 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
240 .pio_mask = 0x1f,
241 .mwdma_mask = 0x07,
242 .port_ops = &triflex_port_ops
243 };
244 static struct ata_port_info *port_info[2] = { &info, &info };
245 static int printed_version;
246
247 if (!printed_version++)
248 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
249
250 return ata_pci_init_one(dev, port_info, 2);
251}
252
253static const struct pci_device_id triflex[] = {
254 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
256 { 0, },
257};
258
259static struct pci_driver triflex_pci_driver = {
260 .name = DRV_NAME,
261 .id_table = triflex,
262 .probe = triflex_init_one,
263 .remove = ata_pci_remove_one
264};
265
266static int __init triflex_init(void)
267{
268 return pci_register_driver(&triflex_pci_driver);
269}
270
271
272static void __exit triflex_exit(void)
273{
274 pci_unregister_driver(&triflex_pci_driver);
275}
276
277
278MODULE_AUTHOR("Alan Cox");
279MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
280MODULE_LICENSE("GPL");
281MODULE_DEVICE_TABLE(pci, triflex);
282MODULE_VERSION(DRV_VERSION);
283
284module_init(triflex_init);
285module_exit(triflex_exit);
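A note on the register layout behind triflex_load_timing() above: each channel owns one 32-bit configuration dword (0x70 for the primary channel, 0x74 for the secondary), with the master's 16-bit timing word in the low half and the slave's in the high half, which is exactly what the 16 * is_slave shift implements. Below is a small stand-alone sketch of that mask/shift arithmetic with a worked example; the helper name and the main() harness are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the read-modify-write in triflex_load_timing():
 * low 16 bits = master (devno 0), high 16 bits = slave (devno 1). */
static uint32_t merge_triflex_timing(uint32_t reg, int is_slave, uint16_t timing)
{
	reg &= ~(0xFFFFu << (16 * is_slave));		/* clear this drive's half */
	reg |= (uint32_t)timing << (16 * is_slave);	/* insert the new value */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	/* Master tuned for PIO4 (0x0202), slave for MWDMA2 (0x0103),
	 * per-mode values taken from the switch above. */
	reg = merge_triflex_timing(reg, 0, 0x0202);
	reg = merge_triflex_timing(reg, 1, 0x0103);
	printf("channel dword = 0x%08X\n", (unsigned int)reg);	/* 0x01030202 */
	return 0;
}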
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
new file mode 100644
index 000000000000..1b2ff133b163
--- /dev/null
+++ b/drivers/ata/pata_via.c
@@ -0,0 +1,568 @@
1/*
2 * pata_via.c - VIA PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 *
6 * Documentation
7 * Most chipset documentation available under NDA only
8 *
9 * VIA version guide
10 * VIA VT82C561 - early design, uses ata_generic currently
 11 *  VIA VT82C561	-	early design, uses ata_generic currently
 12 *  VIA VT82C576	-	MWDMA, 33MHz
 13 *  VIA VT82C586	-	MWDMA, 33MHz
14 * VIA VT82C586b - UDMA33
15 * VIA VT82C596a - Nonfunctional UDMA66
16 * VIA VT82C596b - Working UDMA66
17 * VIA VT82C686 - Nonfunctional UDMA66
18 * VIA VT82C686a - Working UDMA66
19 * VIA VT82C686b - Updated to UDMA100
20 * VIA VT8231 - UDMA100
21 * VIA VT8233 - UDMA100
22 * VIA VT8233a - UDMA133
23 * VIA VT8233c - UDMA100
24 * VIA VT8235 - UDMA133
25 * VIA VT8237 - UDMA133
26 *
27 * Most registers remain compatible across chips. Others start reserved
 28 *  and acquire sensible semantics if set to 1 (e.g. cable detect). A few
29 * exceptions exist, notably around the FIFO settings.
30 *
 31 *  One additional quirk of the VIA design is that, like ALi, they use few
 32 *  PCI IDs for a lot of chips.
33 *
34 * Based heavily on:
35 *
36 * Version 3.38
37 *
38 * VIA IDE driver for Linux. Supported southbridges:
39 *
40 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
41 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
42 * vt8235, vt8237
43 *
44 * Copyright (c) 2000-2002 Vojtech Pavlik
45 *
46 * Based on the work of:
47 * Michel Aubry
48 * Jeff Garzik
49 * Andre Hedrick
50
51 */
52
53#include <linux/kernel.h>
54#include <linux/module.h>
55#include <linux/pci.h>
56#include <linux/init.h>
57#include <linux/blkdev.h>
58#include <linux/delay.h>
59#include <scsi/scsi_host.h>
60#include <linux/libata.h>
61
62#define DRV_NAME "pata_via"
63#define DRV_VERSION "0.1.13"
64
65/*
66 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
67 * driver.
68 */
69
70enum {
71 VIA_UDMA = 0x007,
72 VIA_UDMA_NONE = 0x000,
73 VIA_UDMA_33 = 0x001,
74 VIA_UDMA_66 = 0x002,
75 VIA_UDMA_100 = 0x003,
76 VIA_UDMA_133 = 0x004,
77 VIA_BAD_PREQ = 0x010, /* Crashes if PREQ# till DDACK# set */
78 VIA_BAD_CLK66 = 0x020, /* 66 MHz clock doesn't work correctly */
79 VIA_SET_FIFO = 0x040, /* Needs to have FIFO split set */
80 VIA_NO_UNMASK = 0x080, /* Doesn't work with IRQ unmasking on */
81 VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
82 VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
 83	VIA_NO_ENABLES	= 0x400, /* Has no enable bits */
84};
85
86/*
87 * VIA SouthBridge chips.
88 */
89
90static const struct via_isa_bridge {
91 const char *name;
92 u16 id;
93 u8 rev_min;
94 u8 rev_max;
95 u16 flags;
96} via_isa_bridges[] = {
97 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
98 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
99 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
100 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
101 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
102 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
103 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
104 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 },
105 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 },
106 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 },
107 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 },
108 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
109 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 },
110 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
111 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
112 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
113 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
114 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
115 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
116 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
117 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
118 { NULL }
119};
120
121/**
122 * via_cable_detect - cable detection
123 * @ap: ATA port
124 *
125 *	Perform cable detection. For the VIA case the BIOS has already
126 *	done this for us. We read the values provided by the BIOS. If you
127 *	are using an 8235 in a non-PC configuration you may need to
128 *	update this code.
129 *
130 *	Hotplug also affects this.
131 */
132
133static int via_cable_detect(struct ata_port *ap) {
134 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
135 u32 ata66;
136
137 pci_read_config_dword(pdev, 0x50, &ata66);
138	/* Check both the drive cable reporting bits; we might not have
139	   two drives */
140 if (ata66 & (0x10100000 >> (16 * ap->port_no)))
141 return ATA_CBL_PATA80;
142 else
143 return ATA_CBL_PATA40;
144}
145
146static int via_pre_reset(struct ata_port *ap)
147{
148 const struct via_isa_bridge *config = ap->host->private_data;
149
150 if (!(config->flags & VIA_NO_ENABLES)) {
151 static const struct pci_bits via_enable_bits[] = {
152 { 0x40, 1, 0x02, 0x02 },
153 { 0x40, 1, 0x01, 0x01 }
154 };
155
156 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
157
158 if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) {
159 ata_port_disable(ap);
160 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
161 return 0;
162 }
163 }
164
165 if ((config->flags & VIA_UDMA) >= VIA_UDMA_66)
166 ap->cbl = via_cable_detect(ap);
167 else
168 ap->cbl = ATA_CBL_PATA40;
169 return ata_std_prereset(ap);
170}
171
172
173/**
174 * via_error_handler - reset for VIA chips
175 * @ap: ATA port
176 *
177 * Handle the reset callback for the later chips with cable detect
178 */
179
180static void via_error_handler(struct ata_port *ap)
181{
182 ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
183}
184
185/**
186 * via_do_set_mode - set initial PIO mode data
187 * @ap: ATA interface
188 * @adev: ATA device
189 * @mode: ATA mode being programmed
190 * @tdiv: Clocks per PCI clock
191 * @set_ast: Set to program address setup
192 * @udma_type: UDMA mode/format of registers
193 *
194 * Program the VIA registers for DMA and PIO modes. Uses the ata timing
195 *	support to compute the mode timings.
196 *
197 * FIXME: Hotplug will require we serialize multiple mode changes
198 * on the two channels.
199 */
200
201static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type)
202{
203 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
204 struct ata_device *peer = ata_dev_pair(adev);
205 struct ata_timing t, p;
206	static int via_clock = 33333;	/* Bus clock in kHz - ought to be tunable one day */
207 unsigned long T = 1000000000 / via_clock;
208 unsigned long UT = T/tdiv;
209 int ut;
210 int offset = 3 - (2*ap->port_no) - adev->devno;
211
212
213 /* Calculate the timing values we require */
214 ata_timing_compute(adev, mode, &t, T, UT);
215
216	/* We share 8-bit timing so we must merge the constraints */
217 if (peer) {
218 if (peer->pio_mode) {
219 ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
220 ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
221 }
222 }
223
224 /* Address setup is programmable but breaks on UDMA133 setups */
225 if (set_ast) {
226 u8 setup; /* 2 bits per drive */
227 int shift = 2 * offset;
228
229 pci_read_config_byte(pdev, 0x4C, &setup);
230 setup &= ~(3 << shift);
231 setup |= FIT(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */
232 pci_write_config_byte(pdev, 0x4C, setup);
233 }
234
235 /* Load the PIO mode bits */
236 pci_write_config_byte(pdev, 0x4F - ap->port_no,
237 ((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1));
238 pci_write_config_byte(pdev, 0x48 + offset,
239 ((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1));
240
241 /* Load the UDMA bits according to type */
242 switch(udma_type) {
243 default:
244 /* BUG() ? */
245 /* fall through */
246 case 33:
247 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03;
248 break;
249 case 66:
250 ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f;
251 break;
252 case 100:
253 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
254 break;
255 case 133:
256 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
257 break;
258 }
259 /* Set UDMA unless device is not UDMA capable */
260 if (udma_type)
261 pci_write_config_byte(pdev, 0x50 + offset, ut);
262}
263
264static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
265{
266 const struct via_isa_bridge *config = ap->host->private_data;
267 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
268 int mode = config->flags & VIA_UDMA;
269 static u8 tclock[5] = { 1, 1, 2, 3, 4 };
270 static u8 udma[5] = { 0, 33, 66, 100, 133 };
271
272 via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]);
273}
274
275static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
276{
277 const struct via_isa_bridge *config = ap->host->private_data;
278 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
279 int mode = config->flags & VIA_UDMA;
280 static u8 tclock[5] = { 1, 1, 2, 3, 4 };
281 static u8 udma[5] = { 0, 33, 66, 100, 133 };
282
283 via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
284}
285
286static struct scsi_host_template via_sht = {
287 .module = THIS_MODULE,
288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd,
291 .can_queue = ATA_DEF_QUEUE,
292 .this_id = ATA_SHT_THIS_ID,
293 .sg_tablesize = LIBATA_MAX_PRD,
294 .max_sectors = ATA_MAX_SECTORS,
295 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
296 .emulated = ATA_SHT_EMULATED,
297 .use_clustering = ATA_SHT_USE_CLUSTERING,
298 .proc_name = DRV_NAME,
299 .dma_boundary = ATA_DMA_BOUNDARY,
300 .slave_configure = ata_scsi_slave_config,
301 .bios_param = ata_std_bios_param,
302};
303
304static struct ata_port_operations via_port_ops = {
305 .port_disable = ata_port_disable,
306 .set_piomode = via_set_piomode,
307 .set_dmamode = via_set_dmamode,
308 .mode_filter = ata_pci_default_filter,
309
310 .tf_load = ata_tf_load,
311 .tf_read = ata_tf_read,
312 .check_status = ata_check_status,
313 .exec_command = ata_exec_command,
314 .dev_select = ata_std_dev_select,
315
316 .freeze = ata_bmdma_freeze,
317 .thaw = ata_bmdma_thaw,
318 .error_handler = via_error_handler,
319 .post_internal_cmd = ata_bmdma_post_internal_cmd,
320
321 .bmdma_setup = ata_bmdma_setup,
322 .bmdma_start = ata_bmdma_start,
323 .bmdma_stop = ata_bmdma_stop,
324 .bmdma_status = ata_bmdma_status,
325
326 .qc_prep = ata_qc_prep,
327 .qc_issue = ata_qc_issue_prot,
328 .eng_timeout = ata_eng_timeout,
329 .data_xfer = ata_pio_data_xfer,
330
331 .irq_handler = ata_interrupt,
332 .irq_clear = ata_bmdma_irq_clear,
333
334 .port_start = ata_port_start,
335 .port_stop = ata_port_stop,
336 .host_stop = ata_host_stop
337};
338
339static struct ata_port_operations via_port_ops_noirq = {
340 .port_disable = ata_port_disable,
341 .set_piomode = via_set_piomode,
342 .set_dmamode = via_set_dmamode,
343 .mode_filter = ata_pci_default_filter,
344
345 .tf_load = ata_tf_load,
346 .tf_read = ata_tf_read,
347 .check_status = ata_check_status,
348 .exec_command = ata_exec_command,
349 .dev_select = ata_std_dev_select,
350
351 .freeze = ata_bmdma_freeze,
352 .thaw = ata_bmdma_thaw,
353 .error_handler = via_error_handler,
354 .post_internal_cmd = ata_bmdma_post_internal_cmd,
355
356 .bmdma_setup = ata_bmdma_setup,
357 .bmdma_start = ata_bmdma_start,
358 .bmdma_stop = ata_bmdma_stop,
359 .bmdma_status = ata_bmdma_status,
360
361 .qc_prep = ata_qc_prep,
362 .qc_issue = ata_qc_issue_prot,
363 .eng_timeout = ata_eng_timeout,
364 .data_xfer = ata_pio_data_xfer_noirq,
365
366 .irq_handler = ata_interrupt,
367 .irq_clear = ata_bmdma_irq_clear,
368
369 .port_start = ata_port_start,
370 .port_stop = ata_port_stop,
371 .host_stop = ata_host_stop
372};
373
374/**
375 * via_init_one - discovery callback
376 * @pdev: PCI device ID
377 * @id: PCI table info
378 *
379 * A VIA IDE interface has been discovered. Figure out what revision
380 * and perform configuration work before handing it to the ATA layer
381 */
382
383static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
384{
385 /* Early VIA without UDMA support */
386 static struct ata_port_info via_mwdma_info = {
387 .sht = &via_sht,
388 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
389 .pio_mask = 0x1f,
390 .mwdma_mask = 0x07,
391 .port_ops = &via_port_ops
392 };
393 /* Ditto with IRQ masking required */
394 static struct ata_port_info via_mwdma_info_borked = {
395 .sht = &via_sht,
396 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
397 .pio_mask = 0x1f,
398 .mwdma_mask = 0x07,
399 .port_ops = &via_port_ops_noirq,
400 };
401 /* VIA UDMA 33 devices (and borked 66) */
402 static struct ata_port_info via_udma33_info = {
403 .sht = &via_sht,
404 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
405 .pio_mask = 0x1f,
406 .mwdma_mask = 0x07,
407 .udma_mask = 0x7,
408 .port_ops = &via_port_ops
409 };
410 /* VIA UDMA 66 devices */
411 static struct ata_port_info via_udma66_info = {
412 .sht = &via_sht,
413 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
414 .pio_mask = 0x1f,
415 .mwdma_mask = 0x07,
416 .udma_mask = 0x1f,
417 .port_ops = &via_port_ops
418 };
419 /* VIA UDMA 100 devices */
420 static struct ata_port_info via_udma100_info = {
421 .sht = &via_sht,
422 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
423 .pio_mask = 0x1f,
424 .mwdma_mask = 0x07,
425 .udma_mask = 0x3f,
426 .port_ops = &via_port_ops
427 };
428 /* UDMA133 with bad AST (All current 133) */
429 static struct ata_port_info via_udma133_info = {
430 .sht = &via_sht,
431 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
432 .pio_mask = 0x1f,
433 .mwdma_mask = 0x07,
434 .udma_mask = 0x7f, /* FIXME: should check north bridge */
435 .port_ops = &via_port_ops
436 };
437 struct ata_port_info *port_info[2], *type;
438 struct pci_dev *isa = NULL;
439 const struct via_isa_bridge *config;
440 static int printed_version;
441 u8 t;
442 u8 enable;
443 u32 timing;
444
445 if (!printed_version++)
446 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
447
448	/* To find out how the IDE will behave and what features we actually
449	   have, we must look at the bridge, not the IDE controller */
450 for (config = via_isa_bridges; config->id; config++)
451 if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
452 !!(config->flags & VIA_BAD_ID),
453 config->id, NULL))) {
454
455 pci_read_config_byte(isa, PCI_REVISION_ID, &t);
456 if (t >= config->rev_min &&
457 t <= config->rev_max)
458 break;
459 pci_dev_put(isa);
460 }
461
462 if (!config->id) {
463 printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
464 return -ENODEV;
465 }
466 pci_dev_put(isa);
467
468 /* 0x40 low bits indicate enabled channels */
469 pci_read_config_byte(pdev, 0x40 , &enable);
470 enable &= 3;
471 if (enable == 0) {
472 return -ENODEV;
473 }
474
475 /* Initialise the FIFO for the enabled channels. */
476 if (config->flags & VIA_SET_FIFO) {
477 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
478 u8 fifo;
479
480 pci_read_config_byte(pdev, 0x43, &fifo);
481
482 /* Clear PREQ# until DDACK# for errata */
483 if (config->flags & VIA_BAD_PREQ)
484 fifo &= 0x7F;
485 else
486 fifo &= 0x9f;
487 /* Turn on FIFO for enabled channels */
488 fifo |= fifo_setting[enable];
489 pci_write_config_byte(pdev, 0x43, fifo);
490 }
491 /* Clock set up */
492 switch(config->flags & VIA_UDMA) {
493 case VIA_UDMA_NONE:
494 if (config->flags & VIA_NO_UNMASK)
495 type = &via_mwdma_info_borked;
496 else
497 type = &via_mwdma_info;
498 break;
499 case VIA_UDMA_33:
500 type = &via_udma33_info;
501 break;
502 case VIA_UDMA_66:
503 type = &via_udma66_info;
504 /* The 66 MHz devices require we enable the clock */
505 pci_read_config_dword(pdev, 0x50, &timing);
506 timing |= 0x80008;
507 pci_write_config_dword(pdev, 0x50, timing);
508 break;
509 case VIA_UDMA_100:
510 type = &via_udma100_info;
511 break;
512 case VIA_UDMA_133:
513 type = &via_udma133_info;
514 break;
515 default:
516 WARN_ON(1);
517 return -ENODEV;
518 }
519
520 if (config->flags & VIA_BAD_CLK66) {
521 /* Disable the 66MHz clock on problem devices */
522 pci_read_config_dword(pdev, 0x50, &timing);
523 timing &= ~0x80008;
524 pci_write_config_dword(pdev, 0x50, timing);
525 }
526
527 /* We have established the device type, now fire it up */
528 type->private_data = (void *)config;
529
530 port_info[0] = port_info[1] = type;
531 return ata_pci_init_one(pdev, port_info, 2);
532}
533
534static const struct pci_device_id via[] = {
535 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1), },
536 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1), },
537 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410), },
538 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), },
539 { 0, },
540};
541
542static struct pci_driver via_pci_driver = {
543 .name = DRV_NAME,
544 .id_table = via,
545 .probe = via_init_one,
546 .remove = ata_pci_remove_one
547};
548
549static int __init via_init(void)
550{
551 return pci_register_driver(&via_pci_driver);
552}
553
554
555static void __exit via_exit(void)
556{
557 pci_unregister_driver(&via_pci_driver);
558}
559
560
561MODULE_AUTHOR("Alan Cox");
562MODULE_DESCRIPTION("low-level driver for VIA PATA");
563MODULE_LICENSE("GPL");
564MODULE_DEVICE_TABLE(pci, via);
565MODULE_VERSION(DRV_VERSION);
566
567module_init(via_init);
568module_exit(via_exit);
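One easy-to-misread detail of via_do_set_mode() above is the register addressing: the per-drive offset is 3 - 2*port - devno, so the drive timing bytes at 0x48-0x4B and the UDMA bytes at 0x50-0x53 are laid out with the primary master at the highest address and the secondary slave at the lowest, while the 8-bit command timing uses one byte per channel at 0x4F/0x4E and register 0x4C packs two address-setup bits per drive. The tiny stand-alone sketch below just prints the addresses each drive ends up touching; the harness is illustrative only.

#include <stdio.h>

int main(void)
{
	int port, devno;

	for (port = 0; port < 2; port++) {
		for (devno = 0; devno < 2; devno++) {
			int offset = 3 - (2 * port) - devno;	/* as in via_do_set_mode() */

			printf("port %d dev %d: drive timing 0x%02X, UDMA 0x%02X, "
			       "8-bit timing 0x%02X, setup bits %d-%d of 0x4C\n",
			       port, devno,
			       0x48 + offset, 0x50 + offset, 0x4F - port,
			       2 * offset, 2 * offset + 1);
		}
	}
	return 0;
}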
diff --git a/drivers/scsi/pdc_adma.c b/drivers/ata/pdc_adma.c
index efc8fff1d250..0e23ecb77bc2 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -127,7 +127,7 @@ static int adma_ata_init_one (struct pci_dev *pdev,
127static irqreturn_t adma_intr (int irq, void *dev_instance, 127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs); 128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap); 129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host_set *host_set); 130static void adma_host_stop(struct ata_host *host);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
@@ -182,7 +182,7 @@ static struct ata_port_info adma_port_info[] = {
182 /* board_1841_idx */ 182 /* board_1841_idx */
183 { 183 {
184 .sht = &adma_ata_sht, 184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 185 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | 186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING, 187 ATA_FLAG_PIO_POLLING,
188 .pio_mask = 0x10, /* pio4 */ 188 .pio_mask = 0x10, /* pio4 */
@@ -237,7 +237,7 @@ static void adma_reset_engine(void __iomem *chan)
237static void adma_reinit_engine(struct ata_port *ap) 237static void adma_reinit_engine(struct ata_port *ap)
238{ 238{
239 struct adma_port_priv *pp = ap->private_data; 239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host_set->mmio_base; 240 void __iomem *mmio_base = ap->host->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); 241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242 242
243 /* mask/clear ATA interrupts */ 243 /* mask/clear ATA interrupts */
@@ -265,7 +265,7 @@ static void adma_reinit_engine(struct ata_port *ap)
265 265
266static inline void adma_enter_reg_mode(struct ata_port *ap) 266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{ 267{
268 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); 268 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
269 269
270 writew(aPIOMD4, chan + ADMA_CONTROL); 270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */ 271 readb(chan + ADMA_STATUS); /* flush */
@@ -412,7 +412,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
412static inline void adma_packet_start(struct ata_queued_cmd *qc) 412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{ 413{
414 struct ata_port *ap = qc->ap; 414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); 415 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
416 416
417 VPRINTK("ENTER, ap %p\n", ap); 417 VPRINTK("ENTER, ap %p\n", ap);
418 418
@@ -442,13 +442,13 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
442 return ata_qc_issue_prot(qc); 442 return ata_qc_issue_prot(qc);
443} 443}
444 444
445static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) 445static inline unsigned int adma_intr_pkt(struct ata_host *host)
446{ 446{
447 unsigned int handled = 0, port_no; 447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host_set->mmio_base; 448 u8 __iomem *mmio_base = host->mmio_base;
449 449
450 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 450 for (port_no = 0; port_no < host->n_ports; ++port_no) {
451 struct ata_port *ap = host_set->ports[port_no]; 451 struct ata_port *ap = host->ports[port_no];
452 struct adma_port_priv *pp; 452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc; 453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no); 454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
@@ -476,13 +476,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
476 return handled; 476 return handled;
477} 477}
478 478
479static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) 479static inline unsigned int adma_intr_mmio(struct ata_host *host)
480{ 480{
481 unsigned int handled = 0, port_no; 481 unsigned int handled = 0, port_no;
482 482
483 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 483 for (port_no = 0; port_no < host->n_ports; ++port_no) {
484 struct ata_port *ap; 484 struct ata_port *ap;
485 ap = host_set->ports[port_no]; 485 ap = host->ports[port_no];
486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { 486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
487 struct ata_queued_cmd *qc; 487 struct ata_queued_cmd *qc;
488 struct adma_port_priv *pp = ap->private_data; 488 struct adma_port_priv *pp = ap->private_data;
@@ -497,7 +497,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
497 continue; 497 continue;
498 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 498 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
499 ap->id, qc->tf.protocol, status); 499 ap->id, qc->tf.protocol, status);
500 500
501 /* complete taskfile transaction */ 501 /* complete taskfile transaction */
502 pp->state = adma_state_idle; 502 pp->state = adma_state_idle;
503 qc->err_mask |= ac_err_mask(status); 503 qc->err_mask |= ac_err_mask(status);
@@ -511,14 +511,14 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
511 511
512static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) 512static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
513{ 513{
514 struct ata_host_set *host_set = dev_instance; 514 struct ata_host *host = dev_instance;
515 unsigned int handled = 0; 515 unsigned int handled = 0;
516 516
517 VPRINTK("ENTER\n"); 517 VPRINTK("ENTER\n");
518 518
519 spin_lock(&host_set->lock); 519 spin_lock(&host->lock);
520 handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set); 520 handled = adma_intr_pkt(host) | adma_intr_mmio(host);
521 spin_unlock(&host_set->lock); 521 spin_unlock(&host->lock);
522 522
523 VPRINTK("EXIT\n"); 523 VPRINTK("EXIT\n");
524 524
@@ -544,7 +544,7 @@ static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
544 544
545static int adma_port_start(struct ata_port *ap) 545static int adma_port_start(struct ata_port *ap)
546{ 546{
547 struct device *dev = ap->host_set->dev; 547 struct device *dev = ap->host->dev;
548 struct adma_port_priv *pp; 548 struct adma_port_priv *pp;
549 int rc; 549 int rc;
550 550
@@ -582,10 +582,10 @@ err_out:
582 582
583static void adma_port_stop(struct ata_port *ap) 583static void adma_port_stop(struct ata_port *ap)
584{ 584{
585 struct device *dev = ap->host_set->dev; 585 struct device *dev = ap->host->dev;
586 struct adma_port_priv *pp = ap->private_data; 586 struct adma_port_priv *pp = ap->private_data;
587 587
588 adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no)); 588 adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
589 if (pp != NULL) { 589 if (pp != NULL) {
590 ap->private_data = NULL; 590 ap->private_data = NULL;
591 if (pp->pkt != NULL) 591 if (pp->pkt != NULL)
@@ -596,14 +596,14 @@ static void adma_port_stop(struct ata_port *ap)
596 ata_port_stop(ap); 596 ata_port_stop(ap);
597} 597}
598 598
599static void adma_host_stop(struct ata_host_set *host_set) 599static void adma_host_stop(struct ata_host *host)
600{ 600{
601 unsigned int port_no; 601 unsigned int port_no;
602 602
603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no) 603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
604 adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no)); 604 adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
605 605
606 ata_pci_host_stop(host_set); 606 ata_pci_host_stop(host);
607} 607}
608 608
609static void adma_host_init(unsigned int chip_id, 609static void adma_host_init(unsigned int chip_id,
@@ -684,7 +684,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
684 INIT_LIST_HEAD(&probe_ent->node); 684 INIT_LIST_HEAD(&probe_ent->node);
685 685
686 probe_ent->sht = adma_port_info[board_idx].sht; 686 probe_ent->sht = adma_port_info[board_idx].sht;
687 probe_ent->host_flags = adma_port_info[board_idx].host_flags; 687 probe_ent->port_flags = adma_port_info[board_idx].flags;
688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; 688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; 689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask; 690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
@@ -722,7 +722,7 @@ err_out:
722 722
723static int __init adma_ata_init(void) 723static int __init adma_ata_init(void)
724{ 724{
725 return pci_module_init(&adma_ata_pci_driver); 725 return pci_register_driver(&adma_ata_pci_driver);
726} 726}
727 727
728static void __exit adma_ata_exit(void) 728static void __exit adma_ata_exit(void)
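The pdc_adma hunks above show the mechanical half of the grand renaming: struct ata_host_set becomes struct ata_host (locals and fields named host_set follow suit), probe_ent->host_flags becomes probe_ent->port_flags with the port-info field now plainly called flags, and pci_module_init() gives way to pci_register_driver(). As a hedged summary, here is a skeleton of a host-level interrupt handler after the rename, abridged and using only names that appear in the hunks; foo_intr and the handled counter are illustrative.

/* Before: the handler took a struct ata_host_set *host_set and used
 * host_set->lock, host_set->n_ports and host_set->ports[].  After: */
static irqreturn_t foo_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host *host = dev_instance;
	unsigned int i, handled = 0;

	spin_lock(&host->lock);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled++;	/* a real driver dispatches per-port work here */
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}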
diff --git a/drivers/scsi/sata_mv.c b/drivers/ata/sata_mv.c
index fa38a413d16b..fdce6e07ecd2 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -342,7 +342,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap); 343static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep); 344static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host_set *host_set); 345static void mv_host_stop(struct ata_host *host);
346static int mv_port_start(struct ata_port *ap); 346static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap); 347static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc); 348static void mv_qc_prep(struct ata_queued_cmd *qc);
@@ -480,35 +480,35 @@ static const struct ata_port_operations mv_iie_ops = {
480static const struct ata_port_info mv_port_info[] = { 480static const struct ata_port_info mv_port_info[] = {
481 { /* chip_504x */ 481 { /* chip_504x */
482 .sht = &mv_sht, 482 .sht = &mv_sht,
483 .host_flags = MV_COMMON_FLAGS, 483 .flags = MV_COMMON_FLAGS,
484 .pio_mask = 0x1f, /* pio0-4 */ 484 .pio_mask = 0x1f, /* pio0-4 */
485 .udma_mask = 0x7f, /* udma0-6 */ 485 .udma_mask = 0x7f, /* udma0-6 */
486 .port_ops = &mv5_ops, 486 .port_ops = &mv5_ops,
487 }, 487 },
488 { /* chip_508x */ 488 { /* chip_508x */
489 .sht = &mv_sht, 489 .sht = &mv_sht,
490 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 490 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 .pio_mask = 0x1f, /* pio0-4 */ 491 .pio_mask = 0x1f, /* pio0-4 */
492 .udma_mask = 0x7f, /* udma0-6 */ 492 .udma_mask = 0x7f, /* udma0-6 */
493 .port_ops = &mv5_ops, 493 .port_ops = &mv5_ops,
494 }, 494 },
495 { /* chip_5080 */ 495 { /* chip_5080 */
496 .sht = &mv_sht, 496 .sht = &mv_sht,
497 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 497 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 .pio_mask = 0x1f, /* pio0-4 */ 498 .pio_mask = 0x1f, /* pio0-4 */
499 .udma_mask = 0x7f, /* udma0-6 */ 499 .udma_mask = 0x7f, /* udma0-6 */
500 .port_ops = &mv5_ops, 500 .port_ops = &mv5_ops,
501 }, 501 },
502 { /* chip_604x */ 502 { /* chip_604x */
503 .sht = &mv_sht, 503 .sht = &mv_sht,
504 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), 504 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 .pio_mask = 0x1f, /* pio0-4 */ 505 .pio_mask = 0x1f, /* pio0-4 */
506 .udma_mask = 0x7f, /* udma0-6 */ 506 .udma_mask = 0x7f, /* udma0-6 */
507 .port_ops = &mv6_ops, 507 .port_ops = &mv6_ops,
508 }, 508 },
509 { /* chip_608x */ 509 { /* chip_608x */
510 .sht = &mv_sht, 510 .sht = &mv_sht,
511 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 511 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 MV_FLAG_DUAL_HC), 512 MV_FLAG_DUAL_HC),
513 .pio_mask = 0x1f, /* pio0-4 */ 513 .pio_mask = 0x1f, /* pio0-4 */
514 .udma_mask = 0x7f, /* udma0-6 */ 514 .udma_mask = 0x7f, /* udma0-6 */
@@ -516,14 +516,14 @@ static const struct ata_port_info mv_port_info[] = {
516 }, 516 },
517 { /* chip_6042 */ 517 { /* chip_6042 */
518 .sht = &mv_sht, 518 .sht = &mv_sht,
519 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), 519 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 .pio_mask = 0x1f, /* pio0-4 */ 520 .pio_mask = 0x1f, /* pio0-4 */
521 .udma_mask = 0x7f, /* udma0-6 */ 521 .udma_mask = 0x7f, /* udma0-6 */
522 .port_ops = &mv_iie_ops, 522 .port_ops = &mv_iie_ops,
523 }, 523 },
524 { /* chip_7042 */ 524 { /* chip_7042 */
525 .sht = &mv_sht, 525 .sht = &mv_sht,
526 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 526 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
527 MV_FLAG_DUAL_HC), 527 MV_FLAG_DUAL_HC),
528 .pio_mask = 0x1f, /* pio0-4 */ 528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */ 529 .udma_mask = 0x7f, /* udma0-6 */
@@ -618,12 +618,12 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
618 618
619static inline void __iomem *mv_ap_base(struct ata_port *ap) 619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{ 620{
621 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 621 return mv_port_base(ap->host->mmio_base, ap->port_no);
622} 622}
623 623
624static inline int mv_get_hc_count(unsigned long host_flags) 624static inline int mv_get_hc_count(unsigned long port_flags)
625{ 625{
626 return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 626 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627} 627}
628 628
629static void mv_irq_clear(struct ata_port *ap) 629static void mv_irq_clear(struct ata_port *ap)
@@ -809,7 +809,7 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
809 809
810/** 810/**
811 * mv_host_stop - Host specific cleanup/stop routine. 811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host_set: host data structure 812 * @host: host data structure
813 * 813 *
814 * Disable ints, cleanup host memory, call general purpose 814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop. 815 * host_stop.
@@ -817,10 +817,10 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
817 * LOCKING: 817 * LOCKING:
818 * Inherited from caller. 818 * Inherited from caller.
819 */ 819 */
820static void mv_host_stop(struct ata_host_set *host_set) 820static void mv_host_stop(struct ata_host *host)
821{ 821{
822 struct mv_host_priv *hpriv = host_set->private_data; 822 struct mv_host_priv *hpriv = host->private_data;
823 struct pci_dev *pdev = to_pci_dev(host_set->dev); 823 struct pci_dev *pdev = to_pci_dev(host->dev);
824 824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) { 825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev); 826 pci_disable_msi(pdev);
@@ -828,7 +828,7 @@ static void mv_host_stop(struct ata_host_set *host_set)
828 pci_intx(pdev, 0); 828 pci_intx(pdev, 0);
829 } 829 }
830 kfree(hpriv); 830 kfree(hpriv);
831 ata_host_stop(host_set); 831 ata_host_stop(host);
832} 832}
833 833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev) 834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
@@ -875,8 +875,8 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
875 */ 875 */
876static int mv_port_start(struct ata_port *ap) 876static int mv_port_start(struct ata_port *ap)
877{ 877{
878 struct device *dev = ap->host_set->dev; 878 struct device *dev = ap->host->dev;
879 struct mv_host_priv *hpriv = ap->host_set->private_data; 879 struct mv_host_priv *hpriv = ap->host->private_data;
880 struct mv_port_priv *pp; 880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap); 881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem; 882 void *mem;
@@ -965,17 +965,17 @@ err_out:
965 * Stop DMA, cleanup port memory. 965 * Stop DMA, cleanup port memory.
966 * 966 *
967 * LOCKING: 967 * LOCKING:
968 * This routine uses the host_set lock to protect the DMA stop. 968 * This routine uses the host lock to protect the DMA stop.
969 */ 969 */
970static void mv_port_stop(struct ata_port *ap) 970static void mv_port_stop(struct ata_port *ap)
971{ 971{
972 struct device *dev = ap->host_set->dev; 972 struct device *dev = ap->host->dev;
973 struct mv_port_priv *pp = ap->private_data; 973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags; 974 unsigned long flags;
975 975
976 spin_lock_irqsave(&ap->host_set->lock, flags); 976 spin_lock_irqsave(&ap->host->lock, flags);
977 mv_stop_dma(ap); 977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host_set->lock, flags); 978 spin_unlock_irqrestore(&ap->host->lock, flags);
979 979
980 ap->private_data = NULL; 980 ap->private_data = NULL;
981 ata_pad_free(ap, dev); 981 ata_pad_free(ap, dev);
@@ -1330,7 +1330,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1330 1330
1331/** 1331/**
1332 * mv_host_intr - Handle all interrupts on the given host controller 1332 * mv_host_intr - Handle all interrupts on the given host controller
1333 * @host_set: host specific structure 1333 * @host: host specific structure
1334 * @relevant: port error bits relevant to this host controller 1334 * @relevant: port error bits relevant to this host controller
1335 * @hc: which host controller we're to look at 1335 * @hc: which host controller we're to look at
1336 * 1336 *
@@ -1344,10 +1344,9 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1344 * LOCKING: 1344 * LOCKING:
1345 * Inherited from caller. 1345 * Inherited from caller.
1346 */ 1346 */
1347static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1347static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1348 unsigned int hc)
1349{ 1348{
1350 void __iomem *mmio = host_set->mmio_base; 1349 void __iomem *mmio = host->mmio_base;
1351 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 1350 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1352 struct ata_queued_cmd *qc; 1351 struct ata_queued_cmd *qc;
1353 u32 hc_irq_cause; 1352 u32 hc_irq_cause;
@@ -1371,7 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1371 1370
1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1371 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373 u8 ata_status = 0; 1372 u8 ata_status = 0;
1374 struct ata_port *ap = host_set->ports[port]; 1373 struct ata_port *ap = host->ports[port];
1375 struct mv_port_priv *pp = ap->private_data; 1374 struct mv_port_priv *pp = ap->private_data;
1376 1375
1377 hard_port = mv_hardport_from_port(port); /* range 0..3 */ 1376 hard_port = mv_hardport_from_port(port); /* range 0..3 */
@@ -1444,15 +1443,15 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1444 * reported here. 1443 * reported here.
1445 * 1444 *
1446 * LOCKING: 1445 * LOCKING:
1447 * This routine holds the host_set lock while processing pending 1446 * This routine holds the host lock while processing pending
1448 * interrupts. 1447 * interrupts.
1449 */ 1448 */
1450static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1449static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1451 struct pt_regs *regs) 1450 struct pt_regs *regs)
1452{ 1451{
1453 struct ata_host_set *host_set = dev_instance; 1452 struct ata_host *host = dev_instance;
1454 unsigned int hc, handled = 0, n_hcs; 1453 unsigned int hc, handled = 0, n_hcs;
1455 void __iomem *mmio = host_set->mmio_base; 1454 void __iomem *mmio = host->mmio_base;
1456 struct mv_host_priv *hpriv; 1455 struct mv_host_priv *hpriv;
1457 u32 irq_stat; 1456 u32 irq_stat;
1458 1457
@@ -1465,18 +1464,18 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1465 return IRQ_NONE; 1464 return IRQ_NONE;
1466 } 1465 }
1467 1466
1468 n_hcs = mv_get_hc_count(host_set->ports[0]->flags); 1467 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1469 spin_lock(&host_set->lock); 1468 spin_lock(&host->lock);
1470 1469
1471 for (hc = 0; hc < n_hcs; hc++) { 1470 for (hc = 0; hc < n_hcs; hc++) {
1472 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1471 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1473 if (relevant) { 1472 if (relevant) {
1474 mv_host_intr(host_set, relevant, hc); 1473 mv_host_intr(host, relevant, hc);
1475 handled++; 1474 handled++;
1476 } 1475 }
1477 } 1476 }
1478 1477
1479 hpriv = host_set->private_data; 1478 hpriv = host->private_data;
1480 if (IS_60XX(hpriv)) { 1479 if (IS_60XX(hpriv)) {
1481 /* deal with the interrupt coalescing bits */ 1480 /* deal with the interrupt coalescing bits */
1482 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { 1481 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
@@ -1491,12 +1490,12 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1491 readl(mmio + PCI_IRQ_CAUSE_OFS)); 1490 readl(mmio + PCI_IRQ_CAUSE_OFS));
1492 1491
1493 DPRINTK("All regs @ PCI error\n"); 1492 DPRINTK("All regs @ PCI error\n");
1494 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); 1493 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1495 1494
1496 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); 1495 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1497 handled++; 1496 handled++;
1498 } 1497 }
1499 spin_unlock(&host_set->lock); 1498 spin_unlock(&host->lock);
1500 1499
1501 return IRQ_RETVAL(handled); 1500 return IRQ_RETVAL(handled);
1502} 1501}
@@ -1528,7 +1527,7 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1528 1527
1529static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) 1528static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530{ 1529{
1531 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no); 1530 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1532 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1531 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533 1532
1534 if (ofs != 0xffffffffU) 1533 if (ofs != 0xffffffffU)
@@ -1539,7 +1538,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1539 1538
1540static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1539static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541{ 1540{
1542 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no); 1541 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1543 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1542 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544 1543
1545 if (ofs != 0xffffffffU) 1544 if (ofs != 0xffffffffU)
@@ -1904,8 +1903,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 1903
1905static void mv_stop_and_reset(struct ata_port *ap) 1904static void mv_stop_and_reset(struct ata_port *ap)
1906{ 1905{
1907 struct mv_host_priv *hpriv = ap->host_set->private_data; 1906 struct mv_host_priv *hpriv = ap->host->private_data;
1908 void __iomem *mmio = ap->host_set->mmio_base; 1907 void __iomem *mmio = ap->host->mmio_base;
1909 1908
1910 mv_stop_dma(ap); 1909 mv_stop_dma(ap);
1911 1910
@@ -1936,7 +1935,7 @@ static inline void __msleep(unsigned int msec, int can_sleep)
1936static void __mv_phy_reset(struct ata_port *ap, int can_sleep) 1935static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1937{ 1936{
1938 struct mv_port_priv *pp = ap->private_data; 1937 struct mv_port_priv *pp = ap->private_data;
1939 struct mv_host_priv *hpriv = ap->host_set->private_data; 1938 struct mv_host_priv *hpriv = ap->host->private_data;
1940 void __iomem *port_mmio = mv_ap_base(ap); 1939 void __iomem *port_mmio = mv_ap_base(ap);
1941 struct ata_taskfile tf; 1940 struct ata_taskfile tf;
1942 struct ata_device *dev = &ap->device[0]; 1941 struct ata_device *dev = &ap->device[0];
@@ -2034,7 +2033,7 @@ static void mv_phy_reset(struct ata_port *ap)
2034 * chip/bus, fail the command, and move on. 2033 * chip/bus, fail the command, and move on.
2035 * 2034 *
2036 * LOCKING: 2035 * LOCKING:
2037 * This routine holds the host_set lock while failing the command. 2036 * This routine holds the host lock while failing the command.
2038 */ 2037 */
2039static void mv_eng_timeout(struct ata_port *ap) 2038static void mv_eng_timeout(struct ata_port *ap)
2040{ 2039{
@@ -2043,18 +2042,17 @@ static void mv_eng_timeout(struct ata_port *ap)
2043 2042
2044 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); 2043 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2045 DPRINTK("All regs @ start of eng_timeout\n"); 2044 DPRINTK("All regs @ start of eng_timeout\n");
2046 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2045 mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
2047 to_pci_dev(ap->host_set->dev)); 2046 to_pci_dev(ap->host->dev));
2048 2047
2049 qc = ata_qc_from_tag(ap, ap->active_tag); 2048 qc = ata_qc_from_tag(ap, ap->active_tag);
2050 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", 2049 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2051 ap->host_set->mmio_base, ap, qc, qc->scsicmd, 2050 ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2052 &qc->scsicmd->cmnd);
2053 2051
2054 spin_lock_irqsave(&ap->host_set->lock, flags); 2052 spin_lock_irqsave(&ap->host->lock, flags);
2055 mv_err_intr(ap, 0); 2053 mv_err_intr(ap, 0);
2056 mv_stop_and_reset(ap); 2054 mv_stop_and_reset(ap);
2057 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2055 spin_unlock_irqrestore(&ap->host->lock, flags);
2058 2056
2059 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 2057 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2060 if (qc->flags & ATA_QCFLAG_ACTIVE) { 2058 if (qc->flags & ATA_QCFLAG_ACTIVE) {
@@ -2235,7 +2233,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2235 if (rc) 2233 if (rc)
2236 goto done; 2234 goto done;
2237 2235
2238 n_hc = mv_get_hc_count(probe_ent->host_flags); 2236 n_hc = mv_get_hc_count(probe_ent->port_flags);
2239 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc; 2237 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2240 2238
2241 for (port = 0; port < probe_ent->n_ports; port++) 2239 for (port = 0; port < probe_ent->n_ports; port++)
@@ -2388,7 +2386,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2388 memset(hpriv, 0, sizeof(*hpriv)); 2386 memset(hpriv, 0, sizeof(*hpriv));
2389 2387
2390 probe_ent->sht = mv_port_info[board_idx].sht; 2388 probe_ent->sht = mv_port_info[board_idx].sht;
2391 probe_ent->host_flags = mv_port_info[board_idx].host_flags; 2389 probe_ent->port_flags = mv_port_info[board_idx].flags;
2392 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask; 2390 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2393 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask; 2391 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2394 probe_ent->port_ops = mv_port_info[board_idx].port_ops; 2392 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
@@ -2446,7 +2444,7 @@ err_out:
2446 2444
2447static int __init mv_init(void) 2445static int __init mv_init(void)
2448{ 2446{
2449 return pci_module_init(&mv_pci_driver); 2447 return pci_register_driver(&mv_pci_driver);
2450} 2448}
2451 2449
2452static void __exit mv_exit(void) 2450static void __exit mv_exit(void)
diff --git a/drivers/scsi/sata_nv.c b/drivers/ata/sata_nv.c
index 56da25581f31..27c22feebf30 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -81,7 +81,7 @@ enum {
81}; 81};
82 82
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host_set *host_set); 84static void nv_ck804_host_stop(struct ata_host *host);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, 85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs); 86 struct pt_regs *regs);
87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, 87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
@@ -257,7 +257,7 @@ static struct ata_port_info nv_port_info[] = {
257 /* generic */ 257 /* generic */
258 { 258 {
259 .sht = &nv_sht, 259 .sht = &nv_sht,
260 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 260 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
261 .pio_mask = NV_PIO_MASK, 261 .pio_mask = NV_PIO_MASK,
262 .mwdma_mask = NV_MWDMA_MASK, 262 .mwdma_mask = NV_MWDMA_MASK,
263 .udma_mask = NV_UDMA_MASK, 263 .udma_mask = NV_UDMA_MASK,
@@ -266,7 +266,7 @@ static struct ata_port_info nv_port_info[] = {
266 /* nforce2/3 */ 266 /* nforce2/3 */
267 { 267 {
268 .sht = &nv_sht, 268 .sht = &nv_sht,
269 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 269 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
270 .pio_mask = NV_PIO_MASK, 270 .pio_mask = NV_PIO_MASK,
271 .mwdma_mask = NV_MWDMA_MASK, 271 .mwdma_mask = NV_MWDMA_MASK,
272 .udma_mask = NV_UDMA_MASK, 272 .udma_mask = NV_UDMA_MASK,
@@ -275,7 +275,7 @@ static struct ata_port_info nv_port_info[] = {
275 /* ck804 */ 275 /* ck804 */
276 { 276 {
277 .sht = &nv_sht, 277 .sht = &nv_sht,
278 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 278 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
279 .pio_mask = NV_PIO_MASK, 279 .pio_mask = NV_PIO_MASK,
280 .mwdma_mask = NV_MWDMA_MASK, 280 .mwdma_mask = NV_MWDMA_MASK,
281 .udma_mask = NV_UDMA_MASK, 281 .udma_mask = NV_UDMA_MASK,
@@ -292,17 +292,17 @@ MODULE_VERSION(DRV_VERSION);
292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, 292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
293 struct pt_regs *regs) 293 struct pt_regs *regs)
294{ 294{
295 struct ata_host_set *host_set = dev_instance; 295 struct ata_host *host = dev_instance;
296 unsigned int i; 296 unsigned int i;
297 unsigned int handled = 0; 297 unsigned int handled = 0;
298 unsigned long flags; 298 unsigned long flags;
299 299
300 spin_lock_irqsave(&host_set->lock, flags); 300 spin_lock_irqsave(&host->lock, flags);
301 301
302 for (i = 0; i < host_set->n_ports; i++) { 302 for (i = 0; i < host->n_ports; i++) {
303 struct ata_port *ap; 303 struct ata_port *ap;
304 304
305 ap = host_set->ports[i]; 305 ap = host->ports[i];
306 if (ap && 306 if (ap &&
307 !(ap->flags & ATA_FLAG_DISABLED)) { 307 !(ap->flags & ATA_FLAG_DISABLED)) {
308 struct ata_queued_cmd *qc; 308 struct ata_queued_cmd *qc;
@@ -318,7 +318,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
318 318
319 } 319 }
320 320
321 spin_unlock_irqrestore(&host_set->lock, flags); 321 spin_unlock_irqrestore(&host->lock, flags);
322 322
323 return IRQ_RETVAL(handled); 323 return IRQ_RETVAL(handled);
324} 324}
@@ -354,12 +354,12 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
354 return 1; 354 return 1;
355} 355}
356 356
357static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat) 357static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
358{ 358{
359 int i, handled = 0; 359 int i, handled = 0;
360 360
361 for (i = 0; i < host_set->n_ports; i++) { 361 for (i = 0; i < host->n_ports; i++) {
362 struct ata_port *ap = host_set->ports[i]; 362 struct ata_port *ap = host->ports[i];
363 363
364 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) 364 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
365 handled += nv_host_intr(ap, irq_stat); 365 handled += nv_host_intr(ap, irq_stat);
@@ -373,14 +373,14 @@ static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, 373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
374 struct pt_regs *regs) 374 struct pt_regs *regs)
375{ 375{
376 struct ata_host_set *host_set = dev_instance; 376 struct ata_host *host = dev_instance;
377 u8 irq_stat; 377 u8 irq_stat;
378 irqreturn_t ret; 378 irqreturn_t ret;
379 379
380 spin_lock(&host_set->lock); 380 spin_lock(&host->lock);
381 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); 381 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
382 ret = nv_do_interrupt(host_set, irq_stat); 382 ret = nv_do_interrupt(host, irq_stat);
383 spin_unlock(&host_set->lock); 383 spin_unlock(&host->lock);
384 384
385 return ret; 385 return ret;
386} 386}
@@ -388,14 +388,14 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, 388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
389 struct pt_regs *regs) 389 struct pt_regs *regs)
390{ 390{
391 struct ata_host_set *host_set = dev_instance; 391 struct ata_host *host = dev_instance;
392 u8 irq_stat; 392 u8 irq_stat;
393 irqreturn_t ret; 393 irqreturn_t ret;
394 394
395 spin_lock(&host_set->lock); 395 spin_lock(&host->lock);
396 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804); 396 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
397 ret = nv_do_interrupt(host_set, irq_stat); 397 ret = nv_do_interrupt(host, irq_stat);
398 spin_unlock(&host_set->lock); 398 spin_unlock(&host->lock);
399 399
400 return ret; 400 return ret;
401} 401}
@@ -418,7 +418,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
418 418
419static void nv_nf2_freeze(struct ata_port *ap) 419static void nv_nf2_freeze(struct ata_port *ap)
420{ 420{
421 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr; 421 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
422 int shift = ap->port_no * NV_INT_PORT_SHIFT; 422 int shift = ap->port_no * NV_INT_PORT_SHIFT;
423 u8 mask; 423 u8 mask;
424 424
@@ -429,7 +429,7 @@ static void nv_nf2_freeze(struct ata_port *ap)
429 429
430static void nv_nf2_thaw(struct ata_port *ap) 430static void nv_nf2_thaw(struct ata_port *ap)
431{ 431{
432 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr; 432 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
433 int shift = ap->port_no * NV_INT_PORT_SHIFT; 433 int shift = ap->port_no * NV_INT_PORT_SHIFT;
434 u8 mask; 434 u8 mask;
435 435
@@ -442,7 +442,7 @@ static void nv_nf2_thaw(struct ata_port *ap)
442 442
443static void nv_ck804_freeze(struct ata_port *ap) 443static void nv_ck804_freeze(struct ata_port *ap)
444{ 444{
445 void __iomem *mmio_base = ap->host_set->mmio_base; 445 void __iomem *mmio_base = ap->host->mmio_base;
446 int shift = ap->port_no * NV_INT_PORT_SHIFT; 446 int shift = ap->port_no * NV_INT_PORT_SHIFT;
447 u8 mask; 447 u8 mask;
448 448
@@ -453,7 +453,7 @@ static void nv_ck804_freeze(struct ata_port *ap)
453 453
454static void nv_ck804_thaw(struct ata_port *ap) 454static void nv_ck804_thaw(struct ata_port *ap)
455{ 455{
456 void __iomem *mmio_base = ap->host_set->mmio_base; 456 void __iomem *mmio_base = ap->host->mmio_base;
457 int shift = ap->port_no * NV_INT_PORT_SHIFT; 457 int shift = ap->port_no * NV_INT_PORT_SHIFT;
458 u8 mask; 458 u8 mask;
459 459
@@ -568,9 +568,9 @@ err_out:
568 return rc; 568 return rc;
569} 569}
570 570
571static void nv_ck804_host_stop(struct ata_host_set *host_set) 571static void nv_ck804_host_stop(struct ata_host *host)
572{ 572{
573 struct pci_dev *pdev = to_pci_dev(host_set->dev); 573 struct pci_dev *pdev = to_pci_dev(host->dev);
574 u8 regval; 574 u8 regval;
575 575
576 /* disable SATA space for CK804 */ 576 /* disable SATA space for CK804 */
@@ -578,12 +578,12 @@ static void nv_ck804_host_stop(struct ata_host_set *host_set)
578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; 578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); 579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
580 580
581 ata_pci_host_stop(host_set); 581 ata_pci_host_stop(host);
582} 582}
583 583
584static int __init nv_init(void) 584static int __init nv_init(void)
585{ 585{
586 return pci_module_init(&nv_pci_driver); 586 return pci_register_driver(&nv_pci_driver);
587} 587}
588 588
589static void __exit nv_exit(void) 589static void __exit nv_exit(void)
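Every nv_* hunk above reduces to the same mechanical substitution: struct ata_host_set becomes struct ata_host, and the lock, n_ports and ports[] members are reached through the new name. A minimal sketch of the interrupt-side pattern these hunks converge on (the handler name is illustrative only, and the per-port servicing is elided):

	static irqreturn_t example_interrupt(int irq, void *dev_instance,
					     struct pt_regs *regs)
	{
		struct ata_host *host = dev_instance;	/* formerly struct ata_host_set */
		unsigned int i, handled = 0;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (ap && !(ap->flags & ATA_FLAG_DISABLED))
				handled++;	/* per-port servicing elided */
		}
		spin_unlock_irqrestore(&host->lock, flags);

		return IRQ_RETVAL(handled);
	}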
diff --git a/drivers/scsi/sata_promise.c b/drivers/ata/sata_promise.c
index 4776f4e55839..d627812ea73d 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -104,7 +104,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap); 105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host_set *host_set); 107static void pdc_host_stop(struct ata_host *host);
108 108
109 109
110static struct scsi_host_template pdc_ata_sht = { 110static struct scsi_host_template pdc_ata_sht = {
@@ -175,7 +175,7 @@ static const struct ata_port_info pdc_port_info[] = {
175 /* board_2037x */ 175 /* board_2037x */
176 { 176 {
177 .sht = &pdc_ata_sht, 177 .sht = &pdc_ata_sht,
178 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 178 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
179 .pio_mask = 0x1f, /* pio0-4 */ 179 .pio_mask = 0x1f, /* pio0-4 */
180 .mwdma_mask = 0x07, /* mwdma0-2 */ 180 .mwdma_mask = 0x07, /* mwdma0-2 */
181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -185,7 +185,7 @@ static const struct ata_port_info pdc_port_info[] = {
185 /* board_20319 */ 185 /* board_20319 */
186 { 186 {
187 .sht = &pdc_ata_sht, 187 .sht = &pdc_ata_sht,
188 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 188 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
189 .pio_mask = 0x1f, /* pio0-4 */ 189 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */ 190 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -195,7 +195,7 @@ static const struct ata_port_info pdc_port_info[] = {
195 /* board_20619 */ 195 /* board_20619 */
196 { 196 {
197 .sht = &pdc_ata_sht, 197 .sht = &pdc_ata_sht,
198 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS, 198 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
199 .pio_mask = 0x1f, /* pio0-4 */ 199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */ 200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -205,7 +205,7 @@ static const struct ata_port_info pdc_port_info[] = {
205 /* board_20771 */ 205 /* board_20771 */
206 { 206 {
207 .sht = &pdc_ata_sht, 207 .sht = &pdc_ata_sht,
208 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 208 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
209 .pio_mask = 0x1f, /* pio0-4 */ 209 .pio_mask = 0x1f, /* pio0-4 */
210 .mwdma_mask = 0x07, /* mwdma0-2 */ 210 .mwdma_mask = 0x07, /* mwdma0-2 */
211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -215,7 +215,7 @@ static const struct ata_port_info pdc_port_info[] = {
215 /* board_2057x */ 215 /* board_2057x */
216 { 216 {
217 .sht = &pdc_ata_sht, 217 .sht = &pdc_ata_sht,
218 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 218 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
219 .pio_mask = 0x1f, /* pio0-4 */ 219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -225,7 +225,7 @@ static const struct ata_port_info pdc_port_info[] = {
225 /* board_40518 */ 225 /* board_40518 */
226 { 226 {
227 .sht = &pdc_ata_sht, 227 .sht = &pdc_ata_sht,
228 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 228 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
229 .pio_mask = 0x1f, /* pio0-4 */ 229 .pio_mask = 0x1f, /* pio0-4 */
230 .mwdma_mask = 0x07, /* mwdma0-2 */ 230 .mwdma_mask = 0x07, /* mwdma0-2 */
231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -292,7 +292,7 @@ static struct pci_driver pdc_ata_pci_driver = {
292 292
293static int pdc_port_start(struct ata_port *ap) 293static int pdc_port_start(struct ata_port *ap)
294{ 294{
295 struct device *dev = ap->host_set->dev; 295 struct device *dev = ap->host->dev;
296 struct pdc_port_priv *pp; 296 struct pdc_port_priv *pp;
297 int rc; 297 int rc;
298 298
@@ -326,7 +326,7 @@ err_out:
326 326
327static void pdc_port_stop(struct ata_port *ap) 327static void pdc_port_stop(struct ata_port *ap)
328{ 328{
329 struct device *dev = ap->host_set->dev; 329 struct device *dev = ap->host->dev;
330 struct pdc_port_priv *pp = ap->private_data; 330 struct pdc_port_priv *pp = ap->private_data;
331 331
332 ap->private_data = NULL; 332 ap->private_data = NULL;
@@ -336,11 +336,11 @@ static void pdc_port_stop(struct ata_port *ap)
336} 336}
337 337
338 338
339static void pdc_host_stop(struct ata_host_set *host_set) 339static void pdc_host_stop(struct ata_host *host)
340{ 340{
341 struct pdc_host_priv *hp = host_set->private_data; 341 struct pdc_host_priv *hp = host->private_data;
342 342
343 ata_pci_host_stop(host_set); 343 ata_pci_host_stop(host);
344 344
345 kfree(hp); 345 kfree(hp);
346} 346}
@@ -443,14 +443,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
443 443
444static void pdc_eng_timeout(struct ata_port *ap) 444static void pdc_eng_timeout(struct ata_port *ap)
445{ 445{
446 struct ata_host_set *host_set = ap->host_set; 446 struct ata_host *host = ap->host;
447 u8 drv_stat; 447 u8 drv_stat;
448 struct ata_queued_cmd *qc; 448 struct ata_queued_cmd *qc;
449 unsigned long flags; 449 unsigned long flags;
450 450
451 DPRINTK("ENTER\n"); 451 DPRINTK("ENTER\n");
452 452
453 spin_lock_irqsave(&host_set->lock, flags); 453 spin_lock_irqsave(&host->lock, flags);
454 454
455 qc = ata_qc_from_tag(ap, ap->active_tag); 455 qc = ata_qc_from_tag(ap, ap->active_tag);
456 456
@@ -473,7 +473,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
473 break; 473 break;
474 } 474 }
475 475
476 spin_unlock_irqrestore(&host_set->lock, flags); 476 spin_unlock_irqrestore(&host->lock, flags);
477 ata_eh_qc_complete(qc); 477 ata_eh_qc_complete(qc);
478 DPRINTK("EXIT\n"); 478 DPRINTK("EXIT\n");
479} 479}
@@ -509,15 +509,15 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
509 509
510static void pdc_irq_clear(struct ata_port *ap) 510static void pdc_irq_clear(struct ata_port *ap)
511{ 511{
512 struct ata_host_set *host_set = ap->host_set; 512 struct ata_host *host = ap->host;
513 void __iomem *mmio = host_set->mmio_base; 513 void __iomem *mmio = host->mmio_base;
514 514
515 readl(mmio + PDC_INT_SEQMASK); 515 readl(mmio + PDC_INT_SEQMASK);
516} 516}
517 517
518static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 518static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
519{ 519{
520 struct ata_host_set *host_set = dev_instance; 520 struct ata_host *host = dev_instance;
521 struct ata_port *ap; 521 struct ata_port *ap;
522 u32 mask = 0; 522 u32 mask = 0;
523 unsigned int i, tmp; 523 unsigned int i, tmp;
@@ -526,12 +526,12 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
526 526
527 VPRINTK("ENTER\n"); 527 VPRINTK("ENTER\n");
528 528
529 if (!host_set || !host_set->mmio_base) { 529 if (!host || !host->mmio_base) {
530 VPRINTK("QUICK EXIT\n"); 530 VPRINTK("QUICK EXIT\n");
531 return IRQ_NONE; 531 return IRQ_NONE;
532 } 532 }
533 533
534 mmio_base = host_set->mmio_base; 534 mmio_base = host->mmio_base;
535 535
536 /* reading should also clear interrupts */ 536 /* reading should also clear interrupts */
537 mask = readl(mmio_base + PDC_INT_SEQMASK); 537 mask = readl(mmio_base + PDC_INT_SEQMASK);
@@ -541,7 +541,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
541 return IRQ_NONE; 541 return IRQ_NONE;
542 } 542 }
543 543
544 spin_lock(&host_set->lock); 544 spin_lock(&host->lock);
545 545
546 mask &= 0xffff; /* only 16 tags possible */ 546 mask &= 0xffff; /* only 16 tags possible */
547 if (!mask) { 547 if (!mask) {
@@ -551,9 +551,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
551 551
552 writel(mask, mmio_base + PDC_INT_SEQMASK); 552 writel(mask, mmio_base + PDC_INT_SEQMASK);
553 553
554 for (i = 0; i < host_set->n_ports; i++) { 554 for (i = 0; i < host->n_ports; i++) {
555 VPRINTK("port %u\n", i); 555 VPRINTK("port %u\n", i);
556 ap = host_set->ports[i]; 556 ap = host->ports[i];
557 tmp = mask & (1 << (i + 1)); 557 tmp = mask & (1 << (i + 1));
558 if (tmp && ap && 558 if (tmp && ap &&
559 !(ap->flags & ATA_FLAG_DISABLED)) { 559 !(ap->flags & ATA_FLAG_DISABLED)) {
@@ -568,7 +568,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
568 VPRINTK("EXIT\n"); 568 VPRINTK("EXIT\n");
569 569
570done_irq: 570done_irq:
571 spin_unlock(&host_set->lock); 571 spin_unlock(&host->lock);
572 return IRQ_RETVAL(handled); 572 return IRQ_RETVAL(handled);
573} 573}
574 574
@@ -581,8 +581,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
581 581
582 VPRINTK("ENTER, ap %p\n", ap); 582 VPRINTK("ENTER, ap %p\n", ap);
583 583
584 writel(0x00000001, ap->host_set->mmio_base + (seq * 4)); 584 writel(0x00000001, ap->host->mmio_base + (seq * 4));
585 readl(ap->host_set->mmio_base + (seq * 4)); /* flush */ 585 readl(ap->host->mmio_base + (seq * 4)); /* flush */
586 586
587 pp->pkt[2] = seq; 587 pp->pkt[2] = seq;
588 wmb(); /* flush PRD, pkt writes */ 588 wmb(); /* flush PRD, pkt writes */
@@ -743,7 +743,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
743 probe_ent->private_data = hp; 743 probe_ent->private_data = hp;
744 744
745 probe_ent->sht = pdc_port_info[board_idx].sht; 745 probe_ent->sht = pdc_port_info[board_idx].sht;
746 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 746 probe_ent->port_flags = pdc_port_info[board_idx].flags;
747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask; 748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask; 749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
@@ -824,7 +824,7 @@ err_out:
824 824
825static int __init pdc_ata_init(void) 825static int __init pdc_ata_init(void)
826{ 826{
827 return pci_module_init(&pdc_ata_pci_driver); 827 return pci_register_driver(&pdc_ata_pci_driver);
828} 828}
829 829
830 830
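The pdc_ata_init_one() hunk shows the probe-entry side of the flags rename: ata_port_info loses .host_flags in favour of .flags, and the probe entry stores it as port_flags. Condensed, the copy-over looks roughly like this (probe_ent and board_idx come from earlier in the function, outside this excerpt):

	const struct ata_port_info *pinfo = &pdc_port_info[board_idx];

	probe_ent->sht        = pinfo->sht;
	probe_ent->port_flags = pinfo->flags;	/* was ->host_flags on both sides */
	probe_ent->pio_mask   = pinfo->pio_mask;
	probe_ent->mwdma_mask = pinfo->mwdma_mask;
	probe_ent->udma_mask  = pinfo->udma_mask;
	probe_ent->port_ops   = pinfo->port_ops;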
diff --git a/drivers/scsi/sata_promise.h b/drivers/ata/sata_promise.h
index 6ee5e190262d..6ee5e190262d 100644
--- a/drivers/scsi/sata_promise.h
+++ b/drivers/ata/sata_promise.h
diff --git a/drivers/scsi/sata_qstor.c b/drivers/ata/sata_qstor.c
index d374c1db0cf3..fa29dfe2a7b5 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -116,7 +116,7 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs); 117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap); 118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host_set *host_set); 119static void qs_host_stop(struct ata_host *host);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
@@ -174,7 +174,7 @@ static const struct ata_port_info qs_port_info[] = {
174 /* board_2068_idx */ 174 /* board_2068_idx */
175 { 175 {
176 .sht = &qs_ata_sht, 176 .sht = &qs_ata_sht,
177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET | 178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST | 179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, 180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
@@ -220,7 +220,7 @@ static void qs_irq_clear(struct ata_port *ap)
220 220
221static inline void qs_enter_reg_mode(struct ata_port *ap) 221static inline void qs_enter_reg_mode(struct ata_port *ap)
222{ 222{
223 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 223 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
224 224
225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); 225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
226 readb(chan + QS_CCT_CTR0); /* flush */ 226 readb(chan + QS_CCT_CTR0); /* flush */
@@ -228,7 +228,7 @@ static inline void qs_enter_reg_mode(struct ata_port *ap)
228 228
229static inline void qs_reset_channel_logic(struct ata_port *ap) 229static inline void qs_reset_channel_logic(struct ata_port *ap)
230{ 230{
231 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 231 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
232 232
233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1); 233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
234 readb(chan + QS_CCT_CTR0); /* flush */ 234 readb(chan + QS_CCT_CTR0); /* flush */
@@ -342,7 +342,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
342static inline void qs_packet_start(struct ata_queued_cmd *qc) 342static inline void qs_packet_start(struct ata_queued_cmd *qc)
343{ 343{
344 struct ata_port *ap = qc->ap; 344 struct ata_port *ap = qc->ap;
345 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 345 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
346 346
347 VPRINTK("ENTER, ap %p\n", ap); 347 VPRINTK("ENTER, ap %p\n", ap);
348 348
@@ -375,11 +375,11 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
375 return ata_qc_issue_prot(qc); 375 return ata_qc_issue_prot(qc);
376} 376}
377 377
378static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set) 378static inline unsigned int qs_intr_pkt(struct ata_host *host)
379{ 379{
380 unsigned int handled = 0; 380 unsigned int handled = 0;
381 u8 sFFE; 381 u8 sFFE;
382 u8 __iomem *mmio_base = host_set->mmio_base; 382 u8 __iomem *mmio_base = host->mmio_base;
383 383
384 do { 384 do {
385 u32 sff0 = readl(mmio_base + QS_HST_SFF); 385 u32 sff0 = readl(mmio_base + QS_HST_SFF);
@@ -391,7 +391,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
391 u8 sDST = sff0 >> 16; /* dev status */ 391 u8 sDST = sff0 >> 16; /* dev status */
392 u8 sHST = sff1 & 0x3f; /* host status */ 392 u8 sHST = sff1 & 0x3f; /* host status */
393 unsigned int port_no = (sff1 >> 8) & 0x03; 393 unsigned int port_no = (sff1 >> 8) & 0x03;
394 struct ata_port *ap = host_set->ports[port_no]; 394 struct ata_port *ap = host->ports[port_no];
395 395
396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
397 sff1, sff0, port_no, sHST, sDST); 397 sff1, sff0, port_no, sHST, sDST);
@@ -421,13 +421,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
421 return handled; 421 return handled;
422} 422}
423 423
424static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set) 424static inline unsigned int qs_intr_mmio(struct ata_host *host)
425{ 425{
426 unsigned int handled = 0, port_no; 426 unsigned int handled = 0, port_no;
427 427
428 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 428 for (port_no = 0; port_no < host->n_ports; ++port_no) {
429 struct ata_port *ap; 429 struct ata_port *ap;
430 ap = host_set->ports[port_no]; 430 ap = host->ports[port_no];
431 if (ap && 431 if (ap &&
432 !(ap->flags & ATA_FLAG_DISABLED)) { 432 !(ap->flags & ATA_FLAG_DISABLED)) {
433 struct ata_queued_cmd *qc; 433 struct ata_queued_cmd *qc;
@@ -457,14 +457,14 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
457 457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs) 458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{ 459{
460 struct ata_host_set *host_set = dev_instance; 460 struct ata_host *host = dev_instance;
461 unsigned int handled = 0; 461 unsigned int handled = 0;
462 462
463 VPRINTK("ENTER\n"); 463 VPRINTK("ENTER\n");
464 464
465 spin_lock(&host_set->lock); 465 spin_lock(&host->lock);
466 handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set); 466 handled = qs_intr_pkt(host) | qs_intr_mmio(host);
467 spin_unlock(&host_set->lock); 467 spin_unlock(&host->lock);
468 468
469 VPRINTK("EXIT\n"); 469 VPRINTK("EXIT\n");
470 470
@@ -491,9 +491,9 @@ static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
491 491
492static int qs_port_start(struct ata_port *ap) 492static int qs_port_start(struct ata_port *ap)
493{ 493{
494 struct device *dev = ap->host_set->dev; 494 struct device *dev = ap->host->dev;
495 struct qs_port_priv *pp; 495 struct qs_port_priv *pp;
496 void __iomem *mmio_base = ap->host_set->mmio_base; 496 void __iomem *mmio_base = ap->host->mmio_base;
497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); 497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
498 u64 addr; 498 u64 addr;
499 int rc; 499 int rc;
@@ -530,7 +530,7 @@ err_out:
530 530
531static void qs_port_stop(struct ata_port *ap) 531static void qs_port_stop(struct ata_port *ap)
532{ 532{
533 struct device *dev = ap->host_set->dev; 533 struct device *dev = ap->host->dev;
534 struct qs_port_priv *pp = ap->private_data; 534 struct qs_port_priv *pp = ap->private_data;
535 535
536 if (pp != NULL) { 536 if (pp != NULL) {
@@ -543,10 +543,10 @@ static void qs_port_stop(struct ata_port *ap)
543 ata_port_stop(ap); 543 ata_port_stop(ap);
544} 544}
545 545
546static void qs_host_stop(struct ata_host_set *host_set) 546static void qs_host_stop(struct ata_host *host)
547{ 547{
548 void __iomem *mmio_base = host_set->mmio_base; 548 void __iomem *mmio_base = host->mmio_base;
549 struct pci_dev *pdev = to_pci_dev(host_set->dev); 549 struct pci_dev *pdev = to_pci_dev(host->dev);
550 550
551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ 552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
@@ -673,7 +673,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
673 INIT_LIST_HEAD(&probe_ent->node); 673 INIT_LIST_HEAD(&probe_ent->node);
674 674
675 probe_ent->sht = qs_port_info[board_idx].sht; 675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->host_flags = qs_port_info[board_idx].host_flags; 676 probe_ent->port_flags = qs_port_info[board_idx].flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask; 677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask; 678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask; 679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
@@ -712,7 +712,7 @@ err_out:
712 712
713static int __init qs_ata_init(void) 713static int __init qs_ata_init(void)
714{ 714{
715 return pci_module_init(&qs_ata_pci_driver); 715 return pci_register_driver(&qs_ata_pci_driver);
716} 716}
717 717
718static void __exit qs_ata_exit(void) 718static void __exit qs_ata_exit(void)
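sata_qstor addresses each port through a fixed-size slice of the shared MMIO window, and after the rename that base lives at ap->host->mmio_base. A one-helper sketch of the address math used repeatedly above (the helper name is invented for illustration; the 0x4000 stride is taken from the hunks):

	static inline u8 __iomem *qs_port_mmio(struct ata_port *ap)
	{
		/* one 16 KiB register window per port */
		return ap->host->mmio_base + (ap->port_no * 0x4000);
	}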
diff --git a/drivers/scsi/sata_sil.c b/drivers/ata/sata_sil.c
index d0a85073ebf7..c63dbabc0cd9 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -56,7 +56,7 @@ enum {
56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
57 SIL_FLAG_MOD15WRITE = (1 << 30), 57 SIL_FLAG_MOD15WRITE = (1 << 30),
58 58
59 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 59 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME, 60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
61 61
62 /* 62 /*
@@ -109,7 +109,9 @@ enum {
109}; 109};
110 110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112#ifdef CONFIG_PM
112static int sil_pci_device_resume(struct pci_dev *pdev); 113static int sil_pci_device_resume(struct pci_dev *pdev);
114#endif
113static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); 115static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
114static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 116static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 117static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@@ -141,12 +143,8 @@ static const struct sil_drivelist {
141 { "ST330013AS", SIL_QUIRK_MOD15WRITE }, 143 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
142 { "ST340017AS", SIL_QUIRK_MOD15WRITE }, 144 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
143 { "ST360015AS", SIL_QUIRK_MOD15WRITE }, 145 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
144 { "ST380013AS", SIL_QUIRK_MOD15WRITE },
145 { "ST380023AS", SIL_QUIRK_MOD15WRITE }, 146 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
146 { "ST3120023AS", SIL_QUIRK_MOD15WRITE }, 147 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
147 { "ST3160023AS", SIL_QUIRK_MOD15WRITE },
148 { "ST3120026AS", SIL_QUIRK_MOD15WRITE },
149 { "ST3200822AS", SIL_QUIRK_MOD15WRITE },
150 { "ST340014ASL", SIL_QUIRK_MOD15WRITE }, 148 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
151 { "ST360014ASL", SIL_QUIRK_MOD15WRITE }, 149 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
152 { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, 150 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
@@ -161,8 +159,10 @@ static struct pci_driver sil_pci_driver = {
161 .id_table = sil_pci_tbl, 159 .id_table = sil_pci_tbl,
162 .probe = sil_init_one, 160 .probe = sil_init_one,
163 .remove = ata_pci_remove_one, 161 .remove = ata_pci_remove_one,
162#ifdef CONFIG_PM
164 .suspend = ata_pci_device_suspend, 163 .suspend = ata_pci_device_suspend,
165 .resume = sil_pci_device_resume, 164 .resume = sil_pci_device_resume,
165#endif
166}; 166};
167 167
168static struct scsi_host_template sil_sht = { 168static struct scsi_host_template sil_sht = {
@@ -218,7 +218,7 @@ static const struct ata_port_info sil_port_info[] = {
218 /* sil_3112 */ 218 /* sil_3112 */
219 { 219 {
220 .sht = &sil_sht, 220 .sht = &sil_sht,
221 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE, 221 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
222 .pio_mask = 0x1f, /* pio0-4 */ 222 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */ 223 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x3f, /* udma0-5 */ 224 .udma_mask = 0x3f, /* udma0-5 */
@@ -227,7 +227,7 @@ static const struct ata_port_info sil_port_info[] = {
227 /* sil_3112_no_sata_irq */ 227 /* sil_3112_no_sata_irq */
228 { 228 {
229 .sht = &sil_sht, 229 .sht = &sil_sht,
230 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE | 230 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
231 SIL_FLAG_NO_SATA_IRQ, 231 SIL_FLAG_NO_SATA_IRQ,
232 .pio_mask = 0x1f, /* pio0-4 */ 232 .pio_mask = 0x1f, /* pio0-4 */
233 .mwdma_mask = 0x07, /* mwdma0-2 */ 233 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -237,7 +237,7 @@ static const struct ata_port_info sil_port_info[] = {
237 /* sil_3512 */ 237 /* sil_3512 */
238 { 238 {
239 .sht = &sil_sht, 239 .sht = &sil_sht,
240 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 240 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
241 .pio_mask = 0x1f, /* pio0-4 */ 241 .pio_mask = 0x1f, /* pio0-4 */
242 .mwdma_mask = 0x07, /* mwdma0-2 */ 242 .mwdma_mask = 0x07, /* mwdma0-2 */
243 .udma_mask = 0x3f, /* udma0-5 */ 243 .udma_mask = 0x3f, /* udma0-5 */
@@ -246,7 +246,7 @@ static const struct ata_port_info sil_port_info[] = {
246 /* sil_3114 */ 246 /* sil_3114 */
247 { 247 {
248 .sht = &sil_sht, 248 .sht = &sil_sht,
249 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 249 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
250 .pio_mask = 0x1f, /* pio0-4 */ 250 .pio_mask = 0x1f, /* pio0-4 */
251 .mwdma_mask = 0x07, /* mwdma0-2 */ 251 .mwdma_mask = 0x07, /* mwdma0-2 */
252 .udma_mask = 0x3f, /* udma0-5 */ 252 .udma_mask = 0x3f, /* udma0-5 */
@@ -295,10 +295,9 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
295 295
296static void sil_post_set_mode (struct ata_port *ap) 296static void sil_post_set_mode (struct ata_port *ap)
297{ 297{
298 struct ata_host_set *host_set = ap->host_set; 298 struct ata_host *host = ap->host;
299 struct ata_device *dev; 299 struct ata_device *dev;
300 void __iomem *addr = 300 void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
301 host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
302 u32 tmp, dev_mode[2]; 301 u32 tmp, dev_mode[2];
303 unsigned int i; 302 unsigned int i;
304 303
@@ -440,15 +439,15 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
440static irqreturn_t sil_interrupt(int irq, void *dev_instance, 439static irqreturn_t sil_interrupt(int irq, void *dev_instance,
441 struct pt_regs *regs) 440 struct pt_regs *regs)
442{ 441{
443 struct ata_host_set *host_set = dev_instance; 442 struct ata_host *host = dev_instance;
444 void __iomem *mmio_base = host_set->mmio_base; 443 void __iomem *mmio_base = host->mmio_base;
445 int handled = 0; 444 int handled = 0;
446 int i; 445 int i;
447 446
448 spin_lock(&host_set->lock); 447 spin_lock(&host->lock);
449 448
450 for (i = 0; i < host_set->n_ports; i++) { 449 for (i = 0; i < host->n_ports; i++) {
451 struct ata_port *ap = host_set->ports[i]; 450 struct ata_port *ap = host->ports[i];
452 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 451 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
453 452
454 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) 453 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
@@ -466,14 +465,14 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
466 handled = 1; 465 handled = 1;
467 } 466 }
468 467
469 spin_unlock(&host_set->lock); 468 spin_unlock(&host->lock);
470 469
471 return IRQ_RETVAL(handled); 470 return IRQ_RETVAL(handled);
472} 471}
473 472
474static void sil_freeze(struct ata_port *ap) 473static void sil_freeze(struct ata_port *ap)
475{ 474{
476 void __iomem *mmio_base = ap->host_set->mmio_base; 475 void __iomem *mmio_base = ap->host->mmio_base;
477 u32 tmp; 476 u32 tmp;
478 477
479 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */ 478 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@@ -488,7 +487,7 @@ static void sil_freeze(struct ata_port *ap)
488 487
489static void sil_thaw(struct ata_port *ap) 488static void sil_thaw(struct ata_port *ap)
490{ 489{
491 void __iomem *mmio_base = ap->host_set->mmio_base; 490 void __iomem *mmio_base = ap->host->mmio_base;
492 u32 tmp; 491 u32 tmp;
493 492
494 /* clear IRQ */ 493 /* clear IRQ */
@@ -567,7 +566,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
567} 566}
568 567
569static void sil_init_controller(struct pci_dev *pdev, 568static void sil_init_controller(struct pci_dev *pdev,
570 int n_ports, unsigned long host_flags, 569 int n_ports, unsigned long port_flags,
571 void __iomem *mmio_base) 570 void __iomem *mmio_base)
572{ 571{
573 u8 cls; 572 u8 cls;
@@ -587,7 +586,7 @@ static void sil_init_controller(struct pci_dev *pdev,
587 "cache line size not set. Driver may not function\n"); 586 "cache line size not set. Driver may not function\n");
588 587
589 /* Apply R_ERR on DMA activate FIS errata workaround */ 588 /* Apply R_ERR on DMA activate FIS errata workaround */
590 if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 589 if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
591 int cnt; 590 int cnt;
592 591
593 for (i = 0, cnt = 0; i < n_ports; i++) { 592 for (i = 0, cnt = 0; i < n_ports; i++) {
@@ -658,7 +657,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
658 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask; 657 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
659 probe_ent->irq = pdev->irq; 658 probe_ent->irq = pdev->irq;
660 probe_ent->irq_flags = IRQF_SHARED; 659 probe_ent->irq_flags = IRQF_SHARED;
661 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; 660 probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
662 661
663 mmio_base = pci_iomap(pdev, 5, 0); 662 mmio_base = pci_iomap(pdev, 5, 0);
664 if (mmio_base == NULL) { 663 if (mmio_base == NULL) {
@@ -679,7 +678,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
679 ata_std_ports(&probe_ent->port[i]); 678 ata_std_ports(&probe_ent->port[i]);
680 } 679 }
681 680
682 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 681 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
683 mmio_base); 682 mmio_base);
684 683
685 pci_set_master(pdev); 684 pci_set_master(pdev);
@@ -700,21 +699,23 @@ err_out:
700 return rc; 699 return rc;
701} 700}
702 701
702#ifdef CONFIG_PM
703static int sil_pci_device_resume(struct pci_dev *pdev) 703static int sil_pci_device_resume(struct pci_dev *pdev)
704{ 704{
705 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 705 struct ata_host *host = dev_get_drvdata(&pdev->dev);
706 706
707 ata_pci_device_do_resume(pdev); 707 ata_pci_device_do_resume(pdev);
708 sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags, 708 sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
709 host_set->mmio_base); 709 host->mmio_base);
710 ata_host_set_resume(host_set); 710 ata_host_resume(host);
711 711
712 return 0; 712 return 0;
713} 713}
714#endif
714 715
715static int __init sil_init(void) 716static int __init sil_init(void)
716{ 717{
717 return pci_module_init(&sil_pci_driver); 718 return pci_register_driver(&sil_pci_driver);
718} 719}
719 720
720static void __exit sil_exit(void) 721static void __exit sil_exit(void)
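Besides the renames, the sata_sil hunks gate the suspend/resume paths behind CONFIG_PM, both at the function definitions and in the pci_driver hook table, so CONFIG_PM=n builds drop the code entirely. The resume side follows this shape (the function name is a stand-in and the controller re-initialisation is elided):

	#ifdef CONFIG_PM
	static int example_pci_device_resume(struct pci_dev *pdev)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);

		ata_pci_device_do_resume(pdev);
		/* driver-specific controller re-init would go here */
		ata_host_resume(host);

		return 0;
	}
	#endif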
diff --git a/drivers/scsi/sata_sil24.c b/drivers/ata/sata_sil24.c
index 3f368c7d3ef9..39cb07baebae 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -316,7 +316,7 @@ struct sil24_port_priv {
316 struct ata_taskfile tf; /* Cached taskfile registers */ 316 struct ata_taskfile tf; /* Cached taskfile registers */
317}; 317};
318 318
319/* ap->host_set->private_data */ 319/* ap->host->private_data */
320struct sil24_host_priv { 320struct sil24_host_priv {
321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */ 321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */ 322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
@@ -337,9 +337,11 @@ static void sil24_error_handler(struct ata_port *ap);
337static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); 337static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
338static int sil24_port_start(struct ata_port *ap); 338static int sil24_port_start(struct ata_port *ap);
339static void sil24_port_stop(struct ata_port *ap); 339static void sil24_port_stop(struct ata_port *ap);
340static void sil24_host_stop(struct ata_host_set *host_set); 340static void sil24_host_stop(struct ata_host *host);
341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
342#ifdef CONFIG_PM
342static int sil24_pci_device_resume(struct pci_dev *pdev); 343static int sil24_pci_device_resume(struct pci_dev *pdev);
344#endif
343 345
344static const struct pci_device_id sil24_pci_tbl[] = { 346static const struct pci_device_id sil24_pci_tbl[] = {
345 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 347 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
@@ -355,8 +357,10 @@ static struct pci_driver sil24_pci_driver = {
355 .id_table = sil24_pci_tbl, 357 .id_table = sil24_pci_tbl,
356 .probe = sil24_init_one, 358 .probe = sil24_init_one,
357 .remove = ata_pci_remove_one, /* safe? */ 359 .remove = ata_pci_remove_one, /* safe? */
360#ifdef CONFIG_PM
358 .suspend = ata_pci_device_suspend, 361 .suspend = ata_pci_device_suspend,
359 .resume = sil24_pci_device_resume, 362 .resume = sil24_pci_device_resume,
363#endif
360}; 364};
361 365
362static struct scsi_host_template sil24_sht = { 366static struct scsi_host_template sil24_sht = {
@@ -411,7 +415,7 @@ static const struct ata_port_operations sil24_ops = {
411}; 415};
412 416
413/* 417/*
414 * Use bits 30-31 of host_flags to encode available port numbers. 418 * Use bits 30-31 of port_flags to encode available port numbers.
415 * Current maximum is 4. 419 * Current maximum is 4.
416 */ 420 */
417#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) 421#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
@@ -421,7 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
421 /* sil_3124 */ 425 /* sil_3124 */
422 { 426 {
423 .sht = &sil24_sht, 427 .sht = &sil24_sht,
424 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | 428 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
425 SIL24_FLAG_PCIX_IRQ_WOC, 429 SIL24_FLAG_PCIX_IRQ_WOC,
426 .pio_mask = 0x1f, /* pio0-4 */ 430 .pio_mask = 0x1f, /* pio0-4 */
427 .mwdma_mask = 0x07, /* mwdma0-2 */ 431 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -431,7 +435,7 @@ static struct ata_port_info sil24_port_info[] = {
431 /* sil_3132 */ 435 /* sil_3132 */
432 { 436 {
433 .sht = &sil24_sht, 437 .sht = &sil24_sht,
434 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), 438 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
435 .pio_mask = 0x1f, /* pio0-4 */ 439 .pio_mask = 0x1f, /* pio0-4 */
436 .mwdma_mask = 0x07, /* mwdma0-2 */ 440 .mwdma_mask = 0x07, /* mwdma0-2 */
437 .udma_mask = 0x3f, /* udma0-5 */ 441 .udma_mask = 0x3f, /* udma0-5 */
@@ -440,7 +444,7 @@ static struct ata_port_info sil24_port_info[] = {
440 /* sil_3131/sil_3531 */ 444 /* sil_3131/sil_3531 */
441 { 445 {
442 .sht = &sil24_sht, 446 .sht = &sil24_sht,
443 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), 447 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
444 .pio_mask = 0x1f, /* pio0-4 */ 448 .pio_mask = 0x1f, /* pio0-4 */
445 .mwdma_mask = 0x07, /* mwdma0-2 */ 449 .mwdma_mask = 0x07, /* mwdma0-2 */
446 .udma_mask = 0x3f, /* udma0-5 */ 450 .udma_mask = 0x3f, /* udma0-5 */
@@ -867,8 +871,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
867 871
868static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 872static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
869{ 873{
870 struct ata_host_set *host_set = dev_instance; 874 struct ata_host *host = dev_instance;
871 struct sil24_host_priv *hpriv = host_set->private_data; 875 struct sil24_host_priv *hpriv = host->private_data;
872 unsigned handled = 0; 876 unsigned handled = 0;
873 u32 status; 877 u32 status;
874 int i; 878 int i;
@@ -884,20 +888,20 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
884 if (!(status & IRQ_STAT_4PORTS)) 888 if (!(status & IRQ_STAT_4PORTS))
885 goto out; 889 goto out;
886 890
887 spin_lock(&host_set->lock); 891 spin_lock(&host->lock);
888 892
889 for (i = 0; i < host_set->n_ports; i++) 893 for (i = 0; i < host->n_ports; i++)
890 if (status & (1 << i)) { 894 if (status & (1 << i)) {
891 struct ata_port *ap = host_set->ports[i]; 895 struct ata_port *ap = host->ports[i];
892 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 896 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
893 sil24_host_intr(host_set->ports[i]); 897 sil24_host_intr(host->ports[i]);
894 handled++; 898 handled++;
895 } else 899 } else
896 printk(KERN_ERR DRV_NAME 900 printk(KERN_ERR DRV_NAME
897 ": interrupt from disabled port %d\n", i); 901 ": interrupt from disabled port %d\n", i);
898 } 902 }
899 903
900 spin_unlock(&host_set->lock); 904 spin_unlock(&host->lock);
901 out: 905 out:
902 return IRQ_RETVAL(handled); 906 return IRQ_RETVAL(handled);
903} 907}
@@ -937,7 +941,7 @@ static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *de
937 941
938static int sil24_port_start(struct ata_port *ap) 942static int sil24_port_start(struct ata_port *ap)
939{ 943{
940 struct device *dev = ap->host_set->dev; 944 struct device *dev = ap->host->dev;
941 struct sil24_port_priv *pp; 945 struct sil24_port_priv *pp;
942 union sil24_cmd_block *cb; 946 union sil24_cmd_block *cb;
943 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; 947 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
@@ -976,7 +980,7 @@ err_out:
976 980
977static void sil24_port_stop(struct ata_port *ap) 981static void sil24_port_stop(struct ata_port *ap)
978{ 982{
979 struct device *dev = ap->host_set->dev; 983 struct device *dev = ap->host->dev;
980 struct sil24_port_priv *pp = ap->private_data; 984 struct sil24_port_priv *pp = ap->private_data;
981 985
982 sil24_cblk_free(pp, dev); 986 sil24_cblk_free(pp, dev);
@@ -984,10 +988,10 @@ static void sil24_port_stop(struct ata_port *ap)
984 kfree(pp); 988 kfree(pp);
985} 989}
986 990
987static void sil24_host_stop(struct ata_host_set *host_set) 991static void sil24_host_stop(struct ata_host *host)
988{ 992{
989 struct sil24_host_priv *hpriv = host_set->private_data; 993 struct sil24_host_priv *hpriv = host->private_data;
990 struct pci_dev *pdev = to_pci_dev(host_set->dev); 994 struct pci_dev *pdev = to_pci_dev(host->dev);
991 995
992 pci_iounmap(pdev, hpriv->host_base); 996 pci_iounmap(pdev, hpriv->host_base);
993 pci_iounmap(pdev, hpriv->port_base); 997 pci_iounmap(pdev, hpriv->port_base);
@@ -995,7 +999,7 @@ static void sil24_host_stop(struct ata_host_set *host_set)
995} 999}
996 1000
997static void sil24_init_controller(struct pci_dev *pdev, int n_ports, 1001static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
998 unsigned long host_flags, 1002 unsigned long port_flags,
999 void __iomem *host_base, 1003 void __iomem *host_base,
1000 void __iomem *port_base) 1004 void __iomem *port_base)
1001{ 1005{
@@ -1028,7 +1032,7 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1028 } 1032 }
1029 1033
1030 /* Configure IRQ WoC */ 1034 /* Configure IRQ WoC */
1031 if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC) 1035 if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1032 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); 1036 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1033 else 1037 else
1034 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); 1038 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
@@ -1097,12 +1101,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1097 INIT_LIST_HEAD(&probe_ent->node); 1101 INIT_LIST_HEAD(&probe_ent->node);
1098 1102
1099 probe_ent->sht = pinfo->sht; 1103 probe_ent->sht = pinfo->sht;
1100 probe_ent->host_flags = pinfo->host_flags; 1104 probe_ent->port_flags = pinfo->flags;
1101 probe_ent->pio_mask = pinfo->pio_mask; 1105 probe_ent->pio_mask = pinfo->pio_mask;
1102 probe_ent->mwdma_mask = pinfo->mwdma_mask; 1106 probe_ent->mwdma_mask = pinfo->mwdma_mask;
1103 probe_ent->udma_mask = pinfo->udma_mask; 1107 probe_ent->udma_mask = pinfo->udma_mask;
1104 probe_ent->port_ops = pinfo->port_ops; 1108 probe_ent->port_ops = pinfo->port_ops;
1105 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 1109 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags);
1106 1110
1107 probe_ent->irq = pdev->irq; 1111 probe_ent->irq = pdev->irq;
1108 probe_ent->irq_flags = IRQF_SHARED; 1112 probe_ent->irq_flags = IRQF_SHARED;
@@ -1140,14 +1144,14 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1140 } 1144 }
1141 1145
1142 /* Apply workaround for completion IRQ loss on PCI-X errata */ 1146 /* Apply workaround for completion IRQ loss on PCI-X errata */
1143 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1147 if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1144 tmp = readl(host_base + HOST_CTRL); 1148 tmp = readl(host_base + HOST_CTRL);
1145 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL)) 1149 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1146 dev_printk(KERN_INFO, &pdev->dev, 1150 dev_printk(KERN_INFO, &pdev->dev,
1147 "Applying completion IRQ loss on PCI-X " 1151 "Applying completion IRQ loss on PCI-X "
1148 "errata fix\n"); 1152 "errata fix\n");
1149 else 1153 else
1150 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1154 probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1151 } 1155 }
1152 1156
1153 for (i = 0; i < probe_ent->n_ports; i++) { 1157 for (i = 0; i < probe_ent->n_ports; i++) {
@@ -1160,7 +1164,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1160 ata_std_ports(&probe_ent->port[i]); 1164 ata_std_ports(&probe_ent->port[i]);
1161 } 1165 }
1162 1166
1163 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 1167 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
1164 host_base, port_base); 1168 host_base, port_base);
1165 1169
1166 pci_set_master(pdev); 1170 pci_set_master(pdev);
@@ -1184,28 +1188,29 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1184 return rc; 1188 return rc;
1185} 1189}
1186 1190
1191#ifdef CONFIG_PM
1187static int sil24_pci_device_resume(struct pci_dev *pdev) 1192static int sil24_pci_device_resume(struct pci_dev *pdev)
1188{ 1193{
1189 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 1194 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1190 struct sil24_host_priv *hpriv = host_set->private_data; 1195 struct sil24_host_priv *hpriv = host->private_data;
1191 1196
1192 ata_pci_device_do_resume(pdev); 1197 ata_pci_device_do_resume(pdev);
1193 1198
1194 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) 1199 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1195 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); 1200 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
1196 1201
1197 sil24_init_controller(pdev, host_set->n_ports, 1202 sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
1198 host_set->ports[0]->flags,
1199 hpriv->host_base, hpriv->port_base); 1203 hpriv->host_base, hpriv->port_base);
1200 1204
1201 ata_host_set_resume(host_set); 1205 ata_host_resume(host);
1202 1206
1203 return 0; 1207 return 0;
1204} 1208}
1209#endif
1205 1210
1206static int __init sil24_init(void) 1211static int __init sil24_init(void)
1207{ 1212{
1208 return pci_module_init(&sil24_pci_driver); 1213 return pci_register_driver(&sil24_pci_driver);
1209} 1214}
1210 1215
1211static void __exit sil24_exit(void) 1216static void __exit sil24_exit(void)
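sil24 packs its port count into bits 30-31 of the (now renamed) flags word via SIL24_NPORTS2FLAG(); four ports, for instance, encodes as (4 - 1) & 0x3 = 3 in those two bits, and SIL24_FLAG2NPORTS() is expected to undo the packing (its definition sits outside this excerpt). A compile-time check of the encoding, relying only on the macro quoted above, could sit in the probe path:

	/* sanity-check the NPORTS2FLAG packing */
	BUILD_BUG_ON(SIL24_NPORTS2FLAG(4) != (0x3u << 30));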
diff --git a/drivers/scsi/sata_sis.c b/drivers/ata/sata_sis.c
index ee6b5df41d30..9b17375d8056 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -128,7 +128,7 @@ static const struct ata_port_operations sis_ops = {
128 128
129static struct ata_port_info sis_port_info = { 129static struct ata_port_info sis_port_info = {
130 .sht = &sis_sht, 130 .sht = &sis_sht,
131 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 131 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f, 132 .pio_mask = 0x1f,
133 .mwdma_mask = 0x7, 133 .mwdma_mask = 0x7,
134 .udma_mask = 0x7f, 134 .udma_mask = 0x7f,
@@ -158,7 +158,7 @@ static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg,
158 158
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) 159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{ 160{
161 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 161 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); 162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
163 u32 val, val2 = 0; 163 u32 val, val2 = 0;
164 u8 pmr; 164 u8 pmr;
@@ -178,7 +178,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
178 178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
180{ 180{
181 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 181 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device); 182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
183 u8 pmr; 183 u8 pmr;
184 184
@@ -195,7 +195,7 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
195 195
196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) 196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
197{ 197{
198 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 198 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
199 u32 val, val2 = 0; 199 u32 val, val2 = 0;
200 u8 pmr; 200 u8 pmr;
201 201
@@ -217,7 +217,7 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
217 217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
219{ 219{
220 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 220 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
221 u8 pmr; 221 u8 pmr;
222 222
223 if (sc_reg > SCR_CONTROL) 223 if (sc_reg > SCR_CONTROL)
@@ -275,17 +275,17 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
275 /* check and see if the SCRs are in IO space or PCI cfg space */ 275 /* check and see if the SCRs are in IO space or PCI cfg space */
276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl); 276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0) 277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
278 probe_ent->host_flags |= SIS_FLAG_CFGSCR; 278 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
279 279
280 /* if hardware thinks SCRs are in IO space, but there are 280 /* if hardware thinks SCRs are in IO space, but there are
281 * no IO resources assigned, change to PCI cfg space. 281 * no IO resources assigned, change to PCI cfg space.
282 */ 282 */
283 if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) && 283 if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) &&
284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || 284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { 285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
286 genctl &= ~GENCTL_IOMAPPED_SCR; 286 genctl &= ~GENCTL_IOMAPPED_SCR;
287 pci_write_config_dword(pdev, SIS_GENCTL, genctl); 287 pci_write_config_dword(pdev, SIS_GENCTL, genctl);
288 probe_ent->host_flags |= SIS_FLAG_CFGSCR; 288 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
289 } 289 }
290 290
291 pci_read_config_byte(pdev, SIS_PMR, &pmr); 291 pci_read_config_byte(pdev, SIS_PMR, &pmr);
@@ -306,7 +306,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
306 port2_start = 0x20; 306 port2_start = 0x20;
307 } 307 }
308 308
309 if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) { 309 if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
310 probe_ent->port[0].scr_addr = 310 probe_ent->port[0].scr_addr =
311 pci_resource_start(pdev, SIS_SCR_PCI_BAR); 311 pci_resource_start(pdev, SIS_SCR_PCI_BAR);
312 probe_ent->port[1].scr_addr = 312 probe_ent->port[1].scr_addr =
@@ -334,7 +334,7 @@ err_out:
334 334
335static int __init sis_init(void) 335static int __init sis_init(void)
336{ 336{
337 return pci_module_init(&sis_pci_driver); 337 return pci_register_driver(&sis_pci_driver);
338} 338}
339 339
340static void __exit sis_exit(void) 340static void __exit sis_exit(void)
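The sis_scr_* accessors all recover their PCI device from the renamed host pointer rather than from the old host_set; the line they now share is simply:

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);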
diff --git a/drivers/scsi/sata_svw.c b/drivers/ata/sata_svw.c
index 6b70c3c76dfd..d6d6658d8328 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -169,7 +169,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
  * @qc: Info associated with this ATA transaction.
  *
  * LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@@ -199,7 +199,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
  * @qc: Info associated with this ATA transaction.
  *
  * LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
@@ -261,12 +261,12 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
 		return 0;
 
 	/* Find the OF node for the PCI device proper */
-	np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
+	np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
 	if (np == NULL)
 		return 0;
 
 	/* Match it to a port node */
-	index = (ap == ap->host_set->ports[0]) ? 0 : 1;
+	index = (ap == ap->host->ports[0]) ? 0 : 1;
 	for (np = np->child; np != NULL; np = np->sibling) {
 		const u32 *reg = get_property(np, "reg", NULL);
 		if (!reg)
@@ -423,7 +423,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
 
 	probe_ent->sht = &k2_sata_sht;
-	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+	probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				ATA_FLAG_MMIO;
 	probe_ent->port_ops = &k2_sata_ops;
 	probe_ent->n_ports = 4;
@@ -488,7 +488,7 @@ static struct pci_driver k2_sata_pci_driver = {
 
 static int __init k2_sata_init(void)
 {
-	return pci_module_init(&k2_sata_pci_driver);
+	return pci_register_driver(&k2_sata_pci_driver);
 }
 
 
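The sata_svw.c hunks above reflect the "grand renaming" listed in the merge log: the per-port back-pointer ap->host_set becomes ap->host, and the dev, lock and ports[] fields are reached through it as before. A hedged sketch of the resulting access pattern; the helper name is illustrative, not from the patch.

/*
 * Sketch only: reaching the shared host structure from a port after the
 * ap->host_set -> ap->host rename.  example_port_to_pci() is illustrative.
 */
static struct pci_dev *example_port_to_pci(struct ata_port *ap)
{
	/* old: struct device *dev = ap->host_set->dev; */
	struct device *dev = ap->host->dev;	/* ap->host is a struct ata_host */

	return to_pci_dev(dev);
}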
diff --git a/drivers/scsi/sata_sx4.c b/drivers/ata/sata_sx4.c
index ccc8cad24f7d..091867e10ea3 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -160,7 +160,7 @@ static void pdc_port_stop(struct ata_port *ap);
 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
-static void pdc20621_host_stop(struct ata_host_set *host_set);
+static void pdc20621_host_stop(struct ata_host *host);
 static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
 static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
 static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
@@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = {
 	/* board_20621 */
 	{
 		.sht		= &pdc_sata_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
 				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f,	/* pio0-4 */
@@ -244,21 +244,21 @@ static struct pci_driver pdc_sata_pci_driver = {
 };
 
 
-static void pdc20621_host_stop(struct ata_host_set *host_set)
+static void pdc20621_host_stop(struct ata_host *host)
 {
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
-	struct pdc_host_priv *hpriv = host_set->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct pdc_host_priv *hpriv = host->private_data;
 	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 
 	pci_iounmap(pdev, dimm_mmio);
 	kfree(hpriv);
 
-	pci_iounmap(pdev, host_set->mmio_base);
+	pci_iounmap(pdev, host->mmio_base);
 }
 
 static int pdc_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct pdc_port_priv *pp;
 	int rc;
 
@@ -293,7 +293,7 @@ err_out:
 
 static void pdc_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct pdc_port_priv *pp = ap->private_data;
 
 	ap->private_data = NULL;
@@ -453,8 +453,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
+	void __iomem *mmio = ap->host->mmio_base;
+	struct pdc_host_priv *hpriv = ap->host->private_data;
 	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
 	unsigned int i, idx, total_len = 0, sgt_len;
@@ -514,8 +514,8 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
+	void __iomem *mmio = ap->host->mmio_base;
+	struct pdc_host_priv *hpriv = ap->host->private_data;
 	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
 	unsigned int i;
@@ -565,8 +565,8 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
 				u32 pkt_ofs)
 {
 	struct ata_port *ap = qc->ap;
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->mmio_base;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -583,7 +583,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
 				u32 pkt_ofs)
 {
 	struct ata_port *ap = qc->ap;
-	struct pdc_host_priv *pp = ap->host_set->private_data;
+	struct pdc_host_priv *pp = ap->host->private_data;
 	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
 
 	if (!pp->doing_hdma) {
@@ -601,7 +601,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct pdc_host_priv *pp = ap->host_set->private_data;
+	struct pdc_host_priv *pp = ap->host->private_data;
 	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
 
 	/* if nothing on queue, we're done */
@@ -620,7 +620,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	unsigned int port_no = ap->port_no;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
+	struct pdc_host_priv *hpriv = ap->host->private_data;
 	void *dimm_mmio = hpriv->dimm_mmio;
 
 	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
@@ -638,9 +638,9 @@ static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct ata_host_set *host_set = ap->host_set;
+	struct ata_host *host = ap->host;
 	unsigned int port_no = ap->port_no;
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->mmio_base;
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 seq = (u8) (port_no + 1);
 	unsigned int port_ofs;
@@ -781,8 +781,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
 
 static void pdc20621_irq_clear(struct ata_port *ap)
 {
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->mmio_base;
 
 	mmio += PDC_CHIP0_OFS;
 
@@ -791,7 +791,7 @@ static void pdc20621_irq_clear(struct ata_port *ap)
 
 static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	struct ata_port *ap;
 	u32 mask = 0;
 	unsigned int i, tmp, port_no;
@@ -800,12 +800,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 
 	VPRINTK("ENTER\n");
 
-	if (!host_set || !host_set->mmio_base) {
+	if (!host || !host->mmio_base) {
 		VPRINTK("QUICK EXIT\n");
 		return IRQ_NONE;
 	}
 
-	mmio_base = host_set->mmio_base;
+	mmio_base = host->mmio_base;
 
 	/* reading should also clear interrupts */
 	mmio_base += PDC_CHIP0_OFS;
@@ -822,16 +822,16 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		return IRQ_NONE;
 	}
 
-	spin_lock(&host_set->lock);
+	spin_lock(&host->lock);
 
 	for (i = 1; i < 9; i++) {
 		port_no = i - 1;
 		if (port_no > 3)
 			port_no -= 4;
-		if (port_no >= host_set->n_ports)
+		if (port_no >= host->n_ports)
 			ap = NULL;
 		else
-			ap = host_set->ports[port_no];
+			ap = host->ports[port_no];
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
@@ -845,7 +845,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		}
 	}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	VPRINTK("mask == 0x%x\n", mask);
 
@@ -857,13 +857,13 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 static void pdc_eng_timeout(struct ata_port *ap)
 {
 	u8 drv_stat;
-	struct ata_host_set *host_set = ap->host_set;
+	struct ata_host *host = ap->host;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	spin_lock_irqsave(&host->lock, flags);
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 
@@ -885,7 +885,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
 		break;
 	}
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	spin_unlock_irqrestore(&host->lock, flags);
 	ata_eh_qc_complete(qc);
 	DPRINTK("EXIT\n");
 }
@@ -1429,7 +1429,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
 	hpriv->dimm_mmio = dimm_mmio;
 
 	probe_ent->sht = pdc_port_info[board_idx].sht;
-	probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
+	probe_ent->port_flags = pdc_port_info[board_idx].flags;
 	probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
 	probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
 	probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
@@ -1482,7 +1482,7 @@ err_out:
 
 static int __init pdc_sata_init(void)
 {
-	return pci_module_init(&pdc_sata_pci_driver);
+	return pci_register_driver(&pdc_sata_pci_driver);
 }
 
 
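The pdc20621_host_stop() change above shows the type side of the same rename: struct ata_host_set becomes struct ata_host, so callbacks that receive the host structure change their signatures while the dev, lock, n_ports, ports[] and mmio_base fields keep their roles. A hedged sketch of such a teardown callback after the rename; the cleanup body only mirrors the hunk above and the function name is a placeholder.

/*
 * Sketch only: a host-stop style callback after struct ata_host_set
 * was renamed to struct ata_host.  Field accesses mirror the hunk above;
 * "example_host_stop" is a placeholder name.
 */
static void example_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	kfree(host->private_data);		/* driver-private host data, if any */
	pci_iounmap(pdev, host->mmio_base);	/* mmio_base still lives in ata_host here */
}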
diff --git a/drivers/scsi/sata_uli.c b/drivers/ata/sata_uli.c
index 33cdb4867ef1..8fc6e800011a 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -128,7 +128,7 @@ static const struct ata_port_operations uli_ops = {
 
 static struct ata_port_info uli_port_info = {
 	.sht		= &uli_sht,
-	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,		/* pio0-4 */
 	.udma_mask	= 0x7f,		/* udma0-6 */
 	.port_ops	= &uli_ops,
@@ -143,13 +143,13 @@ MODULE_VERSION(DRV_VERSION);
 
 static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
 {
-	struct uli_priv *hpriv = ap->host_set->private_data;
+	struct uli_priv *hpriv = ap->host->private_data;
 	return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
 }
 
 static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
 	u32 val;
 
@@ -159,7 +159,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 
 static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
 
 	pci_write_config_dword(pdev, cfg_addr, val);
@@ -287,7 +287,7 @@ err_out:
 
 static int __init uli_init(void)
 {
-	return pci_module_init(&uli_pci_driver);
+	return pci_register_driver(&uli_pci_driver);
 }
 
 static void __exit uli_exit(void)
diff --git a/drivers/scsi/sata_via.c b/drivers/ata/sata_via.c
index a3727af8b9c1..7f087aef99de 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -176,7 +176,7 @@ static const struct ata_port_operations vt6421_sata_ops = {
 
 static struct ata_port_info vt6420_port_info = {
 	.sht		= &svia_sht,
-	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x07,
 	.udma_mask	= 0x7f,
@@ -346,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
 	INIT_LIST_HEAD(&probe_ent->node);
 
 	probe_ent->sht		= &svia_sht;
-	probe_ent->host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
+	probe_ent->port_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
 	probe_ent->port_ops	= &vt6421_sata_ops;
 	probe_ent->n_ports	= N_PORTS;
 	probe_ent->irq		= pdev->irq;
@@ -489,7 +489,7 @@ err_out:
 
 static int __init svia_init(void)
 {
-	return pci_module_init(&svia_pci_driver);
+	return pci_register_driver(&svia_pci_driver);
 }
 
 static void __exit svia_exit(void)
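In the sata_uli.c and sata_via.c hunks above, the flag word in struct ata_port_info is renamed from .host_flags to .flags, while the copy carried in the probe entry becomes port_flags. A hedged sketch of a port-info table after the rename; the sht/ops symbols and the mode masks are placeholders rather than values from this patch.

/*
 * Sketch only: an ata_port_info initializer after .host_flags was
 * renamed to .flags.  example_sht, example_ops and the masks are
 * placeholders.
 */
static struct ata_port_info example_port_info = {
	.sht		= &example_sht,
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,	/* was .host_flags */
	.pio_mask	= 0x1f,		/* pio0-4 */
	.udma_mask	= 0x7f,		/* udma0-6 */
	.port_ops	= &example_ops,
};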
diff --git a/drivers/scsi/sata_vsc.c b/drivers/ata/sata_vsc.c
index ad37871594f5..d0d92f33de54 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -123,7 +123,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
 	void __iomem *mask_addr;
 	u8 mask;
 
-	mask_addr = ap->host_set->mmio_base +
+	mask_addr = ap->host->mmio_base +
 		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
 	mask = readb(mask_addr);
 	if (ctl & ATA_NIEN)
@@ -206,20 +206,20 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 				       struct pt_regs *regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int i;
 	unsigned int handled = 0;
 	u32 int_status;
 
-	spin_lock(&host_set->lock);
+	spin_lock(&host->lock);
 
-	int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
+	int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
 
-	for (i = 0; i < host_set->n_ports; i++) {
+	for (i = 0; i < host->n_ports; i++) {
 		if (int_status & ((u32) 0xFF << (8 * i))) {
 			struct ata_port *ap;
 
-			ap = host_set->ports[i];
+			ap = host->ports[i];
 
 			if (is_vsc_sata_int_err(i, int_status)) {
 				u32 err_status;
@@ -259,7 +259,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 		}
 	}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	return IRQ_RETVAL(handled);
 }
@@ -395,7 +395,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
 	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
 
 	probe_ent->sht = &vsc_sata_sht;
-	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+	probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				ATA_FLAG_MMIO;
 	probe_ent->port_ops = &vsc_sata_ops;
 	probe_ent->n_ports = 4;
@@ -462,7 +462,7 @@ static struct pci_driver vsc_sata_pci_driver = {
 
 static int __init vsc_sata_init(void)
 {
-	return pci_module_init(&vsc_sata_pci_driver);
+	return pci_register_driver(&vsc_sata_pci_driver);
 }
 
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 17e709e7d72a..def78a2a7c15 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1210,7 +1210,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
 
-#if defined(CONFIG_SCSI_SATA) || defined(CONFIG_SCSI_SATA_MODULE)
+#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
 
 /*
  * If we are using libata we can drive this chip properly but must
@@ -1300,7 +1300,7 @@ static int __init combined_setup(char *str)
 }
 __setup("combined_mode=", combined_setup);
 
-#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED
+#ifdef CONFIG_SATA_INTEL_COMBINED
 static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 {
 	u8 prog, comb, tmp;
@@ -1393,7 +1393,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 		request_region(0x170, 8, "libata");	/* port 1 */
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined );
-#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */
+#endif /* CONFIG_SATA_INTEL_COMBINED */
 
 
 int pcie_mch_quirk;
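The quirks.c hunks above track the Kconfig renaming from CONFIG_SCSI_SATA to CONFIG_ATA (and CONFIG_SCSI_SATA_INTEL_COMBINED to CONFIG_SATA_INTEL_COMBINED). A hedged sketch of the usual way code is guarded on a tristate option after such a rename, covering both built-in and modular builds; the quirk body below is a placeholder, not taken from this patch.

/*
 * Sketch only: guarding on a tristate Kconfig symbol after the rename.
 * CONFIG_ATA is defined when ATA support is built in, CONFIG_ATA_MODULE
 * when it is built as a module; the body is a placeholder.
 */
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
static void example_libata_quirk(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "some libata support is configured\n");
}
#endif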
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a6f920d218a0..c4dfcc91ddda 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -494,67 +494,6 @@ config SCSI_ARCMSR
 
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 
-config SCSI_SATA
-	tristate "Serial ATA (SATA) support"
-	depends on SCSI
-	help
-	  This driver family supports Serial ATA host controllers
-	  and devices.
-
-	  If unsure, say N.
-
-config SCSI_SATA_AHCI
-	tristate "AHCI SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for AHCI Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SVW
-	tristate "ServerWorks Frodo / Apple K2 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Broadcom/Serverworks/Apple K2
-	  SATA support.
-
-	  If unsure, say N.
-
-config SCSI_ATA_PIIX
-	tristate "Intel PIIX/ICH SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for ICH5/6/7/8 Serial ATA.
-	  If PATA support was enabled previously, this enables
-	  support for select Intel PIIX/ICH PATA host controllers.
-
-	  If unsure, say N.
-
-config SCSI_SATA_MV
-	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for the Marvell Serial ATA family.
-	  Currently supports 88SX[56]0[48][01] chips.
-
-	  If unsure, say N.
-
-config SCSI_SATA_NV
-	tristate "NVIDIA SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for NVIDIA Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_PDC_ADMA
-	tristate "Pacific Digital ADMA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital ADMA controllers
-
-	  If unsure, say N.
-
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx Controller support"
 	depends on SCSI && PCI
@@ -565,83 +504,6 @@ config SCSI_HPTIOP
 	  To compile this driver as a module, choose M here; the module
 	  will be called hptiop. If unsure, say N.
 
-config SCSI_SATA_QSTOR
-	tristate "Pacific Digital SATA QStor support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital Serial ATA QStor.
-
-	  If unsure, say N.
-
-config SCSI_SATA_PROMISE
-	tristate "Promise SATA TX2/TX4 support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Promise Serial ATA TX2/TX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SX4
-	tristate "Promise SATA SX4 support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Promise Serial ATA SX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL
-	tristate "Silicon Image SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIS
-	tristate "SiS 964/180 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for SiS Serial ATA 964/180.
-
-	  If unsure, say N.
-
-config SCSI_SATA_ULI
-	tristate "ULi Electronics SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for ULi Electronics SATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VIA
-	tristate "VIA SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for VIA Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VITESSE
-	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_INTEL_COMBINED
-	bool
-	depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
-	default y
-
 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
 	depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8fc2c594b537..1ef951be7a5d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -125,21 +125,6 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
 obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
-obj-$(CONFIG_SCSI_SATA_AHCI)	+= libata.o ahci.o
-obj-$(CONFIG_SCSI_SATA_SVW)	+= libata.o sata_svw.o
-obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
-obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
-obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
-obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
-obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
-obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
-obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
-obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
-obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
-obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
-obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
-obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
-obj-$(CONFIG_SCSI_PDC_ADMA)	+= libata.o pdc_adma.o
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 
@@ -171,7 +156,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs	:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
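The Kconfig and Makefile entries deleted above are not lost; per the diffstat they move under drivers/ata/, with the SCSI_ prefix dropped from the option names (CONFIG_ATA, CONFIG_SATA_AHCI, and so on, as the quirks.c hunk already reflects), and libata-bmdma.o becomes libata-sff.o to match the renamed source file. A hedged sketch of how a few of the relocated kbuild rules might look under those renamed options; this is not a verbatim copy of the new drivers/ata/Makefile.

# Sketch only: relocated rules under drivers/ata/ with the renamed
# config options; not a verbatim copy of the new Makefile.
obj-$(CONFIG_SATA_AHCI)		+= libata.o ahci.o
obj-$(CONFIG_ATA_PIIX)		+= libata.o ata_piix.o
obj-$(CONFIG_SATA_SVW)		+= libata.o sata_svw.o

libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o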