aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/ata')
-rw-r--r--drivers/ata/Kconfig547
-rw-r--r--drivers/ata/Makefile89
-rw-r--r--drivers/ata/ahci.c2566
-rw-r--r--drivers/ata/ahci.h354
-rw-r--r--drivers/ata/ahci_platform.c200
-rw-r--r--drivers/ata/ata_generic.c34
-rw-r--r--drivers/ata/ata_piix.c18
-rw-r--r--drivers/ata/libahci.c2206
-rw-r--r--drivers/ata/libata-acpi.c6
-rw-r--r--drivers/ata/libata-core.c428
-rw-r--r--drivers/ata/libata-eh.c15
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-scsi.c46
-rw-r--r--drivers/ata/libata-sff.c1805
-rw-r--r--drivers/ata/libata.h30
-rw-r--r--drivers/ata/pata_acpi.c10
-rw-r--r--drivers/ata/pata_ali.c7
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_artop.c5
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c6
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_bf54x.c89
-rw-r--r--drivers/ata/pata_cmd640.c13
-rw-r--r--drivers/ata/pata_cmd64x.c8
-rw-r--r--drivers/ata/pata_cs5520.c4
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c4
-rw-r--r--drivers/ata/pata_hpt37x.c6
-rw-r--r--drivers/ata/pata_hpt3x2n.c4
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_icside.c7
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c8
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_legacy.c15
-rw-r--r--drivers/ata/pata_macio.c19
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c102
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_ns87415.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c30
-rw-r--r--drivers/ata/pata_of_platform.c13
-rw-r--r--drivers/ata/pata_oldpiix.c4
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_pcmcia.c87
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_pdc202xx_old.c4
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_pxa.c411
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rdc.c4
-rw-r--r--drivers/ata/pata_samsung_cf.c683
-rw-r--r--drivers/ata/pata_sc1200.c6
-rw-r--r--drivers/ata/pata_scc.c87
-rw-r--r--drivers/ata/pata_sch.c12
-rw-r--r--drivers/ata/pata_serverworks.c8
-rw-r--r--drivers/ata/pata_sil680.c34
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/pata_winbond.c282
-rw-r--r--drivers/ata/pdc_adma.c74
-rw-r--r--drivers/ata/sata_dwc_460ex.c1756
-rw-r--r--drivers/ata/sata_fsl.c39
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c142
-rw-r--r--drivers/ata/sata_nv.c297
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c104
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/ata/sata_sil24.c33
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c6
-rw-r--r--drivers/ata/sata_via.c35
-rw-r--r--drivers/ata/sata_vsc.c12
85 files changed, 8104 insertions, 4894 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bdc..11ec911016c6 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -57,6 +57,8 @@ config SATA_PMP
57 This option adds support for SATA Port Multipliers 57 This option adds support for SATA Port Multipliers
58 (the SATA version of an ethernet hub, or SAS expander). 58 (the SATA version of an ethernet hub, or SAS expander).
59 59
60comment "Controllers with non-SFF native interface"
61
60config SATA_AHCI 62config SATA_AHCI
61 tristate "AHCI SATA support" 63 tristate "AHCI SATA support"
62 depends on PCI 64 depends on PCI
@@ -65,11 +67,11 @@ config SATA_AHCI
65 67
66 If unsure, say N. 68 If unsure, say N.
67 69
68config SATA_SIL24 70config SATA_AHCI_PLATFORM
69 tristate "Silicon Image 3124/3132 SATA support" 71 tristate "Platform AHCI SATA support"
70 depends on PCI
71 help 72 help
72 This option enables support for Silicon Image 3124/3132 Serial ATA. 73 This option enables support for Platform AHCI Serial ATA
74 controllers.
73 75
74 If unsure, say N. 76 If unsure, say N.
75 77
@@ -82,6 +84,20 @@ config SATA_FSL
82 84
83 If unsure, say N. 85 If unsure, say N.
84 86
87config SATA_INIC162X
88 tristate "Initio 162x SATA support"
89 depends on PCI
90 help
91 This option enables support for Initio 162x Serial ATA.
92
93config SATA_SIL24
94 tristate "Silicon Image 3124/3132 SATA support"
95 depends on PCI
96 help
97 This option enables support for Silicon Image 3124/3132 Serial ATA.
98
99 If unsure, say N.
100
85config ATA_SFF 101config ATA_SFF
86 bool "ATA SFF support" 102 bool "ATA SFF support"
87 default y 103 default y
@@ -102,15 +118,65 @@ config ATA_SFF
102 118
103if ATA_SFF 119if ATA_SFF
104 120
105config SATA_SVW 121comment "SFF controllers with custom DMA interface"
106 tristate "ServerWorks Frodo / Apple K2 SATA support" 122
123config PDC_ADMA
124 tristate "Pacific Digital ADMA support"
107 depends on PCI 125 depends on PCI
108 help 126 help
109 This option enables support for Broadcom/Serverworks/Apple K2 127 This option enables support for Pacific Digital ADMA controllers
110 SATA support. 128
129 If unsure, say N.
130
131config PATA_MPC52xx
132 tristate "Freescale MPC52xx SoC internal IDE"
133 depends on PPC_MPC52xx && PPC_BESTCOMM
134 select PPC_BESTCOMM_ATA
135 help
136 This option enables support for integrated IDE controller
137 of the Freescale MPC52xx SoC.
138
139 If unsure, say N.
140
141config PATA_OCTEON_CF
142 tristate "OCTEON Boot Bus Compact Flash support"
143 depends on CPU_CAVIUM_OCTEON
144 help
145 This option enables a polled compact flash driver for use with
146 compact flash cards attached to the OCTEON boot bus.
147
148 If unsure, say N.
149
150config SATA_QSTOR
151 tristate "Pacific Digital SATA QStor support"
152 depends on PCI
153 help
154 This option enables support for Pacific Digital Serial ATA QStor.
155
156 If unsure, say N.
157
158config SATA_SX4
159 tristate "Promise SATA SX4 support (Experimental)"
160 depends on PCI && EXPERIMENTAL
161 help
162 This option enables support for Promise Serial ATA SX4.
111 163
112 If unsure, say N. 164 If unsure, say N.
113 165
166config ATA_BMDMA
167 bool "ATA BMDMA support"
168 default y
169 help
170 This option adds support for SFF ATA controllers with BMDMA
171 capability. BMDMA stands for bus-master DMA and is the
172 de facto DMA interface for SFF controllers.
173
174 If unsure, say Y.
175
176if ATA_BMDMA
177
178comment "SATA SFF controllers with BMDMA"
179
114config ATA_PIIX 180config ATA_PIIX
115 tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" 181 tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
116 depends on PCI 182 depends on PCI
@@ -121,6 +187,15 @@ config ATA_PIIX
121 187
122 If unsure, say N. 188 If unsure, say N.
123 189
190config SATA_DWC
191 tristate "DesignWare Cores SATA support"
192 depends on 460EX
193 help
194 This option enables support for the on-chip SATA controller of the
195 AppliedMicro processor 460EX.
196
197 If unsure, say N.
198
124config SATA_MV 199config SATA_MV
125 tristate "Marvell SATA support" 200 tristate "Marvell SATA support"
126 help 201 help
@@ -138,22 +213,6 @@ config SATA_NV
138 213
139 If unsure, say N. 214 If unsure, say N.
140 215
141config PDC_ADMA
142 tristate "Pacific Digital ADMA support"
143 depends on PCI
144 help
145 This option enables support for Pacific Digital ADMA controllers
146
147 If unsure, say N.
148
149config SATA_QSTOR
150 tristate "Pacific Digital SATA QStor support"
151 depends on PCI
152 help
153 This option enables support for Pacific Digital Serial ATA QStor.
154
155 If unsure, say N.
156
157config SATA_PROMISE 216config SATA_PROMISE
158 tristate "Promise SATA TX2/TX4 support" 217 tristate "Promise SATA TX2/TX4 support"
159 depends on PCI 218 depends on PCI
@@ -162,14 +221,6 @@ config SATA_PROMISE
162 221
163 If unsure, say N. 222 If unsure, say N.
164 223
165config SATA_SX4
166 tristate "Promise SATA SX4 support (Experimental)"
167 depends on PCI && EXPERIMENTAL
168 help
169 This option enables support for Promise Serial ATA SX4.
170
171 If unsure, say N.
172
173config SATA_SIL 224config SATA_SIL
174 tristate "Silicon Image SATA support" 225 tristate "Silicon Image SATA support"
175 depends on PCI 226 depends on PCI
@@ -189,6 +240,15 @@ config SATA_SIS
189 enable the PATA_SIS driver in the config. 240 enable the PATA_SIS driver in the config.
190 If unsure, say N. 241 If unsure, say N.
191 242
243config SATA_SVW
244 tristate "ServerWorks Frodo / Apple K2 SATA support"
245 depends on PCI
246 help
247 This option enables support for Broadcom/Serverworks/Apple K2
248 SATA support.
249
250 If unsure, say N.
251
192config SATA_ULI 252config SATA_ULI
193 tristate "ULi Electronics SATA support" 253 tristate "ULi Electronics SATA support"
194 depends on PCI 254 depends on PCI
@@ -213,20 +273,7 @@ config SATA_VITESSE
213 273
214 If unsure, say N. 274 If unsure, say N.
215 275
216config SATA_INIC162X 276comment "PATA SFF controllers with BMDMA"
217 tristate "Initio 162x SATA support"
218 depends on PCI
219 help
220 This option enables support for Initio 162x Serial ATA.
221
222config PATA_ACPI
223 tristate "ACPI firmware driver for PATA"
224 depends on ATA_ACPI
225 help
226 This option enables an ACPI method driver which drives
227 motherboard PATA controller interfaces through the ACPI
228 firmware in the BIOS. This driver can sometimes handle
229 otherwise unsupported hardware.
230 277
231config PATA_ALI 278config PATA_ALI
232 tristate "ALi PATA support" 279 tristate "ALi PATA support"
@@ -254,40 +301,30 @@ config PATA_ARTOP
254 301
255 If unsure, say N. 302 If unsure, say N.
256 303
257config PATA_ATP867X 304config PATA_ATIIXP
258 tristate "ARTOP/Acard ATP867X PATA support" 305 tristate "ATI PATA support"
259 depends on PCI 306 depends on PCI
260 help 307 help
261 This option enables support for ARTOP/Acard ATP867X PATA 308 This option enables support for the ATI ATA interfaces
262 controllers. 309 found on the many ATI chipsets.
263
264 If unsure, say N.
265
266config PATA_AT32
267 tristate "Atmel AVR32 PATA support (Experimental)"
268 depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
269 help
270 This option enables support for the IDE devices on the
271 Atmel AT32AP platform.
272 310
273 If unsure, say N. 311 If unsure, say N.
274 312
275config PATA_ATIIXP 313config PATA_ATP867X
276 tristate "ATI PATA support" 314 tristate "ARTOP/Acard ATP867X PATA support"
277 depends on PCI 315 depends on PCI
278 help 316 help
279 This option enables support for the ATI ATA interfaces 317 This option enables support for ARTOP/Acard ATP867X PATA
280 found on the many ATI chipsets. 318 controllers.
281 319
282 If unsure, say N. 320 If unsure, say N.
283 321
284config PATA_CMD640_PCI 322config PATA_BF54X
285 tristate "CMD640 PCI PATA support (Experimental)" 323 tristate "Blackfin 54x ATAPI support"
286 depends on PCI && EXPERIMENTAL 324 depends on BF542 || BF548 || BF549
287 help 325 help
288 This option enables support for the CMD640 PCI IDE 326 This option enables support for the built-in ATAPI controller on
289 interface chip. Only the primary channel is currently 327 Blackfin 54x family chips.
290 supported.
291 328
292 If unsure, say N. 329 If unsure, say N.
293 330
@@ -354,15 +391,6 @@ config PATA_EFAR
354 391
355 If unsure, say N. 392 If unsure, say N.
356 393
357config ATA_GENERIC
358 tristate "Generic ATA support"
359 depends on PCI
360 help
361 This option enables support for generic BIOS configured
362 ATA controllers via the new ATA layer
363
364 If unsure, say N.
365
366config PATA_HPT366 394config PATA_HPT366
367 tristate "HPT 366/368 PATA support" 395 tristate "HPT 366/368 PATA support"
368 depends on PCI 396 depends on PCI
@@ -407,12 +435,20 @@ config PATA_HPT3X3_DMA
407 controllers. Enable with care as there are still some 435 controllers. Enable with care as there are still some
408 problems with DMA on this chipset. 436 problems with DMA on this chipset.
409 437
410config PATA_ISAPNP 438config PATA_ICSIDE
411 tristate "ISA Plug and Play PATA support" 439 tristate "Acorn ICS PATA support"
412 depends on ISAPNP 440 depends on ARM && ARCH_ACORN
413 help 441 help
414 This option enables support for ISA plug & play ATA 442 On Acorn systems, say Y here if you wish to use the ICS PATA
415 controllers such as those found on old soundcards. 443 interface card. This is not required for ICS partition support.
444 If you are unsure, say N to this.
445
446config PATA_IT8213
447 tristate "IT8213 PATA support (Experimental)"
448 depends on PCI && EXPERIMENTAL
449 help
450 This option enables support for the ITE 821 PATA
451 controllers via the new ATA layer.
416 452
417 If unsure, say N. 453 If unsure, say N.
418 454
@@ -426,15 +462,6 @@ config PATA_IT821X
426 462
427 If unsure, say N. 463 If unsure, say N.
428 464
429config PATA_IT8213
430 tristate "IT8213 PATA support (Experimental)"
431 depends on PCI && EXPERIMENTAL
432 help
433 This option enables support for the ITE 821 PATA
434 controllers via the new ATA layer.
435
436 If unsure, say N.
437
438config PATA_JMICRON 465config PATA_JMICRON
439 tristate "JMicron PATA support" 466 tristate "JMicron PATA support"
440 depends on PCI 467 depends on PCI
@@ -444,23 +471,14 @@ config PATA_JMICRON
444 471
445 If unsure, say N. 472 If unsure, say N.
446 473
447config PATA_LEGACY 474config PATA_MACIO
448 tristate "Legacy ISA PATA support (Experimental)" 475 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
449 depends on (ISA || PCI) && EXPERIMENTAL 476 depends on PPC_PMAC
450 help
451 This option enables support for ISA/VLB/PCI bus legacy PATA
452 ports and allows them to be accessed via the new ATA layer.
453
454 If unsure, say N.
455
456config PATA_TRIFLEX
457 tristate "Compaq Triflex PATA support"
458 depends on PCI
459 help 477 help
460 Enable support for the Compaq 'Triflex' IDE controller as found 478 Most IDE capable PowerMacs have IDE busses driven by a variant
461 on many Compaq Pentium-Pro systems, via the new ATA layer. 479 of this controller which is part of the Apple chipset used on
462 480 most PowerMac models. Some models have multiple busses using
463 If unsure, say N. 481 different chipsets, though generally, MacIO is one of them.
464 482
465config PATA_MARVELL 483config PATA_MARVELL
466 tristate "Marvell PATA support via legacy mode" 484 tristate "Marvell PATA support via legacy mode"
@@ -473,32 +491,6 @@ config PATA_MARVELL
473 491
474 If unsure, say N. 492 If unsure, say N.
475 493
476config PATA_MPC52xx
477 tristate "Freescale MPC52xx SoC internal IDE"
478 depends on PPC_MPC52xx && PPC_BESTCOMM
479 select PPC_BESTCOMM_ATA
480 help
481 This option enables support for integrated IDE controller
482 of the Freescale MPC52xx SoC.
483
484 If unsure, say N.
485
486config PATA_MPIIX
487 tristate "Intel PATA MPIIX support"
488 depends on PCI
489 help
490 This option enables support for MPIIX PATA support.
491
492 If unsure, say N.
493
494config PATA_OLDPIIX
495 tristate "Intel PATA old PIIX support"
496 depends on PCI
497 help
498 This option enables support for early PIIX PATA support.
499
500 If unsure, say N.
501
502config PATA_NETCELL 494config PATA_NETCELL
503 tristate "NETCELL Revolution RAID support" 495 tristate "NETCELL Revolution RAID support"
504 depends on PCI 496 depends on PCI
@@ -517,15 +509,6 @@ config PATA_NINJA32
517 509
518 If unsure, say N. 510 If unsure, say N.
519 511
520config PATA_NS87410
521 tristate "Nat Semi NS87410 PATA support"
522 depends on PCI
523 help
524 This option enables support for the National Semiconductor
525 NS87410 PCI-IDE controller.
526
527 If unsure, say N.
528
529config PATA_NS87415 512config PATA_NS87415
530 tristate "Nat Semi NS87415 PATA support" 513 tristate "Nat Semi NS87415 PATA support"
531 depends on PCI 514 depends on PCI
@@ -535,12 +518,11 @@ config PATA_NS87415
535 518
536 If unsure, say N. 519 If unsure, say N.
537 520
538config PATA_OPTI 521config PATA_OLDPIIX
539 tristate "OPTI621/6215 PATA support (Very Experimental)" 522 tristate "Intel PATA old PIIX support"
540 depends on PCI && EXPERIMENTAL 523 depends on PCI
541 help 524 help
542 This option enables full PIO support for the early Opti ATA 525 This option enables support for early PIIX PATA support.
543 controllers found on some old motherboards.
544 526
545 If unsure, say N. 527 If unsure, say N.
546 528
@@ -554,24 +536,6 @@ config PATA_OPTIDMA
554 536
555 If unsure, say N. 537 If unsure, say N.
556 538
557config PATA_PALMLD
558 tristate "Palm LifeDrive PATA support"
559 depends on MACH_PALMLD
560 help
561 This option enables support for Palm LifeDrive's internal ATA
562 port via the new ATA layer.
563
564 If unsure, say N.
565
566config PATA_PCMCIA
567 tristate "PCMCIA PATA support"
568 depends on PCMCIA
569 help
570 This option enables support for PCMCIA ATA interfaces, including
571 compact flash card adapters via the new ATA layer.
572
573 If unsure, say N.
574
575config PATA_PDC2027X 539config PATA_PDC2027X
576 tristate "Promise PATA 2027x support" 540 tristate "Promise PATA 2027x support"
577 depends on PCI 541 depends on PCI
@@ -589,12 +553,6 @@ config PATA_PDC_OLD
589 553
590 If unsure, say N. 554 If unsure, say N.
591 555
592config PATA_QDI
593 tristate "QDI VLB PATA support"
594 depends on ISA
595 help
596 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
597
598config PATA_RADISYS 556config PATA_RADISYS
599 tristate "RADISYS 82600 PATA support (Experimental)" 557 tristate "RADISYS 82600 PATA support (Experimental)"
600 depends on PCI && EXPERIMENTAL 558 depends on PCI && EXPERIMENTAL
@@ -604,15 +562,6 @@ config PATA_RADISYS
604 562
605 If unsure, say N. 563 If unsure, say N.
606 564
607config PATA_RB532
608 tristate "RouterBoard 532 PATA CompactFlash support"
609 depends on MIKROTIK_RB532
610 help
611 This option enables support for the RouterBoard 532
612 PATA CompactFlash controller.
613
614 If unsure, say N.
615
616config PATA_RDC 565config PATA_RDC
617 tristate "RDC PATA support" 566 tristate "RDC PATA support"
618 depends on PCI 567 depends on PCI
@@ -623,21 +572,30 @@ config PATA_RDC
623 572
624 If unsure, say N. 573 If unsure, say N.
625 574
626config PATA_RZ1000 575config PATA_SC1200
627 tristate "PC Tech RZ1000 PATA support" 576 tristate "SC1200 PATA support"
628 depends on PCI 577 depends on PCI
629 help 578 help
630 This option enables basic support for the PC Tech RZ1000/1 579 This option enables support for the NatSemi/AMD SC1200 SoC
631 PATA controllers via the new ATA layer 580 companion chip used with the Geode processor family.
632 581
633 If unsure, say N. 582 If unsure, say N.
634 583
635config PATA_SC1200 584config PATA_SCC
636 tristate "SC1200 PATA support" 585 tristate "Toshiba's Cell Reference Set IDE support"
586 depends on PCI && PPC_CELLEB
587 help
588 This option enables support for the built-in IDE controller on
589 Toshiba Cell Reference Board.
590
591 If unsure, say N.
592
593config PATA_SCH
594 tristate "Intel SCH PATA support"
637 depends on PCI 595 depends on PCI
638 help 596 help
639 This option enables support for the NatSemi/AMD SC1200 SoC 597 This option enables support for Intel SCH PATA on the Intel
640 companion chip used with the Geode processor family. 598 SCH (US15W, US15L, UL11L) series host controllers.
641 599
642 If unsure, say N. 600 If unsure, say N.
643 601
@@ -675,6 +633,15 @@ config PATA_TOSHIBA
675 633
676 If unsure, say N. 634 If unsure, say N.
677 635
636config PATA_TRIFLEX
637 tristate "Compaq Triflex PATA support"
638 depends on PCI
639 help
640 Enable support for the Compaq 'Triflex' IDE controller as found
641 on many Compaq Pentium-Pro systems, via the new ATA layer.
642
643 If unsure, say N.
644
678config PATA_VIA 645config PATA_VIA
679 tristate "VIA PATA support" 646 tristate "VIA PATA support"
680 depends on PCI 647 depends on PCI
@@ -684,6 +651,17 @@ config PATA_VIA
684 651
685 If unsure, say N. 652 If unsure, say N.
686 653
654config PATA_PXA
655 tristate "PXA DMA-capable PATA support"
656 depends on ARCH_PXA
657 help
658 This option enables support for harddrive attached to PXA CPU's bus.
659
660 NOTE: This driver utilizes PXA DMA controller, in case your hardware
661 is not capable of doing MWDMA, use pata_platform instead.
662
663 If unsure, say N.
664
687config PATA_WINBOND 665config PATA_WINBOND
688 tristate "Winbond SL82C105 PATA support" 666 tristate "Winbond SL82C105 PATA support"
689 depends on PCI 667 depends on PCI
@@ -693,12 +671,99 @@ config PATA_WINBOND
693 671
694 If unsure, say N. 672 If unsure, say N.
695 673
696config PATA_WINBOND_VLB 674endif # ATA_BMDMA
697 tristate "Winbond W83759A VLB PATA support (Experimental)" 675
698 depends on ISA && EXPERIMENTAL 676comment "PIO-only SFF controllers"
677
678config PATA_AT32
679 tristate "Atmel AVR32 PATA support (Experimental)"
680 depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
699 help 681 help
700 Support for the Winbond W83759A controller on Vesa Local Bus 682 This option enables support for the IDE devices on the
701 systems. 683 Atmel AT32AP platform.
684
685 If unsure, say N.
686
687config PATA_AT91
688 tristate "PATA support for AT91SAM9260"
689 depends on ARM && ARCH_AT91
690 help
691 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
692
693 If unsure, say N.
694
695config PATA_CMD640_PCI
696 tristate "CMD640 PCI PATA support (Experimental)"
697 depends on PCI && EXPERIMENTAL
698 help
699 This option enables support for the CMD640 PCI IDE
700 interface chip. Only the primary channel is currently
701 supported.
702
703 If unsure, say N.
704
705config PATA_ISAPNP
706 tristate "ISA Plug and Play PATA support"
707 depends on ISAPNP
708 help
709 This option enables support for ISA plug & play ATA
710 controllers such as those found on old soundcards.
711
712 If unsure, say N.
713
714config PATA_IXP4XX_CF
715 tristate "IXP4XX Compact Flash support"
716 depends on ARCH_IXP4XX
717 help
718 This option enables support for a Compact Flash connected on
719 the ixp4xx expansion bus. This driver had been written for
720 Loft/Avila boards in mind but can work with others.
721
722 If unsure, say N.
723
724config PATA_MPIIX
725 tristate "Intel PATA MPIIX support"
726 depends on PCI
727 help
728 This option enables support for MPIIX PATA support.
729
730 If unsure, say N.
731
732config PATA_NS87410
733 tristate "Nat Semi NS87410 PATA support"
734 depends on PCI
735 help
736 This option enables support for the National Semiconductor
737 NS87410 PCI-IDE controller.
738
739 If unsure, say N.
740
741config PATA_OPTI
742 tristate "OPTI621/6215 PATA support (Very Experimental)"
743 depends on PCI && EXPERIMENTAL
744 help
745 This option enables full PIO support for the early Opti ATA
746 controllers found on some old motherboards.
747
748 If unsure, say N.
749
750config PATA_PALMLD
751 tristate "Palm LifeDrive PATA support"
752 depends on MACH_PALMLD
753 help
754 This option enables support for Palm LifeDrive's internal ATA
755 port via the new ATA layer.
756
757 If unsure, say N.
758
759config PATA_PCMCIA
760 tristate "PCMCIA PATA support"
761 depends on PCMCIA
762 help
763 This option enables support for PCMCIA ATA interfaces, including
764 compact flash card adapters via the new ATA layer.
765
766 If unsure, say N.
702 767
703config HAVE_PATA_PLATFORM 768config HAVE_PATA_PLATFORM
704 bool 769 bool
@@ -717,14 +782,6 @@ config PATA_PLATFORM
717 782
718 If unsure, say N. 783 If unsure, say N.
719 784
720config PATA_AT91
721 tristate "PATA support for AT91SAM9260"
722 depends on ARM && ARCH_AT91
723 help
724 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
725
726 If unsure, say N.
727
728config PATA_OF_PLATFORM 785config PATA_OF_PLATFORM
729 tristate "OpenFirmware platform device PATA support" 786 tristate "OpenFirmware platform device PATA support"
730 depends on PATA_PLATFORM && PPC_OF 787 depends on PATA_PLATFORM && PPC_OF
@@ -735,69 +792,75 @@ config PATA_OF_PLATFORM
735 792
736 If unsure, say N. 793 If unsure, say N.
737 794
738config PATA_ICSIDE 795config PATA_QDI
739 tristate "Acorn ICS PATA support" 796 tristate "QDI VLB PATA support"
740 depends on ARM && ARCH_ACORN 797 depends on ISA
741 help 798 help
742 On Acorn systems, say Y here if you wish to use the ICS PATA 799 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
743 interface card. This is not required for ICS partition support.
744 If you are unsure, say N to this.
745 800
746config PATA_IXP4XX_CF 801config PATA_RB532
747 tristate "IXP4XX Compact Flash support" 802 tristate "RouterBoard 532 PATA CompactFlash support"
748 depends on ARCH_IXP4XX 803 depends on MIKROTIK_RB532
749 help 804 help
750 This option enables support for a Compact Flash connected on 805 This option enables support for the RouterBoard 532
751 the ixp4xx expansion bus. This driver had been written for 806 PATA CompactFlash controller.
752 Loft/Avila boards in mind but can work with others.
753 807
754 If unsure, say N. 808 If unsure, say N.
755 809
756config PATA_OCTEON_CF 810config PATA_RZ1000
757 tristate "OCTEON Boot Bus Compact Flash support" 811 tristate "PC Tech RZ1000 PATA support"
758 depends on CPU_CAVIUM_OCTEON 812 depends on PCI
759 help 813 help
760 This option enables a polled compact flash driver for use with 814 This option enables basic support for the PC Tech RZ1000/1
761 compact flash cards attached to the OCTEON boot bus. 815 PATA controllers via the new ATA layer
762 816
763 If unsure, say N. 817 If unsure, say N.
764 818
765config PATA_SCC 819config PATA_SAMSUNG_CF
766 tristate "Toshiba's Cell Reference Set IDE support" 820 tristate "Samsung SoC PATA support"
767 depends on PCI && PPC_CELLEB 821 depends on SAMSUNG_DEV_IDE
768 help 822 help
769 This option enables support for the built-in IDE controller on 823 This option enables basic support for Samsung's S3C/S5P board
770 Toshiba Cell Reference Board. 824 PATA controllers via the new ATA layer
771 825
772 If unsure, say N. 826 If unsure, say N.
773 827
774config PATA_SCH 828config PATA_WINBOND_VLB
775 tristate "Intel SCH PATA support" 829 tristate "Winbond W83759A VLB PATA support (Experimental)"
776 depends on PCI 830 depends on ISA && EXPERIMENTAL
831 select PATA_LEGACY
777 help 832 help
778 This option enables support for Intel SCH PATA on the Intel 833 Support for the Winbond W83759A controller on Vesa Local Bus
779 SCH (US15W, US15L, UL11L) series host controllers. 834 systems.
780 835
781 If unsure, say N. 836comment "Generic fallback / legacy drivers"
782 837
783config PATA_BF54X 838config PATA_ACPI
784 tristate "Blackfin 54x ATAPI support" 839 tristate "ACPI firmware driver for PATA"
785 depends on BF542 || BF548 || BF549 840 depends on ATA_ACPI && ATA_BMDMA
786 help 841 help
787 This option enables support for the built-in ATAPI controller on 842 This option enables an ACPI method driver which drives
788 Blackfin 54x family chips. 843 motherboard PATA controller interfaces through the ACPI
844 firmware in the BIOS. This driver can sometimes handle
845 otherwise unsupported hardware.
846
847config ATA_GENERIC
848 tristate "Generic ATA support"
849 depends on PCI && ATA_BMDMA
850 help
851 This option enables support for generic BIOS configured
852 ATA controllers via the new ATA layer
789 853
790 If unsure, say N. 854 If unsure, say N.
791 855
792config PATA_MACIO 856config PATA_LEGACY
793 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" 857 tristate "Legacy ISA PATA support (Experimental)"
794 depends on PPC_PMAC 858 depends on (ISA || PCI) && EXPERIMENTAL
795 help 859 help
796 Most IDE capable PowerMacs have IDE busses driven by a variant 860 This option enables support for ISA/VLB/PCI bus legacy PATA
797 of this controller which is part of the Apple chipset used on 861 ports and allows them to be accessed via the new ATA layer.
798 most PowerMac models. Some models have multiple busses using
799 different chipsets, though generally, MacIO is one of them.
800 862
863 If unsure, say N.
801 864
802endif # ATA_SFF 865endif # ATA_SFF
803endif # ATA 866endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d6..d5df04a395ca 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,32 +1,40 @@
1 1
2obj-$(CONFIG_ATA) += libata.o 2obj-$(CONFIG_ATA) += libata.o
3 3
4obj-$(CONFIG_SATA_AHCI) += ahci.o 4# non-SFF interface
5obj-$(CONFIG_SATA_SVW) += sata_svw.o 5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
6obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
7obj-$(CONFIG_SATA_FSL) += sata_fsl.o
8obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
9obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
10obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
11
12# SFF w/ custom DMA
13obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
14obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
15obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
16obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
17obj-$(CONFIG_SATA_SX4) += sata_sx4.o
18
19# SFF SATA w/ BMDMA
6obj-$(CONFIG_ATA_PIIX) += ata_piix.o 20obj-$(CONFIG_ATA_PIIX) += ata_piix.o
21obj-$(CONFIG_SATA_MV) += sata_mv.o
22obj-$(CONFIG_SATA_NV) += sata_nv.o
7obj-$(CONFIG_SATA_PROMISE) += sata_promise.o 23obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
8obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
9obj-$(CONFIG_SATA_SIL) += sata_sil.o 24obj-$(CONFIG_SATA_SIL) += sata_sil.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_VIA) += sata_via.o
12obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
13obj-$(CONFIG_SATA_SIS) += sata_sis.o 25obj-$(CONFIG_SATA_SIS) += sata_sis.o
14obj-$(CONFIG_SATA_SX4) += sata_sx4.o 26obj-$(CONFIG_SATA_SVW) += sata_svw.o
15obj-$(CONFIG_SATA_NV) += sata_nv.o
16obj-$(CONFIG_SATA_ULI) += sata_uli.o 27obj-$(CONFIG_SATA_ULI) += sata_uli.o
17obj-$(CONFIG_SATA_MV) += sata_mv.o 28obj-$(CONFIG_SATA_VIA) += sata_via.o
18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 29obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
20obj-$(CONFIG_SATA_FSL) += sata_fsl.o
21obj-$(CONFIG_PATA_MACIO) += pata_macio.o
22 30
31# SFF PATA w/ BMDMA
23obj-$(CONFIG_PATA_ALI) += pata_ali.o 32obj-$(CONFIG_PATA_ALI) += pata_ali.o
24obj-$(CONFIG_PATA_AMD) += pata_amd.o 33obj-$(CONFIG_PATA_AMD) += pata_amd.o
25obj-$(CONFIG_PATA_ARTOP) += pata_artop.o 34obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
26obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
27obj-$(CONFIG_PATA_AT32) += pata_at32.o
28obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o 35obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
29obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o 36obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
37obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
30obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o 38obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
31obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o 39obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
32obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o 40obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
@@ -38,47 +46,52 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
38obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o 46obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
39obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o 47obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
40obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 48obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
41obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o 49obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
42obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
43obj-$(CONFIG_PATA_IT8213) += pata_it8213.o 50obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
51obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
44obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 52obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
53obj-$(CONFIG_PATA_MACIO) += pata_macio.o
54obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
45obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 55obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
46obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o 56obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
47obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
48obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o 57obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
49obj-$(CONFIG_PATA_OPTI) += pata_opti.o
50obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
51obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
52obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
53obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
54obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o 58obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
55obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o 59obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
56obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
57obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o 60obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
58obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o 61obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
59obj-$(CONFIG_PATA_QDI) += pata_qdi.o
60obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 62obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
61obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
62obj-$(CONFIG_PATA_RDC) += pata_rdc.o 63obj-$(CONFIG_PATA_RDC) += pata_rdc.o
63obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
64obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 64obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
65obj-$(CONFIG_PATA_SCC) += pata_scc.o
66obj-$(CONFIG_PATA_SCH) += pata_sch.o
65obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 67obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
66obj-$(CONFIG_PATA_SIL680) += pata_sil680.o 68obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
69obj-$(CONFIG_PATA_SIS) += pata_sis.o
67obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o 70obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
71obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
68obj-$(CONFIG_PATA_VIA) += pata_via.o 72obj-$(CONFIG_PATA_VIA) += pata_via.o
69obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o 73obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
70obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o 74
71obj-$(CONFIG_PATA_SIS) += pata_sis.o 75# SFF PIO only
72obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o 76obj-$(CONFIG_PATA_AT32) += pata_at32.o
77obj-$(CONFIG_PATA_AT91) += pata_at91.o
78obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
79obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
73obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o 80obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
74obj-$(CONFIG_PATA_SCC) += pata_scc.o 81obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
75obj-$(CONFIG_PATA_SCH) += pata_sch.o 82obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
76obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o 83obj-$(CONFIG_PATA_OPTI) += pata_opti.o
77obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 84obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
85obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
78obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o 86obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
79obj-$(CONFIG_PATA_AT91) += pata_at91.o
80obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o 87obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
81obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 88obj-$(CONFIG_PATA_QDI) += pata_qdi.o
89obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
90obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
91obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
92
93obj-$(CONFIG_PATA_PXA) += pata_pxa.o
94
82# Should be last but two libata driver 95# Should be last but two libata driver
83obj-$(CONFIG_PATA_ACPI) += pata_acpi.o 96obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
84# Should be last but one libata driver 97# Should be last but one libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a410..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,402 +46,52 @@
46#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
47#include <scsi/scsi_cmnd.h> 47#include <scsi/scsi_cmnd.h>
48#include <linux/libata.h> 48#include <linux/libata.h>
49#include "ahci.h"
49 50
50#define DRV_NAME "ahci" 51#define DRV_NAME "ahci"
51#define DRV_VERSION "3.0" 52#define DRV_VERSION "3.0"
52 53
53/* Enclosure Management Control */
54#define EM_CTRL_MSG_TYPE 0x000f0000
55
56/* Enclosure Management LED Message Type */
57#define EM_MSG_LED_HBA_PORT 0x0000000f
58#define EM_MSG_LED_PMP_SLOT 0x0000ff00
59#define EM_MSG_LED_VALUE 0xffff0000
60#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61#define EM_MSG_LED_VALUE_OFF 0xfff80000
62#define EM_MSG_LED_VALUE_ON 0x00010000
63
64static int ahci_skip_host_reset;
65static int ahci_ignore_sss;
66
67module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
69
70module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
72
73static int ahci_enable_alpm(struct ata_port *ap,
74 enum link_pm policy);
75static void ahci_disable_alpm(struct ata_port *ap);
76static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 size_t size);
79static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
80 ssize_t size);
81
82enum { 54enum {
83 AHCI_PCI_BAR = 5, 55 AHCI_PCI_BAR = 5,
84 AHCI_MAX_PORTS = 32,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
87 AHCI_MAX_CMDS = 32,
88 AHCI_CMD_SZ = 32,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_RX_FIS_SZ = 256,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_RX_FIS_SZ,
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
98 AHCI_CMD_TBL_AR_SZ +
99 (AHCI_RX_FIS_SZ * 16),
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
106
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
110
111 board_ahci = 0,
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
115 board_ahci_mv = 4,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
121
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
131
132 /* HOST_CTL bits */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
136
137 /* HOST_CAP bits */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
155
156 /* HOST_CAP2 bits */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
160
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
178
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
188
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
198
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_IF_ERR |
201 PORT_IRQ_CONNECT |
202 PORT_IRQ_PHYRDY |
203 PORT_IRQ_UNK_FIS |
204 PORT_IRQ_BAD_PMP,
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
206 PORT_IRQ_TF_ERR |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
211
212 /* PORT_CMD bits */
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
225
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
230
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
238
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
252 link offline */
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
254
255 /* ap->flags bits */
256
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
260 ATA_FLAG_IPM,
261
262 ICH_MAP = 0x90, /* ICH MAP register */
263
264 /* em constants */
265 EM_MAX_SLOTS = 8,
266 EM_MAX_RETRY = 5,
267
268 /* em_ctl bits */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
272};
273
274struct ahci_cmd_hdr {
275 __le32 opts;
276 __le32 status;
277 __le32 tbl_addr;
278 __le32 tbl_addr_hi;
279 __le32 reserved[4];
280};
281
282struct ahci_sg {
283 __le32 addr;
284 __le32 addr_hi;
285 __le32 reserved;
286 __le32 flags_size;
287};
288
289struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
295};
296
297struct ahci_host_priv {
298 unsigned int flags; /* AHCI_HFLAG_* */
299 u32 cap; /* cap to use */
300 u32 cap2; /* cap2 to use */
301 u32 port_map; /* port map to use */
302 u32 saved_cap; /* saved initial cap */
303 u32 saved_cap2; /* saved initial cap2 */
304 u32 saved_port_map; /* saved initial port_map */
305 u32 em_loc; /* enclosure management location */
306}; 56};
307 57
308struct ahci_port_priv { 58enum board_ids {
309 struct ata_link *active_link; 59 /* board IDs by feature in alphabetical order */
310 struct ahci_cmd_hdr *cmd_slot; 60 board_ahci,
311 dma_addr_t cmd_slot_dma; 61 board_ahci_ign_iferr,
312 void *cmd_tbl; 62 board_ahci_nosntf,
313 dma_addr_t cmd_tbl_dma; 63 board_ahci_yes_fbs,
314 void *rx_fis; 64
315 dma_addr_t rx_fis_dma; 65 /* board IDs for specific chipsets in alphabetical order */
316 /* for NCQ spurious interrupt analysis */ 66 board_ahci_mcp65,
317 unsigned int ncq_saw_d2h:1; 67 board_ahci_mcp77,
318 unsigned int ncq_saw_dmas:1; 68 board_ahci_mcp89,
319 unsigned int ncq_saw_sdb:1; 69 board_ahci_mv,
320 u32 intr_mask; /* interrupts to enable */ 70 board_ahci_sb600,
321 bool fbs_supported; /* set iff FBS is supported */ 71 board_ahci_sb700, /* for SB700 and SB800 */
322 bool fbs_enabled; /* set iff FBS is enabled */ 72 board_ahci_vt8251,
323 int fbs_last_dev; /* save FBS.DEV of last FIS */ 73
324 /* enclosure management info per PM slot */ 74 /* aliases */
325 struct ahci_em_priv em_priv[EM_MAX_SLOTS]; 75 board_ahci_mcp_linux = board_ahci_mcp65,
76 board_ahci_mcp67 = board_ahci_mcp65,
77 board_ahci_mcp73 = board_ahci_mcp65,
78 board_ahci_mcp79 = board_ahci_mcp77,
326}; 79};
327 80
328static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
329static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
330static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 81static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
331static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
332static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
333static int ahci_port_start(struct ata_port *ap);
334static void ahci_port_stop(struct ata_port *ap);
335static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
336static void ahci_qc_prep(struct ata_queued_cmd *qc);
337static void ahci_freeze(struct ata_port *ap);
338static void ahci_thaw(struct ata_port *ap);
339static void ahci_enable_fbs(struct ata_port *ap);
340static void ahci_disable_fbs(struct ata_port *ap);
341static void ahci_pmp_attach(struct ata_port *ap);
342static void ahci_pmp_detach(struct ata_port *ap);
343static int ahci_softreset(struct ata_link *link, unsigned int *class,
344 unsigned long deadline);
345static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, 82static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
346 unsigned long deadline); 83 unsigned long deadline);
347static int ahci_hardreset(struct ata_link *link, unsigned int *class,
348 unsigned long deadline);
349static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 84static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
350 unsigned long deadline); 85 unsigned long deadline);
351static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 86static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
352 unsigned long deadline); 87 unsigned long deadline);
353static void ahci_postreset(struct ata_link *link, unsigned int *class);
354static void ahci_error_handler(struct ata_port *ap);
355static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
356static int ahci_port_resume(struct ata_port *ap);
357static void ahci_dev_config(struct ata_device *dev);
358static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
359 u32 opts);
360#ifdef CONFIG_PM 88#ifdef CONFIG_PM
361static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
362static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 89static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
363static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
364#endif 91#endif
365static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
366static ssize_t ahci_activity_store(struct ata_device *dev,
367 enum sw_activity val);
368static void ahci_init_sw_activity(struct ata_link *link);
369
370static ssize_t ahci_show_host_caps(struct device *dev,
371 struct device_attribute *attr, char *buf);
372static ssize_t ahci_show_host_cap2(struct device *dev,
373 struct device_attribute *attr, char *buf);
374static ssize_t ahci_show_host_version(struct device *dev,
375 struct device_attribute *attr, char *buf);
376static ssize_t ahci_show_port_cmd(struct device *dev,
377 struct device_attribute *attr, char *buf);
378
379static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
380static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
381static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
382static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
383
384static struct device_attribute *ahci_shost_attrs[] = {
385 &dev_attr_link_power_management_policy,
386 &dev_attr_em_message_type,
387 &dev_attr_em_message,
388 &dev_attr_ahci_host_caps,
389 &dev_attr_ahci_host_cap2,
390 &dev_attr_ahci_host_version,
391 &dev_attr_ahci_port_cmd,
392 NULL
393};
394
395static struct device_attribute *ahci_sdev_attrs[] = {
396 &dev_attr_sw_activity,
397 &dev_attr_unload_heads,
398 NULL
399};
400 92
401static struct scsi_host_template ahci_sht = { 93static struct scsi_host_template ahci_sht = {
402 ATA_NCQ_SHT(DRV_NAME), 94 AHCI_SHT("ahci"),
403 .can_queue = AHCI_MAX_CMDS - 1,
404 .sg_tablesize = AHCI_MAX_SG,
405 .dma_boundary = AHCI_DMA_BOUNDARY,
406 .shost_attrs = ahci_shost_attrs,
407 .sdev_attrs = ahci_sdev_attrs,
408};
409
410static struct ata_port_operations ahci_ops = {
411 .inherits = &sata_pmp_port_ops,
412
413 .qc_defer = ahci_pmp_qc_defer,
414 .qc_prep = ahci_qc_prep,
415 .qc_issue = ahci_qc_issue,
416 .qc_fill_rtf = ahci_qc_fill_rtf,
417
418 .freeze = ahci_freeze,
419 .thaw = ahci_thaw,
420 .softreset = ahci_softreset,
421 .hardreset = ahci_hardreset,
422 .postreset = ahci_postreset,
423 .pmp_softreset = ahci_softreset,
424 .error_handler = ahci_error_handler,
425 .post_internal_cmd = ahci_post_internal_cmd,
426 .dev_config = ahci_dev_config,
427
428 .scr_read = ahci_scr_read,
429 .scr_write = ahci_scr_write,
430 .pmp_attach = ahci_pmp_attach,
431 .pmp_detach = ahci_pmp_detach,
432
433 .enable_pm = ahci_enable_alpm,
434 .disable_pm = ahci_disable_alpm,
435 .em_show = ahci_led_show,
436 .em_store = ahci_led_store,
437 .sw_activity_show = ahci_activity_show,
438 .sw_activity_store = ahci_activity_store,
439#ifdef CONFIG_PM
440 .port_suspend = ahci_port_suspend,
441 .port_resume = ahci_port_resume,
442#endif
443 .port_start = ahci_port_start,
444 .port_stop = ahci_port_stop,
445}; 95};
446 96
447static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
@@ -463,6 +113,7 @@ static struct ata_port_operations ahci_sb600_ops = {
463#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) 113#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
464 114
465static const struct ata_port_info ahci_port_info[] = { 115static const struct ata_port_info ahci_port_info[] = {
116 /* by features */
466 [board_ahci] = 117 [board_ahci] =
467 { 118 {
468 .flags = AHCI_FLAG_COMMON, 119 .flags = AHCI_FLAG_COMMON,
@@ -470,81 +121,91 @@ static const struct ata_port_info ahci_port_info[] = {
470 .udma_mask = ATA_UDMA6, 121 .udma_mask = ATA_UDMA6,
471 .port_ops = &ahci_ops, 122 .port_ops = &ahci_ops,
472 }, 123 },
473 [board_ahci_vt8251] = 124 [board_ahci_ign_iferr] =
474 { 125 {
475 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), 126 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
476 .flags = AHCI_FLAG_COMMON, 127 .flags = AHCI_FLAG_COMMON,
477 .pio_mask = ATA_PIO4, 128 .pio_mask = ATA_PIO4,
478 .udma_mask = ATA_UDMA6, 129 .udma_mask = ATA_UDMA6,
479 .port_ops = &ahci_vt8251_ops, 130 .port_ops = &ahci_ops,
480 }, 131 },
481 [board_ahci_ign_iferr] = 132 [board_ahci_nosntf] =
482 { 133 {
483 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 134 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
484 .flags = AHCI_FLAG_COMMON, 135 .flags = AHCI_FLAG_COMMON,
485 .pio_mask = ATA_PIO4, 136 .pio_mask = ATA_PIO4,
486 .udma_mask = ATA_UDMA6, 137 .udma_mask = ATA_UDMA6,
487 .port_ops = &ahci_ops, 138 .port_ops = &ahci_ops,
488 }, 139 },
489 [board_ahci_sb600] = 140 [board_ahci_yes_fbs] =
490 { 141 {
491 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 142 AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
492 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
493 AHCI_HFLAG_32BIT_ONLY),
494 .flags = AHCI_FLAG_COMMON, 143 .flags = AHCI_FLAG_COMMON,
495 .pio_mask = ATA_PIO4, 144 .pio_mask = ATA_PIO4,
496 .udma_mask = ATA_UDMA6, 145 .udma_mask = ATA_UDMA6,
497 .port_ops = &ahci_sb600_ops, 146 .port_ops = &ahci_ops,
498 }, 147 },
499 [board_ahci_mv] = 148 /* by chipsets */
149 [board_ahci_mcp65] =
500 { 150 {
501 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
502 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), 152 AHCI_HFLAG_YES_NCQ),
503 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 153 .flags = AHCI_FLAG_COMMON,
504 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
505 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
506 .udma_mask = ATA_UDMA6, 155 .udma_mask = ATA_UDMA6,
507 .port_ops = &ahci_ops, 156 .port_ops = &ahci_ops,
508 }, 157 },
509 [board_ahci_sb700] = /* for SB700 and SB800 */ 158 [board_ahci_mcp77] =
510 { 159 {
511 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), 160 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
512 .flags = AHCI_FLAG_COMMON, 161 .flags = AHCI_FLAG_COMMON,
513 .pio_mask = ATA_PIO4, 162 .pio_mask = ATA_PIO4,
514 .udma_mask = ATA_UDMA6, 163 .udma_mask = ATA_UDMA6,
515 .port_ops = &ahci_sb600_ops, 164 .port_ops = &ahci_ops,
516 }, 165 },
517 [board_ahci_mcp65] = 166 [board_ahci_mcp89] =
518 { 167 {
519 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), 168 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
520 .flags = AHCI_FLAG_COMMON, 169 .flags = AHCI_FLAG_COMMON,
521 .pio_mask = ATA_PIO4, 170 .pio_mask = ATA_PIO4,
522 .udma_mask = ATA_UDMA6, 171 .udma_mask = ATA_UDMA6,
523 .port_ops = &ahci_ops, 172 .port_ops = &ahci_ops,
524 }, 173 },
525 [board_ahci_nopmp] = 174 [board_ahci_mv] =
526 { 175 {
527 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP), 176 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
528 .flags = AHCI_FLAG_COMMON, 177 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
178 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
179 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
529 .pio_mask = ATA_PIO4, 180 .pio_mask = ATA_PIO4,
530 .udma_mask = ATA_UDMA6, 181 .udma_mask = ATA_UDMA6,
531 .port_ops = &ahci_ops, 182 .port_ops = &ahci_ops,
532 }, 183 },
533 [board_ahci_yesncq] = 184 [board_ahci_sb600] =
534 { 185 {
535 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), 186 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
187 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
188 AHCI_HFLAG_32BIT_ONLY),
536 .flags = AHCI_FLAG_COMMON, 189 .flags = AHCI_FLAG_COMMON,
537 .pio_mask = ATA_PIO4, 190 .pio_mask = ATA_PIO4,
538 .udma_mask = ATA_UDMA6, 191 .udma_mask = ATA_UDMA6,
539 .port_ops = &ahci_ops, 192 .port_ops = &ahci_sb600_ops,
540 }, 193 },
541 [board_ahci_nosntf] = 194 [board_ahci_sb700] = /* for SB700 and SB800 */
542 { 195 {
543 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), 196 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
544 .flags = AHCI_FLAG_COMMON, 197 .flags = AHCI_FLAG_COMMON,
545 .pio_mask = ATA_PIO4, 198 .pio_mask = ATA_PIO4,
546 .udma_mask = ATA_UDMA6, 199 .udma_mask = ATA_UDMA6,
547 .port_ops = &ahci_ops, 200 .port_ops = &ahci_sb600_ops,
201 },
202 [board_ahci_vt8251] =
203 {
204 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
205 .flags = AHCI_FLAG_COMMON,
206 .pio_mask = ATA_PIO4,
207 .udma_mask = ATA_UDMA6,
208 .port_ops = &ahci_vt8251_ops,
548 }, 209 },
549}; 210};
550 211
@@ -596,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
596 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 257 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
597 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 258 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
598 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 259 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
599 263
600 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
601 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -629,82 +293,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
629 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ 293 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ 294 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ 295 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */ 296 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
633 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */ 297 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */ 298 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */ 299 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */ 300 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */ 301 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */ 302 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */ 303 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */ 304 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ 305 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ 306 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ 307 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ 308 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
645 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */ 309 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */ 310 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */ 311 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */ 312 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */ 313 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */ 314 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */ 315 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */ 316 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */ 317 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */ 318 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */ 319 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */ 320 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */ 321 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */ 322 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */ 323 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ 324 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
661 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ 325 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ 326 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */ 327 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */ 328 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */ 329 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */ 330 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */ 331 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */ 332 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */ 333 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */ 334 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */ 335 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ 336 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ 337 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ 338 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ 339 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ 340 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ 341 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ 342 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ 343 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ 344 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ 345 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ 346 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ 347 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ 348 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ 349 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ 350 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ 351 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ 352 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ 353 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ 354 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ 355 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ 356 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ 357 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ 358 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ 359 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */ 360 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
697 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */ 361 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */ 362 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */ 363 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */ 364 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */ 365 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */ 366 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */ 367 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */ 368 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */ 369 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */ 370 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */ 371 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
708 372
709 /* SiS */ 373 /* SiS */
710 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 374 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -714,6 +378,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
714 /* Marvell */ 378 /* Marvell */
715 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 379 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
716 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 380 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
381 { PCI_DEVICE(0x1b4b, 0x9123),
382 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
717 383
718 /* Promise */ 384 /* Promise */
719 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 385 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -737,12 +403,6 @@ static struct pci_driver ahci_pci_driver = {
737#endif 403#endif
738}; 404};
739 405
740static int ahci_em_messages = 1;
741module_param(ahci_em_messages, int, 0444);
742/* add other LED protocol types when they become supported */
743MODULE_PARM_DESC(ahci_em_messages,
744 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
745
746#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) 406#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
747static int marvell_enable; 407static int marvell_enable;
748#else 408#else
@@ -752,166 +412,15 @@ module_param(marvell_enable, int, 0644);
752MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); 412MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
753 413
754 414
755static inline int ahci_nr_ports(u32 cap) 415static void ahci_pci_save_initial_config(struct pci_dev *pdev,
756{ 416 struct ahci_host_priv *hpriv)
757 return (cap & 0x1f) + 1;
758}
759
760static inline void __iomem *__ahci_port_base(struct ata_host *host,
761 unsigned int port_no)
762{ 417{
763 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 418 unsigned int force_port_map = 0;
764 419 unsigned int mask_port_map = 0;
765 return mmio + 0x100 + (port_no * 0x80);
766}
767
768static inline void __iomem *ahci_port_base(struct ata_port *ap)
769{
770 return __ahci_port_base(ap->host, ap->port_no);
771}
772
773static void ahci_enable_ahci(void __iomem *mmio)
774{
775 int i;
776 u32 tmp;
777 420
778 /* turn on AHCI_EN */ 421 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
779 tmp = readl(mmio + HOST_CTL); 422 dev_info(&pdev->dev, "JMB361 has only one port\n");
780 if (tmp & HOST_AHCI_EN) 423 force_port_map = 1;
781 return;
782
783 /* Some controllers need AHCI_EN to be written multiple times.
784 * Try a few times before giving up.
785 */
786 for (i = 0; i < 5; i++) {
787 tmp |= HOST_AHCI_EN;
788 writel(tmp, mmio + HOST_CTL);
789 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
790 if (tmp & HOST_AHCI_EN)
791 return;
792 msleep(10);
793 }
794
795 WARN_ON(1);
796}
797
798static ssize_t ahci_show_host_caps(struct device *dev,
799 struct device_attribute *attr, char *buf)
800{
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct ata_port *ap = ata_shost_to_port(shost);
803 struct ahci_host_priv *hpriv = ap->host->private_data;
804
805 return sprintf(buf, "%x\n", hpriv->cap);
806}
807
808static ssize_t ahci_show_host_cap2(struct device *dev,
809 struct device_attribute *attr, char *buf)
810{
811 struct Scsi_Host *shost = class_to_shost(dev);
812 struct ata_port *ap = ata_shost_to_port(shost);
813 struct ahci_host_priv *hpriv = ap->host->private_data;
814
815 return sprintf(buf, "%x\n", hpriv->cap2);
816}
817
818static ssize_t ahci_show_host_version(struct device *dev,
819 struct device_attribute *attr, char *buf)
820{
821 struct Scsi_Host *shost = class_to_shost(dev);
822 struct ata_port *ap = ata_shost_to_port(shost);
823 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
824
825 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
826}
827
828static ssize_t ahci_show_port_cmd(struct device *dev,
829 struct device_attribute *attr, char *buf)
830{
831 struct Scsi_Host *shost = class_to_shost(dev);
832 struct ata_port *ap = ata_shost_to_port(shost);
833 void __iomem *port_mmio = ahci_port_base(ap);
834
835 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
836}
837
838/**
839 * ahci_save_initial_config - Save and fixup initial config values
840 * @pdev: target PCI device
841 * @hpriv: host private area to store config values
842 *
843 * Some registers containing configuration info might be setup by
844 * BIOS and might be cleared on reset. This function saves the
845 * initial values of those registers into @hpriv such that they
846 * can be restored after controller reset.
847 *
848 * If inconsistent, config values are fixed up by this function.
849 *
850 * LOCKING:
851 * None.
852 */
853static void ahci_save_initial_config(struct pci_dev *pdev,
854 struct ahci_host_priv *hpriv)
855{
856 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
857 u32 cap, cap2, vers, port_map;
858 int i;
859 int mv;
860
861 /* make sure AHCI mode is enabled before accessing CAP */
862 ahci_enable_ahci(mmio);
863
864 /* Values prefixed with saved_ are written back to host after
865 * reset. Values without are used for driver operation.
866 */
867 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
868 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
869
870 /* CAP2 register is only defined for AHCI 1.2 and later */
871 vers = readl(mmio + HOST_VERSION);
872 if ((vers >> 16) > 1 ||
873 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
874 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
875 else
876 hpriv->saved_cap2 = cap2 = 0;
877
878 /* some chips have errata preventing 64bit use */
879 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
880 dev_printk(KERN_INFO, &pdev->dev,
881 "controller can't do 64bit DMA, forcing 32bit\n");
882 cap &= ~HOST_CAP_64;
883 }
884
885 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
886 dev_printk(KERN_INFO, &pdev->dev,
887 "controller can't do NCQ, turning off CAP_NCQ\n");
888 cap &= ~HOST_CAP_NCQ;
889 }
890
891 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
892 dev_printk(KERN_INFO, &pdev->dev,
893 "controller can do NCQ, turning on CAP_NCQ\n");
894 cap |= HOST_CAP_NCQ;
895 }
896
897 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
898 dev_printk(KERN_INFO, &pdev->dev,
899 "controller can't do PMP, turning off CAP_PMP\n");
900 cap &= ~HOST_CAP_PMP;
901 }
902
903 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
904 dev_printk(KERN_INFO, &pdev->dev,
905 "controller can't do SNTF, turning off CAP_SNTF\n");
906 cap &= ~HOST_CAP_SNTF;
907 }
908
909 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
910 port_map != 1) {
911 dev_printk(KERN_INFO, &pdev->dev,
912 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
913 port_map, 1);
914 port_map = 1;
915 } 424 }
916 425
917 /* 426 /*
@@ -921,469 +430,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
921 */ 430 */
922 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 431 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
923 if (pdev->device == 0x6121) 432 if (pdev->device == 0x6121)
924 mv = 0x3; 433 mask_port_map = 0x3;
925 else 434 else
926 mv = 0xf; 435 mask_port_map = 0xf;
927 dev_printk(KERN_ERR, &pdev->dev, 436 dev_info(&pdev->dev,
928 "MV_AHCI HACK: port_map %x -> %x\n",
929 port_map,
930 port_map & mv);
931 dev_printk(KERN_ERR, &pdev->dev,
932 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); 437 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
933
934 port_map &= mv;
935 }
936
937 /* cross check port_map and cap.n_ports */
938 if (port_map) {
939 int map_ports = 0;
940
941 for (i = 0; i < AHCI_MAX_PORTS; i++)
942 if (port_map & (1 << i))
943 map_ports++;
944
945 /* If PI has more ports than n_ports, whine, clear
946 * port_map and let it be generated from n_ports.
947 */
948 if (map_ports > ahci_nr_ports(cap)) {
949 dev_printk(KERN_WARNING, &pdev->dev,
950 "implemented port map (0x%x) contains more "
951 "ports than nr_ports (%u), using nr_ports\n",
952 port_map, ahci_nr_ports(cap));
953 port_map = 0;
954 }
955 }
956
957 /* fabricate port_map from cap.nr_ports */
958 if (!port_map) {
959 port_map = (1 << ahci_nr_ports(cap)) - 1;
960 dev_printk(KERN_WARNING, &pdev->dev,
961 "forcing PORTS_IMPL to 0x%x\n", port_map);
962
963 /* write the fixed up value to the PI register */
964 hpriv->saved_port_map = port_map;
965 }
966
967 /* record values to use during operation */
968 hpriv->cap = cap;
969 hpriv->cap2 = cap2;
970 hpriv->port_map = port_map;
971}
972
973/**
974 * ahci_restore_initial_config - Restore initial config
975 * @host: target ATA host
976 *
977 * Restore initial config stored by ahci_save_initial_config().
978 *
979 * LOCKING:
980 * None.
981 */
982static void ahci_restore_initial_config(struct ata_host *host)
983{
984 struct ahci_host_priv *hpriv = host->private_data;
985 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
986
987 writel(hpriv->saved_cap, mmio + HOST_CAP);
988 if (hpriv->saved_cap2)
989 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
990 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
991 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
992}
993
994static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
995{
996 static const int offset[] = {
997 [SCR_STATUS] = PORT_SCR_STAT,
998 [SCR_CONTROL] = PORT_SCR_CTL,
999 [SCR_ERROR] = PORT_SCR_ERR,
1000 [SCR_ACTIVE] = PORT_SCR_ACT,
1001 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1002 };
1003 struct ahci_host_priv *hpriv = ap->host->private_data;
1004
1005 if (sc_reg < ARRAY_SIZE(offset) &&
1006 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1007 return offset[sc_reg];
1008 return 0;
1009}
1010
1011static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1012{
1013 void __iomem *port_mmio = ahci_port_base(link->ap);
1014 int offset = ahci_scr_offset(link->ap, sc_reg);
1015
1016 if (offset) {
1017 *val = readl(port_mmio + offset);
1018 return 0;
1019 }
1020 return -EINVAL;
1021}
1022
1023static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1024{
1025 void __iomem *port_mmio = ahci_port_base(link->ap);
1026 int offset = ahci_scr_offset(link->ap, sc_reg);
1027
1028 if (offset) {
1029 writel(val, port_mmio + offset);
1030 return 0;
1031 }
1032 return -EINVAL;
1033}
1034
1035static void ahci_start_engine(struct ata_port *ap)
1036{
1037 void __iomem *port_mmio = ahci_port_base(ap);
1038 u32 tmp;
1039
1040 /* start DMA */
1041 tmp = readl(port_mmio + PORT_CMD);
1042 tmp |= PORT_CMD_START;
1043 writel(tmp, port_mmio + PORT_CMD);
1044 readl(port_mmio + PORT_CMD); /* flush */
1045}
1046
1047static int ahci_stop_engine(struct ata_port *ap)
1048{
1049 void __iomem *port_mmio = ahci_port_base(ap);
1050 u32 tmp;
1051
1052 tmp = readl(port_mmio + PORT_CMD);
1053
1054 /* check if the HBA is idle */
1055 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1056 return 0;
1057
1058 /* setting HBA to idle */
1059 tmp &= ~PORT_CMD_START;
1060 writel(tmp, port_mmio + PORT_CMD);
1061
1062 /* wait for engine to stop. This could be as long as 500 msec */
1063 tmp = ata_wait_register(port_mmio + PORT_CMD,
1064 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1065 if (tmp & PORT_CMD_LIST_ON)
1066 return -EIO;
1067
1068 return 0;
1069}
1070
1071static void ahci_start_fis_rx(struct ata_port *ap)
1072{
1073 void __iomem *port_mmio = ahci_port_base(ap);
1074 struct ahci_host_priv *hpriv = ap->host->private_data;
1075 struct ahci_port_priv *pp = ap->private_data;
1076 u32 tmp;
1077
1078 /* set FIS registers */
1079 if (hpriv->cap & HOST_CAP_64)
1080 writel((pp->cmd_slot_dma >> 16) >> 16,
1081 port_mmio + PORT_LST_ADDR_HI);
1082 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1083
1084 if (hpriv->cap & HOST_CAP_64)
1085 writel((pp->rx_fis_dma >> 16) >> 16,
1086 port_mmio + PORT_FIS_ADDR_HI);
1087 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1088
1089 /* enable FIS reception */
1090 tmp = readl(port_mmio + PORT_CMD);
1091 tmp |= PORT_CMD_FIS_RX;
1092 writel(tmp, port_mmio + PORT_CMD);
1093
1094 /* flush */
1095 readl(port_mmio + PORT_CMD);
1096}
1097
1098static int ahci_stop_fis_rx(struct ata_port *ap)
1099{
1100 void __iomem *port_mmio = ahci_port_base(ap);
1101 u32 tmp;
1102
1103 /* disable FIS reception */
1104 tmp = readl(port_mmio + PORT_CMD);
1105 tmp &= ~PORT_CMD_FIS_RX;
1106 writel(tmp, port_mmio + PORT_CMD);
1107
1108 /* wait for completion, spec says 500ms, give it 1000 */
1109 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1110 PORT_CMD_FIS_ON, 10, 1000);
1111 if (tmp & PORT_CMD_FIS_ON)
1112 return -EBUSY;
1113
1114 return 0;
1115}
1116
1117static void ahci_power_up(struct ata_port *ap)
1118{
1119 struct ahci_host_priv *hpriv = ap->host->private_data;
1120 void __iomem *port_mmio = ahci_port_base(ap);
1121 u32 cmd;
1122
1123 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1124
1125 /* spin up device */
1126 if (hpriv->cap & HOST_CAP_SSS) {
1127 cmd |= PORT_CMD_SPIN_UP;
1128 writel(cmd, port_mmio + PORT_CMD);
1129 }
1130
1131 /* wake up link */
1132 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1133}
1134
1135static void ahci_disable_alpm(struct ata_port *ap)
1136{
1137 struct ahci_host_priv *hpriv = ap->host->private_data;
1138 void __iomem *port_mmio = ahci_port_base(ap);
1139 u32 cmd;
1140 struct ahci_port_priv *pp = ap->private_data;
1141
1142 /* IPM bits should be disabled by libata-core */
1143 /* get the existing command bits */
1144 cmd = readl(port_mmio + PORT_CMD);
1145
1146 /* disable ALPM and ASP */
1147 cmd &= ~PORT_CMD_ASP;
1148 cmd &= ~PORT_CMD_ALPE;
1149
1150 /* force the interface back to active */
1151 cmd |= PORT_CMD_ICC_ACTIVE;
1152
1153 /* write out new cmd value */
1154 writel(cmd, port_mmio + PORT_CMD);
1155 cmd = readl(port_mmio + PORT_CMD);
1156
1157 /* wait 10ms to be sure we've come out of any low power state */
1158 msleep(10);
1159
1160 /* clear out any PhyRdy stuff from interrupt status */
1161 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1162
1163 /* go ahead and clean out PhyRdy Change from Serror too */
1164 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1165
1166 /*
1167 * Clear flag to indicate that we should ignore all PhyRdy
1168 * state changes
1169 */
1170 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1171
1172 /*
1173 * Enable interrupts on Phy Ready.
1174 */
1175 pp->intr_mask |= PORT_IRQ_PHYRDY;
1176 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1177
1178 /*
1179 * don't change the link pm policy - we can be called
1180 * just to turn of link pm temporarily
1181 */
1182}
1183
1184static int ahci_enable_alpm(struct ata_port *ap,
1185 enum link_pm policy)
1186{
1187 struct ahci_host_priv *hpriv = ap->host->private_data;
1188 void __iomem *port_mmio = ahci_port_base(ap);
1189 u32 cmd;
1190 struct ahci_port_priv *pp = ap->private_data;
1191 u32 asp;
1192
1193 /* Make sure the host is capable of link power management */
1194 if (!(hpriv->cap & HOST_CAP_ALPM))
1195 return -EINVAL;
1196
1197 switch (policy) {
1198 case MAX_PERFORMANCE:
1199 case NOT_AVAILABLE:
1200 /*
1201 * if we came here with NOT_AVAILABLE,
1202 * it just means this is the first time we
1203 * have tried to enable - default to max performance,
1204 * and let the user go to lower power modes on request.
1205 */
1206 ahci_disable_alpm(ap);
1207 return 0;
1208 case MIN_POWER:
1209 /* configure HBA to enter SLUMBER */
1210 asp = PORT_CMD_ASP;
1211 break;
1212 case MEDIUM_POWER:
1213 /* configure HBA to enter PARTIAL */
1214 asp = 0;
1215 break;
1216 default:
1217 return -EINVAL;
1218 }
1219
1220 /*
1221 * Disable interrupts on Phy Ready. This keeps us from
1222 * getting woken up due to spurious phy ready interrupts
1223 * TBD - Hot plug should be done via polling now, is
1224 * that even supported?
1225 */
1226 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1227 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1228
1229 /*
1230 * Set a flag to indicate that we should ignore all PhyRdy
1231 * state changes since these can happen now whenever we
1232 * change link state
1233 */
1234 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1235
1236 /* get the existing command bits */
1237 cmd = readl(port_mmio + PORT_CMD);
1238
1239 /*
1240 * Set ASP based on Policy
1241 */
1242 cmd |= asp;
1243
1244 /*
1245 * Setting this bit will instruct the HBA to aggressively
1246 * enter a lower power link state when it's appropriate and
1247 * based on the value set above for ASP
1248 */
1249 cmd |= PORT_CMD_ALPE;
1250
1251 /* write out new cmd value */
1252 writel(cmd, port_mmio + PORT_CMD);
1253 cmd = readl(port_mmio + PORT_CMD);
1254
1255 /* IPM bits should be set by libata-core */
1256 return 0;
1257}
1258
1259#ifdef CONFIG_PM
1260static void ahci_power_down(struct ata_port *ap)
1261{
1262 struct ahci_host_priv *hpriv = ap->host->private_data;
1263 void __iomem *port_mmio = ahci_port_base(ap);
1264 u32 cmd, scontrol;
1265
1266 if (!(hpriv->cap & HOST_CAP_SSS))
1267 return;
1268
1269 /* put device into listen mode, first set PxSCTL.DET to 0 */
1270 scontrol = readl(port_mmio + PORT_SCR_CTL);
1271 scontrol &= ~0xf;
1272 writel(scontrol, port_mmio + PORT_SCR_CTL);
1273
1274 /* then set PxCMD.SUD to 0 */
1275 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1276 cmd &= ~PORT_CMD_SPIN_UP;
1277 writel(cmd, port_mmio + PORT_CMD);
1278}
1279#endif
1280
1281static void ahci_start_port(struct ata_port *ap)
1282{
1283 struct ahci_port_priv *pp = ap->private_data;
1284 struct ata_link *link;
1285 struct ahci_em_priv *emp;
1286 ssize_t rc;
1287 int i;
1288
1289 /* enable FIS reception */
1290 ahci_start_fis_rx(ap);
1291
1292 /* enable DMA */
1293 ahci_start_engine(ap);
1294
1295 /* turn on LEDs */
1296 if (ap->flags & ATA_FLAG_EM) {
1297 ata_for_each_link(link, ap, EDGE) {
1298 emp = &pp->em_priv[link->pmp];
1299
1300 /* EM Transmit bit maybe busy during init */
1301 for (i = 0; i < EM_MAX_RETRY; i++) {
1302 rc = ahci_transmit_led_message(ap,
1303 emp->led_state,
1304 4);
1305 if (rc == -EBUSY)
1306 msleep(1);
1307 else
1308 break;
1309 }
1310 }
1311 } 438 }
1312 439
1313 if (ap->flags & ATA_FLAG_SW_ACTIVITY) 440 ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
1314 ata_for_each_link(link, ap, EDGE) 441 mask_port_map);
1315 ahci_init_sw_activity(link);
1316
1317} 442}
1318 443
1319static int ahci_deinit_port(struct ata_port *ap, const char **emsg) 444static int ahci_pci_reset_controller(struct ata_host *host)
1320{
1321 int rc;
1322
1323 /* disable DMA */
1324 rc = ahci_stop_engine(ap);
1325 if (rc) {
1326 *emsg = "failed to stop engine";
1327 return rc;
1328 }
1329
1330 /* disable FIS reception */
1331 rc = ahci_stop_fis_rx(ap);
1332 if (rc) {
1333 *emsg = "failed stop FIS RX";
1334 return rc;
1335 }
1336
1337 return 0;
1338}
1339
1340static int ahci_reset_controller(struct ata_host *host)
1341{ 445{
1342 struct pci_dev *pdev = to_pci_dev(host->dev); 446 struct pci_dev *pdev = to_pci_dev(host->dev);
1343 struct ahci_host_priv *hpriv = host->private_data;
1344 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1345 u32 tmp;
1346 447
1347 /* we must be in AHCI mode, before using anything 448 ahci_reset_controller(host);
1348 * AHCI-specific, such as HOST_RESET.
1349 */
1350 ahci_enable_ahci(mmio);
1351
1352 /* global controller reset */
1353 if (!ahci_skip_host_reset) {
1354 tmp = readl(mmio + HOST_CTL);
1355 if ((tmp & HOST_RESET) == 0) {
1356 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1357 readl(mmio + HOST_CTL); /* flush */
1358 }
1359
1360 /*
1361 * to perform host reset, OS should set HOST_RESET
1362 * and poll until this bit is read to be "0".
1363 * reset must complete within 1 second, or
1364 * the hardware should be considered fried.
1365 */
1366 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1367 HOST_RESET, 10, 1000);
1368
1369 if (tmp & HOST_RESET) {
1370 dev_printk(KERN_ERR, host->dev,
1371 "controller reset failed (0x%x)\n", tmp);
1372 return -EIO;
1373 }
1374
1375 /* turn on AHCI mode */
1376 ahci_enable_ahci(mmio);
1377
1378 /* Some registers might be cleared on reset. Restore
1379 * initial values.
1380 */
1381 ahci_restore_initial_config(host);
1382 } else
1383 dev_printk(KERN_INFO, host->dev,
1384 "skipping global host reset\n");
1385 449
1386 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 450 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
451 struct ahci_host_priv *hpriv = host->private_data;
1387 u16 tmp16; 452 u16 tmp16;
1388 453
1389 /* configure PCS */ 454 /* configure PCS */
@@ -1397,267 +462,10 @@ static int ahci_reset_controller(struct ata_host *host)
1397 return 0; 462 return 0;
1398} 463}
1399 464
1400static void ahci_sw_activity(struct ata_link *link) 465static void ahci_pci_init_controller(struct ata_host *host)
1401{
1402 struct ata_port *ap = link->ap;
1403 struct ahci_port_priv *pp = ap->private_data;
1404 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1405
1406 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1407 return;
1408
1409 emp->activity++;
1410 if (!timer_pending(&emp->timer))
1411 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1412}
1413
1414static void ahci_sw_activity_blink(unsigned long arg)
1415{
1416 struct ata_link *link = (struct ata_link *)arg;
1417 struct ata_port *ap = link->ap;
1418 struct ahci_port_priv *pp = ap->private_data;
1419 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1420 unsigned long led_message = emp->led_state;
1421 u32 activity_led_state;
1422 unsigned long flags;
1423
1424 led_message &= EM_MSG_LED_VALUE;
1425 led_message |= ap->port_no | (link->pmp << 8);
1426
1427 /* check to see if we've had activity. If so,
1428 * toggle state of LED and reset timer. If not,
1429 * turn LED to desired idle state.
1430 */
1431 spin_lock_irqsave(ap->lock, flags);
1432 if (emp->saved_activity != emp->activity) {
1433 emp->saved_activity = emp->activity;
1434 /* get the current LED state */
1435 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1436
1437 if (activity_led_state)
1438 activity_led_state = 0;
1439 else
1440 activity_led_state = 1;
1441
1442 /* clear old state */
1443 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1444
1445 /* toggle state */
1446 led_message |= (activity_led_state << 16);
1447 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1448 } else {
1449 /* switch to idle */
1450 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1451 if (emp->blink_policy == BLINK_OFF)
1452 led_message |= (1 << 16);
1453 }
1454 spin_unlock_irqrestore(ap->lock, flags);
1455 ahci_transmit_led_message(ap, led_message, 4);
1456}
1457
1458static void ahci_init_sw_activity(struct ata_link *link)
1459{
1460 struct ata_port *ap = link->ap;
1461 struct ahci_port_priv *pp = ap->private_data;
1462 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1463
1464 /* init activity stats, setup timer */
1465 emp->saved_activity = emp->activity = 0;
1466 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1467
1468 /* check our blink policy and set flag for link if it's enabled */
1469 if (emp->blink_policy)
1470 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1471}
1472
1473static int ahci_reset_em(struct ata_host *host)
1474{
1475 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1476 u32 em_ctl;
1477
1478 em_ctl = readl(mmio + HOST_EM_CTL);
1479 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1480 return -EINVAL;
1481
1482 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1483 return 0;
1484}
1485
1486static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1487 ssize_t size)
1488{
1489 struct ahci_host_priv *hpriv = ap->host->private_data;
1490 struct ahci_port_priv *pp = ap->private_data;
1491 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1492 u32 em_ctl;
1493 u32 message[] = {0, 0};
1494 unsigned long flags;
1495 int pmp;
1496 struct ahci_em_priv *emp;
1497
1498 /* get the slot number from the message */
1499 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1500 if (pmp < EM_MAX_SLOTS)
1501 emp = &pp->em_priv[pmp];
1502 else
1503 return -EINVAL;
1504
1505 spin_lock_irqsave(ap->lock, flags);
1506
1507 /*
1508 * if we are still busy transmitting a previous message,
1509 * do not allow
1510 */
1511 em_ctl = readl(mmio + HOST_EM_CTL);
1512 if (em_ctl & EM_CTL_TM) {
1513 spin_unlock_irqrestore(ap->lock, flags);
1514 return -EBUSY;
1515 }
1516
1517 /*
1518 * create message header - this is all zero except for
1519 * the message size, which is 4 bytes.
1520 */
1521 message[0] |= (4 << 8);
1522
1523 /* ignore 0:4 of byte zero, fill in port info yourself */
1524 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1525
1526 /* write message to EM_LOC */
1527 writel(message[0], mmio + hpriv->em_loc);
1528 writel(message[1], mmio + hpriv->em_loc+4);
1529
1530 /* save off new led state for port/slot */
1531 emp->led_state = state;
1532
1533 /*
1534 * tell hardware to transmit the message
1535 */
1536 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1537
1538 spin_unlock_irqrestore(ap->lock, flags);
1539 return size;
1540}
1541
1542static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1543{
1544 struct ahci_port_priv *pp = ap->private_data;
1545 struct ata_link *link;
1546 struct ahci_em_priv *emp;
1547 int rc = 0;
1548
1549 ata_for_each_link(link, ap, EDGE) {
1550 emp = &pp->em_priv[link->pmp];
1551 rc += sprintf(buf, "%lx\n", emp->led_state);
1552 }
1553 return rc;
1554}
1555
1556static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1557 size_t size)
1558{
1559 int state;
1560 int pmp;
1561 struct ahci_port_priv *pp = ap->private_data;
1562 struct ahci_em_priv *emp;
1563
1564 state = simple_strtoul(buf, NULL, 0);
1565
1566 /* get the slot number from the message */
1567 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1568 if (pmp < EM_MAX_SLOTS)
1569 emp = &pp->em_priv[pmp];
1570 else
1571 return -EINVAL;
1572
1573 /* mask off the activity bits if we are in sw_activity
1574 * mode, user should turn off sw_activity before setting
1575 * activity led through em_message
1576 */
1577 if (emp->blink_policy)
1578 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1579
1580 return ahci_transmit_led_message(ap, state, size);
1581}
1582
1583static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1584{
1585 struct ata_link *link = dev->link;
1586 struct ata_port *ap = link->ap;
1587 struct ahci_port_priv *pp = ap->private_data;
1588 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1589 u32 port_led_state = emp->led_state;
1590
1591 /* save the desired Activity LED behavior */
1592 if (val == OFF) {
1593 /* clear LFLAG */
1594 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1595
1596 /* set the LED to OFF */
1597 port_led_state &= EM_MSG_LED_VALUE_OFF;
1598 port_led_state |= (ap->port_no | (link->pmp << 8));
1599 ahci_transmit_led_message(ap, port_led_state, 4);
1600 } else {
1601 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1602 if (val == BLINK_OFF) {
1603 /* set LED to ON for idle */
1604 port_led_state &= EM_MSG_LED_VALUE_OFF;
1605 port_led_state |= (ap->port_no | (link->pmp << 8));
1606 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1607 ahci_transmit_led_message(ap, port_led_state, 4);
1608 }
1609 }
1610 emp->blink_policy = val;
1611 return 0;
1612}
1613
1614static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1615{
1616 struct ata_link *link = dev->link;
1617 struct ata_port *ap = link->ap;
1618 struct ahci_port_priv *pp = ap->private_data;
1619 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1620
1621 /* display the saved value of activity behavior for this
1622 * disk.
1623 */
1624 return sprintf(buf, "%d\n", emp->blink_policy);
1625}
1626
1627static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1628 int port_no, void __iomem *mmio,
1629 void __iomem *port_mmio)
1630{
1631 const char *emsg = NULL;
1632 int rc;
1633 u32 tmp;
1634
1635 /* make sure port is not active */
1636 rc = ahci_deinit_port(ap, &emsg);
1637 if (rc)
1638 dev_printk(KERN_WARNING, &pdev->dev,
1639 "%s (%d)\n", emsg, rc);
1640
1641 /* clear SError */
1642 tmp = readl(port_mmio + PORT_SCR_ERR);
1643 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1644 writel(tmp, port_mmio + PORT_SCR_ERR);
1645
1646 /* clear port IRQ */
1647 tmp = readl(port_mmio + PORT_IRQ_STAT);
1648 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1649 if (tmp)
1650 writel(tmp, port_mmio + PORT_IRQ_STAT);
1651
1652 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1653}
1654
1655static void ahci_init_controller(struct ata_host *host)
1656{ 466{
1657 struct ahci_host_priv *hpriv = host->private_data; 467 struct ahci_host_priv *hpriv = host->private_data;
1658 struct pci_dev *pdev = to_pci_dev(host->dev); 468 struct pci_dev *pdev = to_pci_dev(host->dev);
1659 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1660 int i;
1661 void __iomem *port_mmio; 469 void __iomem *port_mmio;
1662 u32 tmp; 470 u32 tmp;
1663 int mv; 471 int mv;
@@ -1678,220 +486,7 @@ static void ahci_init_controller(struct ata_host *host)
1678 writel(tmp, port_mmio + PORT_IRQ_STAT); 486 writel(tmp, port_mmio + PORT_IRQ_STAT);
1679 } 487 }
1680 488
1681 for (i = 0; i < host->n_ports; i++) { 489 ahci_init_controller(host);
1682 struct ata_port *ap = host->ports[i];
1683
1684 port_mmio = ahci_port_base(ap);
1685 if (ata_port_is_dummy(ap))
1686 continue;
1687
1688 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1689 }
1690
1691 tmp = readl(mmio + HOST_CTL);
1692 VPRINTK("HOST_CTL 0x%x\n", tmp);
1693 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1694 tmp = readl(mmio + HOST_CTL);
1695 VPRINTK("HOST_CTL 0x%x\n", tmp);
1696}
1697
1698static void ahci_dev_config(struct ata_device *dev)
1699{
1700 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1701
1702 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1703 dev->max_sectors = 255;
1704 ata_dev_printk(dev, KERN_INFO,
1705 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1706 }
1707}
1708
1709static unsigned int ahci_dev_classify(struct ata_port *ap)
1710{
1711 void __iomem *port_mmio = ahci_port_base(ap);
1712 struct ata_taskfile tf;
1713 u32 tmp;
1714
1715 tmp = readl(port_mmio + PORT_SIG);
1716 tf.lbah = (tmp >> 24) & 0xff;
1717 tf.lbam = (tmp >> 16) & 0xff;
1718 tf.lbal = (tmp >> 8) & 0xff;
1719 tf.nsect = (tmp) & 0xff;
1720
1721 return ata_dev_classify(&tf);
1722}
1723
1724static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1725 u32 opts)
1726{
1727 dma_addr_t cmd_tbl_dma;
1728
1729 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1730
1731 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1732 pp->cmd_slot[tag].status = 0;
1733 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1734 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1735}
1736
1737static int ahci_kick_engine(struct ata_port *ap)
1738{
1739 void __iomem *port_mmio = ahci_port_base(ap);
1740 struct ahci_host_priv *hpriv = ap->host->private_data;
1741 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1742 u32 tmp;
1743 int busy, rc;
1744
1745 /* stop engine */
1746 rc = ahci_stop_engine(ap);
1747 if (rc)
1748 goto out_restart;
1749
1750 /* need to do CLO?
1751 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1752 */
1753 busy = status & (ATA_BUSY | ATA_DRQ);
1754 if (!busy && !sata_pmp_attached(ap)) {
1755 rc = 0;
1756 goto out_restart;
1757 }
1758
1759 if (!(hpriv->cap & HOST_CAP_CLO)) {
1760 rc = -EOPNOTSUPP;
1761 goto out_restart;
1762 }
1763
1764 /* perform CLO */
1765 tmp = readl(port_mmio + PORT_CMD);
1766 tmp |= PORT_CMD_CLO;
1767 writel(tmp, port_mmio + PORT_CMD);
1768
1769 rc = 0;
1770 tmp = ata_wait_register(port_mmio + PORT_CMD,
1771 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1772 if (tmp & PORT_CMD_CLO)
1773 rc = -EIO;
1774
1775 /* restart engine */
1776 out_restart:
1777 ahci_start_engine(ap);
1778 return rc;
1779}
1780
1781static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1782 struct ata_taskfile *tf, int is_cmd, u16 flags,
1783 unsigned long timeout_msec)
1784{
1785 const u32 cmd_fis_len = 5; /* five dwords */
1786 struct ahci_port_priv *pp = ap->private_data;
1787 void __iomem *port_mmio = ahci_port_base(ap);
1788 u8 *fis = pp->cmd_tbl;
1789 u32 tmp;
1790
1791 /* prep the command */
1792 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1793 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1794
1795 /* issue & wait */
1796 writel(1, port_mmio + PORT_CMD_ISSUE);
1797
1798 if (timeout_msec) {
1799 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1800 1, timeout_msec);
1801 if (tmp & 0x1) {
1802 ahci_kick_engine(ap);
1803 return -EBUSY;
1804 }
1805 } else
1806 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1807
1808 return 0;
1809}
1810
1811static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1812 int pmp, unsigned long deadline,
1813 int (*check_ready)(struct ata_link *link))
1814{
1815 struct ata_port *ap = link->ap;
1816 struct ahci_host_priv *hpriv = ap->host->private_data;
1817 const char *reason = NULL;
1818 unsigned long now, msecs;
1819 struct ata_taskfile tf;
1820 int rc;
1821
1822 DPRINTK("ENTER\n");
1823
1824 /* prepare for SRST (AHCI-1.1 10.4.1) */
1825 rc = ahci_kick_engine(ap);
1826 if (rc && rc != -EOPNOTSUPP)
1827 ata_link_printk(link, KERN_WARNING,
1828 "failed to reset engine (errno=%d)\n", rc);
1829
1830 ata_tf_init(link->device, &tf);
1831
1832 /* issue the first D2H Register FIS */
1833 msecs = 0;
1834 now = jiffies;
1835 if (time_after(now, deadline))
1836 msecs = jiffies_to_msecs(deadline - now);
1837
1838 tf.ctl |= ATA_SRST;
1839 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1840 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1841 rc = -EIO;
1842 reason = "1st FIS failed";
1843 goto fail;
1844 }
1845
1846 /* spec says at least 5us, but be generous and sleep for 1ms */
1847 msleep(1);
1848
1849 /* issue the second D2H Register FIS */
1850 tf.ctl &= ~ATA_SRST;
1851 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1852
1853 /* wait for link to become ready */
1854 rc = ata_wait_after_reset(link, deadline, check_ready);
1855 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1856 /*
1857 * Workaround for cases where link online status can't
1858 * be trusted. Treat device readiness timeout as link
1859 * offline.
1860 */
1861 ata_link_printk(link, KERN_INFO,
1862 "device not ready, treating as offline\n");
1863 *class = ATA_DEV_NONE;
1864 } else if (rc) {
1865 /* link occupied, -ENODEV too is an error */
1866 reason = "device not ready";
1867 goto fail;
1868 } else
1869 *class = ahci_dev_classify(ap);
1870
1871 DPRINTK("EXIT, class=%u\n", *class);
1872 return 0;
1873
1874 fail:
1875 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1876 return rc;
1877}
1878
1879static int ahci_check_ready(struct ata_link *link)
1880{
1881 void __iomem *port_mmio = ahci_port_base(link->ap);
1882 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1883
1884 return ata_check_ready(status);
1885}
1886
1887static int ahci_softreset(struct ata_link *link, unsigned int *class,
1888 unsigned long deadline)
1889{
1890 int pmp = sata_srst_pmp(link);
1891
1892 DPRINTK("ENTER\n");
1893
1894 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1895} 490}
1896 491
1897static int ahci_sb600_check_ready(struct ata_link *link) 492static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +538,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1943 return rc; 538 return rc;
1944} 539}
1945 540
/*
 * Standard AHCI hardreset: stop the DMA engine, seed the D2H FIS RX area
 * with a BSY status so readiness polling reads "busy" until the device's
 * real signature FIS arrives, drive COMRESET on the PHY, then restart the
 * engine and classify whatever came online.
 */
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;	/* BSY set: "no signature received yet" */
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	/* classify only if the PHY reports something attached */
	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
1977
1978static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 541static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1979 unsigned long deadline) 542 unsigned long deadline)
1980{ 543{
@@ -2043,605 +606,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2043 return rc; 606 return rc;
2044} 607}
2045 608
2046static void ahci_postreset(struct ata_link *link, unsigned int *class)
2047{
2048 struct ata_port *ap = link->ap;
2049 void __iomem *port_mmio = ahci_port_base(ap);
2050 u32 new_tmp, tmp;
2051
2052 ata_std_postreset(link, class);
2053
2054 /* Make sure port's ATAPI bit is set appropriately */
2055 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2056 if (*class == ATA_DEV_ATAPI)
2057 new_tmp |= PORT_CMD_ATAPI;
2058 else
2059 new_tmp &= ~PORT_CMD_ATAPI;
2060 if (new_tmp != tmp) {
2061 writel(new_tmp, port_mmio + PORT_CMD);
2062 readl(port_mmio + PORT_CMD); /* flush */
2063 }
2064}
2065
2066static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2067{
2068 struct scatterlist *sg;
2069 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2070 unsigned int si;
2071
2072 VPRINTK("ENTER\n");
2073
2074 /*
2075 * Next, the S/G list.
2076 */
2077 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2078 dma_addr_t addr = sg_dma_address(sg);
2079 u32 sg_len = sg_dma_len(sg);
2080
2081 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2082 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2083 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2084 }
2085
2086 return si;
2087}
2088
2089static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2090{
2091 struct ata_port *ap = qc->ap;
2092 struct ahci_port_priv *pp = ap->private_data;
2093
2094 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2095 return ata_std_qc_defer(qc);
2096 else
2097 return sata_pmp_qc_defer_cmd_switch(qc);
2098}
2099
2100static void ahci_qc_prep(struct ata_queued_cmd *qc)
2101{
2102 struct ata_port *ap = qc->ap;
2103 struct ahci_port_priv *pp = ap->private_data;
2104 int is_atapi = ata_is_atapi(qc->tf.protocol);
2105 void *cmd_tbl;
2106 u32 opts;
2107 const u32 cmd_fis_len = 5; /* five dwords */
2108 unsigned int n_elem;
2109
2110 /*
2111 * Fill in command table information. First, the header,
2112 * a SATA Register - Host to Device command FIS.
2113 */
2114 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2115
2116 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2117 if (is_atapi) {
2118 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2119 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2120 }
2121
2122 n_elem = 0;
2123 if (qc->flags & ATA_QCFLAG_DMAMAP)
2124 n_elem = ahci_fill_sg(qc, cmd_tbl);
2125
2126 /*
2127 * Fill in command slot information.
2128 */
2129 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2130 if (qc->tf.flags & ATA_TFLAG_WRITE)
2131 opts |= AHCI_CMD_WRITE;
2132 if (is_atapi)
2133 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2134
2135 ahci_fill_cmd_slot(pp, qc->tag, opts);
2136}
2137
/*
 * Clear a single-device (FBS) error condition by setting PORT_FBS.DEC
 * and waiting for the HBA to acknowledge it by clearing the bit again.
 * Only valid while FBS is enabled on the port.
 */
static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	/* DEC still set after the retries: the error could not be cleared */
	if (fbs & PORT_FBS_DEC)
		dev_printk(KERN_ERR, ap->host->dev,
			   "failed to clear device error\n");
}
2162
/*
 * Handle an error interrupt on @ap: identify the failing link (via the
 * PORT_FBS device-with-error field when FBS is active), clear and record
 * SError plus the raw IRQ status for EH, translate the individual
 * PORT_IRQ_* error bits into err_mask/action flags, and finally freeze
 * or abort the port (or just the offending link for FBS single-device
 * errors) to hand control to EH.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		/* single-device error: PORT_FBS names the offending PMP link */
		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	/* no active link found: charge the host link */
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		/* an unrecognised FIS is a host-state-machine error */
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		/* with FBS a single device caused it; charge that device */
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		/* single-device error: abort just that link and clear DEC */
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
2274
/*
 * Per-port interrupt service: ack PORT_IRQ_STAT, route error bits to
 * ahci_error_intr(), handle asynchronous notification (emulated by
 * snooping the SDB FIS RX area when the HBA lacks SNotification), then
 * complete finished commands from SActive/CI.
 */
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	/* read and immediately ack the port interrupt status */
	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
		(status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				/* bit 15 of word 0 is the 'N' notification bit */
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
2366
/*
 * Top-level shared interrupt handler: read the global IRQ status, service
 * every implemented port with a pending bit, then ack HOST_IRQ_STAT last
 * (it is a level-triggered latch; clearing it before the port events are
 * handled would raise a spurious interrupt).
 */
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	/* only dispatch to ports the controller actually implements */
	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
2426
2427static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2428{
2429 struct ata_port *ap = qc->ap;
2430 void __iomem *port_mmio = ahci_port_base(ap);
2431 struct ahci_port_priv *pp = ap->private_data;
2432
2433 /* Keep track of the currently active link. It will be used
2434 * in completion path to determine whether NCQ phase is in
2435 * progress.
2436 */
2437 pp->active_link = qc->dev->link;
2438
2439 if (qc->tf.protocol == ATA_PROT_NCQ)
2440 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2441
2442 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2443 u32 fbs = readl(port_mmio + PORT_FBS);
2444 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2445 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2446 writel(fbs, port_mmio + PORT_FBS);
2447 pp->fbs_last_dev = qc->dev->link->pmp;
2448 }
2449
2450 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2451
2452 ahci_sw_activity(qc->dev->link);
2453
2454 return 0;
2455}
2456
2457static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2458{
2459 struct ahci_port_priv *pp = qc->ap->private_data;
2460 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2461
2462 if (pp->fbs_enabled)
2463 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2464
2465 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2466 return true;
2467}
2468
2469static void ahci_freeze(struct ata_port *ap)
2470{
2471 void __iomem *port_mmio = ahci_port_base(ap);
2472
2473 /* turn IRQ off */
2474 writel(0, port_mmio + PORT_IRQ_MASK);
2475}
2476
2477static void ahci_thaw(struct ata_port *ap)
2478{
2479 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2480 void __iomem *port_mmio = ahci_port_base(ap);
2481 u32 tmp;
2482 struct ahci_port_priv *pp = ap->private_data;
2483
2484 /* clear IRQ */
2485 tmp = readl(port_mmio + PORT_IRQ_STAT);
2486 writel(tmp, port_mmio + PORT_IRQ_STAT);
2487 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2488
2489 /* turn IRQ back on */
2490 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2491}
2492
2493static void ahci_error_handler(struct ata_port *ap)
2494{
2495 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2496 /* restart engine */
2497 ahci_stop_engine(ap);
2498 ahci_start_engine(ap);
2499 }
2500
2501 sata_pmp_error_handler(ap);
2502}
2503
2504static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2505{
2506 struct ata_port *ap = qc->ap;
2507
2508 /* make DMA engine forget about the failed command */
2509 if (qc->flags & ATA_QCFLAG_FAILED)
2510 ahci_kick_engine(ap);
2511}
2512
2513static void ahci_enable_fbs(struct ata_port *ap)
2514{
2515 struct ahci_port_priv *pp = ap->private_data;
2516 void __iomem *port_mmio = ahci_port_base(ap);
2517 u32 fbs;
2518 int rc;
2519
2520 if (!pp->fbs_supported)
2521 return;
2522
2523 fbs = readl(port_mmio + PORT_FBS);
2524 if (fbs & PORT_FBS_EN) {
2525 pp->fbs_enabled = true;
2526 pp->fbs_last_dev = -1; /* initialization */
2527 return;
2528 }
2529
2530 rc = ahci_stop_engine(ap);
2531 if (rc)
2532 return;
2533
2534 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2535 fbs = readl(port_mmio + PORT_FBS);
2536 if (fbs & PORT_FBS_EN) {
2537 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2538 pp->fbs_enabled = true;
2539 pp->fbs_last_dev = -1; /* initialization */
2540 } else
2541 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2542
2543 ahci_start_engine(ap);
2544}
2545
2546static void ahci_disable_fbs(struct ata_port *ap)
2547{
2548 struct ahci_port_priv *pp = ap->private_data;
2549 void __iomem *port_mmio = ahci_port_base(ap);
2550 u32 fbs;
2551 int rc;
2552
2553 if (!pp->fbs_supported)
2554 return;
2555
2556 fbs = readl(port_mmio + PORT_FBS);
2557 if ((fbs & PORT_FBS_EN) == 0) {
2558 pp->fbs_enabled = false;
2559 return;
2560 }
2561
2562 rc = ahci_stop_engine(ap);
2563 if (rc)
2564 return;
2565
2566 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2567 fbs = readl(port_mmio + PORT_FBS);
2568 if (fbs & PORT_FBS_EN)
2569 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2570 else {
2571 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2572 pp->fbs_enabled = false;
2573 }
2574
2575 ahci_start_engine(ap);
2576}
2577
2578static void ahci_pmp_attach(struct ata_port *ap)
2579{
2580 void __iomem *port_mmio = ahci_port_base(ap);
2581 struct ahci_port_priv *pp = ap->private_data;
2582 u32 cmd;
2583
2584 cmd = readl(port_mmio + PORT_CMD);
2585 cmd |= PORT_CMD_PMP;
2586 writel(cmd, port_mmio + PORT_CMD);
2587
2588 ahci_enable_fbs(ap);
2589
2590 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2591 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2592}
2593
2594static void ahci_pmp_detach(struct ata_port *ap)
2595{
2596 void __iomem *port_mmio = ahci_port_base(ap);
2597 struct ahci_port_priv *pp = ap->private_data;
2598 u32 cmd;
2599
2600 ahci_disable_fbs(ap);
2601
2602 cmd = readl(port_mmio + PORT_CMD);
2603 cmd &= ~PORT_CMD_PMP;
2604 writel(cmd, port_mmio + PORT_CMD);
2605
2606 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2607 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2608}
2609
/*
 * Resume one port: power it back up, restart the engine and FIS RX,
 * and resync PMP state (attach/detach) with whether a PMP is present.
 * Always returns 0.
 */
static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
2622
2623#ifdef CONFIG_PM 609#ifdef CONFIG_PM
/*
 * Suspend one port: deinit it (stop the engine and FIS reception) and
 * power it down.  If deinit fails the port is restarted so it stays
 * usable, and the error is returned to abort the suspend.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}
2639
2640static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 610static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2641{ 611{
2642 struct ata_host *host = dev_get_drvdata(&pdev->dev); 612 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2643 struct ahci_host_priv *hpriv = host->private_data; 613 struct ahci_host_priv *hpriv = host->private_data;
2644 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 614 void __iomem *mmio = hpriv->mmio;
2645 u32 ctl; 615 u32 ctl;
2646 616
2647 if (mesg.event & PM_EVENT_SUSPEND && 617 if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +645,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
2675 return rc; 645 return rc;
2676 646
2677 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 647 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2678 rc = ahci_reset_controller(host); 648 rc = ahci_pci_reset_controller(host);
2679 if (rc) 649 if (rc)
2680 return rc; 650 return rc;
2681 651
2682 ahci_init_controller(host); 652 ahci_pci_init_controller(host);
2683 } 653 }
2684 654
2685 ata_host_resume(host); 655 ata_host_resume(host);
@@ -2688,92 +658,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
2688} 658}
2689#endif 659#endif
2690 660
/*
 * Allocate and lay out the per-port DMA memory (command list, received
 * FIS area, command table) and bring the port up.  Allocations are
 * device-managed, so there is no explicit free path on teardown.
 * Returns 0 on success or a negative errno.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability: the host must advertise HOST_CAP_FBS
	 * and the individual port must set PORT_CMD_FBSCP */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else
			dev_printk(KERN_WARNING, dev,
				   "The port is not capable of FBS\n");
	}

	/* FBS uses one RX FIS area per PMP device (16x), hence the
	 * larger DMA chunk */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2765
2766static void ahci_port_stop(struct ata_port *ap)
2767{
2768 const char *emsg = NULL;
2769 int rc;
2770
2771 /* de-initialize port */
2772 rc = ahci_deinit_port(ap, &emsg);
2773 if (rc)
2774 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2775}
2776
2777static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 661static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2778{ 662{
2779 int rc; 663 int rc;
@@ -2806,31 +690,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2806 return 0; 690 return 0;
2807} 691}
2808 692
2809static void ahci_print_info(struct ata_host *host) 693static void ahci_pci_print_info(struct ata_host *host)
2810{ 694{
2811 struct ahci_host_priv *hpriv = host->private_data;
2812 struct pci_dev *pdev = to_pci_dev(host->dev); 695 struct pci_dev *pdev = to_pci_dev(host->dev);
2813 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2814 u32 vers, cap, cap2, impl, speed;
2815 const char *speed_s;
2816 u16 cc; 696 u16 cc;
2817 const char *scc_s; 697 const char *scc_s;
2818 698
2819 vers = readl(mmio + HOST_VERSION);
2820 cap = hpriv->cap;
2821 cap2 = hpriv->cap2;
2822 impl = hpriv->port_map;
2823
2824 speed = (cap >> 20) & 0xf;
2825 if (speed == 1)
2826 speed_s = "1.5";
2827 else if (speed == 2)
2828 speed_s = "3";
2829 else if (speed == 3)
2830 speed_s = "6";
2831 else
2832 speed_s = "?";
2833
2834 pci_read_config_word(pdev, 0x0a, &cc); 699 pci_read_config_word(pdev, 0x0a, &cc);
2835 if (cc == PCI_CLASS_STORAGE_IDE) 700 if (cc == PCI_CLASS_STORAGE_IDE)
2836 scc_s = "IDE"; 701 scc_s = "IDE";
@@ -2841,50 +706,7 @@ static void ahci_print_info(struct ata_host *host)
2841 else 706 else
2842 scc_s = "unknown"; 707 scc_s = "unknown";
2843 708
2844 dev_printk(KERN_INFO, &pdev->dev, 709 ahci_print_info(host, scc_s);
2845 "AHCI %02x%02x.%02x%02x "
2846 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2847 ,
2848
2849 (vers >> 24) & 0xff,
2850 (vers >> 16) & 0xff,
2851 (vers >> 8) & 0xff,
2852 vers & 0xff,
2853
2854 ((cap >> 8) & 0x1f) + 1,
2855 (cap & 0x1f) + 1,
2856 speed_s,
2857 impl,
2858 scc_s);
2859
2860 dev_printk(KERN_INFO, &pdev->dev,
2861 "flags: "
2862 "%s%s%s%s%s%s%s"
2863 "%s%s%s%s%s%s%s"
2864 "%s%s%s%s%s%s\n"
2865 ,
2866
2867 cap & HOST_CAP_64 ? "64bit " : "",
2868 cap & HOST_CAP_NCQ ? "ncq " : "",
2869 cap & HOST_CAP_SNTF ? "sntf " : "",
2870 cap & HOST_CAP_MPS ? "ilck " : "",
2871 cap & HOST_CAP_SSS ? "stag " : "",
2872 cap & HOST_CAP_ALPM ? "pm " : "",
2873 cap & HOST_CAP_LED ? "led " : "",
2874 cap & HOST_CAP_CLO ? "clo " : "",
2875 cap & HOST_CAP_ONLY ? "only " : "",
2876 cap & HOST_CAP_PMP ? "pmp " : "",
2877 cap & HOST_CAP_FBS ? "fbs " : "",
2878 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2879 cap & HOST_CAP_SSC ? "slum " : "",
2880 cap & HOST_CAP_PART ? "part " : "",
2881 cap & HOST_CAP_CCC ? "ccc " : "",
2882 cap & HOST_CAP_EMS ? "ems " : "",
2883 cap & HOST_CAP_SXS ? "sxs " : "",
2884 cap2 & HOST_CAP2_APST ? "apst " : "",
2885 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2886 cap2 & HOST_CAP2_BOH ? "boh " : ""
2887 );
2888} 710}
2889 711
2890/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is 712/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3238,7 +1060,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3238 1060
3239 VPRINTK("ENTER\n"); 1061 VPRINTK("ENTER\n");
3240 1062
3241 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); 1063 WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3242 1064
3243 if (!printed_version++) 1065 if (!printed_version++)
3244 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1066 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -3249,6 +1071,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3249 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) 1071 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3250 return -ENODEV; 1072 return -ENODEV;
3251 1073
1074 /*
1075 * For some reason, MCP89 on MacBook 7,1 doesn't work with
1076 * ahci, use ata_generic instead.
1077 */
1078 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
1079 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
1080 pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1081 pdev->subsystem_device == 0xcb89)
1082 return -ENODEV;
1083
3252 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. 1084 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3253 * At the moment, we can only use the AHCI mode. Let the users know 1085 * At the moment, we can only use the AHCI mode. Let the users know
3254 * that for SAS drives they're out of luck. 1086 * that for SAS drives they're out of luck.
@@ -3308,41 +1140,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3308 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 1140 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3309 pci_intx(pdev, 1); 1141 pci_intx(pdev, 1);
3310 1142
1143 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
1144
3311 /* save initial config */ 1145 /* save initial config */
3312 ahci_save_initial_config(pdev, hpriv); 1146 ahci_pci_save_initial_config(pdev, hpriv);
3313 1147
3314 /* prepare host */ 1148 /* prepare host */
3315 if (hpriv->cap & HOST_CAP_NCQ) { 1149 if (hpriv->cap & HOST_CAP_NCQ) {
3316 pi.flags |= ATA_FLAG_NCQ; 1150 pi.flags |= ATA_FLAG_NCQ;
3317 /* Auto-activate optimization is supposed to be supported on 1151 /*
3318 all AHCI controllers indicating NCQ support, but it seems 1152 * Auto-activate optimization is supposed to be
3319 to be broken at least on some NVIDIA MCP79 chipsets. 1153 * supported on all AHCI controllers indicating NCQ
3320 Until we get info on which NVIDIA chipsets don't have this 1154 * capability, but it seems to be broken on some
3321 issue, if any, disable AA on all NVIDIA AHCIs. */ 1155 * chipsets including NVIDIAs.
3322 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) 1156 */
1157 if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
3323 pi.flags |= ATA_FLAG_FPDMA_AA; 1158 pi.flags |= ATA_FLAG_FPDMA_AA;
3324 } 1159 }
3325 1160
3326 if (hpriv->cap & HOST_CAP_PMP) 1161 if (hpriv->cap & HOST_CAP_PMP)
3327 pi.flags |= ATA_FLAG_PMP; 1162 pi.flags |= ATA_FLAG_PMP;
3328 1163
3329 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { 1164 ahci_set_em_messages(hpriv, &pi);
3330 u8 messages;
3331 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3332 u32 em_loc = readl(mmio + HOST_EM_LOC);
3333 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3334
3335 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3336
3337 /* we only support LED message type right now */
3338 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3339 /* store em_loc */
3340 hpriv->em_loc = ((em_loc >> 16) * 4);
3341 pi.flags |= ATA_FLAG_EM;
3342 if (!(em_ctl & EM_CTL_ALHD))
3343 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3344 }
3345 }
3346 1165
3347 if (ahci_broken_system_poweroff(pdev)) { 1166 if (ahci_broken_system_poweroff(pdev)) {
3348 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; 1167 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1191,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3372 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 1191 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3373 if (!host) 1192 if (!host)
3374 return -ENOMEM; 1193 return -ENOMEM;
3375 host->iomap = pcim_iomap_table(pdev);
3376 host->private_data = hpriv; 1194 host->private_data = hpriv;
3377 1195
3378 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1196 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3395,7 +1213,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3395 1213
3396 /* set enclosure management message type */ 1214 /* set enclosure management message type */
3397 if (ap->flags & ATA_FLAG_EM) 1215 if (ap->flags & ATA_FLAG_EM)
3398 ap->em_message_type = ahci_em_messages; 1216 ap->em_message_type = hpriv->em_msg_type;
3399 1217
3400 1218
3401 /* disabled/not-implemented port */ 1219 /* disabled/not-implemented port */
@@ -3414,12 +1232,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3414 if (rc) 1232 if (rc)
3415 return rc; 1233 return rc;
3416 1234
3417 rc = ahci_reset_controller(host); 1235 rc = ahci_pci_reset_controller(host);
3418 if (rc) 1236 if (rc)
3419 return rc; 1237 return rc;
3420 1238
3421 ahci_init_controller(host); 1239 ahci_pci_init_controller(host);
3422 ahci_print_info(host); 1240 ahci_pci_print_info(host);
3423 1241
3424 pci_set_master(pdev); 1242 pci_set_master(pdev);
3425 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 1243 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 000000000000..e5fdeebf9ef0
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,354 @@
1/*
2 * ahci.h - Common AHCI SATA definitions and declarations
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#ifndef _AHCI_H
36#define _AHCI_H
37
38#include <linux/libata.h>
39
40/* Enclosure Management Control */
41#define EM_CTRL_MSG_TYPE 0x000f0000
42
43/* Enclosure Management LED Message Type */
44#define EM_MSG_LED_HBA_PORT 0x0000000f
45#define EM_MSG_LED_PMP_SLOT 0x0000ff00
46#define EM_MSG_LED_VALUE 0xffff0000
47#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
48#define EM_MSG_LED_VALUE_OFF 0xfff80000
49#define EM_MSG_LED_VALUE_ON 0x00010000
50
51enum {
52 AHCI_MAX_PORTS = 32,
53 AHCI_MAX_SG = 168, /* hardware max is 64K */
54 AHCI_DMA_BOUNDARY = 0xffffffff,
55 AHCI_MAX_CMDS = 32,
56 AHCI_CMD_SZ = 32,
57 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
58 AHCI_RX_FIS_SZ = 256,
59 AHCI_CMD_TBL_CDB = 0x40,
60 AHCI_CMD_TBL_HDR_SZ = 0x80,
61 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
62 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
63 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
64 AHCI_RX_FIS_SZ,
65 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
66 AHCI_CMD_TBL_AR_SZ +
67 (AHCI_RX_FIS_SZ * 16),
68 AHCI_IRQ_ON_SG = (1 << 31),
69 AHCI_CMD_ATAPI = (1 << 5),
70 AHCI_CMD_WRITE = (1 << 6),
71 AHCI_CMD_PREFETCH = (1 << 7),
72 AHCI_CMD_RESET = (1 << 8),
73 AHCI_CMD_CLR_BUSY = (1 << 10),
74
75 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
76 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 /* global controller registers */
80 HOST_CAP = 0x00, /* host capabilities */
81 HOST_CTL = 0x04, /* global host control */
82 HOST_IRQ_STAT = 0x08, /* interrupt status */
83 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
84 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
85 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
86 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
87 HOST_CAP2 = 0x24, /* host capabilities, extended */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
96 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
97 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
98 HOST_CAP_PART = (1 << 13), /* Partial state capable */
99 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
100 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
101 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
102 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
103 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
104 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
105 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
106 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
107 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
108 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
109 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
110 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
111 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
112
113 /* HOST_CAP2 bits */
114 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
115 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
116 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
117
118 /* registers for each SATA port */
119 PORT_LST_ADDR = 0x00, /* command list DMA addr */
120 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
121 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
122 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
123 PORT_IRQ_STAT = 0x10, /* interrupt status */
124 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
125 PORT_CMD = 0x18, /* port command */
126 PORT_TFDATA = 0x20, /* taskfile data */
127 PORT_SIG = 0x24, /* device TF signature */
128 PORT_CMD_ISSUE = 0x38, /* command issue */
129 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
130 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
131 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
132 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
133 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
134 PORT_FBS = 0x40, /* FIS-based Switching */
135
136 /* PORT_IRQ_{STAT,MASK} bits */
137 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
138 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
139 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
140 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
141 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
142 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
143 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
144 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
145
146 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
147 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
148 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
149 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
150 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
151 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
152 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
153 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
154 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
155
156 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
157 PORT_IRQ_IF_ERR |
158 PORT_IRQ_CONNECT |
159 PORT_IRQ_PHYRDY |
160 PORT_IRQ_UNK_FIS |
161 PORT_IRQ_BAD_PMP,
162 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
163 PORT_IRQ_TF_ERR |
164 PORT_IRQ_HBUS_DATA_ERR,
165 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
166 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
167 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
168
169 /* PORT_CMD bits */
170 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
171 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
172 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
173 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
174 PORT_CMD_PMP = (1 << 17), /* PMP attached */
175 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
176 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
177 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
178 PORT_CMD_CLO = (1 << 3), /* Command list override */
179 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
180 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
181 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
182
183 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
184 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
185 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
186 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
187
188 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
189 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
190 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
191 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
192 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
193 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
194 PORT_FBS_EN = (1 << 0), /* Enable FBS */
195
196 /* hpriv->flags bits */
197 AHCI_HFLAG_NO_NCQ = (1 << 0),
198 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
199 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
200 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
201 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
202 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
203 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
204 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
205 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
206 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
207 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
208 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
209 link offline */
210 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
211 AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
212 AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
213
214 /* ap->flags bits */
215
216 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
217 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
218 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
219 ATA_FLAG_IPM,
220
221 ICH_MAP = 0x90, /* ICH MAP register */
222
223 /* em constants */
224 EM_MAX_SLOTS = 8,
225 EM_MAX_RETRY = 5,
226
227 /* em_ctl bits */
228 EM_CTL_RST = (1 << 9), /* Reset */
229 EM_CTL_TM = (1 << 8), /* Transmit Message */
	EM_CTL_MR		= (1 << 0), /* Message Received */
231 EM_CTL_ALHD = (1 << 26), /* Activity LED */
232 EM_CTL_XMT = (1 << 25), /* Transmit Only */
233 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
234
235 /* em message type */
236 EM_MSG_TYPE_LED = (1 << 0), /* LED */
237 EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
238 EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
239 EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
240};
241
/* One entry in the port's 32-slot command list; hardware-defined
 * layout, all fields little-endian as consumed by the HBA. */
struct ahci_cmd_hdr {
	__le32			opts;		/* command options/flags word */
	__le32			status;
	__le32			tbl_addr;	/* command table address, low 32 bits */
	__le32			tbl_addr_hi;	/* command table address, high 32 bits */
	__le32			reserved[4];
};
249
/* Hardware scatter/gather (PRDT) entry, little-endian. */
struct ahci_sg {
	__le32			addr;		/* data buffer address, low 32 bits */
	__le32			addr_hi;	/* data buffer address, high 32 bits */
	__le32			reserved;
	__le32			flags_size;	/* flags (e.g. AHCI_IRQ_ON_SG) + size */
};
256
/* Per-PMP-slot enclosure management (LED/activity) software state. */
struct ahci_em_priv {
	enum sw_activity blink_policy;
	struct timer_list timer;
	unsigned long saved_activity;
	unsigned long activity;
	unsigned long led_state;
};
264
/* Per-port driver state, hung off ata_port->private_data.  The three
 * CPU/DMA address pairs point into one coherent DMA chunk laid out by
 * the port-start path: command list, RX FIS area, command table. */
struct ahci_port_priv {
	struct ata_link	*active_link;
	struct ahci_cmd_hdr	*cmd_slot;	/* command list (CPU view) */
	dma_addr_t	cmd_slot_dma;		/* command list (device view) */
	void		*cmd_tbl;
	dma_addr_t	cmd_tbl_dma;
	void		*rx_fis;
	dma_addr_t	rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int	ncq_saw_d2h:1;
	unsigned int	ncq_saw_dmas:1;
	unsigned int	ncq_saw_sdb:1;
	u32 		intr_mask;	/* interrupts to enable */
	bool		fbs_supported;	/* set iff FBS is supported */
	bool		fbs_enabled;	/* set iff FBS is enabled */
	int		fbs_last_dev;	/* save FBS.DEV of last FIS */
	/* enclosure management info per PM slot */
	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
};
284
/* Per-host driver state, hung off ata_host->private_data; shared by
 * the PCI and platform AHCI front-ends. */
struct ahci_host_priv {
	void __iomem *		mmio;		/* bus-independent mem map */
	unsigned int		flags;		/* AHCI_HFLAG_* */
	u32			cap;		/* cap to use */
	u32			cap2;		/* cap2 to use */
	u32			port_map;	/* port map to use */
	u32			saved_cap;	/* saved initial cap */
	u32			saved_cap2;	/* saved initial cap2 */
	u32			saved_port_map;	/* saved initial port_map */
	u32 			em_loc;		/* enclosure management location */
	u32			em_buf_sz;	/* EM buffer size in byte */
	u32			em_msg_type;	/* EM message type */
};
298
299extern int ahci_ignore_sss;
300
301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
312extern struct ata_port_operations ahci_ops;
313
314void ahci_save_initial_config(struct device *dev,
315 struct ahci_host_priv *hpriv,
316 unsigned int force_port_map,
317 unsigned int mask_port_map);
318void ahci_init_controller(struct ata_host *host);
319int ahci_reset_controller(struct ata_host *host);
320
321int ahci_do_softreset(struct ata_link *link, unsigned int *class,
322 int pmp, unsigned long deadline,
323 int (*check_ready)(struct ata_link *link));
324
325int ahci_stop_engine(struct ata_port *ap);
326void ahci_start_engine(struct ata_port *ap);
327int ahci_check_ready(struct ata_link *link);
328int ahci_kick_engine(struct ata_port *ap);
329void ahci_set_em_messages(struct ahci_host_priv *hpriv,
330 struct ata_port_info *pi);
331int ahci_reset_em(struct ata_host *host);
332irqreturn_t ahci_interrupt(int irq, void *dev_instance);
333void ahci_print_info(struct ata_host *host, const char *scc_s);
334
335static inline void __iomem *__ahci_port_base(struct ata_host *host,
336 unsigned int port_no)
337{
338 struct ahci_host_priv *hpriv = host->private_data;
339 void __iomem *mmio = hpriv->mmio;
340
341 return mmio + 0x100 + (port_no * 0x80);
342}
343
/* Per-port register base for @ap within its host's AHCI mmio region. */
static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}
348
349static inline int ahci_nr_ports(u32 cap)
350{
351 return (cap & 0x1f) + 1;
352}
353
354#endif /* _AHCI_H */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 000000000000..84b643270e7a
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,200 @@
1/*
2 * AHCI SATA platform driver
3 *
4 * Copyright 2004-2005 Red Hat, Inc.
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Copyright 2010 MontaVista Software, LLC.
7 * Anton Vorontsov <avorontsov@ru.mvista.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/gfp.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/libata.h>
23#include <linux/ahci_platform.h>
24#include "ahci.h"
25
/* SCSI host template; AHCI_SHT() supplies the common queue depth,
 * S/G table size, DMA boundary and sysfs attributes. */
static struct scsi_host_template ahci_platform_sht = {
	AHCI_SHT("ahci_platform"),
};
29
/*
 * Probe one platform AHCI controller: map its MMIO region, run the
 * optional platform init hook, read the controller configuration and
 * bring up the ATA host.  Returns 0 on success or a negative errno;
 * on any failure after a successful pdata->init(), pdata->exit() is
 * invoked to undo platform setup.
 */
static int __init ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev->platform_data;
	struct ata_port_info pi = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* the platform may replace the default port info wholesale */
	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	/* platform-specific AHCI_HFLAG_* bits ride in pi.private_data */
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	/*
	 * Some platforms might need to prepare for mmio region access,
	 * which could be done in the following init call. So, the mmio
	 * region shouldn't be accessed before init (if provided) has
	 * returned successfully.
	 */
	if (pdata && pdata->init) {
		rc = pdata->init(dev, hpriv->mmio);
		if (rc)
			return rc;
	}

	ahci_save_initial_config(dev, hpriv,
				 pdata ? pdata->force_port_map : 0,
				 pdata ? pdata->mask_port_map : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	/* undo platform-specific setup on any failure path */
	if (pdata && pdata->exit)
		pdata->exit(dev);
	return rc;
}
162
163static int __devexit ahci_remove(struct platform_device *pdev)
164{
165 struct device *dev = &pdev->dev;
166 struct ahci_platform_data *pdata = dev->platform_data;
167 struct ata_host *host = dev_get_drvdata(dev);
168
169 ata_host_detach(host);
170
171 if (pdata && pdata->exit)
172 pdata->exit(dev);
173
174 return 0;
175}
176
/* Platform driver descriptor.  Note there is no .probe here: the probe
 * routine is __init and is passed to platform_driver_probe() instead
 * (see ahci_init() below). */
static struct platform_driver ahci_driver = {
	.remove = __devexit_p(ahci_remove),
	.driver = {
		.name = "ahci",
		.owner = THIS_MODULE,
	},
};
184
/* Module init: register and immediately probe matching devices;
 * platform_driver_probe() is used because ahci_probe() is __init. */
static int __init ahci_init(void)
{
	return platform_driver_probe(&ahci_driver, ahci_probe);
}
190
/* Module unload: unregister the platform driver. */
static void __exit ahci_exit(void)
{
	platform_driver_unregister(&ahci_driver);
}
196
197MODULE_DESCRIPTION("AHCI SATA platform driver");
198MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
199MODULE_LICENSE("GPL");
200MODULE_ALIAS("platform:ahci");
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 33fb614f9784..cc5f7726bde7 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -32,6 +32,11 @@
32 * A generic parallel ATA driver using libata 32 * A generic parallel ATA driver using libata
33 */ 33 */
34 34
35enum {
36 ATA_GEN_CLASS_MATCH = (1 << 0),
37 ATA_GEN_FORCE_DMA = (1 << 1),
38};
39
35/** 40/**
36 * generic_set_mode - mode setting 41 * generic_set_mode - mode setting
37 * @link: link to set up 42 * @link: link to set up
@@ -46,16 +51,16 @@
46static int generic_set_mode(struct ata_link *link, struct ata_device **unused) 51static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
47{ 52{
48 struct ata_port *ap = link->ap; 53 struct ata_port *ap = link->ap;
54 const struct pci_device_id *id = ap->host->private_data;
49 int dma_enabled = 0; 55 int dma_enabled = 0;
50 struct ata_device *dev; 56 struct ata_device *dev;
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 57
53 /* Bits 5 and 6 indicate if DMA is active on master/slave */ 58 if (id->driver_data & ATA_GEN_FORCE_DMA) {
54 if (ap->ioaddr.bmdma_addr) 59 dma_enabled = 0xff;
60 } else if (ap->ioaddr.bmdma_addr) {
61 /* Bits 5 and 6 indicate if DMA is active on master/slave */
55 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 62 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
56 63 }
57 if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
58 dma_enabled = 0xFF;
59 64
60 ata_for_each_dev(dev, link, ENABLED) { 65 ata_for_each_dev(dev, link, ENABLED) {
61 /* We don't really care */ 66 /* We don't really care */
@@ -126,7 +131,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
126 const struct ata_port_info *ppi[] = { &info, NULL }; 131 const struct ata_port_info *ppi[] = { &info, NULL };
127 132
128 /* Don't use the generic entry unless instructed to do so */ 133 /* Don't use the generic entry unless instructed to do so */
129 if (id->driver_data == 1 && all_generic_ide == 0) 134 if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
130 return -ENODEV; 135 return -ENODEV;
131 136
132 /* Devices that need care */ 137 /* Devices that need care */
@@ -155,7 +160,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
155 return rc; 160 return rc;
156 pcim_pin_device(dev); 161 pcim_pin_device(dev);
157 } 162 }
158 return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); 163 return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
159} 164}
160 165
161static struct pci_device_id ata_generic[] = { 166static struct pci_device_id ata_generic[] = {
@@ -167,7 +172,15 @@ static struct pci_device_id ata_generic[] = {
167 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, 172 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
168 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, 173 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
169 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, 174 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
170 { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, 175 { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
176 .driver_data = ATA_GEN_FORCE_DMA },
177 /*
178 * For some reason, MCP89 on MacBook 7,1 doesn't work with
179 * ahci, use ata_generic instead.
180 */
181 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
182 PCI_VENDOR_ID_APPLE, 0xcb89,
183 .driver_data = ATA_GEN_FORCE_DMA },
171#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) 184#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
172 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, 185 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
173 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, 186 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
@@ -175,7 +188,8 @@ static struct pci_device_id ata_generic[] = {
175 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, 188 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
176#endif 189#endif
177 /* Must come last. If you add entries adjust this table appropriately */ 190 /* Must come last. If you add entries adjust this table appropriately */
178 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, 191 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
192 .driver_data = ATA_GEN_CLASS_MATCH },
179 { 0, }, 193 { 0, },
180}; 194};
181 195
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9bb..d712675d0a96 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
43 * driver the list of errata that are relevant is below, going back to 43 * driver the list of errata that are relevant is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find. 44 * PIIX4. Older device documentation is now a bit tricky to find.
45 * 45 *
46 * The chipsets all follow very much the same design. The orginal Triton 46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independant device timings, but this 47 * series chipsets do _not_ support independant device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then 48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This 49 * change little except in gaining more modes until SATA arrives. This
@@ -158,6 +158,7 @@ struct piix_map_db {
158struct piix_host_priv { 158struct piix_host_priv {
159 const int *map; 159 const int *map;
160 u32 saved_iocfg; 160 u32 saved_iocfg;
161 spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */
161 void __iomem *sidpr; 162 void __iomem *sidpr;
162}; 163};
163 164
@@ -301,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
301 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
302 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
303 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
304 { } /* terminate list */ 309 { } /* terminate list */
305}; 310};
306 311
@@ -951,12 +956,15 @@ static int piix_sidpr_scr_read(struct ata_link *link,
951 unsigned int reg, u32 *val) 956 unsigned int reg, u32 *val)
952{ 957{
953 struct piix_host_priv *hpriv = link->ap->host->private_data; 958 struct piix_host_priv *hpriv = link->ap->host->private_data;
959 unsigned long flags;
954 960
955 if (reg >= ARRAY_SIZE(piix_sidx_map)) 961 if (reg >= ARRAY_SIZE(piix_sidx_map))
956 return -EINVAL; 962 return -EINVAL;
957 963
964 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
958 piix_sidpr_sel(link, reg); 965 piix_sidpr_sel(link, reg);
959 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA); 966 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
967 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
960 return 0; 968 return 0;
961} 969}
962 970
@@ -964,12 +972,15 @@ static int piix_sidpr_scr_write(struct ata_link *link,
964 unsigned int reg, u32 val) 972 unsigned int reg, u32 val)
965{ 973{
966 struct piix_host_priv *hpriv = link->ap->host->private_data; 974 struct piix_host_priv *hpriv = link->ap->host->private_data;
975 unsigned long flags;
967 976
968 if (reg >= ARRAY_SIZE(piix_sidx_map)) 977 if (reg >= ARRAY_SIZE(piix_sidx_map))
969 return -EINVAL; 978 return -EINVAL;
970 979
980 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
971 piix_sidpr_sel(link, reg); 981 piix_sidpr_sel(link, reg);
972 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA); 982 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
983 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
973 return 0; 984 return 0;
974} 985}
975 986
@@ -1566,6 +1577,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1566 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1577 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1567 if (!hpriv) 1578 if (!hpriv)
1568 return -ENOMEM; 1579 return -ENOMEM;
1580 spin_lock_init(&hpriv->sidpr_lock);
1569 1581
1570 /* Save IOCFG, this will be used for cable detection, quirk 1582 /* Save IOCFG, this will be used for cable detection, quirk
1571 * detection and restoration on detach. This is necessary 1583 * detection and restoration on detach. This is necessary
@@ -1589,7 +1601,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1589 hpriv->map = piix_init_sata_map(pdev, port_info, 1601 hpriv->map = piix_init_sata_map(pdev, port_info,
1590 piix_map_db_table[ent->driver_data]); 1602 piix_map_db_table[ent->driver_data]);
1591 1603
1592 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 1604 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
1593 if (rc) 1605 if (rc)
1594 return rc; 1606 return rc;
1595 host->private_data = hpriv; 1607 host->private_data = hpriv;
@@ -1626,7 +1638,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1626 host->flags |= ATA_HOST_PARALLEL_SCAN; 1638 host->flags |= ATA_HOST_PARALLEL_SCAN;
1627 1639
1628 pci_set_master(pdev); 1640 pci_set_master(pdev);
1629 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); 1641 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
1630} 1642}
1631 1643
1632static void piix_remove_one(struct pci_dev *pdev) 1644static void piix_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 000000000000..8eea309ea212
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2206 @@
1/*
2 * libahci.c - Common AHCI SATA low-level routines
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/gfp.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/dma-mapping.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_cmnd.h>
46#include <linux/libata.h>
47#include "ahci.h"
48
49static int ahci_skip_host_reset;
50int ahci_ignore_sss;
51EXPORT_SYMBOL_GPL(ahci_ignore_sss);
52
53module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
55
56module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
57MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
58
59static int ahci_enable_alpm(struct ata_port *ap,
60 enum link_pm policy);
61static void ahci_disable_alpm(struct ata_port *ap);
62static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
63static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
64 size_t size);
65static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
66 ssize_t size);
67
68
69
70static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
71static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
72static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
73static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
74static int ahci_port_start(struct ata_port *ap);
75static void ahci_port_stop(struct ata_port *ap);
76static void ahci_qc_prep(struct ata_queued_cmd *qc);
77static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
78static void ahci_freeze(struct ata_port *ap);
79static void ahci_thaw(struct ata_port *ap);
80static void ahci_enable_fbs(struct ata_port *ap);
81static void ahci_disable_fbs(struct ata_port *ap);
82static void ahci_pmp_attach(struct ata_port *ap);
83static void ahci_pmp_detach(struct ata_port *ap);
84static int ahci_softreset(struct ata_link *link, unsigned int *class,
85 unsigned long deadline);
86static int ahci_hardreset(struct ata_link *link, unsigned int *class,
87 unsigned long deadline);
88static void ahci_postreset(struct ata_link *link, unsigned int *class);
89static void ahci_error_handler(struct ata_port *ap);
90static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
91static int ahci_port_resume(struct ata_port *ap);
92static void ahci_dev_config(struct ata_device *dev);
93static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
94 u32 opts);
95#ifdef CONFIG_PM
96static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
97#endif
98static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
99static ssize_t ahci_activity_store(struct ata_device *dev,
100 enum sw_activity val);
101static void ahci_init_sw_activity(struct ata_link *link);
102
103static ssize_t ahci_show_host_caps(struct device *dev,
104 struct device_attribute *attr, char *buf);
105static ssize_t ahci_show_host_cap2(struct device *dev,
106 struct device_attribute *attr, char *buf);
107static ssize_t ahci_show_host_version(struct device *dev,
108 struct device_attribute *attr, char *buf);
109static ssize_t ahci_show_port_cmd(struct device *dev,
110 struct device_attribute *attr, char *buf);
111static ssize_t ahci_read_em_buffer(struct device *dev,
112 struct device_attribute *attr, char *buf);
113static ssize_t ahci_store_em_buffer(struct device *dev,
114 struct device_attribute *attr,
115 const char *buf, size_t size);
116
117static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
119static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
120static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer);
123
/*
 * sysfs attributes published on the SCSI host by all AHCI drivers.
 * The show/store handlers are the DEVICE_ATTR definitions above;
 * NULL-terminated for the sysfs attribute walker.
 */
struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	&dev_attr_ahci_host_caps,
	&dev_attr_ahci_host_cap2,
	&dev_attr_ahci_host_version,
	&dev_attr_ahci_port_cmd,
	&dev_attr_em_buffer,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
136
/*
 * sysfs attributes published on each SCSI device (per-disk activity
 * LED control and head-unload); NULL-terminated.
 */
struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
143
/*
 * Generic AHCI port operations, built on top of sata_pmp_port_ops.
 * AHCI low-level drivers use this table (directly or via .inherits)
 * as their base port_ops.
 */
struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	/* command submission */
	.qc_defer		= ahci_pmp_qc_defer,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	/* error handling and resets */
	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	/* SCR access and port multiplier hooks */
	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	/* link power management and enclosure/activity LEDs */
	.enable_pm		= ahci_enable_alpm,
	.disable_pm		= ahci_disable_alpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);
181
/* Enclosure management messaging is enabled by default; 0444 makes the
 * parameter read-only at runtime (settable on the module command line). */
int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
188
189static void ahci_enable_ahci(void __iomem *mmio)
190{
191 int i;
192 u32 tmp;
193
194 /* turn on AHCI_EN */
195 tmp = readl(mmio + HOST_CTL);
196 if (tmp & HOST_AHCI_EN)
197 return;
198
199 /* Some controllers need AHCI_EN to be written multiple times.
200 * Try a few times before giving up.
201 */
202 for (i = 0; i < 5; i++) {
203 tmp |= HOST_AHCI_EN;
204 writel(tmp, mmio + HOST_CTL);
205 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
206 if (tmp & HOST_AHCI_EN)
207 return;
208 msleep(10);
209 }
210
211 WARN_ON(1);
212}
213
214static ssize_t ahci_show_host_caps(struct device *dev,
215 struct device_attribute *attr, char *buf)
216{
217 struct Scsi_Host *shost = class_to_shost(dev);
218 struct ata_port *ap = ata_shost_to_port(shost);
219 struct ahci_host_priv *hpriv = ap->host->private_data;
220
221 return sprintf(buf, "%x\n", hpriv->cap);
222}
223
224static ssize_t ahci_show_host_cap2(struct device *dev,
225 struct device_attribute *attr, char *buf)
226{
227 struct Scsi_Host *shost = class_to_shost(dev);
228 struct ata_port *ap = ata_shost_to_port(shost);
229 struct ahci_host_priv *hpriv = ap->host->private_data;
230
231 return sprintf(buf, "%x\n", hpriv->cap2);
232}
233
234static ssize_t ahci_show_host_version(struct device *dev,
235 struct device_attribute *attr, char *buf)
236{
237 struct Scsi_Host *shost = class_to_shost(dev);
238 struct ata_port *ap = ata_shost_to_port(shost);
239 struct ahci_host_priv *hpriv = ap->host->private_data;
240 void __iomem *mmio = hpriv->mmio;
241
242 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
243}
244
245static ssize_t ahci_show_port_cmd(struct device *dev,
246 struct device_attribute *attr, char *buf)
247{
248 struct Scsi_Host *shost = class_to_shost(dev);
249 struct ata_port *ap = ata_shost_to_port(shost);
250 void __iomem *port_mmio = ahci_port_base(ap);
251
252 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
253}
254
/*
 * sysfs "em_buffer" read handler: copy the enclosure-management message
 * buffer out to userspace one 32-bit word at a time.
 *
 * Returns -EINVAL if the port has no EM capability, a transmit is in
 * flight (EM_CTL_XMT), or the controller does not use SGPIO messages;
 * -EAGAIN if no message is ready (EM_CTL_MR clear); otherwise the
 * number of bytes copied.
 */
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EAGAIN;
	}

	/* NOTE(review): when EM_CTL_SMB is clear the area apparently holds
	 * separate transmit/receive halves, so read from the second half —
	 * confirm against the AHCI EM spec */
	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_printk(ap, KERN_WARNING,
					"EM read buffer size too large: "
					"buffer size %u, page size %lu\n",
					hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	/* unpack each 32-bit word into four bytes, LSB first */
	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return i;
}
309
/*
 * sysfs "em_buffer" write handler: pack the user buffer into 32-bit
 * words, write them into the EM transmit area, then set EM_CTL_TM to
 * kick off transmission.
 *
 * Returns -EINVAL for unsupported ports / non-SGPIO controllers or a
 * size that is not a multiple of 4 or exceeds the buffer; -EBUSY if a
 * transmit is already in progress; otherwise the size consumed.
 */
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	const unsigned char *msg_buf = buf;
	u32 em_ctl, msg;
	unsigned long flags;
	int i;

	/* check size validity */
	if (!(ap->flags & ATA_FLAG_EM) ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
	    size % 4 || size > hpriv->em_buf_sz)
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	/* pack four bytes into each 32-bit word, LSB first */
	for (i = 0; i < size; i += 4) {
		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
		      msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
		writel(msg, em_mmio + i);
	}

	/* start transmission of the message */
	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);

	return size;
}
350
/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @dev: target AHCI device
 * @hpriv: host private area to store config values
 * @force_port_map: force port map to a specified value
 * @mask_port_map: mask out particular bits from port map
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset.  This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * LOCKING:
 * None.
 */
void ahci_save_initial_config(struct device *dev,
			      struct ahci_host_priv *hpriv,
			      unsigned int force_port_map,
			      unsigned int mask_port_map)
{
	void __iomem *mmio = hpriv->mmio;
	u32 cap, cap2, vers, port_map;
	int i;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* CAP2 register is only defined for AHCI 1.2 and later */
	vers = readl(mmio + HOST_VERSION);
	if ((vers >> 16) > 1 ||
	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
	else
		hpriv->saved_cap2 = cap2 = 0;

	/* apply the AHCI_HFLAG_* quirks the LLD set in hpriv->flags:
	 * each block below overrides one capability bit the hardware
	 * reports incorrectly */

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do SNTF, turning off CAP_SNTF\n");
		cap &= ~HOST_CAP_SNTF;
	}

	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
		dev_printk(KERN_INFO, dev,
			   "controller can do FBS, turning on CAP_FBS\n");
		cap |= HOST_CAP_FBS;
	}

	if (force_port_map && port_map != force_port_map) {
		dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
			   port_map, force_port_map);
		port_map = force_port_map;
	}

	if (mask_port_map) {
		dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
			   port_map,
			   port_map & mask_port_map);
		port_map &= mask_port_map;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->cap2 = cap2;
	hpriv->port_map = port_map;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
480
/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 * Called after a controller reset clears the BIOS-programmed
 * CAP/CAP2/PORTS_IMPL registers.
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	/* only write CAP2 if the HBA had one (saved as 0 otherwise) */
	if (hpriv->saved_cap2)
		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}
501
502static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
503{
504 static const int offset[] = {
505 [SCR_STATUS] = PORT_SCR_STAT,
506 [SCR_CONTROL] = PORT_SCR_CTL,
507 [SCR_ERROR] = PORT_SCR_ERR,
508 [SCR_ACTIVE] = PORT_SCR_ACT,
509 [SCR_NOTIFICATION] = PORT_SCR_NTF,
510 };
511 struct ahci_host_priv *hpriv = ap->host->private_data;
512
513 if (sc_reg < ARRAY_SIZE(offset) &&
514 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
515 return offset[sc_reg];
516 return 0;
517}
518
519static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
520{
521 void __iomem *port_mmio = ahci_port_base(link->ap);
522 int offset = ahci_scr_offset(link->ap, sc_reg);
523
524 if (offset) {
525 *val = readl(port_mmio + offset);
526 return 0;
527 }
528 return -EINVAL;
529}
530
531static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
532{
533 void __iomem *port_mmio = ahci_port_base(link->ap);
534 int offset = ahci_scr_offset(link->ap, sc_reg);
535
536 if (offset) {
537 writel(val, port_mmio + offset);
538 return 0;
539 }
540 return -EINVAL;
541}
542
543void ahci_start_engine(struct ata_port *ap)
544{
545 void __iomem *port_mmio = ahci_port_base(ap);
546 u32 tmp;
547
548 /* start DMA */
549 tmp = readl(port_mmio + PORT_CMD);
550 tmp |= PORT_CMD_START;
551 writel(tmp, port_mmio + PORT_CMD);
552 readl(port_mmio + PORT_CMD); /* flush */
553}
554EXPORT_SYMBOL_GPL(ahci_start_engine);
555
/**
 * ahci_stop_engine - stop the port's command list DMA engine
 * @ap: port to stop
 *
 * Clears PxCMD.ST and waits for the list-running indication to clear.
 *
 * RETURNS: 0 on success, -EIO if the engine fails to stop in time.
 */
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
580
/*
 * Program the command list and received-FIS DMA addresses into the
 * port registers (high dwords only on 64-bit capable HBAs), then set
 * PxCMD.FRE to enable FIS reception.
 */
static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
607
/*
 * Clear PxCMD.FRE and wait for FIS reception to actually stop.
 * Returns 0 on success, -EBUSY if the FIS engine is still running
 * after the timeout.
 */
static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
626
/*
 * Power up the port: assert spin-up (only meaningful on staggered
 * spin-up capable HBAs) and force the interface to the active ICC
 * state.
 */
static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
644
/*
 * Turn off aggressive link power management on a port: clear
 * ALPE/ASP, force the link back to active, clear any stale PhyRdy
 * events, and re-enable PhyRdy interrupts.
 */
static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}
693
/*
 * Enable aggressive link power management on a port according to
 * @policy: SLUMBER for MIN_POWER, PARTIAL for MEDIUM_POWER.
 * MAX_PERFORMANCE / NOT_AVAILABLE disable ALPM entirely.  PhyRdy
 * interrupts are masked while ALPM is on, since link state changes
 * generate them constantly.  Returns -EINVAL if the HBA lacks ALPM
 * support or the policy is unknown.
 */
static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}
768
769#ifdef CONFIG_PM
770static void ahci_power_down(struct ata_port *ap)
771{
772 struct ahci_host_priv *hpriv = ap->host->private_data;
773 void __iomem *port_mmio = ahci_port_base(ap);
774 u32 cmd, scontrol;
775
776 if (!(hpriv->cap & HOST_CAP_SSS))
777 return;
778
779 /* put device into listen mode, first set PxSCTL.DET to 0 */
780 scontrol = readl(port_mmio + PORT_SCR_CTL);
781 scontrol &= ~0xf;
782 writel(scontrol, port_mmio + PORT_SCR_CTL);
783
784 /* then set PxCMD.SUD to 0 */
785 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
786 cmd &= ~PORT_CMD_SPIN_UP;
787 writel(cmd, port_mmio + PORT_CMD);
788}
789#endif
790
791static void ahci_start_port(struct ata_port *ap)
792{
793 struct ahci_port_priv *pp = ap->private_data;
794 struct ata_link *link;
795 struct ahci_em_priv *emp;
796 ssize_t rc;
797 int i;
798
799 /* enable FIS reception */
800 ahci_start_fis_rx(ap);
801
802 /* enable DMA */
803 ahci_start_engine(ap);
804
805 /* turn on LEDs */
806 if (ap->flags & ATA_FLAG_EM) {
807 ata_for_each_link(link, ap, EDGE) {
808 emp = &pp->em_priv[link->pmp];
809
810 /* EM Transmit bit maybe busy during init */
811 for (i = 0; i < EM_MAX_RETRY; i++) {
812 rc = ahci_transmit_led_message(ap,
813 emp->led_state,
814 4);
815 if (rc == -EBUSY)
816 msleep(1);
817 else
818 break;
819 }
820 }
821 }
822
823 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
824 ata_for_each_link(link, ap, EDGE)
825 ahci_init_sw_activity(link);
826
827}
828
/*
 * ahci_deinit_port - quiesce a port
 * @ap:   port to deinitialize
 * @emsg: out parameter, set to a description of the failing step
 *
 * Stops the command DMA engine and FIS reception, in that order.
 * Returns 0 on success or the error from the failing step, with
 * *@emsg naming it.
 */
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	rc = ahci_stop_engine(ap);	/* halt command DMA first */
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	rc = ahci_stop_fis_rx(ap);	/* then stop FIS reception */
	if (rc)
		*emsg = "failed stop FIS RX";

	return rc;
}
849
850int ahci_reset_controller(struct ata_host *host)
851{
852 struct ahci_host_priv *hpriv = host->private_data;
853 void __iomem *mmio = hpriv->mmio;
854 u32 tmp;
855
856 /* we must be in AHCI mode, before using anything
857 * AHCI-specific, such as HOST_RESET.
858 */
859 ahci_enable_ahci(mmio);
860
861 /* global controller reset */
862 if (!ahci_skip_host_reset) {
863 tmp = readl(mmio + HOST_CTL);
864 if ((tmp & HOST_RESET) == 0) {
865 writel(tmp | HOST_RESET, mmio + HOST_CTL);
866 readl(mmio + HOST_CTL); /* flush */
867 }
868
869 /*
870 * to perform host reset, OS should set HOST_RESET
871 * and poll until this bit is read to be "0".
872 * reset must complete within 1 second, or
873 * the hardware should be considered fried.
874 */
875 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
876 HOST_RESET, 10, 1000);
877
878 if (tmp & HOST_RESET) {
879 dev_printk(KERN_ERR, host->dev,
880 "controller reset failed (0x%x)\n", tmp);
881 return -EIO;
882 }
883
884 /* turn on AHCI mode */
885 ahci_enable_ahci(mmio);
886
887 /* Some registers might be cleared on reset. Restore
888 * initial values.
889 */
890 ahci_restore_initial_config(host);
891 } else
892 dev_printk(KERN_INFO, host->dev,
893 "skipping global host reset\n");
894
895 return 0;
896}
897EXPORT_SYMBOL_GPL(ahci_reset_controller);
898
899static void ahci_sw_activity(struct ata_link *link)
900{
901 struct ata_port *ap = link->ap;
902 struct ahci_port_priv *pp = ap->private_data;
903 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
904
905 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
906 return;
907
908 emp->activity++;
909 if (!timer_pending(&emp->timer))
910 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
911}
912
/*
 * ahci_sw_activity_blink - timer callback driving the activity LED
 * @arg: the ata_link passed to setup_timer() by ahci_init_sw_activity()
 *
 * If activity occurred since the last run, toggles the LED and re-arms
 * the timer for another 100ms; otherwise leaves the LED in the idle
 * state dictated by the link's blink policy.  The LED state is built
 * under ap->lock and then transmitted via the EM message buffer.
 */
static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	/* keep the LED value bits, address the message to port/PMP slot */
	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	/* transmit outside the lock; ahci_transmit_led_message locks itself */
	ahci_transmit_led_message(ap, led_message, 4);
}
956
957static void ahci_init_sw_activity(struct ata_link *link)
958{
959 struct ata_port *ap = link->ap;
960 struct ahci_port_priv *pp = ap->private_data;
961 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
962
963 /* init activity stats, setup timer */
964 emp->saved_activity = emp->activity = 0;
965 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
966
967 /* check our blink policy and set flag for link if it's enabled */
968 if (emp->blink_policy)
969 link->flags |= ATA_LFLAG_SW_ACTIVITY;
970}
971
972int ahci_reset_em(struct ata_host *host)
973{
974 struct ahci_host_priv *hpriv = host->private_data;
975 void __iomem *mmio = hpriv->mmio;
976 u32 em_ctl;
977
978 em_ctl = readl(mmio + HOST_EM_CTL);
979 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
980 return -EINVAL;
981
982 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
983 return 0;
984}
985EXPORT_SYMBOL_GPL(ahci_reset_em);
986
/*
 * ahci_transmit_led_message - send an LED control message via the EM buffer
 * @ap:    port the message applies to
 * @state: encoded LED message; PMP slot number in bits 15:8
 *         (EM_MSG_LED_PMP_SLOT), LED value bits elsewhere
 * @size:  byte count reported back to the caller on success
 *
 * Writes an LED-type enclosure management message into the EM
 * transmit buffer and kicks the hardware to send it.  The new LED
 * state is cached in the slot's em_priv either way.
 *
 * Returns @size on success, -EINVAL for an out-of-range slot,
 * -EBUSY if a previous message is still being transmitted.
 */
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc+4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}
1044
1045static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1046{
1047 struct ahci_port_priv *pp = ap->private_data;
1048 struct ata_link *link;
1049 struct ahci_em_priv *emp;
1050 int rc = 0;
1051
1052 ata_for_each_link(link, ap, EDGE) {
1053 emp = &pp->em_priv[link->pmp];
1054 rc += sprintf(buf, "%lx\n", emp->led_state);
1055 }
1056 return rc;
1057}
1058
1059static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1060 size_t size)
1061{
1062 int state;
1063 int pmp;
1064 struct ahci_port_priv *pp = ap->private_data;
1065 struct ahci_em_priv *emp;
1066
1067 state = simple_strtoul(buf, NULL, 0);
1068
1069 /* get the slot number from the message */
1070 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1071 if (pmp < EM_MAX_SLOTS)
1072 emp = &pp->em_priv[pmp];
1073 else
1074 return -EINVAL;
1075
1076 /* mask off the activity bits if we are in sw_activity
1077 * mode, user should turn off sw_activity before setting
1078 * activity led through em_message
1079 */
1080 if (emp->blink_policy)
1081 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1082
1083 return ahci_transmit_led_message(ap, state, size);
1084}
1085
1086static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1087{
1088 struct ata_link *link = dev->link;
1089 struct ata_port *ap = link->ap;
1090 struct ahci_port_priv *pp = ap->private_data;
1091 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1092 u32 port_led_state = emp->led_state;
1093
1094 /* save the desired Activity LED behavior */
1095 if (val == OFF) {
1096 /* clear LFLAG */
1097 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1098
1099 /* set the LED to OFF */
1100 port_led_state &= EM_MSG_LED_VALUE_OFF;
1101 port_led_state |= (ap->port_no | (link->pmp << 8));
1102 ahci_transmit_led_message(ap, port_led_state, 4);
1103 } else {
1104 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1105 if (val == BLINK_OFF) {
1106 /* set LED to ON for idle */
1107 port_led_state &= EM_MSG_LED_VALUE_OFF;
1108 port_led_state |= (ap->port_no | (link->pmp << 8));
1109 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1110 ahci_transmit_led_message(ap, port_led_state, 4);
1111 }
1112 }
1113 emp->blink_policy = val;
1114 return 0;
1115}
1116
1117static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1118{
1119 struct ata_link *link = dev->link;
1120 struct ata_port *ap = link->ap;
1121 struct ahci_port_priv *pp = ap->private_data;
1122 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1123
1124 /* display the saved value of activity behavior for this
1125 * disk.
1126 */
1127 return sprintf(buf, "%d\n", emp->blink_policy);
1128}
1129
1130static void ahci_port_init(struct device *dev, struct ata_port *ap,
1131 int port_no, void __iomem *mmio,
1132 void __iomem *port_mmio)
1133{
1134 const char *emsg = NULL;
1135 int rc;
1136 u32 tmp;
1137
1138 /* make sure port is not active */
1139 rc = ahci_deinit_port(ap, &emsg);
1140 if (rc)
1141 dev_warn(dev, "%s (%d)\n", emsg, rc);
1142
1143 /* clear SError */
1144 tmp = readl(port_mmio + PORT_SCR_ERR);
1145 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1146 writel(tmp, port_mmio + PORT_SCR_ERR);
1147
1148 /* clear port IRQ */
1149 tmp = readl(port_mmio + PORT_IRQ_STAT);
1150 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1151 if (tmp)
1152 writel(tmp, port_mmio + PORT_IRQ_STAT);
1153
1154 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1155}
1156
1157void ahci_init_controller(struct ata_host *host)
1158{
1159 struct ahci_host_priv *hpriv = host->private_data;
1160 void __iomem *mmio = hpriv->mmio;
1161 int i;
1162 void __iomem *port_mmio;
1163 u32 tmp;
1164
1165 for (i = 0; i < host->n_ports; i++) {
1166 struct ata_port *ap = host->ports[i];
1167
1168 port_mmio = ahci_port_base(ap);
1169 if (ata_port_is_dummy(ap))
1170 continue;
1171
1172 ahci_port_init(host->dev, ap, i, mmio, port_mmio);
1173 }
1174
1175 tmp = readl(mmio + HOST_CTL);
1176 VPRINTK("HOST_CTL 0x%x\n", tmp);
1177 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1178 tmp = readl(mmio + HOST_CTL);
1179 VPRINTK("HOST_CTL 0x%x\n", tmp);
1180}
1181EXPORT_SYMBOL_GPL(ahci_init_controller);
1182
1183static void ahci_dev_config(struct ata_device *dev)
1184{
1185 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1186
1187 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1188 dev->max_sectors = 255;
1189 ata_dev_printk(dev, KERN_INFO,
1190 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1191 }
1192}
1193
1194static unsigned int ahci_dev_classify(struct ata_port *ap)
1195{
1196 void __iomem *port_mmio = ahci_port_base(ap);
1197 struct ata_taskfile tf;
1198 u32 tmp;
1199
1200 tmp = readl(port_mmio + PORT_SIG);
1201 tf.lbah = (tmp >> 24) & 0xff;
1202 tf.lbam = (tmp >> 16) & 0xff;
1203 tf.lbal = (tmp >> 8) & 0xff;
1204 tf.nsect = (tmp) & 0xff;
1205
1206 return ata_dev_classify(&tf);
1207}
1208
1209static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1210 u32 opts)
1211{
1212 dma_addr_t cmd_tbl_dma;
1213
1214 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1215
1216 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1217 pp->cmd_slot[tag].status = 0;
1218 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1219 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1220}
1221
/*
 * ahci_kick_engine - recover the command engine after a failure
 * @ap: port to kick
 *
 * Stops the DMA engine and, if the device shows BSY/DRQ or a port
 * multiplier is attached (CLO is mandatory then, AHCI-1.3 9.2),
 * issues a Command List Override to clear the busy state.  The
 * engine is always restarted before returning.
 *
 * Returns 0 on success, -EOPNOTSUPP if CLO is needed but the host
 * lacks HOST_CAP_CLO, -EIO if CLO does not complete, or the error
 * from stopping the engine.
 */
int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	/* hardware clears PORT_CMD_CLO when the override completes */
	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
1266
/*
 * ahci_exec_polled_cmd - issue a command through slot 0 and poll
 * @ap:           target port
 * @pmp:          port multiplier port number for the FIS
 * @tf:           taskfile to send
 * @is_cmd:       whether the FIS carries a command (C bit)
 * @flags:        extra AHCI_CMD_* flags for the command slot
 * @timeout_msec: 0 means issue-and-forget (just flush the write);
 *                otherwise poll PORT_CMD_ISSUE until it clears
 *
 * Returns 0 on success, -EBUSY if the command did not complete
 * within the timeout (the engine is kicked in that case).
 */
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		/* bit 0 of PORT_CMD_ISSUE clears when slot 0 completes */
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
1296
/*
 * ahci_do_softreset - perform an ATA software reset on a link
 * @link:        link to reset
 * @class:       out parameter for the resulting device class
 * @pmp:         PMP port number the reset FISes are addressed to
 * @deadline:    jiffies deadline for the whole operation
 * @check_ready: callback polled to decide when the link is ready
 *
 * Implements SRST per AHCI-1.1 10.4.1: kick the engine, send a
 * Register FIS with SRST set, wait, send one with SRST cleared,
 * then wait for readiness.  With AHCI_HFLAG_SRST_TOUT_IS_OFFLINE a
 * readiness timeout is treated as "no device" instead of an error.
 *
 * Returns 0 on success, -EIO/-EBUSY style negative errno on failure.
 */
int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted.  Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_printk(link, KERN_INFO,
				"device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}
1364
1365int ahci_check_ready(struct ata_link *link)
1366{
1367 void __iomem *port_mmio = ahci_port_base(link->ap);
1368 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1369
1370 return ata_check_ready(status);
1371}
1372EXPORT_SYMBOL_GPL(ahci_check_ready);
1373
/*
 * ahci_softreset - standard AHCI softreset
 * @link:     link to reset
 * @class:    out parameter for the resulting device class
 * @deadline: jiffies deadline
 *
 * Thin wrapper around ahci_do_softreset() using the default
 * ahci_check_ready() readiness test.
 */
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
1384
/*
 * ahci_hardreset - COMRESET-based hard reset
 * @link:     link to reset
 * @class:    out parameter for the resulting device class
 * @deadline: jiffies deadline
 *
 * Stops the engine, primes the D2H RX area so readiness polling sees
 * a busy device until the real D2H FIS arrives, performs the SATA
 * hardreset, restarts the engine and classifies the device if the
 * link came up online.  Returns the result of sata_link_hardreset().
 */
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	/* 0x80 == ATA_BUSY: make the snooped status read as busy */
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
1416
1417static void ahci_postreset(struct ata_link *link, unsigned int *class)
1418{
1419 struct ata_port *ap = link->ap;
1420 void __iomem *port_mmio = ahci_port_base(ap);
1421 u32 new_tmp, tmp;
1422
1423 ata_std_postreset(link, class);
1424
1425 /* Make sure port's ATAPI bit is set appropriately */
1426 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1427 if (*class == ATA_DEV_ATAPI)
1428 new_tmp |= PORT_CMD_ATAPI;
1429 else
1430 new_tmp &= ~PORT_CMD_ATAPI;
1431 if (new_tmp != tmp) {
1432 writel(new_tmp, port_mmio + PORT_CMD);
1433 readl(port_mmio + PORT_CMD); /* flush */
1434 }
1435}
1436
1437static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1438{
1439 struct scatterlist *sg;
1440 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1441 unsigned int si;
1442
1443 VPRINTK("ENTER\n");
1444
1445 /*
1446 * Next, the S/G list.
1447 */
1448 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1449 dma_addr_t addr = sg_dma_address(sg);
1450 u32 sg_len = sg_dma_len(sg);
1451
1452 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1453 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1454 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1455 }
1456
1457 return si;
1458}
1459
1460static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
1461{
1462 struct ata_port *ap = qc->ap;
1463 struct ahci_port_priv *pp = ap->private_data;
1464
1465 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
1466 return ata_std_qc_defer(qc);
1467 else
1468 return sata_pmp_qc_defer_cmd_switch(qc);
1469}
1470
/*
 * ahci_qc_prep - build the command table and slot for a queued command
 * @qc: command to prepare
 *
 * Writes the H2D Register FIS (and, for ATAPI, the CDB) into the
 * tag's command table, fills the PRD list for DMA-mapped commands,
 * and finally fills the command list slot with the assembled options.
 */
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		/* zero-pad the CDB area before copying the actual CDB */
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
1508
1509static void ahci_fbs_dec_intr(struct ata_port *ap)
1510{
1511 struct ahci_port_priv *pp = ap->private_data;
1512 void __iomem *port_mmio = ahci_port_base(ap);
1513 u32 fbs = readl(port_mmio + PORT_FBS);
1514 int retries = 3;
1515
1516 DPRINTK("ENTER\n");
1517 BUG_ON(!pp->fbs_enabled);
1518
1519 /* time to wait for DEC is not specified by AHCI spec,
1520 * add a retry loop for safety.
1521 */
1522 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
1523 fbs = readl(port_mmio + PORT_FBS);
1524 while ((fbs & PORT_FBS_DEC) && retries--) {
1525 udelay(1);
1526 fbs = readl(port_mmio + PORT_FBS);
1527 }
1528
1529 if (fbs & PORT_FBS_DEC)
1530 dev_printk(KERN_ERR, ap->host->dev,
1531 "failed to clear device error\n");
1532}
1533
/*
 * ahci_error_intr - handle an error interrupt on a port
 * @ap:       port that raised the error
 * @irq_stat: PORT_IRQ_STAT snapshot
 *
 * Determines which link the error belongs to (via PORT_FBS.DWE when
 * FBS is enabled, otherwise the active link), records descriptions
 * and error masks on the right eh_info, clears SError, and hands the
 * port or link over to EH: freeze for fatal interrupts, link abort
 * plus FBS DEC for per-device FBS errors, port abort otherwise.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		/* DWE = device with error, reported by the hardware */
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
1645
/*
 * ahci_port_intr - per-port interrupt handler body
 * @ap: port that raised the interrupt
 *
 * Reads and acknowledges PORT_IRQ_STAT, filters PhyRdy noise while
 * hotplug is suppressed, dispatches errors to ahci_error_intr(),
 * handles SDB-FIS async notification, and completes finished
 * commands based on PORT_SCR_ACT / PORT_CMD_ISSUE.
 */
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	/* ack the port-level interrupt status (write-1-to-clear) */
	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
		(status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		/* bits 16/18: PhyRdy change / Comm Wake in SError */
		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some constrollers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}


	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
1738
/*
 * ahci_interrupt - top-level AHCI interrupt handler
 * @irq:          interrupt number (unused)
 * @dev_instance: the ata_host registered with request_irq
 *
 * Reads HOST_IRQ_STAT, runs ahci_port_intr() for every implemented
 * port with its bit set, then clears HOST_IRQ_STAT *after* the port
 * events have been cleared (see the comment below for why the order
 * matters).  Returns IRQ_HANDLED if any port was serviced.
 */
irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	/* only service ports that are actually implemented */
	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(ahci_interrupt);
1799
/*
 * ahci_qc_issue - hand a prepared command to the hardware
 * @qc: command to issue
 *
 * Records the active link, sets the SActive bit first for NCQ
 * commands, switches the FBS device field if the target PMP device
 * changed, and finally writes the tag's bit into PORT_CMD_ISSUE.
 * Always returns 0.
 */
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	/* for NCQ, SActive must be set before the issue bit */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);

	/* retarget the FBS device field when switching PMP devices */
	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}
1829
1830static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1831{
1832 struct ahci_port_priv *pp = qc->ap->private_data;
1833 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1834
1835 if (pp->fbs_enabled)
1836 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
1837
1838 ata_tf_from_fis(d2h_fis, &qc->result_tf);
1839 return true;
1840}
1841
/*
 * ahci_freeze - freeze a port by masking all of its interrupts
 * @ap: port to freeze
 */
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}
1849
1850static void ahci_thaw(struct ata_port *ap)
1851{
1852 struct ahci_host_priv *hpriv = ap->host->private_data;
1853 void __iomem *mmio = hpriv->mmio;
1854 void __iomem *port_mmio = ahci_port_base(ap);
1855 u32 tmp;
1856 struct ahci_port_priv *pp = ap->private_data;
1857
1858 /* clear IRQ */
1859 tmp = readl(port_mmio + PORT_IRQ_STAT);
1860 writel(tmp, port_mmio + PORT_IRQ_STAT);
1861 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1862
1863 /* turn IRQ back on */
1864 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1865}
1866
/*
 * Port error handler callback.  If the port is not frozen, bounce the DMA
 * engine (stop then start) before handing control to the PMP-aware EH core;
 * afterwards, stop the engine again if no device ended up enabled on the
 * port's primary link.
 */
static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* run the generic (PMP-aware) error handling */
	sata_pmp_error_handler(ap);

	/* no enabled device left — leave the engine stopped */
	if (!ata_dev_enabled(ap->link.device))
		ahci_stop_engine(ap);
}
1880
1881static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1882{
1883 struct ata_port *ap = qc->ap;
1884
1885 /* make DMA engine forget about the failed command */
1886 if (qc->flags & ATA_QCFLAG_FAILED)
1887 ahci_kick_engine(ap);
1888}
1889
/*
 * Enable FIS-based switching on a port that supports it.  Safe to call
 * unconditionally: returns immediately when FBS is unsupported or already
 * on.  The DMA engine must be stopped while PORT_FBS is toggled.
 */
static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		/* already enabled — just sync software state */
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	/* engine must be idle before changing PORT_FBS */
	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	/* read back to verify the enable bit actually stuck */
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");

	ahci_start_engine(ap);
}
1922
/*
 * Disable FIS-based switching on a port.  Mirror image of
 * ahci_enable_fbs(): no-op when FBS is unsupported or already off, and the
 * DMA engine is stopped around the PORT_FBS update.
 */
static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		/* already disabled — just sync software state */
		pp->fbs_enabled = false;
		return;
	}

	/* engine must be idle before changing PORT_FBS */
	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	/* read back to confirm the enable bit really cleared */
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
		pp->fbs_enabled = false;
	}

	ahci_start_engine(ap);
}
1954
1955static void ahci_pmp_attach(struct ata_port *ap)
1956{
1957 void __iomem *port_mmio = ahci_port_base(ap);
1958 struct ahci_port_priv *pp = ap->private_data;
1959 u32 cmd;
1960
1961 cmd = readl(port_mmio + PORT_CMD);
1962 cmd |= PORT_CMD_PMP;
1963 writel(cmd, port_mmio + PORT_CMD);
1964
1965 ahci_enable_fbs(ap);
1966
1967 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1968 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1969}
1970
1971static void ahci_pmp_detach(struct ata_port *ap)
1972{
1973 void __iomem *port_mmio = ahci_port_base(ap);
1974 struct ahci_port_priv *pp = ap->private_data;
1975 u32 cmd;
1976
1977 ahci_disable_fbs(ap);
1978
1979 cmd = readl(port_mmio + PORT_CMD);
1980 cmd &= ~PORT_CMD_PMP;
1981 writel(cmd, port_mmio + PORT_CMD);
1982
1983 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1984 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1985}
1986
1987static int ahci_port_resume(struct ata_port *ap)
1988{
1989 ahci_power_up(ap);
1990 ahci_start_port(ap);
1991
1992 if (sata_pmp_attached(ap))
1993 ahci_pmp_attach(ap);
1994 else
1995 ahci_pmp_detach(ap);
1996
1997 return 0;
1998}
1999
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc = ahci_deinit_port(ap, &emsg);

	if (rc) {
		/* Deinit failed: report why and keep the port running. */
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
		return rc;
	}

	ahci_power_down(ap);
	return 0;
}
#endif
2017
/*
 * Per-port init callback: allocate port private data and the coherent DMA
 * region holding the command list, received-FIS area and command table,
 * then bring the port up via ahci_port_resume().  Allocations are devm-
 * managed, so there is no explicit unwind path here.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			/* quirk flag: trust FBS even though FBSCP is clear */
			dev_printk(KERN_INFO, dev,
				   "port %d can do FBS, forcing FBSCP\n",
				   ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_printk(KERN_WARNING, dev,
				   "port %d is not capable of FBS\n",
				   ap->port_no);
	}

	/* FBS needs one RX FIS area per PMP device (16 of them) */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2098
2099static void ahci_port_stop(struct ata_port *ap)
2100{
2101 const char *emsg = NULL;
2102 int rc;
2103
2104 /* de-initialize port */
2105 rc = ahci_deinit_port(ap, &emsg);
2106 if (rc)
2107 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2108}
2109
2110void ahci_print_info(struct ata_host *host, const char *scc_s)
2111{
2112 struct ahci_host_priv *hpriv = host->private_data;
2113 void __iomem *mmio = hpriv->mmio;
2114 u32 vers, cap, cap2, impl, speed;
2115 const char *speed_s;
2116
2117 vers = readl(mmio + HOST_VERSION);
2118 cap = hpriv->cap;
2119 cap2 = hpriv->cap2;
2120 impl = hpriv->port_map;
2121
2122 speed = (cap >> 20) & 0xf;
2123 if (speed == 1)
2124 speed_s = "1.5";
2125 else if (speed == 2)
2126 speed_s = "3";
2127 else if (speed == 3)
2128 speed_s = "6";
2129 else
2130 speed_s = "?";
2131
2132 dev_info(host->dev,
2133 "AHCI %02x%02x.%02x%02x "
2134 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2135 ,
2136
2137 (vers >> 24) & 0xff,
2138 (vers >> 16) & 0xff,
2139 (vers >> 8) & 0xff,
2140 vers & 0xff,
2141
2142 ((cap >> 8) & 0x1f) + 1,
2143 (cap & 0x1f) + 1,
2144 speed_s,
2145 impl,
2146 scc_s);
2147
2148 dev_info(host->dev,
2149 "flags: "
2150 "%s%s%s%s%s%s%s"
2151 "%s%s%s%s%s%s%s"
2152 "%s%s%s%s%s%s\n"
2153 ,
2154
2155 cap & HOST_CAP_64 ? "64bit " : "",
2156 cap & HOST_CAP_NCQ ? "ncq " : "",
2157 cap & HOST_CAP_SNTF ? "sntf " : "",
2158 cap & HOST_CAP_MPS ? "ilck " : "",
2159 cap & HOST_CAP_SSS ? "stag " : "",
2160 cap & HOST_CAP_ALPM ? "pm " : "",
2161 cap & HOST_CAP_LED ? "led " : "",
2162 cap & HOST_CAP_CLO ? "clo " : "",
2163 cap & HOST_CAP_ONLY ? "only " : "",
2164 cap & HOST_CAP_PMP ? "pmp " : "",
2165 cap & HOST_CAP_FBS ? "fbs " : "",
2166 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2167 cap & HOST_CAP_SSC ? "slum " : "",
2168 cap & HOST_CAP_PART ? "part " : "",
2169 cap & HOST_CAP_CCC ? "ccc " : "",
2170 cap & HOST_CAP_EMS ? "ems " : "",
2171 cap & HOST_CAP_SXS ? "sxs " : "",
2172 cap2 & HOST_CAP2_APST ? "apst " : "",
2173 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2174 cap2 & HOST_CAP2_BOH ? "boh " : ""
2175 );
2176}
2177EXPORT_SYMBOL_GPL(ahci_print_info);
2178
2179void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2180 struct ata_port_info *pi)
2181{
2182 u8 messages;
2183 void __iomem *mmio = hpriv->mmio;
2184 u32 em_loc = readl(mmio + HOST_EM_LOC);
2185 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2186
2187 if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2188 return;
2189
2190 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2191
2192 if (messages) {
2193 /* store em_loc */
2194 hpriv->em_loc = ((em_loc >> 16) * 4);
2195 hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2196 hpriv->em_msg_type = messages;
2197 pi->flags |= ATA_FLAG_EM;
2198 if (!(em_ctl & EM_CTL_ALHD))
2199 pi->flags |= ATA_FLAG_SW_ACTIVITY;
2200 }
2201}
2202EXPORT_SYMBOL_GPL(ahci_set_em_messages);
2203
2204MODULE_AUTHOR("Jeff Garzik");
2205MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2206MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 7b5eea7e01dc..8b5ea399a4f4 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -145,12 +145,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
145 struct ata_eh_info *ehi = &ap->link.eh_info; 145 struct ata_eh_info *ehi = &ap->link.eh_info;
146 int wait = 0; 146 int wait = 0;
147 unsigned long flags; 147 unsigned long flags;
148 acpi_handle handle;
149
150 if (dev)
151 handle = dev->acpi_handle;
152 else
153 handle = ap->acpi_handle;
154 148
155 spin_lock_irqsave(ap->lock, flags); 149 spin_lock_irqsave(ap->lock, flags);
156 /* 150 /*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a3..932eaee50245 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
65#include <linux/libata.h> 65#include <linux/libata.h>
66#include <asm/byteorder.h> 66#include <asm/byteorder.h>
67#include <linux/cdrom.h> 67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
68 69
69#include "libata.h" 70#include "libata.h"
70 71
@@ -96,9 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
96static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 97static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 98
98unsigned int ata_print_id = 1; 99unsigned int ata_print_id = 1;
99static struct workqueue_struct *ata_wq;
100
101struct workqueue_struct *ata_aux_wq;
102 100
103struct ata_force_param { 101struct ata_force_param {
104 const char *name; 102 const char *name;
@@ -160,6 +158,10 @@ int libata_allow_tpm = 0;
160module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 158module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); 159MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 160
161static int atapi_an;
162module_param(atapi_an, int, 0444);
163MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
164
163MODULE_AUTHOR("Jeff Garzik"); 165MODULE_AUTHOR("Jeff Garzik");
164MODULE_DESCRIPTION("Library module for ATA devices"); 166MODULE_DESCRIPTION("Library module for ATA devices");
165MODULE_LICENSE("GPL"); 167MODULE_LICENSE("GPL");
@@ -1685,52 +1687,6 @@ unsigned long ata_id_xfermask(const u16 *id)
1685 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1687 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1686} 1688}
1687 1689
1688/**
1689 * ata_pio_queue_task - Queue port_task
1690 * @ap: The ata_port to queue port_task for
1691 * @data: data for @fn to use
1692 * @delay: delay time in msecs for workqueue function
1693 *
1694 * Schedule @fn(@data) for execution after @delay jiffies using
1695 * port_task. There is one port_task per port and it's the
1696 * user(low level driver)'s responsibility to make sure that only
1697 * one task is active at any given time.
1698 *
1699 * libata core layer takes care of synchronization between
1700 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1701 * synchronization.
1702 *
1703 * LOCKING:
1704 * Inherited from caller.
1705 */
1706void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1707{
1708 ap->port_task_data = data;
1709
1710 /* may fail if ata_port_flush_task() in progress */
1711 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1712}
1713
1714/**
1715 * ata_port_flush_task - Flush port_task
1716 * @ap: The ata_port to flush port_task for
1717 *
1718 * After this function completes, port_task is guranteed not to
1719 * be running or scheduled.
1720 *
1721 * LOCKING:
1722 * Kernel thread context (may sleep)
1723 */
1724void ata_port_flush_task(struct ata_port *ap)
1725{
1726 DPRINTK("ENTER\n");
1727
1728 cancel_rearming_delayed_work(&ap->port_task);
1729
1730 if (ata_msg_ctl(ap))
1731 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1732}
1733
1734static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1690static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1735{ 1691{
1736 struct completion *waiting = qc->private_data; 1692 struct completion *waiting = qc->private_data;
@@ -1852,7 +1808,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1852 1808
1853 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1809 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1854 1810
1855 ata_port_flush_task(ap); 1811 ata_sff_flush_pio_task(ap);
1856 1812
1857 if (!rc) { 1813 if (!rc) {
1858 spin_lock_irqsave(ap->lock, flags); 1814 spin_lock_irqsave(ap->lock, flags);
@@ -1906,22 +1862,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1906 ap->qc_active = preempted_qc_active; 1862 ap->qc_active = preempted_qc_active;
1907 ap->nr_active_links = preempted_nr_active_links; 1863 ap->nr_active_links = preempted_nr_active_links;
1908 1864
1909 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1910 * Until those drivers are fixed, we detect the condition
1911 * here, fail the command with AC_ERR_SYSTEM and reenable the
1912 * port.
1913 *
1914 * Note that this doesn't change any behavior as internal
1915 * command failure results in disabling the device in the
1916 * higher layer for LLDDs without new reset/EH callbacks.
1917 *
1918 * Kill the following code as soon as those drivers are fixed.
1919 */
1920 if (ap->flags & ATA_FLAG_DISABLED) {
1921 err_mask |= AC_ERR_SYSTEM;
1922 ata_port_probe(ap);
1923 }
1924
1925 spin_unlock_irqrestore(ap->lock, flags); 1865 spin_unlock_irqrestore(ap->lock, flags);
1926 1866
1927 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1867 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
@@ -2184,6 +2124,14 @@ retry:
2184 goto err_out; 2124 goto err_out;
2185 } 2125 }
2186 2126
2127 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
2128 ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
2129 "class=%d may_fallback=%d tried_spinup=%d\n",
2130 class, may_fallback, tried_spinup);
2131 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
2132 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
2133 }
2134
2187 /* Falling back doesn't make sense if ID data was read 2135 /* Falling back doesn't make sense if ID data was read
2188 * successfully at least once. 2136 * successfully at least once.
2189 */ 2137 */
@@ -2572,7 +2520,8 @@ int ata_dev_configure(struct ata_device *dev)
2572 * to enable ATAPI AN to discern between PHY status 2520 * to enable ATAPI AN to discern between PHY status
2573 * changed notifications and ATAPI ANs. 2521 * changed notifications and ATAPI ANs.
2574 */ 2522 */
2575 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2523 if (atapi_an &&
2524 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2576 (!sata_pmp_attached(ap) || 2525 (!sata_pmp_attached(ap) ||
2577 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2526 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2578 unsigned int err_mask; 2527 unsigned int err_mask;
@@ -2767,8 +2716,6 @@ int ata_bus_probe(struct ata_port *ap)
2767 int rc; 2716 int rc;
2768 struct ata_device *dev; 2717 struct ata_device *dev;
2769 2718
2770 ata_port_probe(ap);
2771
2772 ata_for_each_dev(dev, &ap->link, ALL) 2719 ata_for_each_dev(dev, &ap->link, ALL)
2773 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2720 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2774 2721
@@ -2796,8 +2743,7 @@ int ata_bus_probe(struct ata_port *ap)
2796 ap->ops->phy_reset(ap); 2743 ap->ops->phy_reset(ap);
2797 2744
2798 ata_for_each_dev(dev, &ap->link, ALL) { 2745 ata_for_each_dev(dev, &ap->link, ALL) {
2799 if (!(ap->flags & ATA_FLAG_DISABLED) && 2746 if (dev->class != ATA_DEV_UNKNOWN)
2800 dev->class != ATA_DEV_UNKNOWN)
2801 classes[dev->devno] = dev->class; 2747 classes[dev->devno] = dev->class;
2802 else 2748 else
2803 classes[dev->devno] = ATA_DEV_NONE; 2749 classes[dev->devno] = ATA_DEV_NONE;
@@ -2805,8 +2751,6 @@ int ata_bus_probe(struct ata_port *ap)
2805 dev->class = ATA_DEV_UNKNOWN; 2751 dev->class = ATA_DEV_UNKNOWN;
2806 } 2752 }
2807 2753
2808 ata_port_probe(ap);
2809
2810 /* read IDENTIFY page and configure devices. We have to do the identify 2754 /* read IDENTIFY page and configure devices. We have to do the identify
2811 specific sequence bass-ackwards so that PDIAG- is released by 2755 specific sequence bass-ackwards so that PDIAG- is released by
2812 the slave device */ 2756 the slave device */
@@ -2856,8 +2800,6 @@ int ata_bus_probe(struct ata_port *ap)
2856 ata_for_each_dev(dev, &ap->link, ENABLED) 2800 ata_for_each_dev(dev, &ap->link, ENABLED)
2857 return 0; 2801 return 0;
2858 2802
2859 /* no device present, disable port */
2860 ata_port_disable(ap);
2861 return -ENODEV; 2803 return -ENODEV;
2862 2804
2863 fail: 2805 fail:
@@ -2889,22 +2831,6 @@ int ata_bus_probe(struct ata_port *ap)
2889} 2831}
2890 2832
2891/** 2833/**
2892 * ata_port_probe - Mark port as enabled
2893 * @ap: Port for which we indicate enablement
2894 *
2895 * Modify @ap data structure such that the system
2896 * thinks that the entire port is enabled.
2897 *
2898 * LOCKING: host lock, or some other form of
2899 * serialization.
2900 */
2901
2902void ata_port_probe(struct ata_port *ap)
2903{
2904 ap->flags &= ~ATA_FLAG_DISABLED;
2905}
2906
2907/**
2908 * sata_print_link_status - Print SATA link status 2834 * sata_print_link_status - Print SATA link status
2909 * @link: SATA link to printk link status about 2835 * @link: SATA link to printk link status about
2910 * 2836 *
@@ -2951,26 +2877,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
2951} 2877}
2952 2878
2953/** 2879/**
2954 * ata_port_disable - Disable port.
2955 * @ap: Port to be disabled.
2956 *
2957 * Modify @ap data structure such that the system
2958 * thinks that the entire port is disabled, and should
2959 * never attempt to probe or communicate with devices
2960 * on this port.
2961 *
2962 * LOCKING: host lock, or some other form of
2963 * serialization.
2964 */
2965
2966void ata_port_disable(struct ata_port *ap)
2967{
2968 ap->link.device[0].class = ATA_DEV_NONE;
2969 ap->link.device[1].class = ATA_DEV_NONE;
2970 ap->flags |= ATA_FLAG_DISABLED;
2971}
2972
2973/**
2974 * sata_down_spd_limit - adjust SATA spd limit downward 2880 * sata_down_spd_limit - adjust SATA spd limit downward
2975 * @link: Link to adjust SATA spd limit for 2881 * @link: Link to adjust SATA spd limit for
2976 * @spd_limit: Additional limit 2882 * @spd_limit: Additional limit
@@ -3631,9 +3537,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3631 int (*check_ready)(struct ata_link *link)) 3537 int (*check_ready)(struct ata_link *link))
3632{ 3538{
3633 unsigned long start = jiffies; 3539 unsigned long start = jiffies;
3634 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3540 unsigned long nodev_deadline;
3635 int warned = 0; 3541 int warned = 0;
3636 3542
3543 /* choose which 0xff timeout to use, read comment in libata.h */
3544 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3545 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3546 else
3547 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3548
3637 /* Slave readiness can't be tested separately from master. On 3549 /* Slave readiness can't be tested separately from master. On
3638 * M/S emulation configuration, this function should be called 3550 * M/S emulation configuration, this function should be called
3639 * only on the master and it will handle both master and slave. 3551 * only on the master and it will handle both master and slave.
@@ -3651,12 +3563,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3651 if (ready > 0) 3563 if (ready > 0)
3652 return 0; 3564 return 0;
3653 3565
3654 /* -ENODEV could be transient. Ignore -ENODEV if link 3566 /*
3567 * -ENODEV could be transient. Ignore -ENODEV if link
3655 * is online. Also, some SATA devices take a long 3568 * is online. Also, some SATA devices take a long
3656 * time to clear 0xff after reset. For example, 3569 * time to clear 0xff after reset. Wait for
3657 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum 3570 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3658 * GoVault needs even more than that. Wait for 3571 * offline.
3659 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3660 * 3572 *
3661 * Note that some PATA controllers (pata_ali) explode 3573 * Note that some PATA controllers (pata_ali) explode
3662 * if status register is read more than once when 3574 * if status register is read more than once when
@@ -4205,9 +4117,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4205 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4117 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4206 ata_dev_printk(dev, KERN_WARNING, 4118 ata_dev_printk(dev, KERN_WARNING,
4207 "new n_sectors matches native, probably " 4119 "new n_sectors matches native, probably "
4208 "late HPA unlock, continuing\n"); 4120 "late HPA unlock, n_sectors updated\n");
4209 /* keep using the old n_sectors */ 4121 /* use the larger n_sectors */
4210 dev->n_sectors = n_sectors;
4211 return 0; 4122 return 0;
4212 } 4123 }
4213 4124
@@ -4254,15 +4165,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4254 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4165 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4255 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4166 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4256 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4167 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4257 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 4168 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4258 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4259 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4169 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4260 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4170 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4261 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4171 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4262 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4172 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4263 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4173 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4264 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 4174 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4265 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4266 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4175 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4267 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4176 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4268 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4177 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
@@ -4298,70 +4207,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4298 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4207 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4299 4208
4300 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4209 /* Seagate NCQ + FLUSH CACHE firmware bug */
4301 { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | 4210 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4302 ATA_HORKAGE_FIRMWARE_WARN },
4303 { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ |
4304 ATA_HORKAGE_FIRMWARE_WARN },
4305 { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ |
4306 ATA_HORKAGE_FIRMWARE_WARN },
4307 { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ |
4308 ATA_HORKAGE_FIRMWARE_WARN },
4309 { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ |
4310 ATA_HORKAGE_FIRMWARE_WARN }, 4211 ATA_HORKAGE_FIRMWARE_WARN },
4311 4212
4312 { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | 4213 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4313 ATA_HORKAGE_FIRMWARE_WARN },
4314 { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ |
4315 ATA_HORKAGE_FIRMWARE_WARN },
4316 { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ |
4317 ATA_HORKAGE_FIRMWARE_WARN },
4318 { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ |
4319 ATA_HORKAGE_FIRMWARE_WARN },
4320 { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ |
4321 ATA_HORKAGE_FIRMWARE_WARN }, 4214 ATA_HORKAGE_FIRMWARE_WARN },
4322 4215
4323 { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | 4216 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4324 ATA_HORKAGE_FIRMWARE_WARN },
4325 { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ |
4326 ATA_HORKAGE_FIRMWARE_WARN },
4327 { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ |
4328 ATA_HORKAGE_FIRMWARE_WARN },
4329 { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ |
4330 ATA_HORKAGE_FIRMWARE_WARN },
4331 { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ |
4332 ATA_HORKAGE_FIRMWARE_WARN }, 4217 ATA_HORKAGE_FIRMWARE_WARN },
4333 4218
4334 { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | 4219 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4335 ATA_HORKAGE_FIRMWARE_WARN },
4336 { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ |
4337 ATA_HORKAGE_FIRMWARE_WARN },
4338 { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ |
4339 ATA_HORKAGE_FIRMWARE_WARN },
4340 { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ |
4341 ATA_HORKAGE_FIRMWARE_WARN },
4342 { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ |
4343 ATA_HORKAGE_FIRMWARE_WARN },
4344
4345 { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ |
4346 ATA_HORKAGE_FIRMWARE_WARN },
4347 { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ |
4348 ATA_HORKAGE_FIRMWARE_WARN },
4349 { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ |
4350 ATA_HORKAGE_FIRMWARE_WARN },
4351 { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ |
4352 ATA_HORKAGE_FIRMWARE_WARN },
4353 { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ |
4354 ATA_HORKAGE_FIRMWARE_WARN },
4355
4356 { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ |
4357 ATA_HORKAGE_FIRMWARE_WARN },
4358 { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ |
4359 ATA_HORKAGE_FIRMWARE_WARN },
4360 { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ |
4361 ATA_HORKAGE_FIRMWARE_WARN },
4362 { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ |
4363 ATA_HORKAGE_FIRMWARE_WARN },
4364 { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ |
4365 ATA_HORKAGE_FIRMWARE_WARN }, 4220 ATA_HORKAGE_FIRMWARE_WARN },
4366 4221
4367 /* Blacklist entries taken from Silicon Image 3124/3132 4222 /* Blacklist entries taken from Silicon Image 3124/3132
@@ -4390,12 +4245,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4390 /* Devices which get the IVB wrong */ 4245 /* Devices which get the IVB wrong */
4391 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4246 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4392 /* Maybe we should just blacklist TSSTcorp... */ 4247 /* Maybe we should just blacklist TSSTcorp... */
4393 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, }, 4248 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4394 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
4395 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4396 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4397 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4398 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4399 4249
4400 /* Devices that do not need bridging limits applied */ 4250 /* Devices that do not need bridging limits applied */
4401 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4251 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
@@ -4413,29 +4263,73 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4413 { } 4263 { }
4414}; 4264};
4415 4265
4416static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 4266/**
4267 * glob_match - match a text string against a glob-style pattern
4268 * @text: the string to be examined
4269 * @pattern: the glob-style pattern to be matched against
4270 *
4271 * Either/both of text and pattern can be empty strings.
4272 *
4273 * Match text against a glob-style pattern, with wildcards and simple sets:
4274 *
4275 * ? matches any single character.
4276 * * matches any run of characters.
4277 * [xyz] matches a single character from the set: x, y, or z.
4278 * [a-d] matches a single character from the range: a, b, c, or d.
4279 * [a-d0-9] matches a single character from either range.
4280 *
4281 * The special characters ?, [, -, or *, can be matched using a set, eg. [*]
4282 * Behaviour with malformed patterns is undefined, though generally reasonable.
4283 *
4284 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4285 *
4286 * This function uses one level of recursion per '*' in pattern.
4287 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4288 * this will not cause stack problems for any reasonable use here.
4289 *
4290 * RETURNS:
4291 * 0 on match, 1 otherwise.
4292 */
4293static int glob_match (const char *text, const char *pattern)
4417{ 4294{
4418 const char *p; 4295 do {
4419 int len; 4296 /* Match single character or a '?' wildcard */
4420 4297 if (*text == *pattern || *pattern == '?') {
4421 /* 4298 if (!*pattern++)
4422 * check for trailing wildcard: *\0 4299 return 0; /* End of both strings: match */
4423 */ 4300 } else {
4424 p = strchr(patt, wildchar); 4301 /* Match single char against a '[' bracketed ']' pattern set */
4425 if (p && ((*(p + 1)) == 0)) 4302 if (!*text || *pattern != '[')
4426 len = p - patt; 4303 break; /* Not a pattern set */
4427 else { 4304 while (*++pattern && *pattern != ']' && *text != *pattern) {
4428 len = strlen(name); 4305 if (*pattern == '-' && *(pattern - 1) != '[')
4429 if (!len) { 4306 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4430 if (!*patt) 4307 ++pattern;
4431 return 0; 4308 break;
4432 return -1; 4309 }
4310 }
4311 if (!*pattern || *pattern == ']')
4312 return 1; /* No match */
4313 while (*pattern && *pattern++ != ']');
4314 }
4315 } while (*++text && *pattern);
4316
4317 /* Match any run of chars against a '*' wildcard */
4318 if (*pattern == '*') {
4319 if (!*++pattern)
4320 return 0; /* Match: avoid recursion at end of pattern */
4321 /* Loop to handle additional pattern chars after the wildcard */
4322 while (*text) {
4323 if (glob_match(text, pattern) == 0)
4324 return 0; /* Remainder matched */
4325 ++text; /* Absorb (match) this char and try again */
4433 } 4326 }
4434 } 4327 }
4435 4328 if (!*text && !*pattern)
4436 return strncmp(patt, name, len); 4329 return 0; /* End of both strings: match */
4330 return 1; /* No match */
4437} 4331}
4438 4332
4439static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4333static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4440{ 4334{
4441 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4335 unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4446,10 +4340,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4446 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4340 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4447 4341
4448 while (ad->model_num) { 4342 while (ad->model_num) {
4449 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 4343 if (!glob_match(model_num, ad->model_num)) {
4450 if (ad->model_rev == NULL) 4344 if (ad->model_rev == NULL)
4451 return ad->horkage; 4345 return ad->horkage;
4452 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 4346 if (!glob_match(model_rev, ad->model_rev))
4453 return ad->horkage; 4347 return ad->horkage;
4454 } 4348 }
4455 ad++; 4349 ad++;
@@ -5217,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5217 qc->flags |= ATA_QCFLAG_ACTIVE; 5111 qc->flags |= ATA_QCFLAG_ACTIVE;
5218 ap->qc_active |= 1 << qc->tag; 5112 ap->qc_active |= 1 << qc->tag;
5219 5113
5220 /* We guarantee to LLDs that they will have at least one 5114 /*
5115 * We guarantee to LLDs that they will have at least one
5221 * non-zero sg if the command is a data command. 5116 * non-zero sg if the command is a data command.
5222 */ 5117 */
5223 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5118 if (WARN_ON_ONCE(ata_is_data(prot) &&
5119 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5120 goto sys_err;
5224 5121
5225 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5122 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5226 (ap->flags & ATA_FLAG_PIO_DMA))) 5123 (ap->flags & ATA_FLAG_PIO_DMA)))
5227 if (ata_sg_setup(qc)) 5124 if (ata_sg_setup(qc))
5228 goto sg_err; 5125 goto sys_err;
5229 5126
5230 /* if device is sleeping, schedule reset and abort the link */ 5127 /* if device is sleeping, schedule reset and abort the link */
5231 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5242,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5242 goto err; 5139 goto err;
5243 return; 5140 return;
5244 5141
5245sg_err: 5142sys_err:
5246 qc->err_mask |= AC_ERR_SYSTEM; 5143 qc->err_mask |= AC_ERR_SYSTEM;
5247err: 5144err:
5248 ata_qc_complete(qc); 5145 ata_qc_complete(qc);
@@ -5521,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5521 */ 5418 */
5522int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5523{ 5420{
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5524 int rc; 5422 int rc;
5525 5423
5526 /* 5424 /*
@@ -5529,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5529 */ 5427 */
5530 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5531 5429
5532 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
5431 * On some hardware, device fails to respond after spun down
5432 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5533 if (rc == 0) 5442 if (rc == 0)
5534 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5535 return rc; 5444 return rc;
@@ -5558,30 +5467,6 @@ void ata_host_resume(struct ata_host *host)
5558#endif 5467#endif
5559 5468
5560/** 5469/**
5561 * ata_port_start - Set port up for dma.
5562 * @ap: Port to initialize
5563 *
5564 * Called just after data structures for each port are
5565 * initialized. Allocates space for PRD table.
5566 *
5567 * May be used as the port_start() entry in ata_port_operations.
5568 *
5569 * LOCKING:
5570 * Inherited from caller.
5571 */
5572int ata_port_start(struct ata_port *ap)
5573{
5574 struct device *dev = ap->dev;
5575
5576 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5577 GFP_KERNEL);
5578 if (!ap->prd)
5579 return -ENOMEM;
5580
5581 return 0;
5582}
5583
5584/**
5585 * ata_dev_init - Initialize an ata_device structure 5470 * ata_dev_init - Initialize an ata_device structure
5586 * @dev: Device structure to initialize 5471 * @dev: Device structure to initialize
5587 * 5472 *
@@ -5709,12 +5594,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5709 5594
5710 ap->pflags |= ATA_PFLAG_INITIALIZING; 5595 ap->pflags |= ATA_PFLAG_INITIALIZING;
5711 ap->lock = &host->lock; 5596 ap->lock = &host->lock;
5712 ap->flags = ATA_FLAG_DISABLED;
5713 ap->print_id = -1; 5597 ap->print_id = -1;
5714 ap->ctl = ATA_DEVCTL_OBS;
5715 ap->host = host; 5598 ap->host = host;
5716 ap->dev = host->dev; 5599 ap->dev = host->dev;
5717 ap->last_ctl = 0xFF;
5718 5600
5719#if defined(ATA_VERBOSE_DEBUG) 5601#if defined(ATA_VERBOSE_DEBUG)
5720 /* turn on all debugging levels */ 5602 /* turn on all debugging levels */
@@ -5725,11 +5607,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5725 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5607 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5726#endif 5608#endif
5727 5609
5728#ifdef CONFIG_ATA_SFF 5610 mutex_init(&ap->scsi_scan_mutex);
5729 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5730#else
5731 INIT_DELAYED_WORK(&ap->port_task, NULL);
5732#endif
5733 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5611 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5734 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5612 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5735 INIT_LIST_HEAD(&ap->eh_done_q); 5613 INIT_LIST_HEAD(&ap->eh_done_q);
@@ -5747,6 +5625,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5747 ap->stats.unhandled_irq = 1; 5625 ap->stats.unhandled_irq = 1;
5748 ap->stats.idle_irq = 1; 5626 ap->stats.idle_irq = 1;
5749#endif 5627#endif
5628 ata_sff_port_init(ap);
5629
5750 return ap; 5630 return ap;
5751} 5631}
5752 5632
@@ -6138,8 +6018,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
6138 struct ata_eh_info *ehi = &ap->link.eh_info; 6018 struct ata_eh_info *ehi = &ap->link.eh_info;
6139 unsigned long flags; 6019 unsigned long flags;
6140 6020
6141 ata_port_probe(ap);
6142
6143 /* kick EH for boot probing */ 6021 /* kick EH for boot probing */
6144 spin_lock_irqsave(ap->lock, flags); 6022 spin_lock_irqsave(ap->lock, flags);
6145 6023
@@ -6503,6 +6381,7 @@ static int __init ata_parse_force_one(char **cur,
6503 { "3.0Gbps", .spd_limit = 2 }, 6381 { "3.0Gbps", .spd_limit = 2 },
6504 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6382 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6505 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6383 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6384 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6506 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6385 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6507 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6386 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6508 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6387 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
@@ -6663,62 +6542,34 @@ static void __init ata_parse_force_param(void)
6663 6542
6664static int __init ata_init(void) 6543static int __init ata_init(void)
6665{ 6544{
6666 ata_parse_force_param(); 6545 int rc = -ENOMEM;
6667 6546
6668 /* 6547 ata_parse_force_param();
6669 * FIXME: In UP case, there is only one workqueue thread and if you
6670 * have more than one PIO device, latency is bloody awful, with
6671 * occasional multi-second "hiccups" as one PIO device waits for
6672 * another. It's an ugly wart that users DO occasionally complain
6673 * about; luckily most users have at most one PIO polled device.
6674 */
6675 ata_wq = create_workqueue("ata");
6676 if (!ata_wq)
6677 goto free_force_tbl;
6678 6548
6679 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6549 rc = ata_sff_init();
6680 if (!ata_aux_wq) 6550 if (rc) {
6681 goto free_wq; 6551 kfree(ata_force_tbl);
6552 return rc;
6553 }
6682 6554
6683 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6555 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6684 return 0; 6556 return 0;
6685
6686free_wq:
6687 destroy_workqueue(ata_wq);
6688free_force_tbl:
6689 kfree(ata_force_tbl);
6690 return -ENOMEM;
6691} 6557}
6692 6558
6693static void __exit ata_exit(void) 6559static void __exit ata_exit(void)
6694{ 6560{
6561 ata_sff_exit();
6695 kfree(ata_force_tbl); 6562 kfree(ata_force_tbl);
6696 destroy_workqueue(ata_wq);
6697 destroy_workqueue(ata_aux_wq);
6698} 6563}
6699 6564
6700subsys_initcall(ata_init); 6565subsys_initcall(ata_init);
6701module_exit(ata_exit); 6566module_exit(ata_exit);
6702 6567
6703static unsigned long ratelimit_time; 6568static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6704static DEFINE_SPINLOCK(ata_ratelimit_lock);
6705 6569
6706int ata_ratelimit(void) 6570int ata_ratelimit(void)
6707{ 6571{
6708 int rc; 6572 return __ratelimit(&ratelimit);
6709 unsigned long flags;
6710
6711 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6712
6713 if (time_after(jiffies, ratelimit_time)) {
6714 rc = 1;
6715 ratelimit_time = jiffies + (HZ/5);
6716 } else
6717 rc = 0;
6718
6719 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6720
6721 return rc;
6722} 6573}
6723 6574
6724/** 6575/**
@@ -6805,6 +6656,7 @@ EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6805EXPORT_SYMBOL_GPL(ata_link_next); 6656EXPORT_SYMBOL_GPL(ata_link_next);
6806EXPORT_SYMBOL_GPL(ata_dev_next); 6657EXPORT_SYMBOL_GPL(ata_dev_next);
6807EXPORT_SYMBOL_GPL(ata_std_bios_param); 6658EXPORT_SYMBOL_GPL(ata_std_bios_param);
6659EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6808EXPORT_SYMBOL_GPL(ata_host_init); 6660EXPORT_SYMBOL_GPL(ata_host_init);
6809EXPORT_SYMBOL_GPL(ata_host_alloc); 6661EXPORT_SYMBOL_GPL(ata_host_alloc);
6810EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6662EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
@@ -6826,11 +6678,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6826EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6678EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6827EXPORT_SYMBOL_GPL(ata_mode_string); 6679EXPORT_SYMBOL_GPL(ata_mode_string);
6828EXPORT_SYMBOL_GPL(ata_id_xfermask); 6680EXPORT_SYMBOL_GPL(ata_id_xfermask);
6829EXPORT_SYMBOL_GPL(ata_port_start);
6830EXPORT_SYMBOL_GPL(ata_do_set_mode); 6681EXPORT_SYMBOL_GPL(ata_do_set_mode);
6831EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6682EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6832EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6683EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6833EXPORT_SYMBOL_GPL(ata_port_probe);
6834EXPORT_SYMBOL_GPL(ata_dev_disable); 6684EXPORT_SYMBOL_GPL(ata_dev_disable);
6835EXPORT_SYMBOL_GPL(sata_set_spd); 6685EXPORT_SYMBOL_GPL(sata_set_spd);
6836EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6686EXPORT_SYMBOL_GPL(ata_wait_after_reset);
@@ -6842,7 +6692,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
6842EXPORT_SYMBOL_GPL(ata_std_postreset); 6692EXPORT_SYMBOL_GPL(ata_std_postreset);
6843EXPORT_SYMBOL_GPL(ata_dev_classify); 6693EXPORT_SYMBOL_GPL(ata_dev_classify);
6844EXPORT_SYMBOL_GPL(ata_dev_pair); 6694EXPORT_SYMBOL_GPL(ata_dev_pair);
6845EXPORT_SYMBOL_GPL(ata_port_disable);
6846EXPORT_SYMBOL_GPL(ata_ratelimit); 6695EXPORT_SYMBOL_GPL(ata_ratelimit);
6847EXPORT_SYMBOL_GPL(ata_wait_register); 6696EXPORT_SYMBOL_GPL(ata_wait_register);
6848EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6697EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6864,7 +6713,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
6864EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6713EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6865EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6714EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6866 6715
6867EXPORT_SYMBOL_GPL(ata_pio_queue_task);
6868EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6716EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6869EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6717EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6870EXPORT_SYMBOL_GPL(ata_timing_compute); 6718EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 228740f356c9..e48302eae55f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)
550 550
551 DPRINTK("ENTER\n"); 551 DPRINTK("ENTER\n");
552 552
553 /* synchronize with port task */ 553 /* make sure sff pio task is not running */
554 ata_port_flush_task(ap); 554 ata_sff_flush_pio_task(ap);
555 555
556 /* synchronize with host lock and sort out timeouts */ 556 /* synchronize with host lock and sort out timeouts */
557 557
@@ -727,7 +727,7 @@ void ata_scsi_error(struct Scsi_Host *host)
727 if (ap->pflags & ATA_PFLAG_LOADING) 727 if (ap->pflags & ATA_PFLAG_LOADING)
728 ap->pflags &= ~ATA_PFLAG_LOADING; 728 ap->pflags &= ~ATA_PFLAG_LOADING;
729 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 729 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
730 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); 730 schedule_delayed_work(&ap->hotplug_task, 0);
731 731
732 if (ap->pflags & ATA_PFLAG_RECOVERED) 732 if (ap->pflags & ATA_PFLAG_RECOVERED)
733 ata_port_printk(ap, KERN_INFO, "EH complete\n"); 733 ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -2214,6 +2214,7 @@ const char *ata_get_cmd_descript(u8 command)
2214 { ATA_CMD_SMART, "SMART" }, 2214 { ATA_CMD_SMART, "SMART" },
2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2217 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2217 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2218 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2218 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2219 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2219 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 2220 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
@@ -2944,7 +2945,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2944 ehc->i.flags |= ATA_EHI_SETMODE; 2945 ehc->i.flags |= ATA_EHI_SETMODE;
2945 2946
2946 /* schedule the scsi_rescan_device() here */ 2947 /* schedule the scsi_rescan_device() here */
2947 queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2948 schedule_work(&(ap->scsi_rescan_task));
2948 } else if (dev->class == ATA_DEV_UNKNOWN && 2949 } else if (dev->class == ATA_DEV_UNKNOWN &&
2949 ehc->tries[dev->devno] && 2950 ehc->tries[dev->devno] &&
2950 ata_class_enabled(ehc->classes[dev->devno])) { 2951 ata_class_enabled(ehc->classes[dev->devno])) {
@@ -3234,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
3234 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3235 return 1; 3236 return 1;
3236 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3237 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3238 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3239 return 0; 3244 return 0;
@@ -3684,7 +3689,7 @@ void ata_std_error_handler(struct ata_port *ap)
3684 ata_reset_fn_t hardreset = ops->hardreset; 3689 ata_reset_fn_t hardreset = ops->hardreset;
3685 3690
3686 /* ignore built-in hardreset if SCR access is not available */ 3691 /* ignore built-in hardreset if SCR access is not available */
3687 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 3692 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3688 hardreset = NULL; 3693 hardreset = NULL;
3689 3694
3690 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3695 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed86..224faabd7b7e 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
231 return "<unknown>"; 231 return "<unknown>";
232} 232}
233 233
234#define PMP_GSCR_SII_POL 129
235
234static int sata_pmp_configure(struct ata_device *dev, int print_info) 236static int sata_pmp_configure(struct ata_device *dev, int print_info)
235{ 237{
236 struct ata_port *ap = dev->link->ap; 238 struct ata_port *ap = dev->link->ap;
237 u32 *gscr = dev->gscr; 239 u32 *gscr = dev->gscr;
240 u16 vendor = sata_pmp_gscr_vendor(gscr);
241 u16 devid = sata_pmp_gscr_devid(gscr);
238 unsigned int err_mask = 0; 242 unsigned int err_mask = 0;
239 const char *reason; 243 const char *reason;
240 int nr_ports, rc; 244 int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
260 goto fail; 264 goto fail;
261 } 265 }
262 266
267 /* Disable sending Early R_OK.
268 * With "cached read" HDD testing and multiple ports busy on a SATA
269 * host controller, 3726 PMP will very rarely drop a deferred
270 * R_OK that was intended for the host. Symptom will be all
271 * 5 drives under test will timeout, get reset, and recover.
272 */
273 if (vendor == 0x1095 && devid == 0x3726) {
274 u32 reg;
275
276 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
277 if (err_mask) {
278 rc = -EIO;
279 reason = "failed to read Sil3726 Private Register";
280 goto fail;
281 }
282 reg &= ~0x1;
283 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
284 if (err_mask) {
285 rc = -EIO;
286 reason = "failed to write Sil3726 Private Register";
287 goto fail;
288 }
289 }
290
263 if (print_info) { 291 if (print_info) {
264 ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, " 292 ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
265 "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n", 293 "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
266 sata_pmp_spec_rev_str(gscr), 294 sata_pmp_spec_rev_str(gscr), vendor, devid,
267 sata_pmp_gscr_vendor(gscr),
268 sata_pmp_gscr_devid(gscr),
269 sata_pmp_gscr_rev(gscr), 295 sata_pmp_gscr_rev(gscr),
270 nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN], 296 nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
271 gscr[SATA_PMP_GSCR_FEAT]); 297 gscr[SATA_PMP_GSCR_FEAT]);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0088cdeb0b1e..a89172c100f5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -415,6 +415,35 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
415} 415}
416 416
417/** 417/**
418 * ata_scsi_unlock_native_capacity - unlock native capacity
419 * @sdev: SCSI device to adjust device capacity for
420 *
421 * This function is called if a partition on @sdev extends beyond
422 * the end of the device. It requests EH to unlock HPA.
423 *
424 * LOCKING:
425 * Defined by the SCSI layer. Might sleep.
426 */
427void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
428{
429 struct ata_port *ap = ata_shost_to_port(sdev->host);
430 struct ata_device *dev;
431 unsigned long flags;
432
433 spin_lock_irqsave(ap->lock, flags);
434
435 dev = ata_scsi_find_dev(ap, sdev);
436 if (dev && dev->n_sectors < dev->n_native_sectors) {
437 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
438 dev->link->eh_info.action |= ATA_EH_RESET;
439 ata_port_schedule_eh(ap);
440 }
441
442 spin_unlock_irqrestore(ap->lock, flags);
443 ata_port_wait_eh(ap);
444}
445
446/**
418 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl 447 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
419 * @ap: target port 448 * @ap: target port
420 * @sdev: SCSI device to get identify data for 449 * @sdev: SCSI device to get identify data for
@@ -1082,10 +1111,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
1082 */ 1111 */
1083static int atapi_drain_needed(struct request *rq) 1112static int atapi_drain_needed(struct request *rq)
1084{ 1113{
1085 if (likely(!blk_pc_request(rq))) 1114 if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
1086 return 0; 1115 return 0;
1087 1116
1088 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) 1117 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
1089 return 0; 1118 return 0;
1090 1119
1091 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; 1120 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -3345,9 +3374,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3345 struct ata_link *link; 3374 struct ata_link *link;
3346 struct ata_device *dev; 3375 struct ata_device *dev;
3347 3376
3348 if (ap->flags & ATA_FLAG_DISABLED)
3349 return;
3350
3351 repeat: 3377 repeat:
3352 ata_for_each_link(link, ap, EDGE) { 3378 ata_for_each_link(link, ap, EDGE) {
3353 ata_for_each_dev(dev, link, ENABLED) { 3379 ata_for_each_dev(dev, link, ENABLED) {
@@ -3409,7 +3435,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3409 " switching to async\n"); 3435 " switching to async\n");
3410 } 3436 }
3411 3437
3412 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 3438 queue_delayed_work(system_long_wq, &ap->hotplug_task,
3413 round_jiffies_relative(HZ)); 3439 round_jiffies_relative(HZ));
3414} 3440}
3415 3441
@@ -3556,6 +3582,7 @@ void ata_scsi_hotplug(struct work_struct *work)
3556 } 3582 }
3557 3583
3558 DPRINTK("ENTER\n"); 3584 DPRINTK("ENTER\n");
3585 mutex_lock(&ap->scsi_scan_mutex);
3559 3586
3560 /* Unplug detached devices. We cannot use link iterator here 3587 /* Unplug detached devices. We cannot use link iterator here
3561 * because PMP links have to be scanned even if PMP is 3588 * because PMP links have to be scanned even if PMP is
@@ -3569,6 +3596,7 @@ void ata_scsi_hotplug(struct work_struct *work)
3569 /* scan for new ones */ 3596 /* scan for new ones */
3570 ata_scsi_scan_host(ap, 0); 3597 ata_scsi_scan_host(ap, 0);
3571 3598
3599 mutex_unlock(&ap->scsi_scan_mutex);
3572 DPRINTK("EXIT\n"); 3600 DPRINTK("EXIT\n");
3573} 3601}
3574 3602
@@ -3647,9 +3675,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3647 * @work: Pointer to ATA port to perform scsi_rescan_device() 3675 * @work: Pointer to ATA port to perform scsi_rescan_device()
3648 * 3676 *
3649 * After ATA pass thru (SAT) commands are executed successfully, 3677 * After ATA pass thru (SAT) commands are executed successfully,
3650 * libata need to propagate the changes to SCSI layer. This 3678 * libata need to propagate the changes to SCSI layer.
3651 * function must be executed from ata_aux_wq such that sdev
3652 * attach/detach don't race with rescan.
3653 * 3679 *
3654 * LOCKING: 3680 * LOCKING:
3655 * Kernel thread context (may sleep). 3681 * Kernel thread context (may sleep).
@@ -3662,6 +3688,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
3662 struct ata_device *dev; 3688 struct ata_device *dev;
3663 unsigned long flags; 3689 unsigned long flags;
3664 3690
3691 mutex_lock(&ap->scsi_scan_mutex);
3665 spin_lock_irqsave(ap->lock, flags); 3692 spin_lock_irqsave(ap->lock, flags);
3666 3693
3667 ata_for_each_link(link, ap, EDGE) { 3694 ata_for_each_link(link, ap, EDGE) {
@@ -3681,6 +3708,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
3681 } 3708 }
3682 3709
3683 spin_unlock_irqrestore(ap->lock, flags); 3710 spin_unlock_irqrestore(ap->lock, flags);
3711 mutex_unlock(&ap->scsi_scan_mutex);
3684} 3712}
3685 3713
3686/** 3714/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
40 40
41#include "libata.h" 41#include "libata.h"
42 42
43static struct workqueue_struct *ata_sff_wq;
44
43const struct ata_port_operations ata_sff_port_ops = { 45const struct ata_port_operations ata_sff_port_ops = {
44 .inherits = &ata_base_port_ops, 46 .inherits = &ata_base_port_ops,
45 47
46 .qc_prep = ata_sff_qc_prep, 48 .qc_prep = ata_noop_qc_prep,
47 .qc_issue = ata_sff_qc_issue, 49 .qc_issue = ata_sff_qc_issue,
48 .qc_fill_rtf = ata_sff_qc_fill_rtf, 50 .qc_fill_rtf = ata_sff_qc_fill_rtf,
49 51
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
53 .softreset = ata_sff_softreset, 55 .softreset = ata_sff_softreset,
54 .hardreset = sata_sff_hardreset, 56 .hardreset = sata_sff_hardreset,
55 .postreset = ata_sff_postreset, 57 .postreset = ata_sff_postreset,
56 .drain_fifo = ata_sff_drain_fifo,
57 .error_handler = ata_sff_error_handler, 58 .error_handler = ata_sff_error_handler,
58 .post_internal_cmd = ata_sff_post_internal_cmd,
59 59
60 .sff_dev_select = ata_sff_dev_select, 60 .sff_dev_select = ata_sff_dev_select,
61 .sff_check_status = ata_sff_check_status, 61 .sff_check_status = ata_sff_check_status,
@@ -63,178 +63,12 @@ const struct ata_port_operations ata_sff_port_ops = {
63 .sff_tf_read = ata_sff_tf_read, 63 .sff_tf_read = ata_sff_tf_read,
64 .sff_exec_command = ata_sff_exec_command, 64 .sff_exec_command = ata_sff_exec_command,
65 .sff_data_xfer = ata_sff_data_xfer, 65 .sff_data_xfer = ata_sff_data_xfer,
66 .sff_irq_on = ata_sff_irq_on, 66 .sff_drain_fifo = ata_sff_drain_fifo,
67 .sff_irq_clear = ata_sff_irq_clear,
68 67
69 .lost_interrupt = ata_sff_lost_interrupt, 68 .lost_interrupt = ata_sff_lost_interrupt,
70
71 .port_start = ata_sff_port_start,
72}; 69};
73EXPORT_SYMBOL_GPL(ata_sff_port_ops); 70EXPORT_SYMBOL_GPL(ata_sff_port_ops);
74 71
75const struct ata_port_operations ata_bmdma_port_ops = {
76 .inherits = &ata_sff_port_ops,
77
78 .mode_filter = ata_bmdma_mode_filter,
79
80 .bmdma_setup = ata_bmdma_setup,
81 .bmdma_start = ata_bmdma_start,
82 .bmdma_stop = ata_bmdma_stop,
83 .bmdma_status = ata_bmdma_status,
84};
85EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
86
87const struct ata_port_operations ata_bmdma32_port_ops = {
88 .inherits = &ata_bmdma_port_ops,
89
90 .sff_data_xfer = ata_sff_data_xfer32,
91 .port_start = ata_sff_port_start32,
92};
93EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
94
95/**
96 * ata_fill_sg - Fill PCI IDE PRD table
97 * @qc: Metadata associated with taskfile to be transferred
98 *
99 * Fill PCI IDE PRD (scatter-gather) table with segments
100 * associated with the current disk command.
101 *
102 * LOCKING:
103 * spin_lock_irqsave(host lock)
104 *
105 */
106static void ata_fill_sg(struct ata_queued_cmd *qc)
107{
108 struct ata_port *ap = qc->ap;
109 struct scatterlist *sg;
110 unsigned int si, pi;
111
112 pi = 0;
113 for_each_sg(qc->sg, sg, qc->n_elem, si) {
114 u32 addr, offset;
115 u32 sg_len, len;
116
117 /* determine if physical DMA addr spans 64K boundary.
118 * Note h/w doesn't support 64-bit, so we unconditionally
119 * truncate dma_addr_t to u32.
120 */
121 addr = (u32) sg_dma_address(sg);
122 sg_len = sg_dma_len(sg);
123
124 while (sg_len) {
125 offset = addr & 0xffff;
126 len = sg_len;
127 if ((offset + sg_len) > 0x10000)
128 len = 0x10000 - offset;
129
130 ap->prd[pi].addr = cpu_to_le32(addr);
131 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
132 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
133
134 pi++;
135 sg_len -= len;
136 addr += len;
137 }
138 }
139
140 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
141}
142
143/**
144 * ata_fill_sg_dumb - Fill PCI IDE PRD table
145 * @qc: Metadata associated with taskfile to be transferred
146 *
147 * Fill PCI IDE PRD (scatter-gather) table with segments
148 * associated with the current disk command. Perform the fill
149 * so that we avoid writing any length 64K records for
150 * controllers that don't follow the spec.
151 *
152 * LOCKING:
153 * spin_lock_irqsave(host lock)
154 *
155 */
156static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
157{
158 struct ata_port *ap = qc->ap;
159 struct scatterlist *sg;
160 unsigned int si, pi;
161
162 pi = 0;
163 for_each_sg(qc->sg, sg, qc->n_elem, si) {
164 u32 addr, offset;
165 u32 sg_len, len, blen;
166
167 /* determine if physical DMA addr spans 64K boundary.
168 * Note h/w doesn't support 64-bit, so we unconditionally
169 * truncate dma_addr_t to u32.
170 */
171 addr = (u32) sg_dma_address(sg);
172 sg_len = sg_dma_len(sg);
173
174 while (sg_len) {
175 offset = addr & 0xffff;
176 len = sg_len;
177 if ((offset + sg_len) > 0x10000)
178 len = 0x10000 - offset;
179
180 blen = len & 0xffff;
181 ap->prd[pi].addr = cpu_to_le32(addr);
182 if (blen == 0) {
183 /* Some PATA chipsets like the CS5530 can't
184 cope with 0x0000 meaning 64K as the spec
185 says */
186 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
187 blen = 0x8000;
188 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
189 }
190 ap->prd[pi].flags_len = cpu_to_le32(blen);
191 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
192
193 pi++;
194 sg_len -= len;
195 addr += len;
196 }
197 }
198
199 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
200}
201
202/**
203 * ata_sff_qc_prep - Prepare taskfile for submission
204 * @qc: Metadata associated with taskfile to be prepared
205 *
206 * Prepare ATA taskfile for submission.
207 *
208 * LOCKING:
209 * spin_lock_irqsave(host lock)
210 */
211void ata_sff_qc_prep(struct ata_queued_cmd *qc)
212{
213 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
214 return;
215
216 ata_fill_sg(qc);
217}
218EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
219
220/**
221 * ata_sff_dumb_qc_prep - Prepare taskfile for submission
222 * @qc: Metadata associated with taskfile to be prepared
223 *
224 * Prepare ATA taskfile for submission.
225 *
226 * LOCKING:
227 * spin_lock_irqsave(host lock)
228 */
229void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
230{
231 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
232 return;
233
234 ata_fill_sg_dumb(qc);
235}
236EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
237
238/** 72/**
239 * ata_sff_check_status - Read device status reg & clear interrupt 73 * ata_sff_check_status - Read device status reg & clear interrupt
240 * @ap: port where the device is 74 * @ap: port where the device is
@@ -446,6 +280,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
446EXPORT_SYMBOL_GPL(ata_sff_wait_ready); 280EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
447 281
448/** 282/**
283 * ata_sff_set_devctl - Write device control reg
284 * @ap: port where the device is
285 * @ctl: value to write
286 *
287 * Writes ATA taskfile device control register.
288 *
289 * Note: may NOT be used as the sff_set_devctl() entry in
290 * ata_port_operations.
291 *
292 * LOCKING:
293 * Inherited from caller.
294 */
295static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
296{
297 if (ap->ops->sff_set_devctl)
298 ap->ops->sff_set_devctl(ap, ctl);
299 else
300 iowrite8(ctl, ap->ioaddr.ctl_addr);
301}
302
303/**
449 * ata_sff_dev_select - Select device 0/1 on ATA bus 304 * ata_sff_dev_select - Select device 0/1 on ATA bus
450 * @ap: ATA channel to manipulate 305 * @ap: ATA channel to manipulate
451 * @device: ATA device (numbered from zero) to select 306 * @device: ATA device (numbered from zero) to select
@@ -491,7 +346,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
491 * LOCKING: 346 * LOCKING:
492 * caller. 347 * caller.
493 */ 348 */
494void ata_dev_select(struct ata_port *ap, unsigned int device, 349static void ata_dev_select(struct ata_port *ap, unsigned int device,
495 unsigned int wait, unsigned int can_sleep) 350 unsigned int wait, unsigned int can_sleep)
496{ 351{
497 if (ata_msg_probe(ap)) 352 if (ata_msg_probe(ap))
@@ -517,50 +372,34 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
517 * Enable interrupts on a legacy IDE device using MMIO or PIO, 372 * Enable interrupts on a legacy IDE device using MMIO or PIO,
518 * wait for idle, clear any pending interrupts. 373 * wait for idle, clear any pending interrupts.
519 * 374 *
375 * Note: may NOT be used as the sff_irq_on() entry in
376 * ata_port_operations.
377 *
520 * LOCKING: 378 * LOCKING:
521 * Inherited from caller. 379 * Inherited from caller.
522 */ 380 */
523u8 ata_sff_irq_on(struct ata_port *ap) 381void ata_sff_irq_on(struct ata_port *ap)
524{ 382{
525 struct ata_ioports *ioaddr = &ap->ioaddr; 383 struct ata_ioports *ioaddr = &ap->ioaddr;
526 u8 tmp; 384
385 if (ap->ops->sff_irq_on) {
386 ap->ops->sff_irq_on(ap);
387 return;
388 }
527 389
528 ap->ctl &= ~ATA_NIEN; 390 ap->ctl &= ~ATA_NIEN;
529 ap->last_ctl = ap->ctl; 391 ap->last_ctl = ap->ctl;
530 392
531 if (ioaddr->ctl_addr) 393 if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
532 iowrite8(ap->ctl, ioaddr->ctl_addr); 394 ata_sff_set_devctl(ap, ap->ctl);
533 tmp = ata_wait_idle(ap); 395 ata_wait_idle(ap);
534
535 ap->ops->sff_irq_clear(ap);
536 396
537 return tmp; 397 if (ap->ops->sff_irq_clear)
398 ap->ops->sff_irq_clear(ap);
538} 399}
539EXPORT_SYMBOL_GPL(ata_sff_irq_on); 400EXPORT_SYMBOL_GPL(ata_sff_irq_on);
540 401
541/** 402/**
542 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
543 * @ap: Port associated with this ATA transaction.
544 *
545 * Clear interrupt and error flags in DMA status register.
546 *
547 * May be used as the irq_clear() entry in ata_port_operations.
548 *
549 * LOCKING:
550 * spin_lock_irqsave(host lock)
551 */
552void ata_sff_irq_clear(struct ata_port *ap)
553{
554 void __iomem *mmio = ap->ioaddr.bmdma_addr;
555
556 if (!mmio)
557 return;
558
559 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
560}
561EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
562
563/**
564 * ata_sff_tf_load - send taskfile registers to host controller 403 * ata_sff_tf_load - send taskfile registers to host controller
565 * @ap: Port to which output is sent 404 * @ap: Port to which output is sent
566 * @tf: ATA taskfile register set 405 * @tf: ATA taskfile register set
@@ -894,7 +733,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
894 do_write); 733 do_write);
895 } 734 }
896 735
897 if (!do_write) 736 if (!do_write && !PageSlab(page))
898 flush_dcache_page(page); 737 flush_dcache_page(page);
899 738
900 qc->curbytes += qc->sect_size; 739 qc->curbytes += qc->sect_size;
@@ -962,11 +801,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
962 case ATAPI_PROT_NODATA: 801 case ATAPI_PROT_NODATA:
963 ap->hsm_task_state = HSM_ST_LAST; 802 ap->hsm_task_state = HSM_ST_LAST;
964 break; 803 break;
804#ifdef CONFIG_ATA_BMDMA
965 case ATAPI_PROT_DMA: 805 case ATAPI_PROT_DMA:
966 ap->hsm_task_state = HSM_ST_LAST; 806 ap->hsm_task_state = HSM_ST_LAST;
967 /* initiate bmdma */ 807 /* initiate bmdma */
968 ap->ops->bmdma_start(qc); 808 ap->ops->bmdma_start(qc);
969 break; 809 break;
810#endif /* CONFIG_ATA_BMDMA */
811 default:
812 BUG();
970 } 813 }
971} 814}
972 815
@@ -1165,7 +1008,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1165 qc = ata_qc_from_tag(ap, qc->tag); 1008 qc = ata_qc_from_tag(ap, qc->tag);
1166 if (qc) { 1009 if (qc) {
1167 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 1010 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1168 ap->ops->sff_irq_on(ap); 1011 ata_sff_irq_on(ap);
1169 ata_qc_complete(qc); 1012 ata_qc_complete(qc);
1170 } else 1013 } else
1171 ata_port_freeze(ap); 1014 ata_port_freeze(ap);
@@ -1181,7 +1024,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1181 } else { 1024 } else {
1182 if (in_wq) { 1025 if (in_wq) {
1183 spin_lock_irqsave(ap->lock, flags); 1026 spin_lock_irqsave(ap->lock, flags);
1184 ap->ops->sff_irq_on(ap); 1027 ata_sff_irq_on(ap);
1185 ata_qc_complete(qc); 1028 ata_qc_complete(qc);
1186 spin_unlock_irqrestore(ap->lock, flags); 1029 spin_unlock_irqrestore(ap->lock, flags);
1187 } else 1030 } else
@@ -1202,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1202int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1203 u8 status, int in_wq) 1046 u8 status, int in_wq)
1204{ 1047{
1205 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1206 unsigned long flags = 0; 1050 unsigned long flags = 0;
1207 int poll_next; 1051 int poll_next;
1208 1052
@@ -1293,7 +1137,7 @@ fsm_start:
1293 if (in_wq) 1137 if (in_wq)
1294 spin_unlock_irqrestore(ap->lock, flags); 1138 spin_unlock_irqrestore(ap->lock, flags);
1295 1139
1296 /* if polling, ata_pio_task() handles the rest. 1140 /* if polling, ata_sff_pio_task() handles the rest.
1297 * otherwise, interrupt handler takes over from here. 1141 * otherwise, interrupt handler takes over from here.
1298 */ 1142 */
1299 break; 1143 break;
@@ -1458,14 +1302,48 @@ fsm_start:
1458} 1302}
1459EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1460 1304
1461void ata_pio_task(struct work_struct *work) 1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1306{
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1313 /* may fail if ata_sff_flush_pio_task() in progress */
1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1315 msecs_to_jiffies(delay));
1316}
1317EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1318
1319void ata_sff_flush_pio_task(struct ata_port *ap)
1320{
1321 DPRINTK("ENTER\n");
1322
1323 cancel_rearming_delayed_work(&ap->sff_pio_task);
1324 ap->hsm_task_state = HSM_ST_IDLE;
1325
1326 if (ata_msg_ctl(ap))
1327 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1328}
1329
1330static void ata_sff_pio_task(struct work_struct *work)
1462{ 1331{
1463 struct ata_port *ap = 1332 struct ata_port *ap =
1464 container_of(work, struct ata_port, port_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1465 struct ata_queued_cmd *qc = ap->port_task_data; 1334 struct ata_link *link = ap->sff_pio_task_link;
1335 struct ata_queued_cmd *qc;
1466 u8 status; 1336 u8 status;
1467 int poll_next; 1337 int poll_next;
1468 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1340 /* qc can be NULL if timeout occurred */
1341 qc = ata_qc_from_tag(ap, link->active_tag);
1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1344 return;
1345 }
1346
1469fsm_start: 1347fsm_start:
1470 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1471 1349
@@ -1481,11 +1359,16 @@ fsm_start:
1481 msleep(2); 1359 msleep(2);
1482 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1483 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1484 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1485 return; 1363 return;
1486 } 1364 }
1487 } 1365 }
1488 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * clean the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1489 /* move the HSM */ 1372 /* move the HSM */
1490 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1491 1374
@@ -1497,15 +1380,11 @@ fsm_start:
1497} 1380}
1498 1381
1499/** 1382/**
1500 * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner 1383 * ata_sff_qc_issue - issue taskfile to a SFF controller
1501 * @qc: command to issue to device 1384 * @qc: command to issue to device
1502 * 1385 *
1503 * Using various libata functions and hooks, this function 1386 * This function issues a PIO or NODATA command to a SFF
1504 * starts an ATA command. ATA commands are grouped into 1387 * controller.
1505 * classes called "protocols", and issuing each type of protocol
1506 * is slightly different.
1507 *
1508 * May be used as the qc_issue() entry in ata_port_operations.
1509 * 1388 *
1510 * LOCKING: 1389 * LOCKING:
1511 * spin_lock_irqsave(host lock) 1390 * spin_lock_irqsave(host lock)
@@ -1516,27 +1395,13 @@ fsm_start:
1516unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1517{ 1396{
1518 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1519 1399
1520 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1521 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
1522 */ 1402 */
1523 if (ap->flags & ATA_FLAG_PIO_POLLING) { 1403 if (ap->flags & ATA_FLAG_PIO_POLLING)
1524 switch (qc->tf.protocol) { 1404 qc->tf.flags |= ATA_TFLAG_POLLING;
1525 case ATA_PROT_PIO:
1526 case ATA_PROT_NODATA:
1527 case ATAPI_PROT_PIO:
1528 case ATAPI_PROT_NODATA:
1529 qc->tf.flags |= ATA_TFLAG_POLLING;
1530 break;
1531 case ATAPI_PROT_DMA:
1532 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
1533 /* see ata_dma_blacklisted() */
1534 BUG();
1535 break;
1536 default:
1537 break;
1538 }
1539 }
1540 1405
1541 /* select the device */ 1406 /* select the device */
1542 ata_dev_select(ap, qc->dev->devno, 1, 0); 1407 ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -1551,17 +1416,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1551 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1552 1417
1553 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1554 ata_pio_queue_task(ap, qc, 0); 1419 ata_sff_queue_pio_task(link, 0);
1555
1556 break;
1557
1558 case ATA_PROT_DMA:
1559 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1560 1420
1561 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1562 ap->ops->bmdma_setup(qc); /* set up bmdma */
1563 ap->ops->bmdma_start(qc); /* initiate bmdma */
1564 ap->hsm_task_state = HSM_ST_LAST;
1565 break; 1421 break;
1566 1422
1567 case ATA_PROT_PIO: 1423 case ATA_PROT_PIO:
@@ -1573,20 +1429,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1573 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1574 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1575 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1576 ata_pio_queue_task(ap, qc, 0); 1432 ata_sff_queue_pio_task(link, 0);
1577 1433
1578 /* always send first data block using 1434 /* always send first data block using the
1579 * the ata_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
1580 */ 1436 */
1581 } else { 1437 } else {
1582 /* PIO data in protocol */ 1438 /* PIO data in protocol */
1583 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1584 1440
1585 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1586 ata_pio_queue_task(ap, qc, 0); 1442 ata_sff_queue_pio_task(link, 0);
1587 1443
1588 /* if polling, ata_pio_task() handles the rest. 1444 /* if polling, ata_sff_pio_task() handles the
1589 * otherwise, interrupt handler takes over from here. 1445 * rest. otherwise, interrupt handler takes
1446 * over from here.
1590 */ 1447 */
1591 } 1448 }
1592 1449
@@ -1604,19 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1604 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1605 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1606 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1607 ata_pio_queue_task(ap, qc, 0); 1464 ata_sff_queue_pio_task(link, 0);
1608 break;
1609
1610 case ATAPI_PROT_DMA:
1611 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1612
1613 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1614 ap->ops->bmdma_setup(qc); /* set up bmdma */
1615 ap->hsm_task_state = HSM_ST_FIRST;
1616
1617 /* send cdb by polling if no cdb interrupt */
1618 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1619 ata_pio_queue_task(ap, qc, 0);
1620 break; 1465 break;
1621 1466
1622 default: 1467 default:
@@ -1648,27 +1493,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1648} 1493}
1649EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); 1494EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1650 1495
1651/** 1496static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1652 * ata_sff_host_intr - Handle host interrupt for given (port, task)
1653 * @ap: Port on which interrupt arrived (possibly...)
1654 * @qc: Taskfile currently active in engine
1655 *
1656 * Handle host interrupt for given queued command. Currently,
1657 * only DMA interrupts are handled. All other commands are
1658 * handled via polling with interrupts disabled (nIEN bit).
1659 *
1660 * LOCKING:
1661 * spin_lock_irqsave(host lock)
1662 *
1663 * RETURNS:
1664 * One if interrupt was handled, zero if not (shared irq).
1665 */
1666unsigned int ata_sff_host_intr(struct ata_port *ap,
1667 struct ata_queued_cmd *qc)
1668{ 1497{
1669 struct ata_eh_info *ehi = &ap->link.eh_info; 1498 ap->stats.idle_irq++;
1670 u8 status, host_stat = 0; 1499
1671 bool bmdma_stopped = false; 1500#ifdef ATA_IRQ_TRAP
1501 if ((ap->stats.idle_irq % 1000) == 0) {
1502 ap->ops->sff_check_status(ap);
1503 if (ap->ops->sff_irq_clear)
1504 ap->ops->sff_irq_clear(ap);
1505 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1506 return 1;
1507 }
1508#endif
1509 return 0; /* irq not handled */
1510}
1511
1512static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1513 struct ata_queued_cmd *qc,
1514 bool hsmv_on_idle)
1515{
1516 u8 status;
1672 1517
1673 VPRINTK("ata%u: protocol %d task_state %d\n", 1518 VPRINTK("ata%u: protocol %d task_state %d\n",
1674 ap->print_id, qc->tf.protocol, ap->hsm_task_state); 1519 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1685,90 +1530,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
1685 * need to check ata_is_atapi(qc->tf.protocol) again. 1530 * need to check ata_is_atapi(qc->tf.protocol) again.
1686 */ 1531 */
1687 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1532 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1688 goto idle_irq; 1533 return ata_sff_idle_irq(ap);
1689 break;
1690 case HSM_ST_LAST:
1691 if (qc->tf.protocol == ATA_PROT_DMA ||
1692 qc->tf.protocol == ATAPI_PROT_DMA) {
1693 /* check status of DMA engine */
1694 host_stat = ap->ops->bmdma_status(ap);
1695 VPRINTK("ata%u: host_stat 0x%X\n",
1696 ap->print_id, host_stat);
1697
1698 /* if it's not our irq... */
1699 if (!(host_stat & ATA_DMA_INTR))
1700 goto idle_irq;
1701
1702 /* before we do anything else, clear DMA-Start bit */
1703 ap->ops->bmdma_stop(qc);
1704 bmdma_stopped = true;
1705
1706 if (unlikely(host_stat & ATA_DMA_ERR)) {
1707 /* error when transfering data to/from memory */
1708 qc->err_mask |= AC_ERR_HOST_BUS;
1709 ap->hsm_task_state = HSM_ST_ERR;
1710 }
1711 }
1712 break; 1534 break;
1713 case HSM_ST: 1535 case HSM_ST:
1536 case HSM_ST_LAST:
1714 break; 1537 break;
1715 default: 1538 default:
1716 goto idle_irq; 1539 return ata_sff_idle_irq(ap);
1717 } 1540 }
1718 1541
1719
1720 /* check main status, clearing INTRQ if needed */ 1542 /* check main status, clearing INTRQ if needed */
1721 status = ata_sff_irq_status(ap); 1543 status = ata_sff_irq_status(ap);
1722 if (status & ATA_BUSY) { 1544 if (status & ATA_BUSY) {
1723 if (bmdma_stopped) { 1545 if (hsmv_on_idle) {
1724 /* BMDMA engine is already stopped, we're screwed */ 1546 /* BMDMA engine is already stopped, we're screwed */
1725 qc->err_mask |= AC_ERR_HSM; 1547 qc->err_mask |= AC_ERR_HSM;
1726 ap->hsm_task_state = HSM_ST_ERR; 1548 ap->hsm_task_state = HSM_ST_ERR;
1727 } else 1549 } else
1728 goto idle_irq; 1550 return ata_sff_idle_irq(ap);
1729 } 1551 }
1730 1552
1731 /* ack bmdma irq events */ 1553 /* clear irq events */
1732 ap->ops->sff_irq_clear(ap); 1554 if (ap->ops->sff_irq_clear)
1555 ap->ops->sff_irq_clear(ap);
1733 1556
1734 ata_sff_hsm_move(ap, qc, status, 0); 1557 ata_sff_hsm_move(ap, qc, status, 0);
1735 1558
1736 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1737 qc->tf.protocol == ATAPI_PROT_DMA))
1738 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1739
1740 return 1; /* irq handled */ 1559 return 1; /* irq handled */
1741
1742idle_irq:
1743 ap->stats.idle_irq++;
1744
1745#ifdef ATA_IRQ_TRAP
1746 if ((ap->stats.idle_irq % 1000) == 0) {
1747 ap->ops->sff_check_status(ap);
1748 ap->ops->sff_irq_clear(ap);
1749 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1750 return 1;
1751 }
1752#endif
1753 return 0; /* irq not handled */
1754} 1560}
1755EXPORT_SYMBOL_GPL(ata_sff_host_intr);
1756 1561
1757/** 1562/**
1758 * ata_sff_interrupt - Default ATA host interrupt handler 1563 * ata_sff_port_intr - Handle SFF port interrupt
1759 * @irq: irq line (unused) 1564 * @ap: Port on which interrupt arrived (possibly...)
1760 * @dev_instance: pointer to our ata_host information structure 1565 * @qc: Taskfile currently active in engine
1761 * 1566 *
1762 * Default interrupt handler for PCI IDE devices. Calls 1567 * Handle port interrupt for given queued command.
1763 * ata_sff_host_intr() for each port that is not disabled.
1764 * 1568 *
1765 * LOCKING: 1569 * LOCKING:
1766 * Obtains host lock during operation. 1570 * spin_lock_irqsave(host lock)
1767 * 1571 *
1768 * RETURNS: 1572 * RETURNS:
1769 * IRQ_NONE or IRQ_HANDLED. 1573 * One if interrupt was handled, zero if not (shared irq).
1770 */ 1574 */
1771irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) 1575unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1576{
1577 return __ata_sff_port_intr(ap, qc, false);
1578}
1579EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1580
1581static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1582 unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1772{ 1583{
1773 struct ata_host *host = dev_instance; 1584 struct ata_host *host = dev_instance;
1774 bool retried = false; 1585 bool retried = false;
@@ -1785,13 +1596,10 @@ retry:
1785 struct ata_port *ap = host->ports[i]; 1596 struct ata_port *ap = host->ports[i];
1786 struct ata_queued_cmd *qc; 1597 struct ata_queued_cmd *qc;
1787 1598
1788 if (unlikely(ap->flags & ATA_FLAG_DISABLED))
1789 continue;
1790
1791 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1792 if (qc) { 1600 if (qc) {
1793 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) 1601 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1794 handled |= ata_sff_host_intr(ap, qc); 1602 handled |= port_intr(ap, qc);
1795 else 1603 else
1796 polling |= 1 << i; 1604 polling |= 1 << i;
1797 } else 1605 } else
@@ -1818,7 +1626,8 @@ retry:
1818 1626
1819 if (idle & (1 << i)) { 1627 if (idle & (1 << i)) {
1820 ap->ops->sff_check_status(ap); 1628 ap->ops->sff_check_status(ap);
1821 ap->ops->sff_irq_clear(ap); 1629 if (ap->ops->sff_irq_clear)
1630 ap->ops->sff_irq_clear(ap);
1822 } else { 1631 } else {
1823 /* clear INTRQ and check if BUSY cleared */ 1632 /* clear INTRQ and check if BUSY cleared */
1824 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) 1633 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1840,6 +1649,25 @@ retry:
1840 1649
1841 return IRQ_RETVAL(handled); 1650 return IRQ_RETVAL(handled);
1842} 1651}
1652
1653/**
1654 * ata_sff_interrupt - Default SFF ATA host interrupt handler
1655 * @irq: irq line (unused)
1656 * @dev_instance: pointer to our ata_host information structure
1657 *
1658 * Default interrupt handler for PCI IDE devices. Calls
1659 * ata_sff_port_intr() for each port that is not disabled.
1660 *
1661 * LOCKING:
1662 * Obtains host lock during operation.
1663 *
1664 * RETURNS:
1665 * IRQ_NONE or IRQ_HANDLED.
1666 */
1667irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1668{
1669 return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1670}
1843EXPORT_SYMBOL_GPL(ata_sff_interrupt); 1671EXPORT_SYMBOL_GPL(ata_sff_interrupt);
1844 1672
1845/** 1673/**
@@ -1862,11 +1690,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
1862 1690
1863 /* Only one outstanding command per SFF channel */ 1691 /* Only one outstanding command per SFF channel */
1864 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1692 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1865 /* Check we have a live one.. */ 1693 /* We cannot lose an interrupt on a non-existent or polled command */
1866 if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE)) 1694 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1867 return;
1868 /* We cannot lose an interrupt on a polled command */
1869 if (qc->tf.flags & ATA_TFLAG_POLLING)
1870 return; 1695 return;
1871 /* See if the controller thinks it is still busy - if so the command 1696 /* See if the controller thinks it is still busy - if so the command
1872 isn't a lost IRQ but is still in progress */ 1697 isn't a lost IRQ but is still in progress */
@@ -1880,7 +1705,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
1880 status); 1705 status);
1881 /* Run the host interrupt logic as if the interrupt had not been 1706 /* Run the host interrupt logic as if the interrupt had not been
1882 lost */ 1707 lost */
1883 ata_sff_host_intr(ap, qc); 1708 ata_sff_port_intr(ap, qc);
1884} 1709}
1885EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); 1710EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1886 1711
@@ -1888,20 +1713,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1888 * ata_sff_freeze - Freeze SFF controller port 1713 * ata_sff_freeze - Freeze SFF controller port
1889 * @ap: port to freeze 1714 * @ap: port to freeze
1890 * 1715 *
1891 * Freeze BMDMA controller port. 1716 * Freeze SFF controller port.
1892 * 1717 *
1893 * LOCKING: 1718 * LOCKING:
1894 * Inherited from caller. 1719 * Inherited from caller.
1895 */ 1720 */
1896void ata_sff_freeze(struct ata_port *ap) 1721void ata_sff_freeze(struct ata_port *ap)
1897{ 1722{
1898 struct ata_ioports *ioaddr = &ap->ioaddr;
1899
1900 ap->ctl |= ATA_NIEN; 1723 ap->ctl |= ATA_NIEN;
1901 ap->last_ctl = ap->ctl; 1724 ap->last_ctl = ap->ctl;
1902 1725
1903 if (ioaddr->ctl_addr) 1726 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1904 iowrite8(ap->ctl, ioaddr->ctl_addr); 1727 ata_sff_set_devctl(ap, ap->ctl);
1905 1728
1906 /* Under certain circumstances, some controllers raise IRQ on 1729 /* Under certain circumstances, some controllers raise IRQ on
1907 * ATA_NIEN manipulation. Also, many controllers fail to mask 1730 * ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1909,7 +1732,8 @@ void ata_sff_freeze(struct ata_port *ap)
1909 */ 1732 */
1910 ap->ops->sff_check_status(ap); 1733 ap->ops->sff_check_status(ap);
1911 1734
1912 ap->ops->sff_irq_clear(ap); 1735 if (ap->ops->sff_irq_clear)
1736 ap->ops->sff_irq_clear(ap);
1913} 1737}
1914EXPORT_SYMBOL_GPL(ata_sff_freeze); 1738EXPORT_SYMBOL_GPL(ata_sff_freeze);
1915 1739
@@ -1926,8 +1750,9 @@ void ata_sff_thaw(struct ata_port *ap)
1926{ 1750{
1927 /* clear & re-enable interrupts */ 1751 /* clear & re-enable interrupts */
1928 ap->ops->sff_check_status(ap); 1752 ap->ops->sff_check_status(ap);
1929 ap->ops->sff_irq_clear(ap); 1753 if (ap->ops->sff_irq_clear)
1930 ap->ops->sff_irq_on(ap); 1754 ap->ops->sff_irq_clear(ap);
1755 ata_sff_irq_on(ap);
1931} 1756}
1932EXPORT_SYMBOL_GPL(ata_sff_thaw); 1757EXPORT_SYMBOL_GPL(ata_sff_thaw);
1933 1758
@@ -2301,8 +2126,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2301 } 2126 }
2302 2127
2303 /* set up device control */ 2128 /* set up device control */
2304 if (ap->ioaddr.ctl_addr) { 2129 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2305 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 2130 ata_sff_set_devctl(ap, ap->ctl);
2306 ap->last_ctl = ap->ctl; 2131 ap->last_ctl = ap->ctl;
2307 } 2132 }
2308} 2133}
@@ -2342,7 +2167,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2342EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); 2167EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2343 2168
2344/** 2169/**
2345 * ata_sff_error_handler - Stock error handler for BMDMA controller 2170 * ata_sff_error_handler - Stock error handler for SFF controller
2346 * @ap: port to handle error for 2171 * @ap: port to handle error for
2347 * 2172 *
2348 * Stock error handler for SFF controller. It can handle both 2173 * Stock error handler for SFF controller. It can handle both
@@ -2359,62 +2184,32 @@ void ata_sff_error_handler(struct ata_port *ap)
2359 ata_reset_fn_t hardreset = ap->ops->hardreset; 2184 ata_reset_fn_t hardreset = ap->ops->hardreset;
2360 struct ata_queued_cmd *qc; 2185 struct ata_queued_cmd *qc;
2361 unsigned long flags; 2186 unsigned long flags;
2362 int thaw = 0;
2363 2187
2364 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2188 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2365 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2189 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2366 qc = NULL; 2190 qc = NULL;
2367 2191
2368 /* reset PIO HSM and stop DMA engine */
2369 spin_lock_irqsave(ap->lock, flags); 2192 spin_lock_irqsave(ap->lock, flags);
2370 2193
2371 ap->hsm_task_state = HSM_ST_IDLE; 2194 /*
2372 2195 * We *MUST* do FIFO draining before we issue a reset as
2373 if (ap->ioaddr.bmdma_addr && 2196 * several devices helpfully clear their internal state and
2374 qc && (qc->tf.protocol == ATA_PROT_DMA || 2197 * will lock solid if we touch the data port post reset. Pass
2375 qc->tf.protocol == ATAPI_PROT_DMA)) { 2198 * qc in case anyone wants to do different PIO/DMA recovery or
2376 u8 host_stat; 2199 * has per command fixups
2377
2378 host_stat = ap->ops->bmdma_status(ap);
2379
2380 /* BMDMA controllers indicate host bus error by
2381 * setting DMA_ERR bit and timing out. As it wasn't
2382 * really a timeout event, adjust error mask and
2383 * cancel frozen state.
2384 */
2385 if (qc->err_mask == AC_ERR_TIMEOUT
2386 && (host_stat & ATA_DMA_ERR)) {
2387 qc->err_mask = AC_ERR_HOST_BUS;
2388 thaw = 1;
2389 }
2390
2391 ap->ops->bmdma_stop(qc);
2392 }
2393
2394 ata_sff_sync(ap); /* FIXME: We don't need this */
2395 ap->ops->sff_check_status(ap);
2396 ap->ops->sff_irq_clear(ap);
2397 /* We *MUST* do FIFO draining before we issue a reset as several
2398 * devices helpfully clear their internal state and will lock solid
2399 * if we touch the data port post reset. Pass qc in case anyone wants
2400 * to do different PIO/DMA recovery or has per command fixups
2401 */ 2200 */
2402 if (ap->ops->drain_fifo) 2201 if (ap->ops->sff_drain_fifo)
2403 ap->ops->drain_fifo(qc); 2202 ap->ops->sff_drain_fifo(qc);
2404 2203
2405 spin_unlock_irqrestore(ap->lock, flags); 2204 spin_unlock_irqrestore(ap->lock, flags);
2406 2205
2407 if (thaw) 2206 /* ignore ata_sff_softreset if ctl isn't accessible */
2408 ata_eh_thaw_port(ap);
2409
2410 /* PIO and DMA engines have been stopped, perform recovery */
2411
2412 /* Ignore ata_sff_softreset if ctl isn't accessible and
2413 * built-in hardresets if SCR access isn't available.
2414 */
2415 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) 2207 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2416 softreset = NULL; 2208 softreset = NULL;
2417 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 2209
2210 /* ignore built-in hardresets if SCR access is not available */
2211 if ((hardreset == sata_std_hardreset ||
2212 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2418 hardreset = NULL; 2213 hardreset = NULL;
2419 2214
2420 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2215 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2423,73 +2218,6 @@ void ata_sff_error_handler(struct ata_port *ap)
2423EXPORT_SYMBOL_GPL(ata_sff_error_handler); 2218EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2424 2219
2425/** 2220/**
2426 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
2427 * @qc: internal command to clean up
2428 *
2429 * LOCKING:
2430 * Kernel thread context (may sleep)
2431 */
2432void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2433{
2434 struct ata_port *ap = qc->ap;
2435 unsigned long flags;
2436
2437 spin_lock_irqsave(ap->lock, flags);
2438
2439 ap->hsm_task_state = HSM_ST_IDLE;
2440
2441 if (ap->ioaddr.bmdma_addr)
2442 ap->ops->bmdma_stop(qc);
2443
2444 spin_unlock_irqrestore(ap->lock, flags);
2445}
2446EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2447
2448/**
2449 * ata_sff_port_start - Set port up for dma.
2450 * @ap: Port to initialize
2451 *
2452 * Called just after data structures for each port are
2453 * initialized. Allocates space for PRD table if the device
2454 * is DMA capable SFF.
2455 *
2456 * May be used as the port_start() entry in ata_port_operations.
2457 *
2458 * LOCKING:
2459 * Inherited from caller.
2460 */
2461int ata_sff_port_start(struct ata_port *ap)
2462{
2463 if (ap->ioaddr.bmdma_addr)
2464 return ata_port_start(ap);
2465 return 0;
2466}
2467EXPORT_SYMBOL_GPL(ata_sff_port_start);
2468
2469/**
2470 * ata_sff_port_start32 - Set port up for dma.
2471 * @ap: Port to initialize
2472 *
2473 * Called just after data structures for each port are
2474 * initialized. Allocates space for PRD table if the device
2475 * is DMA capable SFF.
2476 *
2477 * May be used as the port_start() entry in ata_port_operations for
2478 * devices that are capable of 32bit PIO.
2479 *
2480 * LOCKING:
2481 * Inherited from caller.
2482 */
2483int ata_sff_port_start32(struct ata_port *ap)
2484{
2485 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
2486 if (ap->ioaddr.bmdma_addr)
2487 return ata_port_start(ap);
2488 return 0;
2489}
2490EXPORT_SYMBOL_GPL(ata_sff_port_start32);
2491
2492/**
2493 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2221 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2494 * @ioaddr: IO address structure to be initialized 2222 * @ioaddr: IO address structure to be initialized
2495 * 2223 *
@@ -2515,302 +2243,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
2515} 2243}
2516EXPORT_SYMBOL_GPL(ata_sff_std_ports); 2244EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2517 2245
/**
 * ata_bmdma_mode_filter - mask out DMA modes on BMDMA-less ports
 * @adev: device whose transfer modes are being filtered
 * @xfer_mask: candidate transfer mode mask
 *
 * Strip MWDMA/UDMA bits when the port has no BMDMA register block,
 * i.e. when the controller was configured as PIO only.
 *
 * RETURNS:
 * @xfer_mask with DMA bits cleared if the port lacks bmdma_addr.
 */
2518unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2519 unsigned long xfer_mask)
2520{
2521 /* Filter out DMA modes if the device has been configured by
2522 the BIOS as PIO only */
2523
2524 if (adev->link->ap->ioaddr.bmdma_addr == NULL)
2525 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2526 return xfer_mask;
2527}
2528EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2529
2530/**
2531 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2532 * @qc: Info associated with this ATA transaction.
2533 *
2534 * LOCKING:
2535 * spin_lock_irqsave(host lock)
2536 */
2537void ata_bmdma_setup(struct ata_queued_cmd *qc)
2538{
2539 struct ata_port *ap = qc->ap;
2540 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2541 u8 dmactl;
2542
2543 /* load PRD table addr. */
2544 mb(); /* make sure PRD table writes are visible to controller */
2545 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2546
2547 /* specify data direction, triple-check start bit is clear */
2548 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2549 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
 /* direction bit is from the bus master's viewpoint: a non-write
  * (device read) command means the controller writes to memory */
2550 if (!rw)
2551 dmactl |= ATA_DMA_WR;
2552 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2553
2554 /* issue r/w command */
2555 ap->ops->sff_exec_command(ap, &qc->tf);
2556}
2557EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2558
2559/**
2560 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2561 * @qc: Info associated with this ATA transaction.
2562 *
2563 * LOCKING:
2564 * spin_lock_irqsave(host lock)
2565 */
2566void ata_bmdma_start(struct ata_queued_cmd *qc)
2567{
2568 struct ata_port *ap = qc->ap;
2569 u8 dmactl;
2570
2571 /* start host DMA transaction */
2572 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2573 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2574
2575 /* Strictly, one may wish to issue an ioread8() here, to
2576 * flush the mmio write. However, control also passes
2577 * to the hardware at this point, and it will interrupt
2578 * us when we are to resume control. So, in effect,
2579 * we don't care when the mmio write flushes.
2580 * Further, a read of the DMA status register _immediately_
2581 * following the write may not be what certain flaky hardware
2582 * expects, so I think it is best to not add a readb()
2583 * without first auditing all the MMIO ATA cards/mobos.
2584 * Or maybe I'm just being paranoid.
2585 *
2586 * FIXME: The posting of this write means I/O starts are
2587 * unnecessarily delayed for MMIO
2588 */
2589}
2590EXPORT_SYMBOL_GPL(ata_bmdma_start);
2591
2592/**
2593 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2594 * @qc: Command we are ending DMA for
2595 *
2596 * Clears the ATA_DMA_START flag in the dma control register
2597 *
2598 * May be used as the bmdma_stop() entry in ata_port_operations.
2599 *
2600 * LOCKING:
2601 * spin_lock_irqsave(host lock)
2602 */
2603void ata_bmdma_stop(struct ata_queued_cmd *qc)
2604{
2605 struct ata_port *ap = qc->ap;
2606 void __iomem *mmio = ap->ioaddr.bmdma_addr;
 /* NOTE(review): only reached after bmdma_setup/start, so mmio
  * is assumed non-NULL here — confirm against callers */
2607
2608 /* clear start/stop bit */
2609 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2610 mmio + ATA_DMA_CMD);
2611
2612 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2613 ata_sff_dma_pause(ap);
2614}
2615EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2616
2617/**
2618 * ata_bmdma_status - Read PCI IDE BMDMA status
2619 * @ap: Port associated with this ATA transaction.
2620 *
2621 * Read and return BMDMA status register.
2622 *
2623 * May be used as the bmdma_status() entry in ata_port_operations.
2624 *
2625 * LOCKING:
2626 * spin_lock_irqsave(host lock)
2627 */
2628u8 ata_bmdma_status(struct ata_port *ap)
2629{
 /* plain read of the BMDMA status register; latched bits are
  * cleared elsewhere (see the irq_clear hook) */
2630 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2631}
2632EXPORT_SYMBOL_GPL(ata_bmdma_status);
2633
2634/**
2635 * ata_bus_reset - reset host port and associated ATA channel
2636 * @ap: port to reset
2637 *
2638 * This is typically the first time we actually start issuing
2639 * commands to the ATA channel. We wait for BSY to clear, then
2640 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2641 * result. Determine what devices, if any, are on the channel
2642 * by looking at the device 0/1 error register. Look at the signature
2643 * stored in each device's taskfile registers, to determine if
2644 * the device is ATA or ATAPI.
2645 *
2646 * LOCKING:
2647 * PCI/etc. bus probe sem.
2648 * Obtains host lock.
2649 *
2650 * SIDE EFFECTS:
2651 * Sets ATA_FLAG_DISABLED if bus reset fails.
2652 *
2653 * DEPRECATED:
2654 * This function is only for drivers which still use old EH and
2655 * will be removed soon.
2656 */
2657void ata_bus_reset(struct ata_port *ap)
2658{
2659 struct ata_device *device = ap->link.device;
2660 struct ata_ioports *ioaddr = &ap->ioaddr;
2661 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2662 u8 err;
2663 unsigned int dev0, dev1 = 0, devmask = 0;
2664 int rc;
2665
2666 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2667
2668 /* determine if device 0/1 are present */
 /* SATA_RESET ports assume device 0 is present without probing */
2669 if (ap->flags & ATA_FLAG_SATA_RESET)
2670 dev0 = 1;
2671 else {
2672 dev0 = ata_devchk(ap, 0);
2673 if (slave_possible)
2674 dev1 = ata_devchk(ap, 1);
2675 }
2676
2677 if (dev0)
2678 devmask |= (1 << 0);
2679 if (dev1)
2680 devmask |= (1 << 1);
2681
2682 /* select device 0 again */
2683 ap->ops->sff_dev_select(ap, 0);
2684
2685 /* issue bus reset */
2686 if (ap->flags & ATA_FLAG_SRST) {
2687 rc = ata_bus_softreset(ap, devmask,
2688 ata_deadline(jiffies, 40000));
 /* -ENODEV just means nothing answered; only hard failures abort */
2689 if (rc && rc != -ENODEV)
2690 goto err_out;
2691 }
2692
2693 /*
2694 * determine by signature whether we have ATA or ATAPI devices
2695 */
2696 device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
 /* NOTE(review): 0x81 appears to be the diagnostic code indicating
  * device 1 failed, so its classification is skipped — confirm */
2697 if ((slave_possible) && (err != 0x81))
2698 device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
2699
2700 /* is double-select really necessary? */
2701 if (device[1].class != ATA_DEV_NONE)
2702 ap->ops->sff_dev_select(ap, 1);
2703 if (device[0].class != ATA_DEV_NONE)
2704 ap->ops->sff_dev_select(ap, 0);
2705
2706 /* if no devices were detected, disable this port */
2707 if ((device[0].class == ATA_DEV_NONE) &&
2708 (device[1].class == ATA_DEV_NONE))
2709 goto err_out;
2710
2711 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2712 /* set up device control for ATA_FLAG_SATA_RESET */
2713 iowrite8(ap->ctl, ioaddr->ctl_addr);
2714 ap->last_ctl = ap->ctl;
2715 }
2716
2717 DPRINTK("EXIT\n");
2718 return;
2719
2720err_out:
2721 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2722 ata_port_disable(ap);
2723
2724 DPRINTK("EXIT\n");
2725}
2726EXPORT_SYMBOL_GPL(ata_bus_reset);
2727
2728#ifdef CONFIG_PCI 2246#ifdef CONFIG_PCI
2729 2247
2730/**
2731 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
2732 * @pdev: PCI device
2733 *
2734 * Some PCI ATA devices report simplex mode but in fact can be told to
2735 * enter non simplex mode. This implements the necessary logic to
2736 * perform the task on such devices. Calling it on other devices will
2737 * have -undefined- behaviour.
2738 */
2739int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2740{
2741 unsigned long bmdma = pci_resource_start(pdev, 4);
2742 u8 simplex;
2743
 /* no BAR4 means no BMDMA block to poke */
2744 if (bmdma == 0)
2745 return -ENOENT;
2746
 /* offset 0x02 is the BMDMA status register; try to clear the
  * simplex bit (0x80) while preserving the drive-DMA-capable
  * bits (0x60), then re-read to see if it stuck */
2747 simplex = inb(bmdma + 0x02);
2748 outb(simplex & 0x60, bmdma + 0x02);
2749 simplex = inb(bmdma + 0x02);
2750 if (simplex & 0x80)
2751 return -EOPNOTSUPP;
2752 return 0;
2753}
2754EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
2755
2756/**
2757 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
2758 * @host: target ATA host
2759 *
2760 * Acquire PCI BMDMA resources and initialize @host accordingly.
2761 *
2762 * LOCKING:
2763 * Inherited from calling layer (may sleep).
2764 *
2765 * RETURNS:
2766 * 0 on success, -errno otherwise.
2767 */
2768int ata_pci_bmdma_init(struct ata_host *host)
2769{
2770 struct device *gdev = host->dev;
2771 struct pci_dev *pdev = to_pci_dev(gdev);
2772 int i, rc;
2773
2774 /* No BAR4 allocation: No DMA */
2775 if (pci_resource_start(pdev, 4) == 0)
2776 return 0;
2777
2778 /* TODO: If we get no DMA mask we should fall back to PIO */
2779 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
2780 if (rc)
2781 return rc;
2782 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
2783 if (rc)
2784 return rc;
2785
2786 /* request and iomap DMA region */
2787 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
2788 if (rc) {
2789 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
2790 return -ENOMEM;
2791 }
2792 host->iomap = pcim_iomap_table(pdev);
2793
 /* wire up each (non-dummy) port's 8-byte slice of the BMDMA block */
2794 for (i = 0; i < 2; i++) {
2795 struct ata_port *ap = host->ports[i];
2796 void __iomem *bmdma = host->iomap[4] + 8 * i;
2797
2798 if (ata_port_is_dummy(ap))
2799 continue;
2800
2801 ap->ioaddr.bmdma_addr = bmdma;
 /* status bit 0x80 set at probe time indicates a simplex controller */
2802 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
2803 (ioread8(bmdma + 2) & 0x80))
2804 host->flags |= ATA_HOST_SIMPLEX;
2805
2806 ata_port_desc(ap, "bmdma 0x%llx",
2807 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2808 }
2809
2810 return 0;
2811}
2812EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2813
2814static int ata_resources_present(struct pci_dev *pdev, int port) 2248static int ata_resources_present(struct pci_dev *pdev, int port)
2815{ 2249{
2816 int i; 2250 int i;
@@ -2905,13 +2339,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
2905EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); 2339EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2906 2340
2907/** 2341/**
2908 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host 2342 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2909 * @pdev: target PCI device 2343 * @pdev: target PCI device
2910 * @ppi: array of port_info, must be enough for two ports 2344 * @ppi: array of port_info, must be enough for two ports
2911 * @r_host: out argument for the initialized ATA host 2345 * @r_host: out argument for the initialized ATA host
2912 * 2346 *
2913 * Helper to allocate ATA host for @pdev, acquire all native PCI 2347 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2914 * resources and initialize it accordingly in one go. 2348 * all PCI resources and initialize it accordingly in one go.
2915 * 2349 *
2916 * LOCKING: 2350 * LOCKING:
2917 * Inherited from calling layer (may sleep). 2351 * Inherited from calling layer (may sleep).
@@ -2941,22 +2375,10 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2941 if (rc) 2375 if (rc)
2942 goto err_out; 2376 goto err_out;
2943 2377
2944 /* init DMA related stuff */
2945 rc = ata_pci_bmdma_init(host);
2946 if (rc)
2947 goto err_bmdma;
2948
2949 devres_remove_group(&pdev->dev, NULL); 2378 devres_remove_group(&pdev->dev, NULL);
2950 *r_host = host; 2379 *r_host = host;
2951 return 0; 2380 return 0;
2952 2381
2953err_bmdma:
2954 /* This is necessary because PCI and iomap resources are
2955 * merged and releasing the top group won't release the
2956 * acquired resources if some of those have been acquired
2957 * before entering this function.
2958 */
2959 pcim_iounmap_regions(pdev, 0xf);
2960err_out: 2382err_out:
2961 devres_release_group(&pdev->dev, NULL); 2383 devres_release_group(&pdev->dev, NULL);
2962 return rc; 2384 return rc;
@@ -3057,8 +2479,21 @@ out:
3057} 2479}
3058EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); 2480EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
3059 2481
/* ata_sff_find_valid_pi - return the first non-dummy entry of a
 * two-slot port_info array, or NULL if both slots are dummy/absent.
 */
 2482static const struct ata_port_info *ata_sff_find_valid_pi(
 2483 const struct ata_port_info * const *ppi)
 2484{
 2485 int i;
 2486
 2487 /* look up the first valid port_info */
 2488 for (i = 0; i < 2 && ppi[i]; i++)
 2489 if (ppi[i]->port_ops != &ata_dummy_port_ops)
 2490 return ppi[i];
 2491
 2492 return NULL;
 2493}
2494
3060/** 2495/**
3061 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller 2496 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
3062 * @pdev: Controller to be initialized 2497 * @pdev: Controller to be initialized
3063 * @ppi: array of port_info, must be enough for two ports 2498 * @ppi: array of port_info, must be enough for two ports
3064 * @sht: scsi_host_template to use when registering the host 2499 * @sht: scsi_host_template to use when registering the host
@@ -3067,11 +2502,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
3067 * 2502 *
3068 * This is a helper function which can be called from a driver's 2503 * This is a helper function which can be called from a driver's
3069 * xxx_init_one() probe function if the hardware uses traditional 2504 * xxx_init_one() probe function if the hardware uses traditional
3070 * IDE taskfile registers. 2505 * IDE taskfile registers and is PIO only.
3071 *
3072 * This function calls pci_enable_device(), reserves its register
3073 * regions, sets the dma mask, enables bus master mode, and calls
3074 * ata_device_add()
3075 * 2506 *
3076 * ASSUMPTION: 2507 * ASSUMPTION:
3077 * Nobody makes a single channel controller that appears solely as 2508 * Nobody makes a single channel controller that appears solely as
@@ -3088,20 +2519,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
3088 struct scsi_host_template *sht, void *host_priv, int hflag) 2519 struct scsi_host_template *sht, void *host_priv, int hflag)
3089{ 2520{
3090 struct device *dev = &pdev->dev; 2521 struct device *dev = &pdev->dev;
3091 const struct ata_port_info *pi = NULL; 2522 const struct ata_port_info *pi;
3092 struct ata_host *host = NULL; 2523 struct ata_host *host = NULL;
3093 int i, rc; 2524 int rc;
3094 2525
3095 DPRINTK("ENTER\n"); 2526 DPRINTK("ENTER\n");
3096 2527
3097 /* look up the first valid port_info */ 2528 pi = ata_sff_find_valid_pi(ppi);
3098 for (i = 0; i < 2 && ppi[i]; i++) {
3099 if (ppi[i]->port_ops != &ata_dummy_port_ops) {
3100 pi = ppi[i];
3101 break;
3102 }
3103 }
3104
3105 if (!pi) { 2529 if (!pi) {
3106 dev_printk(KERN_ERR, &pdev->dev, 2530 dev_printk(KERN_ERR, &pdev->dev,
3107 "no valid port_info specified\n"); 2531 "no valid port_info specified\n");
@@ -3122,7 +2546,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
3122 host->private_data = host_priv; 2546 host->private_data = host_priv;
3123 host->flags |= hflag; 2547 host->flags |= hflag;
3124 2548
3125 pci_set_master(pdev);
3126 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2549 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
3127out: 2550out:
3128 if (rc == 0) 2551 if (rc == 0)
@@ -3135,3 +2558,791 @@ out:
3135EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); 2558EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
3136 2559
3137#endif /* CONFIG_PCI */ 2560#endif /* CONFIG_PCI */
2561
2562/*
2563 * BMDMA support
2564 */
2565
2566#ifdef CONFIG_ATA_BMDMA
2567
/* Stock port operations for BMDMA-capable SFF controllers; extends
 * ata_sff_port_ops with DMA prep/issue, BMDMA register hooks and
 * PRD-table allocating port_start. */
 2568const struct ata_port_operations ata_bmdma_port_ops = {
 2569 .inherits = &ata_sff_port_ops,
 2570
 2571 .error_handler = ata_bmdma_error_handler,
 2572 .post_internal_cmd = ata_bmdma_post_internal_cmd,
 2573
 2574 .qc_prep = ata_bmdma_qc_prep,
 2575 .qc_issue = ata_bmdma_qc_issue,
 2576
 2577 .sff_irq_clear = ata_bmdma_irq_clear,
 2578 .bmdma_setup = ata_bmdma_setup,
 2579 .bmdma_start = ata_bmdma_start,
 2580 .bmdma_stop = ata_bmdma_stop,
 2581 .bmdma_status = ata_bmdma_status,
 2582
 2583 .port_start = ata_bmdma_port_start,
 2584};
 2585EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2586
/* BMDMA port operations for controllers that additionally support
 * 32bit PIO data transfers. */
 2587const struct ata_port_operations ata_bmdma32_port_ops = {
 2588 .inherits = &ata_bmdma_port_ops,
 2589
 2590 .sff_data_xfer = ata_sff_data_xfer32,
 2591 .port_start = ata_bmdma_port_start32,
 2592};
 2593EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2594
2595/**
2596 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
2597 * @qc: Metadata associated with taskfile to be transferred
2598 *
2599 * Fill PCI IDE PRD (scatter-gather) table with segments
2600 * associated with the current disk command.
2601 *
2602 * LOCKING:
2603 * spin_lock_irqsave(host lock)
2604 *
2605 */
 2606static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
 2607{
 2608 struct ata_port *ap = qc->ap;
 2609 struct ata_bmdma_prd *prd = ap->bmdma_prd;
 2610 struct scatterlist *sg;
 2611 unsigned int si, pi;
 2612
 /* pi indexes PRD entries, si indexes source scatterlist entries;
  * one sg element may expand into several PRDs at 64K boundaries */
 2613 pi = 0;
 2614 for_each_sg(qc->sg, sg, qc->n_elem, si) {
 2615 u32 addr, offset;
 2616 u32 sg_len, len;
 2617
 2618 /* determine if physical DMA addr spans 64K boundary.
 2619 * Note h/w doesn't support 64-bit, so we unconditionally
 2620 * truncate dma_addr_t to u32.
 2621 */
 2622 addr = (u32) sg_dma_address(sg);
 2623 sg_len = sg_dma_len(sg);
 2624
 2625 while (sg_len) {
 2626 offset = addr & 0xffff;
 2627 len = sg_len;
 2628 if ((offset + sg_len) > 0x10000)
 2629 len = 0x10000 - offset;
 2630
 /* length field of 0 encodes a full 64K per the PRD format */
 2631 prd[pi].addr = cpu_to_le32(addr);
 2632 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
 2633 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 2634
 2635 pi++;
 2636 sg_len -= len;
 2637 addr += len;
 2638 }
 2639 }
 2640
 /* mark the final PRD entry as end-of-table */
 2641 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 2642}
2643
2644/**
2645 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2646 * @qc: Metadata associated with taskfile to be transferred
2647 *
2648 * Fill PCI IDE PRD (scatter-gather) table with segments
2649 * associated with the current disk command. Perform the fill
2650 * so that we avoid writing any length 64K records for
2651 * controllers that don't follow the spec.
2652 *
2653 * LOCKING:
2654 * spin_lock_irqsave(host lock)
2655 *
2656 */
 2657static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
 2658{
 2659 struct ata_port *ap = qc->ap;
 2660 struct ata_bmdma_prd *prd = ap->bmdma_prd;
 2661 struct scatterlist *sg;
 2662 unsigned int si, pi;
 2663
 2664 pi = 0;
 2665 for_each_sg(qc->sg, sg, qc->n_elem, si) {
 2666 u32 addr, offset;
 2667 u32 sg_len, len, blen;
 2668
 2669 /* determine if physical DMA addr spans 64K boundary.
 2670 * Note h/w doesn't support 64-bit, so we unconditionally
 2671 * truncate dma_addr_t to u32.
 2672 */
 2673 addr = (u32) sg_dma_address(sg);
 2674 sg_len = sg_dma_len(sg);
 2675
 2676 while (sg_len) {
 2677 offset = addr & 0xffff;
 2678 len = sg_len;
 2679 if ((offset + sg_len) > 0x10000)
 2680 len = 0x10000 - offset;
 2681
 2682 blen = len & 0xffff;
 2683 prd[pi].addr = cpu_to_le32(addr);
 2684 if (blen == 0) {
 2685 /* Some PATA chipsets like the CS5530 can't
 2686 cope with 0x0000 meaning 64K as the spec
 2687 says */
 /* split the 64K chunk into two 32K PRD entries instead */
 2688 prd[pi].flags_len = cpu_to_le32(0x8000);
 2689 blen = 0x8000;
 2690 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
 2691 }
 2692 prd[pi].flags_len = cpu_to_le32(blen);
 2693 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 2694
 2695 pi++;
 2696 sg_len -= len;
 2697 addr += len;
 2698 }
 2699 }
 2700
 /* mark the final PRD entry as end-of-table */
 2701 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 2702}
2703
2704/**
2705 * ata_bmdma_qc_prep - Prepare taskfile for submission
2706 * @qc: Metadata associated with taskfile to be prepared
2707 *
2708 * Prepare ATA taskfile for submission.
2709 *
2710 * LOCKING:
2711 * spin_lock_irqsave(host lock)
2712 */
 2713void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
 2714{
 /* PIO/non-mapped commands need no PRD table */
 2715 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 2716 return;
 2717
 2718 ata_bmdma_fill_sg(qc);
 2719}
 2720EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2721
2722/**
2723 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2724 * @qc: Metadata associated with taskfile to be prepared
2725 *
2726 * Prepare ATA taskfile for submission.
2727 *
2728 * LOCKING:
2729 * spin_lock_irqsave(host lock)
2730 */
 2731void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
 2732{
 /* PIO/non-mapped commands need no PRD table */
 2733 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 2734 return;
 2735
 /* variant that never emits a 64K-length PRD entry, for
  * controllers that misinterpret the 0x0000 == 64K encoding */
 2736 ata_bmdma_fill_sg_dumb(qc);
 2737}
 2738EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2739
2740/**
2741 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2742 * @qc: command to issue to device
2743 *
2744 * This function issues a PIO, NODATA or DMA command to a
2745 * SFF/BMDMA controller. PIO and NODATA are handled by
2746 * ata_sff_qc_issue().
2747 *
2748 * LOCKING:
2749 * spin_lock_irqsave(host lock)
2750 *
2751 * RETURNS:
2752 * Zero on success, AC_ERR_* mask on failure
2753 */
 2754unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 2755{
 2756 struct ata_port *ap = qc->ap;
 2757 struct ata_link *link = qc->dev->link;
 2758
 2759 /* defer PIO handling to sff_qc_issue */
 2760 if (!ata_is_dma(qc->tf.protocol))
 2761 return ata_sff_qc_issue(qc);
 2762
 2763 /* select the device */
 2764 ata_dev_select(ap, qc->dev->devno, 1, 0);
 2765
 2766 /* start the command */
 2767 switch (qc->tf.protocol) {
 2768 case ATA_PROT_DMA:
 /* DMA commands complete via interrupt; polling makes no sense here */
 2769 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 2770
 2771 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
 2772 ap->ops->bmdma_setup(qc); /* set up bmdma */
 2773 ap->ops->bmdma_start(qc); /* initiate bmdma */
 2774 ap->hsm_task_state = HSM_ST_LAST;
 2775 break;
 2776
 2777 case ATAPI_PROT_DMA:
 2778 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 2779
 /* ATAPI: DMA engine is armed but not started until after the CDB */
 2780 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
 2781 ap->ops->bmdma_setup(qc); /* set up bmdma */
 2782 ap->hsm_task_state = HSM_ST_FIRST;
 2783
 2784 /* send cdb by polling if no cdb interrupt */
 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 2786 ata_sff_queue_pio_task(link, 0);
 2787 break;
 2788
 2789 default:
 2790 WARN_ON(1);
 2791 return AC_ERR_SYSTEM;
 2792 }
 2793
 2794 return 0;
 2795}
 2796EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2797
2798/**
2799 * ata_bmdma_port_intr - Handle BMDMA port interrupt
2800 * @ap: Port on which interrupt arrived (possibly...)
2801 * @qc: Taskfile currently active in engine
2802 *
2803 * Handle port interrupt for given queued command.
2804 *
2805 * LOCKING:
2806 * spin_lock_irqsave(host lock)
2807 *
2808 * RETURNS:
2809 * One if interrupt was handled, zero if not (shared irq).
2810 */
 2811unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 2812{
 2813 struct ata_eh_info *ehi = &ap->link.eh_info;
 2814 u8 host_stat = 0;
 2815 bool bmdma_stopped = false;
 2816 unsigned int handled;
 2817
 2818 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
 2819 /* check status of DMA engine */
 2820 host_stat = ap->ops->bmdma_status(ap);
 2821 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
 2822
 2823 /* if it's not our irq... */
 2824 if (!(host_stat & ATA_DMA_INTR))
 2825 return ata_sff_idle_irq(ap);
 2826
 2827 /* before we do anything else, clear DMA-Start bit */
 2828 ap->ops->bmdma_stop(qc);
 2829 bmdma_stopped = true;
 2830
 2831 if (unlikely(host_stat & ATA_DMA_ERR)) {
 2832 /* error when transferring data to/from memory */
 2833 qc->err_mask |= AC_ERR_HOST_BUS;
 2834 ap->hsm_task_state = HSM_ST_ERR;
 2835 }
 2836 }
 2837
 /* bmdma_stopped lets the SFF layer know the engine was already halted */
 2838 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
 2839
 /* record BMDMA status for EH diagnostics when the command failed */
 2840 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
 2841 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
 2842
 2843 return handled;
 2844}
 2845EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2846
2847/**
2848 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2849 * @irq: irq line (unused)
2850 * @dev_instance: pointer to our ata_host information structure
2851 *
2852 * Default interrupt handler for PCI IDE devices. Calls
2853 * ata_bmdma_port_intr() for each port that is not disabled.
2854 *
2855 * LOCKING:
2856 * Obtains host lock during operation.
2857 *
2858 * RETURNS:
2859 * IRQ_NONE or IRQ_HANDLED.
2860 */
 2861irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
 2862{
 /* thin wrapper: shared SFF dispatch with the BMDMA per-port handler */
 2863 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
 2864}
 2865EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2866
2867/**
2868 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2869 * @ap: port to handle error for
2870 *
2871 * Stock error handler for BMDMA controller. It can handle both
2872 * PATA and SATA controllers. Most BMDMA controllers should be
2873 * able to use this EH as-is or with some added handling before
2874 * and after.
2875 *
2876 * LOCKING:
2877 * Kernel thread context (may sleep)
2878 */
 2879void ata_bmdma_error_handler(struct ata_port *ap)
 2880{
 2881 struct ata_queued_cmd *qc;
 2882 unsigned long flags;
 2883 bool thaw = false;
 2884
 /* only care about the active command if it actually failed */
 2885 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 2886 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 2887 qc = NULL;
 2888
 2889 /* reset PIO HSM and stop DMA engine */
 2890 spin_lock_irqsave(ap->lock, flags);
 2891
 2892 if (qc && ata_is_dma(qc->tf.protocol)) {
 2893 u8 host_stat;
 2894
 2895 host_stat = ap->ops->bmdma_status(ap);
 2896
 2897 /* BMDMA controllers indicate host bus error by
 2898 * setting DMA_ERR bit and timing out. As it wasn't
 2899 * really a timeout event, adjust error mask and
 2900 * cancel frozen state.
 2901 */
 2902 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
 2903 qc->err_mask = AC_ERR_HOST_BUS;
 2904 thaw = true;
 2905 }
 2906
 2907 ap->ops->bmdma_stop(qc);
 2908
 2909 /* if we're gonna thaw, make sure IRQ is clear */
 2910 if (thaw) {
 2911 ap->ops->sff_check_status(ap);
 2912 if (ap->ops->sff_irq_clear)
 2913 ap->ops->sff_irq_clear(ap);
 2914 }
 2915 }
 2916
 2917 spin_unlock_irqrestore(ap->lock, flags);
 2918
 /* thawing may sleep, so do it outside the port lock */
 2919 if (thaw)
 2920 ata_eh_thaw_port(ap);
 2921
 /* hand off to the generic SFF error handler */
 2922 ata_sff_error_handler(ap);
 2923}
 2924EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2925
2926/**
2927 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2928 * @qc: internal command to clean up
2929 *
2930 * LOCKING:
2931 * Kernel thread context (may sleep)
2932 */
 2933void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
 2934{
 2935 struct ata_port *ap = qc->ap;
 2936 unsigned long flags;
 2937
 /* only DMA protocol commands leave the engine running; stop it
  * under the port lock before the command is retired */
 2938 if (ata_is_dma(qc->tf.protocol)) {
 2939 spin_lock_irqsave(ap->lock, flags);
 2940 ap->ops->bmdma_stop(qc);
 2941 spin_unlock_irqrestore(ap->lock, flags);
 2942 }
 2943}
 2944EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2945
2946/**
2947 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2948 * @ap: Port associated with this ATA transaction.
2949 *
2950 * Clear interrupt and error flags in DMA status register.
2951 *
2952 * May be used as the irq_clear() entry in ata_port_operations.
2953 *
2954 * LOCKING:
2955 * spin_lock_irqsave(host lock)
2956 */
 2957void ata_bmdma_irq_clear(struct ata_port *ap)
 2958{
 2959 void __iomem *mmio = ap->ioaddr.bmdma_addr;
 2960
 /* ports without a BMDMA block have nothing to clear */
 2961 if (!mmio)
 2962 return;
 2963
 /* status bits are write-1-to-clear: writing back the value read
  * clears whichever interrupt/error bits were set */
 2964 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
 2965}
 2966EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2967
2968/**
2969 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2970 * @qc: Info associated with this ATA transaction.
2971 *
2972 * LOCKING:
2973 * spin_lock_irqsave(host lock)
2974 */
 2975void ata_bmdma_setup(struct ata_queued_cmd *qc)
 2976{
 2977 struct ata_port *ap = qc->ap;
 2978 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 2979 u8 dmactl;
 2980
 2981 /* load PRD table addr. */
 2982 mb(); /* make sure PRD table writes are visible to controller */
 2983 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
 2984
 2985 /* specify data direction, triple-check start bit is clear */
 2986 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 2987 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
 /* direction bit is from the bus master's viewpoint: a non-write
  * (device read) command means the controller writes to memory */
 2988 if (!rw)
 2989 dmactl |= ATA_DMA_WR;
 2990 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 2991
 2992 /* issue r/w command */
 2993 ap->ops->sff_exec_command(ap, &qc->tf);
 2994}
 2995EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2996
2997/**
2998 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2999 * @qc: Info associated with this ATA transaction.
3000 *
3001 * LOCKING:
3002 * spin_lock_irqsave(host lock)
3003 */
 3004void ata_bmdma_start(struct ata_queued_cmd *qc)
 3005{
 3006 struct ata_port *ap = qc->ap;
 3007 u8 dmactl;
 3008
 3009 /* start host DMA transaction */
 3010 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 3011 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 3012
 3013 /* Strictly, one may wish to issue an ioread8() here, to
 3014 * flush the mmio write. However, control also passes
 3015 * to the hardware at this point, and it will interrupt
 3016 * us when we are to resume control. So, in effect,
 3017 * we don't care when the mmio write flushes.
 3018 * Further, a read of the DMA status register _immediately_
 3019 * following the write may not be what certain flaky hardware
 3020 * expects, so I think it is best to not add a readb()
 3021 * without first auditing all the MMIO ATA cards/mobos.
 3022 * Or maybe I'm just being paranoid.
 3023 *
 3024 * FIXME: The posting of this write means I/O starts are
 3025 * unnecessarily delayed for MMIO
 3026 */
 3027}
 3028EXPORT_SYMBOL_GPL(ata_bmdma_start);
3029
3030/**
3031 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3032 * @qc: Command we are ending DMA for
3033 *
3034 * Clears the ATA_DMA_START flag in the dma control register
3035 *
3036 * May be used as the bmdma_stop() entry in ata_port_operations.
3037 *
3038 * LOCKING:
3039 * spin_lock_irqsave(host lock)
3040 */
 3041void ata_bmdma_stop(struct ata_queued_cmd *qc)
 3042{
 3043 struct ata_port *ap = qc->ap;
 3044 void __iomem *mmio = ap->ioaddr.bmdma_addr;
 /* NOTE(review): only reached after bmdma_setup/start, so mmio
  * is assumed non-NULL here — confirm against callers */
 3045
 3046 /* clear start/stop bit */
 3047 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
 3048 mmio + ATA_DMA_CMD);
 3049
 3050 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
 3051 ata_sff_dma_pause(ap);
 3052}
 3053EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3054
3055/**
3056 * ata_bmdma_status - Read PCI IDE BMDMA status
3057 * @ap: Port associated with this ATA transaction.
3058 *
3059 * Read and return BMDMA status register.
3060 *
3061 * May be used as the bmdma_status() entry in ata_port_operations.
3062 *
3063 * LOCKING:
3064 * spin_lock_irqsave(host lock)
3065 */
 3066u8 ata_bmdma_status(struct ata_port *ap)
 3067{
 /* plain read of the BMDMA status register; latched bits are
  * cleared separately by ata_bmdma_irq_clear() */
 3068 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 3069}
 3070EXPORT_SYMBOL_GPL(ata_bmdma_status);
3071
3072
3073/**
3074 * ata_bmdma_port_start - Set port up for bmdma.
3075 * @ap: Port to initialize
3076 *
3077 * Called just after data structures for each port are
3078 * initialized. Allocates space for PRD table.
3079 *
3080 * May be used as the port_start() entry in ata_port_operations.
3081 *
3082 * LOCKING:
3083 * Inherited from caller.
3084 */
 3085int ata_bmdma_port_start(struct ata_port *ap)
 3086{
 /* allocate a coherent PRD table only if any DMA mode is enabled;
  * dmam_* allocation is freed automatically on device teardown */
 3087 if (ap->mwdma_mask || ap->udma_mask) {
 3088 ap->bmdma_prd =
 3089 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
 3090 &ap->bmdma_prd_dma, GFP_KERNEL);
 3091 if (!ap->bmdma_prd)
 3092 return -ENOMEM;
 3093 }
 3094
 3095 return 0;
 3096}
 3097EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3098
3099/**
3100 * ata_bmdma_port_start32 - Set port up for dma.
3101 * @ap: Port to initialize
3102 *
3103 * Called just after data structures for each port are
3104 * initialized. Enables 32bit PIO and allocates space for PRD
3105 * table.
3106 *
3107 * May be used as the port_start() entry in ata_port_operations for
3108 * devices that are capable of 32bit PIO.
3109 *
3110 * LOCKING:
3111 * Inherited from caller.
3112 */
 3113int ata_bmdma_port_start32(struct ata_port *ap)
 3114{
 /* advertise 32bit PIO capability, then do the normal BMDMA setup */
 3115 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 3116 return ata_bmdma_port_start(ap);
 3117}
 3118EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3119
3120#ifdef CONFIG_PCI
3121
3122/**
3123 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3124 * @pdev: PCI device
3125 *
3126 * Some PCI ATA devices report simplex mode but in fact can be told to
3127 * enter non simplex mode. This implements the necessary logic to
3128 * perform the task on such devices. Calling it on other devices will
3129 * have -undefined- behaviour.
3130 */
 3131int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
 3132{
 3133 unsigned long bmdma = pci_resource_start(pdev, 4);
 3134 u8 simplex;
 3135
 /* no BAR4 means no BMDMA block to poke */
 3136 if (bmdma == 0)
 3137 return -ENOENT;
 3138
 /* offset 0x02 is the BMDMA status register; try to clear the
  * simplex bit (0x80) while preserving the drive-DMA-capable
  * bits (0x60), then re-read to see if it stuck */
 3139 simplex = inb(bmdma + 0x02);
 3140 outb(simplex & 0x60, bmdma + 0x02);
 3141 simplex = inb(bmdma + 0x02);
 3142 if (simplex & 0x80)
 3143 return -EOPNOTSUPP;
 3144 return 0;
 3145}
 3146EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3147
/* ata_bmdma_nodma - disable DMA on both ports of @host, logging @reason.
 * Used when BMDMA resources cannot be acquired so the host falls back
 * to PIO-only operation instead of failing the probe.
 */
 3148static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
 3149{
 3150 int i;
 3151
 3152 dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
 3153 reason);
 3154
 /* clearing the DMA mode masks prevents any DMA mode negotiation */
 3155 for (i = 0; i < 2; i++) {
 3156 host->ports[i]->mwdma_mask = 0;
 3157 host->ports[i]->udma_mask = 0;
 3158 }
 3159}
3160
/**
 *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *	On any failure the host is degraded to PIO-only via
 *	ata_bmdma_nodma() rather than failing outright.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	/*
	 * Some controllers require BMDMA region to be initialized
	 * even if DMA is not in use to clear IRQ status via
	 * ->sff_irq_clear method. Try to initialize bmdma_addr
	 * regardless of dma masks.
	 */
	/* Mask failures only disable DMA; BAR4 mapping still proceeds. */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			ata_bmdma_nodma(host,
					"failed to set consistent dma mask");
	}

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		/* Each port's BMDMA register block is 8 bytes into BAR4. */
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		/* Bit 0x80 in BMDMA status flags simplex operation (see
		 * ata_pci_bmdma_clear_simplex above); honour it unless the
		 * port is marked to ignore simplex.
		 */
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3223
/**
 *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const * ppi,
			       struct ata_host **r_host)
{
	int rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);

	if (rc)
		return rc;

	/* Layer BMDMA resources on top of the prepared SFF host. */
	ata_pci_bmdma_init(*r_host);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3253
3254/**
3255 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3256 * @pdev: Controller to be initialized
3257 * @ppi: array of port_info, must be enough for two ports
3258 * @sht: scsi_host_template to use when registering the host
3259 * @host_priv: host private_data
3260 * @hflags: host flags
3261 *
3262 * This function is similar to ata_pci_sff_init_one() but also
3263 * takes care of BMDMA initialization.
3264 *
3265 * LOCKING:
3266 * Inherited from PCI layer (may sleep).
3267 *
3268 * RETURNS:
3269 * Zero on success, negative on errno-based value on error.
3270 */
3271int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3272 const struct ata_port_info * const * ppi,
3273 struct scsi_host_template *sht, void *host_priv,
3274 int hflags)
3275{
3276 struct device *dev = &pdev->dev;
3277 const struct ata_port_info *pi;
3278 struct ata_host *host = NULL;
3279 int rc;
3280
3281 DPRINTK("ENTER\n");
3282
3283 pi = ata_sff_find_valid_pi(ppi);
3284 if (!pi) {
3285 dev_printk(KERN_ERR, &pdev->dev,
3286 "no valid port_info specified\n");
3287 return -EINVAL;
3288 }
3289
3290 if (!devres_open_group(dev, NULL, GFP_KERNEL))
3291 return -ENOMEM;
3292
3293 rc = pcim_enable_device(pdev);
3294 if (rc)
3295 goto out;
3296
3297 /* prepare and activate BMDMA host */
3298 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
3299 if (rc)
3300 goto out;
3301 host->private_data = host_priv;
3302 host->flags |= hflags;
3303
3304 pci_set_master(pdev);
3305 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
3306 out:
3307 if (rc == 0)
3308 devres_remove_group(&pdev->dev, NULL);
3309 else
3310 devres_release_group(&pdev->dev, NULL);
3311
3312 return rc;
3313}
3314EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3315
3316#endif /* CONFIG_PCI */
3317#endif /* CONFIG_ATA_BMDMA */
3318
3319/**
3320 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
3321 * @ap: Port to initialize
3322 *
3323 * Called on port allocation to initialize SFF/BMDMA specific
3324 * fields.
3325 *
3326 * LOCKING:
3327 * None.
3328 */
3329void ata_sff_port_init(struct ata_port *ap)
3330{
3331 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3332 ap->ctl = ATA_DEVCTL_OBS;
3333 ap->last_ctl = 0xFF;
3334}
3335
3336int __init ata_sff_init(void)
3337{
3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
3339 if (!ata_sff_wq)
3340 return -ENOMEM;
3341
3342 return 0;
3343}
3344
/* Module exit helper: tear down the SFF PIO task workqueue created by
 * ata_sff_init().
 */
void __exit ata_sff_exit(void)
{
	destroy_workqueue(ata_sff_wq);
}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 823e63096362..9ce1ecc63e39 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,17 +38,6 @@ struct ata_scsi_args {
38 void (*done)(struct scsi_cmnd *); 38 void (*done)(struct scsi_cmnd *);
39}; 39};
40 40
41static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
42{
43 if (reset == sata_std_hardreset)
44 return 1;
45#ifdef CONFIG_ATA_SFF
46 if (reset == sata_sff_hardreset)
47 return 1;
48#endif
49 return 0;
50}
51
52/* libata-core.c */ 41/* libata-core.c */
53enum { 42enum {
54 /* flags for ata_dev_read_id() */ 43 /* flags for ata_dev_read_id() */
@@ -65,7 +54,6 @@ enum {
65}; 54};
66 55
67extern unsigned int ata_print_id; 56extern unsigned int ata_print_id;
68extern struct workqueue_struct *ata_aux_wq;
69extern int atapi_passthru16; 57extern int atapi_passthru16;
70extern int libata_fua; 58extern int libata_fua;
71extern int libata_noacpi; 59extern int libata_noacpi;
@@ -79,7 +67,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
79 u64 block, u32 n_block, unsigned int tf_flags, 67 u64 block, u32 n_block, unsigned int tf_flags,
80 unsigned int tag); 68 unsigned int tag);
81extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); 69extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
82extern void ata_port_flush_task(struct ata_port *ap);
83extern unsigned ata_exec_internal(struct ata_device *dev, 70extern unsigned ata_exec_internal(struct ata_device *dev,
84 struct ata_taskfile *tf, const u8 *cdb, 71 struct ata_taskfile *tf, const u8 *cdb,
85 int dma_dir, void *buf, unsigned int buflen, 72 int dma_dir, void *buf, unsigned int buflen,
@@ -202,10 +189,19 @@ static inline int sata_pmp_attach(struct ata_device *dev)
202 189
203/* libata-sff.c */ 190/* libata-sff.c */
204#ifdef CONFIG_ATA_SFF 191#ifdef CONFIG_ATA_SFF
205extern void ata_dev_select(struct ata_port *ap, unsigned int device, 192extern void ata_sff_flush_pio_task(struct ata_port *ap);
206 unsigned int wait, unsigned int can_sleep); 193extern void ata_sff_port_init(struct ata_port *ap);
207extern u8 ata_irq_on(struct ata_port *ap); 194extern int ata_sff_init(void);
208extern void ata_pio_task(struct work_struct *work); 195extern void ata_sff_exit(void);
196#else /* CONFIG_ATA_SFF */
197static inline void ata_sff_flush_pio_task(struct ata_port *ap)
198{ }
199static inline void ata_sff_port_init(struct ata_port *ap)
200{ }
201static inline int ata_sff_init(void)
202{ return 0; }
203static inline void ata_sff_exit(void)
204{ }
209#endif /* CONFIG_ATA_SFF */ 205#endif /* CONFIG_ATA_SFF */
210 206
211#endif /* __LIBATA_H__ */ 207#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 1ea2be0f4b94..c8d47034d5e9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
101static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) 101static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
102{ 102{
103 struct pata_acpi *acpi = adev->link->ap->private_data; 103 struct pata_acpi *acpi = adev->link->ap->private_data;
104 return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]); 104 return mask & acpi->mask[adev->devno];
105} 105}
106 106
107/** 107/**
@@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
172 struct pata_acpi *acpi = ap->private_data; 172 struct pata_acpi *acpi = ap->private_data;
173 173
174 if (acpi->gtm.flags & 0x10) 174 if (acpi->gtm.flags & 0x10)
175 return ata_sff_qc_issue(qc); 175 return ata_bmdma_qc_issue(qc);
176 176
177 if (adev != acpi->last) { 177 if (adev != acpi->last) {
178 pacpi_set_piomode(ap, adev); 178 pacpi_set_piomode(ap, adev);
@@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
180 pacpi_set_dmamode(ap, adev); 180 pacpi_set_dmamode(ap, adev);
181 acpi->last = adev; 181 acpi->last = adev;
182 } 182 }
183 return ata_sff_qc_issue(qc); 183 return ata_bmdma_qc_issue(qc);
184} 184}
185 185
186/** 186/**
@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
205 return -ENOMEM; 205 return -ENOMEM;
206 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]); 206 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
207 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]); 207 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
208 ret = ata_sff_port_start(ap); 208 ret = ata_bmdma_port_start(ap);
209 if (ret < 0) 209 if (ret < 0)
210 return ret; 210 return ret;
211 211
@@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
260 return rc; 260 return rc;
261 pcim_pin_device(pdev); 261 pcim_pin_device(pdev);
262 } 262 }
263 return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); 263 return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
264} 264}
265 265
266static const struct pci_device_id pacpi_pci_tbl[] = { 266static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index dc61b72f751c..794ec6e3275d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
124 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 124 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
125 if (strstr(model_num, "WDC")) 125 if (strstr(model_num, "WDC"))
126 return mask &= ~ATA_MASK_UDMA; 126 return mask &= ~ATA_MASK_UDMA;
127 return ata_bmdma_mode_filter(adev, mask); 127 return mask;
128} 128}
129 129
130/** 130/**
@@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
583 ppi[0] = &info_20_udma; 583 ppi[0] = &info_20_udma;
584 } 584 }
585 585
586 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); 586 if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
587 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
588 else
589 return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
587} 590}
588 591
589#ifdef CONFIG_PM 592#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d95eca9c547e..620a07cabe31 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
574 } 574 }
575 575
576 /* And fire it up */ 576 /* And fire it up */
577 return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); 577 return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
578} 578}
579 579
580#ifdef CONFIG_PM 580#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4d066d6c30fa..2215632e4b31 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 76 /* Odd numbered device ids are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
@@ -421,7 +422,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
421 422
422 BUG_ON(ppi[0] == NULL); 423 BUG_ON(ppi[0] == NULL);
423 424
424 return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); 425 return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
425} 426}
426 427
427static const struct pci_device_id artop_pci_tbl[] = { 428static const struct pci_device_id artop_pci_tbl[] = {
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index c6a946aa252c..0da0dcc7dd08 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
202 .sff_data_xfer = pata_at91_data_xfer_noirq, 202 .sff_data_xfer = pata_at91_data_xfer_noirq,
203 .set_piomode = pata_at91_set_piomode, 203 .set_piomode = pata_at91_set_piomode,
204 .cable_detect = ata_cable_40wire, 204 .cable_detect = ata_cable_40wire,
205 .port_start = ATA_OP_NULL,
206}; 205};
207 206
208static int __devinit pata_at91_probe(struct platform_device *pdev) 207static int __devinit pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index cbaf2eddac6b..43755616dc5a 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
217static struct ata_port_operations atiixp_port_ops = { 217static struct ata_port_operations atiixp_port_ops = {
218 .inherits = &ata_bmdma_port_ops, 218 .inherits = &ata_bmdma_port_ops,
219 219
220 .qc_prep = ata_sff_dumb_qc_prep, 220 .qc_prep = ata_bmdma_dumb_qc_prep,
221 .bmdma_start = atiixp_bmdma_start, 221 .bmdma_start = atiixp_bmdma_start,
222 .bmdma_stop = atiixp_bmdma_stop, 222 .bmdma_stop = atiixp_bmdma_stop,
223 223
@@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
246 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) 246 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
247 ppi[i] = &ata_dummy_port_info; 247 ppi[i] = &ata_dummy_port_info;
248 248
249 return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, 249 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
250 ATA_HOST_PARALLEL_SCAN); 250 ATA_HOST_PARALLEL_SCAN);
251} 251}
252 252
253static const struct pci_device_id atiixp[] = { 253static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index bb6e0746e07d..95295935dd95 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
525 525
526 pci_set_master(pdev); 526 pci_set_master(pdev);
527 527
528 rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, 528 rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
529 IRQF_SHARED, &atp867x_sht); 529 IRQF_SHARED, &atp867x_sht);
530 if (rc) 530 if (rc)
531 dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); 531 dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 02c81f12c702..9cae65de750e 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -821,6 +821,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
821} 821}
822 822
823/** 823/**
824 * bfin_set_devctl - Write device control reg
825 * @ap: port where the device is
826 * @ctl: value to write
827 */
828
829static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl)
830{
831 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
832 write_atapi_register(base, ATA_REG_CTRL, ctl);
833}
834
835/**
824 * bfin_bmdma_setup - Set up IDE DMA transaction 836 * bfin_bmdma_setup - Set up IDE DMA transaction
825 * @qc: Info associated with this ATA transaction. 837 * @qc: Info associated with this ATA transaction.
826 * 838 *
@@ -1202,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
1202 * bfin_irq_clear - Clear ATAPI interrupt. 1214 * bfin_irq_clear - Clear ATAPI interrupt.
1203 * @ap: Port associated with this ATA transaction. 1215 * @ap: Port associated with this ATA transaction.
1204 * 1216 *
1205 * Note: Original code is ata_sff_irq_clear(). 1217 * Note: Original code is ata_bmdma_irq_clear().
1206 */ 1218 */
1207 1219
1208static void bfin_irq_clear(struct ata_port *ap) 1220static void bfin_irq_clear(struct ata_port *ap)
@@ -1216,56 +1228,6 @@ static void bfin_irq_clear(struct ata_port *ap)
1216} 1228}
1217 1229
1218/** 1230/**
1219 * bfin_irq_on - Enable interrupts on a port.
1220 * @ap: Port on which interrupts are enabled.
1221 *
1222 * Note: Original code is ata_sff_irq_on().
1223 */
1224
1225static unsigned char bfin_irq_on(struct ata_port *ap)
1226{
1227 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1228 u8 tmp;
1229
1230 dev_dbg(ap->dev, "in atapi irq on\n");
1231 ap->ctl &= ~ATA_NIEN;
1232 ap->last_ctl = ap->ctl;
1233
1234 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1235 tmp = ata_wait_idle(ap);
1236
1237 bfin_irq_clear(ap);
1238
1239 return tmp;
1240}
1241
1242/**
1243 * bfin_freeze - Freeze DMA controller port
1244 * @ap: port to freeze
1245 *
1246 * Note: Original code is ata_sff_freeze().
1247 */
1248
1249static void bfin_freeze(struct ata_port *ap)
1250{
1251 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1252
1253 dev_dbg(ap->dev, "in atapi dma freeze\n");
1254 ap->ctl |= ATA_NIEN;
1255 ap->last_ctl = ap->ctl;
1256
1257 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1258
1259 /* Under certain circumstances, some controllers raise IRQ on
1260 * ATA_NIEN manipulation. Also, many controllers fail to mask
1261 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1262 */
1263 ap->ops->sff_check_status(ap);
1264
1265 bfin_irq_clear(ap);
1266}
1267
1268/**
1269 * bfin_thaw - Thaw DMA controller port 1231 * bfin_thaw - Thaw DMA controller port
1270 * @ap: port to thaw 1232 * @ap: port to thaw
1271 * 1233 *
@@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap)
1276{ 1238{
1277 dev_dbg(ap->dev, "in atapi dma thaw\n"); 1239 dev_dbg(ap->dev, "in atapi dma thaw\n");
1278 bfin_check_status(ap); 1240 bfin_check_status(ap);
1279 bfin_irq_on(ap); 1241 ata_sff_irq_on(ap);
1280} 1242}
1281 1243
1282/** 1244/**
@@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes)
1293 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1255 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1294 1256
1295 /* re-enable interrupts */ 1257 /* re-enable interrupts */
1296 bfin_irq_on(ap); 1258 ata_sff_irq_on(ap);
1297 1259
1298 /* is double-select really necessary? */ 1260 /* is double-select really necessary? */
1299 if (classes[0] != ATA_DEV_NONE) 1261 if (classes[0] != ATA_DEV_NONE)
@@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1438 spin_lock_irqsave(&host->lock, flags); 1400 spin_lock_irqsave(&host->lock, flags);
1439 1401
1440 for (i = 0; i < host->n_ports; i++) { 1402 for (i = 0; i < host->n_ports; i++) {
1441 struct ata_port *ap; 1403 struct ata_port *ap = host->ports[i];
1404 struct ata_queued_cmd *qc;
1442 1405
1443 ap = host->ports[i]; 1406 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1444 if (ap && 1407 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1445 !(ap->flags & ATA_FLAG_DISABLED)) { 1408 handled |= bfin_ata_host_intr(ap, qc);
1446 struct ata_queued_cmd *qc;
1447
1448 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1449 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1450 (qc->flags & ATA_QCFLAG_ACTIVE))
1451 handled |= bfin_ata_host_intr(ap, qc);
1452 }
1453 } 1409 }
1454 1410
1455 spin_unlock_irqrestore(&host->lock, flags); 1411 spin_unlock_irqrestore(&host->lock, flags);
@@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = {
1465}; 1421};
1466 1422
1467static struct ata_port_operations bfin_pata_ops = { 1423static struct ata_port_operations bfin_pata_ops = {
1468 .inherits = &ata_sff_port_ops, 1424 .inherits = &ata_bmdma_port_ops,
1469 1425
1470 .set_piomode = bfin_set_piomode, 1426 .set_piomode = bfin_set_piomode,
1471 .set_dmamode = bfin_set_dmamode, 1427 .set_dmamode = bfin_set_dmamode,
@@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = {
1476 .sff_check_status = bfin_check_status, 1432 .sff_check_status = bfin_check_status,
1477 .sff_check_altstatus = bfin_check_altstatus, 1433 .sff_check_altstatus = bfin_check_altstatus,
1478 .sff_dev_select = bfin_dev_select, 1434 .sff_dev_select = bfin_dev_select,
1435 .sff_set_devctl = bfin_set_devctl,
1479 1436
1480 .bmdma_setup = bfin_bmdma_setup, 1437 .bmdma_setup = bfin_bmdma_setup,
1481 .bmdma_start = bfin_bmdma_start, 1438 .bmdma_start = bfin_bmdma_start,
@@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = {
1485 1442
1486 .qc_prep = ata_noop_qc_prep, 1443 .qc_prep = ata_noop_qc_prep,
1487 1444
1488 .freeze = bfin_freeze,
1489 .thaw = bfin_thaw, 1445 .thaw = bfin_thaw,
1490 .softreset = bfin_softreset, 1446 .softreset = bfin_softreset,
1491 .postreset = bfin_postreset, 1447 .postreset = bfin_postreset,
1492 1448
1493 .sff_irq_clear = bfin_irq_clear, 1449 .sff_irq_clear = bfin_irq_clear,
1494 .sff_irq_on = bfin_irq_on,
1495 1450
1496 .port_start = bfin_port_start, 1451 .port_start = bfin_port_start,
1497 .port_stop = bfin_port_stop, 1452 .port_stop = bfin_port_stop,
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 45896b3c6538..e5f289f59ca3 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap)
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 153 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 struct cmd640_reg *timing; 154 struct cmd640_reg *timing;
155 155
156 int ret = ata_sff_port_start(ap);
157 if (ret < 0)
158 return ret;
159
160 timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL); 156 timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
161 if (timing == NULL) 157 if (timing == NULL)
162 return -ENOMEM; 158 return -ENOMEM;
163 timing->last = -1; /* Force a load */ 159 timing->last = -1; /* Force a load */
164 ap->private_data = timing; 160 ap->private_data = timing;
165 return ret; 161 return 0;
166} 162}
167 163
168static struct scsi_host_template cmd640_sht = { 164static struct scsi_host_template cmd640_sht = {
169 ATA_BMDMA_SHT(DRV_NAME), 165 ATA_PIO_SHT(DRV_NAME),
170}; 166};
171 167
172static struct ata_port_operations cmd640_port_ops = { 168static struct ata_port_operations cmd640_port_ops = {
173 .inherits = &ata_bmdma_port_ops, 169 .inherits = &ata_sff_port_ops,
174 /* In theory xfer_noirq is not needed once we kill the prefetcher */ 170 /* In theory xfer_noirq is not needed once we kill the prefetcher */
175 .sff_data_xfer = ata_sff_data_xfer_noirq, 171 .sff_data_xfer = ata_sff_data_xfer_noirq,
176 .qc_issue = cmd640_qc_issue, 172 .qc_issue = cmd640_qc_issue,
@@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = {
181 177
182static void cmd640_hardware_init(struct pci_dev *pdev) 178static void cmd640_hardware_init(struct pci_dev *pdev)
183{ 179{
184 u8 r;
185 u8 ctrl; 180 u8 ctrl;
186 181
187 /* CMD640 detected, commiserations */ 182 /* CMD640 detected, commiserations */
188 pci_write_config_byte(pdev, 0x5B, 0x00); 183 pci_write_config_byte(pdev, 0x5B, 0x00);
189 /* Get version info */
190 pci_read_config_byte(pdev, CFR, &r);
191 /* PIO0 command cycles */ 184 /* PIO0 command cycles */
192 pci_write_config_byte(pdev, CMDTIM, 0); 185 pci_write_config_byte(pdev, CMDTIM, 0);
193 /* 512 byte bursts (sector) */ 186 /* 512 byte bursts (sector) */
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 4c81a71b8877..905ff76d3cbb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
121 121
122 if (pair) { 122 if (pair) {
123 struct ata_timing tp; 123 struct ata_timing tp;
124
125 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); 124 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
126 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); 125 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
127 if (pair->dma_mode) {
128 ata_timing_compute(pair, pair->dma_mode,
129 &tp, T, 0);
130 ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
131 }
132 } 126 }
133 } 127 }
134 128
@@ -367,7 +361,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
367 pci_write_config_byte(pdev, UDIDETCR0, 0xF0); 361 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
368#endif 362#endif
369 363
370 return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); 364 return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
371} 365}
372 366
373#ifdef CONFIG_PM 367#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 95ebdac517f2..030952f1f97c 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
110 110
111static struct ata_port_operations cs5520_port_ops = { 111static struct ata_port_operations cs5520_port_ops = {
112 .inherits = &ata_bmdma_port_ops, 112 .inherits = &ata_bmdma_port_ops,
113 .qc_prep = ata_sff_dumb_qc_prep, 113 .qc_prep = ata_bmdma_dumb_qc_prep,
114 .cable_detect = ata_cable_40wire, 114 .cable_detect = ata_cable_40wire,
115 .set_piomode = cs5520_set_piomode, 115 .set_piomode = cs5520_set_piomode,
116}; 116};
@@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
221 continue; 221 continue;
222 222
223 rc = devm_request_irq(&pdev->dev, irq[ap->port_no], 223 rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
224 ata_sff_interrupt, 0, DRV_NAME, host); 224 ata_bmdma_interrupt, 0, DRV_NAME, host);
225 if (rc) 225 if (rc)
226 return rc; 226 return rc;
227 227
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 738ad2e14a97..f792330f0d8e 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
156 cs5530_set_dmamode(ap, adev); 156 cs5530_set_dmamode(ap, adev);
157 } 157 }
158 158
159 return ata_sff_qc_issue(qc); 159 return ata_bmdma_qc_issue(qc);
160} 160}
161 161
162static struct scsi_host_template cs5530_sht = { 162static struct scsi_host_template cs5530_sht = {
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
167static struct ata_port_operations cs5530_port_ops = { 167static struct ata_port_operations cs5530_port_ops = {
168 .inherits = &ata_bmdma_port_ops, 168 .inherits = &ata_bmdma_port_ops,
169 169
170 .qc_prep = ata_sff_dumb_qc_prep, 170 .qc_prep = ata_bmdma_dumb_qc_prep,
171 .qc_issue = cs5530_qc_issue, 171 .qc_issue = cs5530_qc_issue,
172 172
173 .cable_detect = ata_cable_40wire, 173 .cable_detect = ata_cable_40wire,
@@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
324 ppi[1] = &info_palmax_secondary; 324 ppi[1] = &info_palmax_secondary;
325 325
326 /* Now kick off ATA set up */ 326 /* Now kick off ATA set up */
327 return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); 327 return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
328} 328}
329 329
330#ifdef CONFIG_PM 330#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a02e6459fdcc..03a93186aa19 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
198 rdmsr(ATAC_CH0D1_PIO, timings, dummy); 198 rdmsr(ATAC_CH0D1_PIO, timings, dummy);
199 if (CS5535_BAD_PIO(timings)) 199 if (CS5535_BAD_PIO(timings))
200 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); 200 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
201 return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); 201 return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
202} 202}
203 203
204static const struct pci_device_id cs5535[] = { 204static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 914ae3506ff5..21ee23f89e88 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
260 return -ENODEV; 260 return -ENODEV;
261 } 261 }
262 262
263 return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); 263 return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
264} 264}
265 265
266static const struct pci_device_id cs5536[] = { 266static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 0fcc096b8dac..6d915b063d93 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
138 if (PCI_FUNC(pdev->devfn) != 1) 138 if (PCI_FUNC(pdev->devfn) != 1)
139 return -ENODEV; 139 return -ENODEV;
140 140
141 return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); 141 return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
142} 142}
143 143
144static const struct pci_device_id cy82c693[] = { 144static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3bac0e079691..a08834758ea2 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
277 dev_printk(KERN_DEBUG, &pdev->dev, 277 dev_printk(KERN_DEBUG, &pdev->dev,
278 "version " DRV_VERSION "\n"); 278 "version " DRV_VERSION "\n");
279 279
280 return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL, 280 return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
281 ATA_HOST_PARALLEL_SCAN); 281 ATA_HOST_PARALLEL_SCAN);
282} 282}
283 283
284static const struct pci_device_id efar_pci_tbl[] = { 284static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index af49bfb57247..7688868557b9 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
182 } else if (adev->class == ATA_DEV_ATAPI) 182 } else if (adev->class == ATA_DEV_ATAPI)
183 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 183 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
184 184
185 return ata_bmdma_mode_filter(adev, mask); 185 return mask;
186} 186}
187 187
188static int hpt36x_cable_detect(struct ata_port *ap) 188static int hpt36x_cable_detect(struct ata_port *ap)
@@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
361 break; 361 break;
362 } 362 }
363 /* Now kick off ATA set up */ 363 /* Now kick off ATA set up */
364 return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); 364 return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
365} 365}
366 366
367#ifdef CONFIG_PM 367#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 8839307a64cf..9ae4c0830577 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
282 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 282 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
283 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 283 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
284 } 284 }
285 return ata_bmdma_mode_filter(adev, mask); 285 return mask;
286} 286}
287 287
288/** 288/**
@@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
298 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 298 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
299 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 299 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
300 } 300 }
301 return ata_bmdma_mode_filter(adev, mask); 301 return mask;
302} 302}
303 303
304/** 304/**
@@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
987 } 987 }
988 988
989 /* Now kick off ATA set up */ 989 /* Now kick off ATA set up */
990 return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); 990 return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
991} 991}
992 992
993static const struct pci_device_id hpt37x[] = { 993static const struct pci_device_id hpt37x[] = {
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 01457b266f3d..32f3463216b8 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
320 320
321 hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); 321 hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
322 } 322 }
323 return ata_sff_qc_issue(qc); 323 return ata_bmdma_qc_issue(qc);
324} 324}
325 325
326static struct scsi_host_template hpt3x2n_sht = { 326static struct scsi_host_template hpt3x2n_sht = {
@@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
548 outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); 548 outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
549 549
550 /* Now kick off ATA set up */ 550 /* Now kick off ATA set up */
551 return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); 551 return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
552} 552}
553 553
554static const struct pci_device_id hpt3x2n[] = { 554static const struct pci_device_id hpt3x2n[] = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 727a81ce4c9f..b63d5e2d4628 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
248 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); 248 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
249 } 249 }
250 pci_set_master(pdev); 250 pci_set_master(pdev);
251 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 251 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
252 IRQF_SHARED, &hpt3x3_sht); 252 IRQF_SHARED, &hpt3x3_sht);
253} 253}
254 254
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index fa812e206eeb..9f2889fe43b2 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
321} 321}
322 322
323static struct ata_port_operations pata_icside_port_ops = { 323static struct ata_port_operations pata_icside_port_ops = {
324 .inherits = &ata_sff_port_ops, 324 .inherits = &ata_bmdma_port_ops,
325 /* no need to build any PRD tables for DMA */ 325 /* no need to build any PRD tables for DMA */
326 .qc_prep = ata_noop_qc_prep, 326 .qc_prep = ata_noop_qc_prep,
327 .sff_data_xfer = ata_sff_data_xfer_noirq, 327 .sff_data_xfer = ata_sff_data_xfer_noirq,
@@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = {
333 .cable_detect = ata_cable_40wire, 333 .cable_detect = ata_cable_40wire,
334 .set_dmamode = pata_icside_set_dmamode, 334 .set_dmamode = pata_icside_set_dmamode,
335 .postreset = pata_icside_postreset, 335 .postreset = pata_icside_postreset,
336 .post_internal_cmd = pata_icside_bmdma_stop, 336
337 .port_start = ATA_OP_NULL, /* don't need PRD table */
337}; 338};
338 339
339static void __devinit 340static void __devinit
@@ -469,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
469 pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); 470 pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
470 } 471 }
471 472
472 return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, 473 return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
473 &pata_icside_sht); 474 &pata_icside_sht);
474} 475}
475 476
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index f971f0de88e6..4d142a2ab8fd 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
273 dev_printk(KERN_DEBUG, &pdev->dev, 273 dev_printk(KERN_DEBUG, &pdev->dev,
274 "version " DRV_VERSION "\n"); 274 "version " DRV_VERSION "\n");
275 275
276 return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); 276 return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
277} 277}
278 278
279static const struct pci_device_id it8213_pci_tbl[] = { 279static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 5cb286fd839e..bf88f71a21f4 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
430 case 0xFC: /* Internal 'report rebuild state' */ 430 case 0xFC: /* Internal 'report rebuild state' */
431 /* Arguably should just no-op this one */ 431 /* Arguably should just no-op this one */
432 case ATA_CMD_SET_FEATURES: 432 case ATA_CMD_SET_FEATURES:
433 return ata_sff_qc_issue(qc); 433 return ata_bmdma_qc_issue(qc);
434 } 434 }
435 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); 435 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
436 return AC_ERR_DEV; 436 return AC_ERR_DEV;
@@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
448static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) 448static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
449{ 449{
450 it821x_passthru_dev_select(qc->ap, qc->dev->devno); 450 it821x_passthru_dev_select(qc->ap, qc->dev->devno);
451 return ata_sff_qc_issue(qc); 451 return ata_bmdma_qc_issue(qc);
452} 452}
453 453
454/** 454/**
@@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
739 struct it821x_dev *itdev; 739 struct it821x_dev *itdev;
740 u8 conf; 740 u8 conf;
741 741
742 int ret = ata_sff_port_start(ap); 742 int ret = ata_bmdma_port_start(ap);
743 if (ret < 0) 743 if (ret < 0)
744 return ret; 744 return ret;
745 745
@@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
933 else 933 else
934 ppi[0] = &info_smart; 934 ppi[0] = &info_smart;
935 } 935 }
936 return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); 936 return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
937} 937}
938 938
939#ifdef CONFIG_PM 939#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 565e01e6ac7c..cb3babbb7035 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
144 }; 144 };
145 const struct ata_port_info *ppi[] = { &info, NULL }; 145 const struct ata_port_info *ppi[] = { &info, NULL };
146 146
147 return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); 147 return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
148} 148}
149 149
150static const struct pci_device_id jmicron_pci_tbl[] = { 150static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1eaa..eaf194138f21 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/ 44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A 45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
46 * 46 *
47 * Support for the Winbond 83759A when operating in advanced mode.
48 * Multichip mode is not currently supported.
49 *
47 * Use the autospeed and pio_mask options with: 50 * Use the autospeed and pio_mask options with:
48 * Appian ADI/2 aka CLPD7220 or AIC25VL01. 51 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
49 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with 52 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
135static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */ 138static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
136static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */ 139static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
137static int qdi; /* Set to probe QDI controllers */ 140static int qdi; /* Set to probe QDI controllers */
138static int winbond; /* Set to probe Winbond controllers,
139 give I/O port if non standard */
140static int autospeed; /* Chip present which snoops speed changes */ 141static int autospeed; /* Chip present which snoops speed changes */
141static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
142static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
143 144
145#ifdef PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */
148#else
149static int winbond; /* Set to probe Winbond controllers,
150 give I/O port if non standard */
151#endif
152
144/** 153/**
145 * legacy_probe_add - Add interface to probe list 154 * legacy_probe_add - Add interface to probe list
146 * @port: Controller port 155 * @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
1297MODULE_DESCRIPTION("low-level driver for legacy ATA"); 1306MODULE_DESCRIPTION("low-level driver for legacy ATA");
1298MODULE_LICENSE("GPL"); 1307MODULE_LICENSE("GPL");
1299MODULE_VERSION(DRV_VERSION); 1308MODULE_VERSION(DRV_VERSION);
1309MODULE_ALIAS("pata_winbond");
1300 1310
1301module_param(probe_all, int, 0); 1311module_param(probe_all, int, 0);
1302module_param(autospeed, int, 0); 1312module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
1305module_param(opti82c611a, int, 0); 1315module_param(opti82c611a, int, 0);
1306module_param(opti82c46x, int, 0); 1316module_param(opti82c46x, int, 0);
1307module_param(qdi, int, 0); 1317module_param(qdi, int, 0);
1318module_param(winbond, int, 0);
1308module_param(pio_mask, int, 0); 1319module_param(pio_mask, int, 0);
1309module_param(iordy_mask, int, 0); 1320module_param(iordy_mask, int, 0);
1310 1321
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 211b6438b3a0..75b49d01780b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
720 if (priv->dma_table_cpu == NULL) { 720 if (priv->dma_table_cpu == NULL) {
721 dev_err(priv->dev, "Unable to allocate DMA command list\n"); 721 dev_err(priv->dev, "Unable to allocate DMA command list\n");
722 ap->ioaddr.bmdma_addr = NULL; 722 ap->ioaddr.bmdma_addr = NULL;
723 ap->mwdma_mask = 0;
724 ap->udma_mask = 0;
723 } 725 }
724 return 0; 726 return 0;
725} 727}
@@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = {
917}; 919};
918 920
919static struct ata_port_operations pata_macio_ops = { 921static struct ata_port_operations pata_macio_ops = {
920 .inherits = &ata_sff_port_ops, 922 .inherits = &ata_bmdma_port_ops,
921 923
922 .freeze = pata_macio_freeze, 924 .freeze = pata_macio_freeze,
923 .set_piomode = pata_macio_set_timings, 925 .set_piomode = pata_macio_set_timings,
@@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = {
925 .cable_detect = pata_macio_cable_detect, 927 .cable_detect = pata_macio_cable_detect,
926 .sff_dev_select = pata_macio_dev_select, 928 .sff_dev_select = pata_macio_dev_select,
927 .qc_prep = pata_macio_qc_prep, 929 .qc_prep = pata_macio_qc_prep,
928 .mode_filter = ata_bmdma_mode_filter,
929 .bmdma_setup = pata_macio_bmdma_setup, 930 .bmdma_setup = pata_macio_bmdma_setup,
930 .bmdma_start = pata_macio_bmdma_start, 931 .bmdma_start = pata_macio_bmdma_start,
931 .bmdma_stop = pata_macio_bmdma_stop, 932 .bmdma_stop = pata_macio_bmdma_stop,
@@ -1109,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1109 1110
1110 /* Start it up */ 1111 /* Start it up */
1111 priv->irq = irq; 1112 priv->irq = irq;
1112 return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, 1113 return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1113 &pata_macio_sht); 1114 &pata_macio_sht);
1114} 1115}
1115 1116
@@ -1139,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
1139 "Failed to allocate private memory\n"); 1140 "Failed to allocate private memory\n");
1140 return -ENOMEM; 1141 return -ENOMEM;
1141 } 1142 }
1142 priv->node = of_node_get(mdev->ofdev.node); 1143 priv->node = of_node_get(mdev->ofdev.dev.of_node);
1143 priv->mdev = mdev; 1144 priv->mdev = mdev;
1144 priv->dev = &mdev->ofdev.dev; 1145 priv->dev = &mdev->ofdev.dev;
1145 1146
@@ -1354,8 +1355,11 @@ static struct of_device_id pata_macio_match[] =
1354 1355
1355static struct macio_driver pata_macio_driver = 1356static struct macio_driver pata_macio_driver =
1356{ 1357{
1357 .name = "pata-macio", 1358 .driver = {
1358 .match_table = pata_macio_match, 1359 .name = "pata-macio",
1360 .owner = THIS_MODULE,
1361 .of_match_table = pata_macio_match,
1362 },
1359 .probe = pata_macio_attach, 1363 .probe = pata_macio_attach,
1360 .remove = pata_macio_detach, 1364 .remove = pata_macio_detach,
1361#ifdef CONFIG_PM 1365#ifdef CONFIG_PM
@@ -1365,9 +1369,6 @@ static struct macio_driver pata_macio_driver =
1365#ifdef CONFIG_PMAC_MEDIABAY 1369#ifdef CONFIG_PMAC_MEDIABAY
1366 .mediabay_event = pata_macio_mb_event, 1370 .mediabay_event = pata_macio_mb_event,
1367#endif 1371#endif
1368 .driver = {
1369 .owner = THIS_MODULE,
1370 },
1371}; 1372};
1372 1373
1373static const struct pci_device_id pata_macio_pci_match[] = { 1374static const struct pci_device_id pata_macio_pci_match[] = {
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index e8ca02e5a71d..dd38083dcbeb 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
153 return -ENODEV; 153 return -ENODEV;
154 } 154 }
155#endif 155#endif
156 return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); 156 return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
157} 157}
158 158
159static const struct pci_device_id marvell_pci_tbl[] = { 159static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 9f5b053611dd..8cc536e49a0a 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {
64 64
65 65
66/* ATAPI-4 PIO specs (in ns) */ 66/* ATAPI-4 PIO specs (in ns) */
67static const int ataspec_t0[5] = {600, 383, 240, 180, 120}; 67static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
68static const int ataspec_t1[5] = { 70, 50, 30, 30, 25}; 68static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
69static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70}; 69static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
70static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70}; 70static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
71static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25}; 71static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
72static const int ataspec_t4[5] = { 30, 20, 15, 10, 10}; 72static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
73static const int ataspec_ta[5] = { 35, 35, 35, 35, 35}; 73static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
74 74
75#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) 75#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
76 76
@@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
78 78
79/* ATAPI-4 MDMA specs (in clocks) */ 79/* ATAPI-4 MDMA specs (in clocks) */
80struct mdmaspec { 80struct mdmaspec {
81 u32 t0M; 81 u8 t0M;
82 u32 td; 82 u8 td;
83 u32 th; 83 u8 th;
84 u32 tj; 84 u8 tj;
85 u32 tkw; 85 u8 tkw;
86 u32 tm; 86 u8 tm;
87 u32 tn; 87 u8 tn;
88}; 88};
89 89
90static const struct mdmaspec mdmaspec66[3] = { 90static const struct mdmaspec mdmaspec66[3] = {
@@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
101 101
102/* ATAPI-4 UDMA specs (in clocks) */ 102/* ATAPI-4 UDMA specs (in clocks) */
103struct udmaspec { 103struct udmaspec {
104 u32 tcyc; 104 u8 tcyc;
105 u32 t2cyc; 105 u8 t2cyc;
106 u32 tds; 106 u8 tds;
107 u32 tdh; 107 u8 tdh;
108 u32 tdvs; 108 u8 tdvs;
109 u32 tdvh; 109 u8 tdvh;
110 u32 tfs; 110 u8 tfs;
111 u32 tli; 111 u8 tli;
112 u32 tmli; 112 u8 tmli;
113 u32 taz; 113 u8 taz;
114 u32 tzah; 114 u8 tzah;
115 u32 tenv; 115 u8 tenv;
116 u32 tsr; 116 u8 tsr;
117 u32 trfs; 117 u8 trfs;
118 u32 trp; 118 u8 trp;
119 u32 tack; 119 u8 tack;
120 u32 tss; 120 u8 tss;
121}; 121};
122 122
123static const struct udmaspec udmaspec66[6] = { 123static const struct udmaspec udmaspec66[6] = {
@@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
270{ 270{
271 struct mpc52xx_ata_timings *timing = &priv->timings[dev]; 271 struct mpc52xx_ata_timings *timing = &priv->timings[dev];
272 unsigned int ipb_period = priv->ipb_period; 272 unsigned int ipb_period = priv->ipb_period;
273 unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta; 273 u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
274 274
275 if ((pio < 0) || (pio > 4)) 275 if ((pio < 0) || (pio > 4))
276 return -EINVAL; 276 return -EINVAL;
@@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
299 if (speed < 0 || speed > 2) 299 if (speed < 0 || speed > 2)
300 return -EINVAL; 300 return -EINVAL;
301 301
302 t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm); 302 t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
303 t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8); 303 t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
304 t->using_udma = 0; 304 t->using_udma = 0;
305 305
306 return 0; 306 return 0;
@@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
316 if (speed < 0 || speed > 2) 316 if (speed < 0 || speed > 2)
317 return -EINVAL; 317 return -EINVAL;
318 318
319 t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh; 319 t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
320 t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli; 320 t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
321 t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr; 321 t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
322 t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack; 322 t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
323 t->udma5 = (s->tzah << 24); 323 t->udma5 = (u32)s->tzah << 24;
324 t->using_udma = 1; 324 t->using_udma = 1;
325 325
326 return 0; 326 return 0;
@@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
659 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); 659 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
660 660
661 /* activate host */ 661 /* activate host */
662 return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, 662 return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
663 &mpc52xx_ata_sht); 663 &mpc52xx_ata_sht);
664} 664}
665 665
@@ -680,7 +680,7 @@ mpc52xx_ata_remove_one(struct device *dev)
680/* ======================================================================== */ 680/* ======================================================================== */
681 681
682static int __devinit 682static int __devinit
683mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) 683mpc52xx_ata_probe(struct platform_device *op, const struct of_device_id *match)
684{ 684{
685 unsigned int ipb_freq; 685 unsigned int ipb_freq;
686 struct resource res_mem; 686 struct resource res_mem;
@@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
694 struct bcom_task *dmatsk = NULL; 694 struct bcom_task *dmatsk = NULL;
695 695
696 /* Get ipb frequency */ 696 /* Get ipb frequency */
697 ipb_freq = mpc5xxx_get_bus_frequency(op->node); 697 ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
698 if (!ipb_freq) { 698 if (!ipb_freq) {
699 dev_err(&op->dev, "could not determine IPB bus frequency\n"); 699 dev_err(&op->dev, "could not determine IPB bus frequency\n");
700 return -ENODEV; 700 return -ENODEV;
@@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
702 702
703 /* Get device base address from device tree, request the region 703 /* Get device base address from device tree, request the region
704 * and ioremap it. */ 704 * and ioremap it. */
705 rv = of_address_to_resource(op->node, 0, &res_mem); 705 rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
706 if (rv) { 706 if (rv) {
707 dev_err(&op->dev, "could not determine device base address\n"); 707 dev_err(&op->dev, "could not determine device base address\n");
708 return rv; 708 return rv;
@@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
735 * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and 735 * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
736 * UDMA modes 0, 1 and 2. 736 * UDMA modes 0, 1 and 2.
737 */ 737 */
738 prop = of_get_property(op->node, "mwdma-mode", &proplen); 738 prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
739 if ((prop) && (proplen >= 4)) 739 if ((prop) && (proplen >= 4))
740 mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1); 740 mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
741 prop = of_get_property(op->node, "udma-mode", &proplen); 741 prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
742 if ((prop) && (proplen >= 4)) 742 if ((prop) && (proplen >= 4))
743 udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1); 743 udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
744 744
745 ata_irq = irq_of_parse_and_map(op->node, 0); 745 ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
746 if (ata_irq == NO_IRQ) { 746 if (ata_irq == NO_IRQ) {
747 dev_err(&op->dev, "error mapping irq\n"); 747 dev_err(&op->dev, "error mapping irq\n");
748 return -EINVAL; 748 return -EINVAL;
@@ -821,7 +821,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
821} 821}
822 822
823static int 823static int
824mpc52xx_ata_remove(struct of_device *op) 824mpc52xx_ata_remove(struct platform_device *op)
825{ 825{
826 struct mpc52xx_ata_priv *priv; 826 struct mpc52xx_ata_priv *priv;
827 int task_irq; 827 int task_irq;
@@ -848,7 +848,7 @@ mpc52xx_ata_remove(struct of_device *op)
848#ifdef CONFIG_PM 848#ifdef CONFIG_PM
849 849
850static int 850static int
851mpc52xx_ata_suspend(struct of_device *op, pm_message_t state) 851mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
852{ 852{
853 struct ata_host *host = dev_get_drvdata(&op->dev); 853 struct ata_host *host = dev_get_drvdata(&op->dev);
854 854
@@ -856,7 +856,7 @@ mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
856} 856}
857 857
858static int 858static int
859mpc52xx_ata_resume(struct of_device *op) 859mpc52xx_ata_resume(struct platform_device *op)
860{ 860{
861 struct ata_host *host = dev_get_drvdata(&op->dev); 861 struct ata_host *host = dev_get_drvdata(&op->dev);
862 struct mpc52xx_ata_priv *priv = host->private_data; 862 struct mpc52xx_ata_priv *priv = host->private_data;
@@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
884 884
885 885
886static struct of_platform_driver mpc52xx_ata_of_platform_driver = { 886static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
887 .owner = THIS_MODULE,
888 .name = DRV_NAME,
889 .match_table = mpc52xx_ata_of_match,
890 .probe = mpc52xx_ata_probe, 887 .probe = mpc52xx_ata_probe,
891 .remove = mpc52xx_ata_remove, 888 .remove = mpc52xx_ata_remove,
892#ifdef CONFIG_PM 889#ifdef CONFIG_PM
@@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
896 .driver = { 893 .driver = {
897 .name = DRV_NAME, 894 .name = DRV_NAME,
898 .owner = THIS_MODULE, 895 .owner = THIS_MODULE,
896 .of_match_table = mpc52xx_ata_of_match,
899 }, 897 },
900}; 898};
901 899
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 94f979a7f4f7..3eb921c746a1 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
82 ata_pci_bmdma_clear_simplex(pdev); 82 ata_pci_bmdma_clear_simplex(pdev);
83 83
84 /* And let the library code do the work */ 84 /* And let the library code do the work */
85 return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); 85 return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
86} 86}
87 87
88static const struct pci_device_id netcell_pci_tbl[] = { 88static const struct pci_device_id netcell_pci_tbl[] = {
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index dd53a66b19e3..cc50bd09aa26 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
149 149
150 ninja32_program(base); 150 ninja32_program(base);
151 /* FIXME: Should we disable them at remove ? */ 151 /* FIXME: Should we disable them at remove ? */
152 return ata_host_activate(host, dev->irq, ata_sff_interrupt, 152 return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
153 IRQF_SHARED, &ninja32_sht); 153 IRQF_SHARED, &ninja32_sht);
154} 154}
155 155
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 830431f036a1..605f198f958c 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
126 126
127 /* load PRD table addr. */ 127 /* load PRD table addr. */
128 mb(); /* make sure PRD table writes are visible to controller */ 128 mb(); /* make sure PRD table writes are visible to controller */
129 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); 129 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
130 130
131 /* specify data direction, triple-check start bit is clear */ 131 /* specify data direction, triple-check start bit is clear */
132 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 132 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
@@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
380 380
381 ns87415_fixup(pdev); 381 ns87415_fixup(pdev);
382 382
383 return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); 383 return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
384} 384}
385 385
386static const struct pci_device_id ns87415_pci_tbl[] = { 386static const struct pci_device_id ns87415_pci_tbl[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 005a44483a7b..06ddd91ffeda 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
489 ata_wait_idle(ap); 489 ata_wait_idle(ap);
490} 490}
491 491
492static u8 octeon_cf_irq_on(struct ata_port *ap) 492static void octeon_cf_irq_on(struct ata_port *ap)
493{ 493{
494 return 0;
495} 494}
496 495
497static void octeon_cf_irq_clear(struct ata_port *ap) 496static void octeon_cf_irq_clear(struct ata_port *ap)
@@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
655 ap = host->ports[i]; 654 ap = host->ports[i];
656 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
657 656
658 if (ap->flags & ATA_FLAG_DISABLED)
659 continue;
660
661 ocd = ap->dev->platform_data; 657 ocd = ap->dev->platform_data;
662 cf_port = ap->private_data; 658 cf_port = ap->private_data;
663 dma_int.u64 = 659 dma_int.u64 =
@@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
667 663
668 qc = ata_qc_from_tag(ap, ap->link.active_tag); 664 qc = ata_qc_from_tag(ap, ap->link.active_tag);
669 665
670 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 666 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
671 (qc->flags & ATA_QCFLAG_ACTIVE)) {
672 if (dma_int.s.done && !dma_cfg.s.en) { 667 if (dma_int.s.done && !dma_cfg.s.en) {
673 if (!sg_is_last(qc->cursg)) { 668 if (!sg_is_last(qc->cursg)) {
674 qc->cursg = sg_next(qc->cursg); 669 qc->cursg = sg_next(qc->cursg);
@@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
738 goto out; 733 goto out;
739 } 734 }
740 qc = ata_qc_from_tag(ap, ap->link.active_tag); 735 qc = ata_qc_from_tag(ap, ap->link.active_tag);
741 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 736 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
742 (qc->flags & ATA_QCFLAG_ACTIVE))
743 octeon_cf_dma_finished(ap, qc); 737 octeon_cf_dma_finished(ap, qc);
744out: 738out:
745 spin_unlock_irqrestore(&host->lock, flags); 739 spin_unlock_irqrestore(&host->lock, flags);
@@ -756,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev)
756} 750}
757 751
758/* 752/*
759 * Trap if driver tries to do standard bmdma commands. They are not
760 * supported.
761 */
762static void unreachable_qc(struct ata_queued_cmd *qc)
763{
764 BUG();
765}
766
767static u8 unreachable_port(struct ata_port *ap)
768{
769 BUG();
770}
771
772/*
773 * We don't do ATAPI DMA so return 0. 753 * We don't do ATAPI DMA so return 0.
774 */ 754 */
775static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) 755static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
@@ -810,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = {
810 .sff_dev_select = octeon_cf_dev_select, 790 .sff_dev_select = octeon_cf_dev_select,
811 .sff_irq_on = octeon_cf_irq_on, 791 .sff_irq_on = octeon_cf_irq_on,
812 .sff_irq_clear = octeon_cf_irq_clear, 792 .sff_irq_clear = octeon_cf_irq_clear,
813 .bmdma_setup = unreachable_qc,
814 .bmdma_start = unreachable_qc,
815 .bmdma_stop = unreachable_qc,
816 .bmdma_status = unreachable_port,
817 .cable_detect = ata_cable_40wire, 793 .cable_detect = ata_cable_40wire,
818 .set_piomode = octeon_cf_set_piomode, 794 .set_piomode = octeon_cf_set_piomode,
819 .set_dmamode = octeon_cf_set_dmamode, 795 .set_dmamode = octeon_cf_set_dmamode,
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1f18ad9e4fe1..480e043ce6b8 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,11 +14,11 @@
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/ata_platform.h> 15#include <linux/ata_platform.h>
16 16
17static int __devinit pata_of_platform_probe(struct of_device *ofdev, 17static int __devinit pata_of_platform_probe(struct platform_device *ofdev,
18 const struct of_device_id *match) 18 const struct of_device_id *match)
19{ 19{
20 int ret; 20 int ret;
21 struct device_node *dn = ofdev->node; 21 struct device_node *dn = ofdev->dev.of_node;
22 struct resource io_res; 22 struct resource io_res;
23 struct resource ctl_res; 23 struct resource ctl_res;
24 struct resource irq_res; 24 struct resource irq_res;
@@ -78,7 +78,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
78 reg_shift, pio_mask); 78 reg_shift, pio_mask);
79} 79}
80 80
81static int __devexit pata_of_platform_remove(struct of_device *ofdev) 81static int __devexit pata_of_platform_remove(struct platform_device *ofdev)
82{ 82{
83 return __pata_platform_remove(&ofdev->dev); 83 return __pata_platform_remove(&ofdev->dev);
84} 84}
@@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = {
91MODULE_DEVICE_TABLE(of, pata_of_platform_match); 91MODULE_DEVICE_TABLE(of, pata_of_platform_match);
92 92
93static struct of_platform_driver pata_of_platform_driver = { 93static struct of_platform_driver pata_of_platform_driver = {
94 .name = "pata_of_platform", 94 .driver = {
95 .match_table = pata_of_platform_match, 95 .name = "pata_of_platform",
96 .owner = THIS_MODULE,
97 .of_match_table = pata_of_platform_match,
98 },
96 .probe = pata_of_platform_probe, 99 .probe = pata_of_platform_probe,
97 .remove = __devexit_p(pata_of_platform_remove), 100 .remove = __devexit_p(pata_of_platform_remove),
98}; 101};
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 5f6aba7eb0dd..b811c1636204 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
200 if (ata_dma_enabled(adev)) 200 if (ata_dma_enabled(adev))
201 oldpiix_set_dmamode(ap, adev); 201 oldpiix_set_dmamode(ap, adev);
202 } 202 }
203 return ata_sff_qc_issue(qc); 203 return ata_bmdma_qc_issue(qc);
204} 204}
205 205
206 206
@@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
248 dev_printk(KERN_DEBUG, &pdev->dev, 248 dev_printk(KERN_DEBUG, &pdev->dev,
249 "version " DRV_VERSION "\n"); 249 "version " DRV_VERSION "\n");
250 250
251 return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); 251 return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
252} 252}
253 253
254static const struct pci_device_id oldpiix_pci_tbl[] = { 254static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 76b7d12b1e8d..0852cd07de08 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
429 if (optiplus_with_udma(dev)) 429 if (optiplus_with_udma(dev))
430 ppi[0] = &info_82c700_udma; 430 ppi[0] = &info_82c700_udma;
431 431
432 return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); 432 return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
433} 433}
434 434
435static const struct pci_device_id optidma[] = { 435static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index d94b8f0bd743..e944aa0c5517 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -34,7 +34,6 @@
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#include <pcmcia/cs_types.h>
38#include <pcmcia/cs.h> 37#include <pcmcia/cs.h>
39#include <pcmcia/cistpl.h> 38#include <pcmcia/cistpl.h>
40#include <pcmcia/ds.h> 39#include <pcmcia/ds.h>
@@ -45,16 +44,6 @@
45#define DRV_NAME "pata_pcmcia" 44#define DRV_NAME "pata_pcmcia"
46#define DRV_VERSION "0.3.5" 45#define DRV_VERSION "0.3.5"
47 46
48/*
49 * Private data structure to glue stuff together
50 */
51
52struct ata_pcmcia_info {
53 struct pcmcia_device *pdev;
54 int ndev;
55 dev_node_t node;
56};
57
58/** 47/**
59 * pcmcia_set_mode - PCMCIA specific mode setup 48 * pcmcia_set_mode - PCMCIA specific mode setup
60 * @link: link 49 * @link: link
@@ -175,7 +164,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
175 .sff_data_xfer = ata_data_xfer_8bit, 164 .sff_data_xfer = ata_data_xfer_8bit,
176 .cable_detect = ata_cable_40wire, 165 .cable_detect = ata_cable_40wire,
177 .set_mode = pcmcia_set_mode_8bit, 166 .set_mode = pcmcia_set_mode_8bit,
178 .drain_fifo = pcmcia_8bit_drain_fifo, 167 .sff_drain_fifo = pcmcia_8bit_drain_fifo,
179}; 168};
180 169
181 170
@@ -211,23 +200,25 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev,
211 200
212 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { 201 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
213 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; 202 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
214 pdev->io.BasePort1 = io->win[0].base; 203 pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
215 pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; 204 pdev->resource[0]->start = io->win[0].base;
216 if (!(io->flags & CISTPL_IO_16BIT)) 205 if (!(io->flags & CISTPL_IO_16BIT)) {
217 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 206 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
207 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
208 }
218 if (io->nwin == 2) { 209 if (io->nwin == 2) {
219 pdev->io.NumPorts1 = 8; 210 pdev->resource[0]->end = 8;
220 pdev->io.BasePort2 = io->win[1].base; 211 pdev->resource[1]->start = io->win[1].base;
221 pdev->io.NumPorts2 = (stk->is_kme) ? 2 : 1; 212 pdev->resource[1]->end = (stk->is_kme) ? 2 : 1;
222 if (pcmcia_request_io(pdev, &pdev->io) != 0) 213 if (pcmcia_request_io(pdev) != 0)
223 return -ENODEV; 214 return -ENODEV;
224 stk->ctl_base = pdev->io.BasePort2; 215 stk->ctl_base = pdev->resource[1]->start;
225 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) { 216 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
226 pdev->io.NumPorts1 = io->win[0].len; 217 pdev->resource[0]->end = io->win[0].len;
227 pdev->io.NumPorts2 = 0; 218 pdev->resource[1]->end = 0;
228 if (pcmcia_request_io(pdev, &pdev->io) != 0) 219 if (pcmcia_request_io(pdev) != 0)
229 return -ENODEV; 220 return -ENODEV;
230 stk->ctl_base = pdev->io.BasePort1 + 0x0e; 221 stk->ctl_base = pdev->resource[0]->start + 0x0e;
231 } else 222 } else
232 return -ENODEV; 223 return -ENODEV;
233 /* If we've got this far, we're done */ 224 /* If we've got this far, we're done */
@@ -248,7 +239,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
248{ 239{
249 struct ata_host *host; 240 struct ata_host *host;
250 struct ata_port *ap; 241 struct ata_port *ap;
251 struct ata_pcmcia_info *info;
252 struct pcmcia_config_check *stk = NULL; 242 struct pcmcia_config_check *stk = NULL;
253 int is_kme = 0, ret = -ENOMEM, p; 243 int is_kme = 0, ret = -ENOMEM, p;
254 unsigned long io_base, ctl_base; 244 unsigned long io_base, ctl_base;
@@ -256,19 +246,9 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
256 int n_ports = 1; 246 int n_ports = 1;
257 struct ata_port_operations *ops = &pcmcia_port_ops; 247 struct ata_port_operations *ops = &pcmcia_port_ops;
258 248
259 info = kzalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return -ENOMEM;
262
263 /* Glue stuff together. FIXME: We may be able to get rid of info with care */
264 info->pdev = pdev;
265 pdev->priv = info;
266
267 /* Set up attributes in order to probe card and get resources */ 249 /* Set up attributes in order to probe card and get resources */
268 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 250 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
269 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 251 pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
270 pdev->io.IOAddrLines = 3;
271 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
272 pdev->conf.Attributes = CONF_ENABLE_IRQ; 252 pdev->conf.Attributes = CONF_ENABLE_IRQ;
273 pdev->conf.IntType = INT_MEMORY_AND_IO; 253 pdev->conf.IntType = INT_MEMORY_AND_IO;
274 254
@@ -291,10 +271,9 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
291 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk)) 271 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk))
292 goto failed; /* No suitable config found */ 272 goto failed; /* No suitable config found */
293 } 273 }
294 io_base = pdev->io.BasePort1; 274 io_base = pdev->resource[0]->start;
295 ctl_base = stk->ctl_base; 275 ctl_base = stk->ctl_base;
296 ret = pcmcia_request_irq(pdev, &pdev->irq); 276 if (!pdev->irq)
297 if (ret)
298 goto failed; 277 goto failed;
299 278
300 ret = pcmcia_request_configuration(pdev, &pdev->conf); 279 ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -315,7 +294,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
315 294
316 /* FIXME: Could be more ports at base + 0x10 but we only deal with 295 /* FIXME: Could be more ports at base + 0x10 but we only deal with
317 one right now */ 296 one right now */
318 if (pdev->io.NumPorts1 >= 0x20) 297 if (resource_size(pdev->resource[0]) >= 0x20)
319 n_ports = 2; 298 n_ports = 2;
320 299
321 if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620) 300 if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
344 } 323 }
345 324
346 /* activate */ 325 /* activate */
347 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt, 326 ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
348 IRQF_SHARED, &pcmcia_sht); 327 IRQF_SHARED, &pcmcia_sht);
349 if (ret) 328 if (ret)
350 goto failed; 329 goto failed;
351 330
352 info->ndev = 1; 331 pdev->priv = host;
353 kfree(stk); 332 kfree(stk);
354 return 0; 333 return 0;
355 334
356failed: 335failed:
357 kfree(stk); 336 kfree(stk);
358 info->ndev = 0;
359 pcmcia_disable_device(pdev); 337 pcmcia_disable_device(pdev);
360out1: 338out1:
361 kfree(info);
362 return ret; 339 return ret;
363} 340}
364 341
@@ -372,20 +349,12 @@ out1:
372 349
373static void pcmcia_remove_one(struct pcmcia_device *pdev) 350static void pcmcia_remove_one(struct pcmcia_device *pdev)
374{ 351{
375 struct ata_pcmcia_info *info = pdev->priv; 352 struct ata_host *host = pdev->priv;
376 struct device *dev = &pdev->dev; 353
377 354 if (host)
378 if (info != NULL) { 355 ata_host_detach(host);
379 /* If we have attached the device to the ATA layer, detach it */ 356
380 if (info->ndev) {
381 struct ata_host *host = dev_get_drvdata(dev);
382 ata_host_detach(host);
383 }
384 info->ndev = 0;
385 pdev->priv = NULL;
386 }
387 pcmcia_disable_device(pdev); 357 pcmcia_disable_device(pdev);
388 kfree(info);
389} 358}
390 359
391static struct pcmcia_device_id pcmcia_devices[] = { 360static struct pcmcia_device_id pcmcia_devices[] = {
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ca5cad0fd80b..b18351122525 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
265 struct ata_device *pair = ata_dev_pair(adev); 265 struct ata_device *pair = ata_dev_pair(adev);
266 266
267 if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL) 267 if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
268 return ata_bmdma_mode_filter(adev, mask); 268 return mask;
269 269
270 /* Check for slave of a Maxtor at UDMA6 */ 270 /* Check for slave of a Maxtor at UDMA6 */
271 ata_id_c_string(pair->id, model_num, ATA_ID_PROD, 271 ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
274 if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6) 274 if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
275 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); 275 mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
276 276
277 return ata_bmdma_mode_filter(adev, mask); 277 return mask;
278} 278}
279 279
280/** 280/**
@@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
754 return -EIO; 754 return -EIO;
755 755
756 pci_set_master(pdev); 756 pci_set_master(pdev);
757 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 757 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
758 IRQF_SHARED, &pdc2027x_sht); 758 IRQF_SHARED, &pdc2027x_sht);
759} 759}
760 760
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 9ac0897cf8b0..c39f213e1bbc 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
249 u8 burst = ioread8(bmdma + 0x1f); 249 u8 burst = ioread8(bmdma + 0x1f);
250 iowrite8(burst | 0x01, bmdma + 0x1f); 250 iowrite8(burst | 0x01, bmdma + 0x1f);
251 } 251 }
252 return ata_sff_port_start(ap); 252 return ata_bmdma_port_start(ap);
253} 253}
254 254
255/** 255/**
@@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
337 return -ENODEV; 337 return -ENODEV;
338 } 338 }
339 } 339 }
340 return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); 340 return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
341} 341}
342 342
343static const struct pci_device_id pdc202xx[] = { 343static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 981615414849..cb01bf9496fe 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
95 }; 95 };
96 const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; 96 const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
97 /* Just one port for the moment */ 97 /* Just one port for the moment */
98 return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); 98 return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
99} 99}
100 100
101static struct pci_device_id ata_tosh[] = { 101static struct pci_device_id ata_tosh[] = {
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 3f6ebc6c665a..50400fa120fe 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
53 .sff_data_xfer = ata_sff_data_xfer_noirq, 53 .sff_data_xfer = ata_sff_data_xfer_noirq,
54 .cable_detect = ata_cable_unknown, 54 .cable_detect = ata_cable_unknown,
55 .set_mode = pata_platform_set_mode, 55 .set_mode = pata_platform_set_mode,
56 .port_start = ATA_OP_NULL,
57}; 56};
58 57
59static void pata_platform_setup_port(struct ata_ioports *ioaddr, 58static void pata_platform_setup_port(struct ata_ioports *ioaddr,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
new file mode 100644
index 000000000000..1898c6ed4b4e
--- /dev/null
+++ b/drivers/ata/pata_pxa.c
@@ -0,0 +1,411 @@
1/*
2 * Generic PXA PATA driver
3 *
4 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/blkdev.h>
25#include <linux/ata.h>
26#include <linux/libata.h>
27#include <linux/platform_device.h>
28#include <linux/gpio.h>
29#include <linux/slab.h>
30#include <linux/completion.h>
31
32#include <scsi/scsi_host.h>
33
34#include <mach/pxa2xx-regs.h>
35#include <mach/pata_pxa.h>
36#include <mach/dma.h>
37
38#define DRV_NAME "pata_pxa"
39#define DRV_VERSION "0.1"
40
41struct pata_pxa_data {
42 uint32_t dma_channel;
43 struct pxa_dma_desc *dma_desc;
44 dma_addr_t dma_desc_addr;
45 uint32_t dma_desc_id;
46
47 /* DMA IO physical address */
48 uint32_t dma_io_addr;
49 /* PXA DREQ<0:2> pin selector */
50 uint32_t dma_dreq;
51 /* DMA DCSR register value */
52 uint32_t dma_dcsr;
53
54 struct completion dma_done;
55};
56
57/*
58 * Setup the DMA descriptors. The size is transfer capped at 4k per descriptor,
59 * if the transfer is longer, it is split into multiple chained descriptors.
60 */
61static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
62{
63 struct pata_pxa_data *pd = qc->ap->private_data;
64
65 uint32_t cpu_len, seg_len;
66 dma_addr_t cpu_addr;
67
68 cpu_addr = sg_dma_address(sg);
69 cpu_len = sg_dma_len(sg);
70
71 do {
72 seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;
73
74 pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
75 ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));
76
77 pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
78 DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);
79
80 if (qc->tf.flags & ATA_TFLAG_WRITE) {
81 pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
82 pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
83 pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
84 DCMD_FLOWTRG;
85 } else {
86 pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
87 pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
88 pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
89 DCMD_FLOWSRC;
90 }
91
92 cpu_len -= seg_len;
93 cpu_addr += seg_len;
94 pd->dma_desc_id++;
95
96 } while (cpu_len);
97
98 /* Should not happen */
99 if (seg_len & 0x1f)
100 DALGN |= (1 << pd->dma_dreq);
101}
102
103/*
104 * Prepare taskfile for submission.
105 */
106static void pxa_qc_prep(struct ata_queued_cmd *qc)
107{
108 struct pata_pxa_data *pd = qc->ap->private_data;
109 int si = 0;
110 struct scatterlist *sg;
111
112 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
113 return;
114
115 pd->dma_desc_id = 0;
116
117 DCSR(pd->dma_channel) = 0;
118 DALGN &= ~(1 << pd->dma_dreq);
119
120 for_each_sg(qc->sg, sg, qc->n_elem, si)
121 pxa_load_dmac(sg, qc);
122
123 pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;
124
125 /* Fire IRQ only at the end of last block */
126 pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;
127
128 DDADR(pd->dma_channel) = pd->dma_desc_addr;
129 DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
130
131}
132
133/*
134 * Configure the DMA controller, load the DMA descriptors, but don't start the
135 * DMA controller yet. Only issue the ATA command.
136 */
137static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
138{
139 qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
140}
141
142/*
143 * Execute the DMA transfer.
144 */
145static void pxa_bmdma_start(struct ata_queued_cmd *qc)
146{
147 struct pata_pxa_data *pd = qc->ap->private_data;
148 init_completion(&pd->dma_done);
149 DCSR(pd->dma_channel) = DCSR_RUN;
150}
151
152/*
153 * Wait until the DMA transfer completes, then stop the DMA controller.
154 */
155static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
156{
157 struct pata_pxa_data *pd = qc->ap->private_data;
158
159 if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
160 wait_for_completion_timeout(&pd->dma_done, HZ))
161 dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");
162
163 DCSR(pd->dma_channel) = 0;
164}
165
166/*
167 * Read DMA status. The bmdma_stop() will take care of properly finishing the
168 * DMA transfer so we always have DMA-complete interrupt here.
169 */
170static unsigned char pxa_bmdma_status(struct ata_port *ap)
171{
172 struct pata_pxa_data *pd = ap->private_data;
173 unsigned char ret = ATA_DMA_INTR;
174
175 if (pd->dma_dcsr & DCSR_BUSERR)
176 ret |= ATA_DMA_ERR;
177
178 return ret;
179}
180
181/*
182 * No IRQ register present so we do nothing.
183 */
184static void pxa_irq_clear(struct ata_port *ap)
185{
186}
187
188/*
189 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
190 * unclear why ATAPI has DMA issues.
191 */
192static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
193{
194 return -EOPNOTSUPP;
195}
196
197static struct scsi_host_template pxa_ata_sht = {
198 ATA_BMDMA_SHT(DRV_NAME),
199};
200
201static struct ata_port_operations pxa_ata_port_ops = {
202 .inherits = &ata_bmdma_port_ops,
203 .cable_detect = ata_cable_40wire,
204
205 .bmdma_setup = pxa_bmdma_setup,
206 .bmdma_start = pxa_bmdma_start,
207 .bmdma_stop = pxa_bmdma_stop,
208 .bmdma_status = pxa_bmdma_status,
209
210 .check_atapi_dma = pxa_check_atapi_dma,
211
212 .sff_irq_clear = pxa_irq_clear,
213
214 .qc_prep = pxa_qc_prep,
215};
216
217/*
218 * DMA interrupt handler.
219 */
220static void pxa_ata_dma_irq(int dma, void *port)
221{
222 struct ata_port *ap = port;
223 struct pata_pxa_data *pd = ap->private_data;
224
225 pd->dma_dcsr = DCSR(dma);
226 DCSR(dma) = pd->dma_dcsr;
227
228 if (pd->dma_dcsr & DCSR_STOPSTATE)
229 complete(&pd->dma_done);
230}
231
232static int __devinit pxa_ata_probe(struct platform_device *pdev)
233{
234 struct ata_host *host;
235 struct ata_port *ap;
236 struct pata_pxa_data *data;
237 struct resource *cmd_res;
238 struct resource *ctl_res;
239 struct resource *dma_res;
240 struct resource *irq_res;
241 struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
242 int ret = 0;
243
244 /*
245 * Resource validation, three resources are needed:
246 * - CMD port base address
247 * - CTL port base address
248 * - DMA port base address
249 * - IRQ pin
250 */
251 if (pdev->num_resources != 4) {
252 dev_err(&pdev->dev, "invalid number of resources\n");
253 return -EINVAL;
254 }
255
256 /*
257 * CMD port base address
258 */
259 cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
260 if (unlikely(cmd_res == NULL))
261 return -EINVAL;
262
263 /*
264 * CTL port base address
265 */
266 ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
267 if (unlikely(ctl_res == NULL))
268 return -EINVAL;
269
270 /*
271 * DMA port base address
272 */
273 dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
274 if (unlikely(dma_res == NULL))
275 return -EINVAL;
276
277 /*
278 * IRQ pin
279 */
280 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
281 if (unlikely(irq_res == NULL))
282 return -EINVAL;
283
284 /*
285 * Allocate the host
286 */
287 host = ata_host_alloc(&pdev->dev, 1);
288 if (!host)
289 return -ENOMEM;
290
291 ap = host->ports[0];
292 ap->ops = &pxa_ata_port_ops;
293 ap->pio_mask = ATA_PIO4;
294 ap->mwdma_mask = ATA_MWDMA2;
295 ap->flags = ATA_FLAG_MMIO;
296
297 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
298 resource_size(cmd_res));
299 ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
300 resource_size(ctl_res));
301 ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
302 resource_size(dma_res));
303
304 /*
305 * Adjust register offsets
306 */
307 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
308 ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
309 (ATA_REG_DATA << pdata->reg_shift);
310 ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
311 (ATA_REG_ERR << pdata->reg_shift);
312 ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
313 (ATA_REG_FEATURE << pdata->reg_shift);
314 ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
315 (ATA_REG_NSECT << pdata->reg_shift);
316 ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
317 (ATA_REG_LBAL << pdata->reg_shift);
318 ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
319 (ATA_REG_LBAM << pdata->reg_shift);
320 ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
321 (ATA_REG_LBAH << pdata->reg_shift);
322 ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
323 (ATA_REG_DEVICE << pdata->reg_shift);
324 ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
325 (ATA_REG_STATUS << pdata->reg_shift);
326 ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
327 (ATA_REG_CMD << pdata->reg_shift);
328
329 /*
330 * Allocate and load driver's internal data structure
331 */
332 data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
333 GFP_KERNEL);
334 if (!data)
335 return -ENOMEM;
336
337 ap->private_data = data;
338 data->dma_dreq = pdata->dma_dreq;
339 data->dma_io_addr = dma_res->start;
340
341 /*
342 * Allocate space for the DMA descriptors
343 */
344 data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
345 &data->dma_desc_addr, GFP_KERNEL);
346 if (!data->dma_desc)
347 return -EINVAL;
348
349 /*
350 * Request the DMA channel
351 */
352 data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW,
353 pxa_ata_dma_irq, ap);
354 if (data->dma_channel < 0)
355 return -EBUSY;
356
357 /*
358 * Stop and clear the DMA channel
359 */
360 DCSR(data->dma_channel) = 0;
361
362 /*
363 * Activate the ATA host
364 */
365 ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
366 pdata->irq_flags, &pxa_ata_sht);
367 if (ret)
368 pxa_free_dma(data->dma_channel);
369
370 return ret;
371}
372
373static int __devexit pxa_ata_remove(struct platform_device *pdev)
374{
375 struct ata_host *host = dev_get_drvdata(&pdev->dev);
376 struct pata_pxa_data *data = host->ports[0]->private_data;
377
378 pxa_free_dma(data->dma_channel);
379
380 ata_host_detach(host);
381
382 return 0;
383}
384
385static struct platform_driver pxa_ata_driver = {
386 .probe = pxa_ata_probe,
387 .remove = __devexit_p(pxa_ata_remove),
388 .driver = {
389 .name = DRV_NAME,
390 .owner = THIS_MODULE,
391 },
392};
393
394static int __init pxa_ata_init(void)
395{
396 return platform_driver_register(&pxa_ata_driver);
397}
398
399static void __exit pxa_ata_exit(void)
400{
401 platform_driver_unregister(&pxa_ata_driver);
402}
403
404module_init(pxa_ata_init);
405module_exit(pxa_ata_exit);
406
407MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
408MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
409MODULE_LICENSE("GPL");
410MODULE_VERSION(DRV_VERSION);
411MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index fc9602229acb..8574b31f1773 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
179 radisys_set_piomode(ap, adev); 179 radisys_set_piomode(ap, adev);
180 } 180 }
181 } 181 }
182 return ata_sff_qc_issue(qc); 182 return ata_bmdma_qc_issue(qc);
183} 183}
184 184
185 185
@@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
227 dev_printk(KERN_DEBUG, &pdev->dev, 227 dev_printk(KERN_DEBUG, &pdev->dev,
228 "version " DRV_VERSION "\n"); 228 "version " DRV_VERSION "\n");
229 229
230 return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); 230 return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
231} 231}
232 232
233static const struct pci_device_id radisys_pci_tbl[] = { 233static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 37092cfd7bc6..5fbe9b166c69 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
344 */ 344 */
345 pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); 345 pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
346 346
347 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 347 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
348 if (rc) 348 if (rc)
349 return rc; 349 return rc;
350 host->private_data = hpriv; 350 host->private_data = hpriv;
@@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
354 host->flags |= ATA_HOST_PARALLEL_SCAN; 354 host->flags |= ATA_HOST_PARALLEL_SCAN;
355 355
356 pci_set_master(pdev); 356 pci_set_master(pdev);
357 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); 357 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
358} 358}
359 359
360static void rdc_remove_one(struct pci_dev *pdev) 360static void rdc_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
new file mode 100644
index 000000000000..6f9cfb24b751
--- /dev/null
+++ b/drivers/ata/pata_samsung_cf.c
@@ -0,0 +1,683 @@
1/*
2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * PATA driver for Samsung SoCs.
6 * Supports CF Interface in True IDE mode. Currently only PIO mode has been
7 * implemented; UDMA support has to be added.
8 *
9 * Based on:
10 * PATA driver for AT91SAM9260 Static Memory Controller
11 * PATA driver for Toshiba SCC controller
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation.
16*/
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/clk.h>
22#include <linux/libata.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25
26#include <plat/ata.h>
27#include <plat/regs-ata.h>
28
29#define DRV_NAME "pata_samsung_cf"
30#define DRV_VERSION "0.1"
31
32enum s3c_cpu_type {
33 TYPE_S3C64XX,
34 TYPE_S5PC100,
35 TYPE_S5PV210,
36};
37
38/*
39 * struct s3c_ide_info - S3C PATA instance.
40 * @clk: The clock resource for this controller.
41 * @ide_addr: The area mapped for the hardware registers.
42 * @sfr_addr: The area mapped for the special function registers.
43 * @irq: The IRQ number we are using.
44 * @cpu_type: The exact type of this controller.
45 * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
46 */
47struct s3c_ide_info {
48 struct clk *clk;
49 void __iomem *ide_addr;
50 void __iomem *sfr_addr;
51 unsigned int irq;
52 enum s3c_cpu_type cpu_type;
53 unsigned int fifo_status_reg;
54};
55
56static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
57{
58 u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
59 reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
60 writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
61}
62
63static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
64{
65 /* Select true-ide as the internal operating mode */
66 writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
67 s3c_ide_sfrbase + S3C_CFATA_MUX);
68}
69
70static unsigned long
71pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
72{
73 int t1 = ata->setup;
74 int t2 = ata->act8b;
75 int t2i = ata->rec8b;
76 ulong piotime;
77
78 piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
79
80 return piotime;
81}
82
83static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
84{
85 struct s3c_ide_info *info = ap->host->private_data;
86 struct ata_timing timing;
87 int cycle_time;
88 ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
89 ulong piotime;
90
91 /* Enables IORDY if mode requires it */
92 if (ata_pio_need_iordy(adev))
93 ata_cfg |= S3C_ATA_CFG_IORDYEN;
94 else
95 ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
96
97 cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
98
99 ata_timing_compute(adev, adev->pio_mode, &timing,
100 cycle_time * 1000, 0);
101
102 piotime = pata_s3c_setup_timing(info, &timing);
103
104 writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
105 writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
106}
107
108/*
109 * Waits until the IDE controller is able to perform next read/write
110 * operation to the disk. Needed for 64XX series boards only.
111 */
112static int wait_for_host_ready(struct s3c_ide_info *info)
113{
114 ulong timeout;
115 void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
116
117 /* wait for maximum of 20 msec */
118 timeout = jiffies + msecs_to_jiffies(20);
119 while (time_before(jiffies, timeout)) {
120 if ((readl(fifo_reg) >> 28) == 0)
121 return 0;
122 }
123 return -EBUSY;
124}
125
126/*
127 * Writes to one of the task file registers.
128 */
129static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
130{
131 struct s3c_ide_info *info = host->private_data;
132
133 wait_for_host_ready(info);
134 writeb(addr, reg);
135}
136
137/*
138 * Reads from one of the task file registers.
139 */
140static u8 ata_inb(struct ata_host *host, void __iomem *reg)
141{
142 struct s3c_ide_info *info = host->private_data;
143 u8 temp;
144
145 wait_for_host_ready(info);
146 (void) readb(reg);
147 wait_for_host_ready(info);
148 temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
149 return temp;
150}
151
152/*
153 * pata_s3c_tf_load - send taskfile registers to host controller
154 */
155static void pata_s3c_tf_load(struct ata_port *ap,
156 const struct ata_taskfile *tf)
157{
158 struct ata_ioports *ioaddr = &ap->ioaddr;
159 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
160
161 if (tf->ctl != ap->last_ctl) {
162 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
163 ap->last_ctl = tf->ctl;
164 ata_wait_idle(ap);
165 }
166
167 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
168 ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
169 ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
170 ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
171 ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
172 ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
173 }
174
175 if (is_addr) {
176 ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
177 ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
178 ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
179 ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
180 ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
181 }
182
183 if (tf->flags & ATA_TFLAG_DEVICE)
184 ata_outb(ap->host, tf->device, ioaddr->device_addr);
185
186 ata_wait_idle(ap);
187}
188
189/*
190 * pata_s3c_tf_read - input device's ATA taskfile shadow registers
191 */
192static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
193{
194 struct ata_ioports *ioaddr = &ap->ioaddr;
195
196 tf->feature = ata_inb(ap->host, ioaddr->error_addr);
197 tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
198 tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
199 tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
200 tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
201 tf->device = ata_inb(ap->host, ioaddr->device_addr);
202
203 if (tf->flags & ATA_TFLAG_LBA48) {
204 ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
205 tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
206 tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
207 tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
208 tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
209 tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
210 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
211 ap->last_ctl = tf->ctl;
212 }
213}
214
215/*
216 * pata_s3c_exec_command - issue ATA command to host controller
217 */
218static void pata_s3c_exec_command(struct ata_port *ap,
219 const struct ata_taskfile *tf)
220{
221 ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
222 ata_sff_pause(ap);
223}
224
225/*
226 * pata_s3c_check_status - Read device status register
227 */
228static u8 pata_s3c_check_status(struct ata_port *ap)
229{
230 return ata_inb(ap->host, ap->ioaddr.status_addr);
231}
232
233/*
234 * pata_s3c_check_altstatus - Read alternate device status register
235 */
236static u8 pata_s3c_check_altstatus(struct ata_port *ap)
237{
238 return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
239}
240
241/*
242 * pata_s3c_data_xfer - Transfer data by PIO
243 */
244unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
245 unsigned int buflen, int rw)
246{
247 struct ata_port *ap = dev->link->ap;
248 struct s3c_ide_info *info = ap->host->private_data;
249 void __iomem *data_addr = ap->ioaddr.data_addr;
250 unsigned int words = buflen >> 1, i;
251 u16 *data_ptr = (u16 *)buf;
252
253 /* Requires wait same as in ata_inb/ata_outb */
254 if (rw == READ)
255 for (i = 0; i < words; i++, data_ptr++) {
256 wait_for_host_ready(info);
257 (void) readw(data_addr);
258 wait_for_host_ready(info);
259 *data_ptr = readw(info->ide_addr
260 + S3C_ATA_PIO_RDATA);
261 }
262 else
263 for (i = 0; i < words; i++, data_ptr++) {
264 wait_for_host_ready(info);
265 writew(*data_ptr, data_addr);
266 }
267
268 if (buflen & 0x01)
269 dev_err(ap->dev, "unexpected trailing data\n");
270
271 return words << 1;
272}
273
274/*
275 * pata_s3c_dev_select - Select device on ATA bus
276 */
277static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
278{
279 u8 tmp = ATA_DEVICE_OBS;
280
281 if (device != 0)
282 tmp |= ATA_DEV1;
283
284 ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
285 ata_sff_pause(ap);
286}
287
288/*
289 * pata_s3c_devchk - PATA device presence detection
290 */
291static unsigned int pata_s3c_devchk(struct ata_port *ap,
292 unsigned int device)
293{
294 struct ata_ioports *ioaddr = &ap->ioaddr;
295 u8 nsect, lbal;
296
297 pata_s3c_dev_select(ap, device);
298
299 ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
300 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
301
302 ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
303 ata_outb(ap->host, 0x55, ioaddr->lbal_addr);
304
305 ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
306 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
307
308 nsect = ata_inb(ap->host, ioaddr->nsect_addr);
309 lbal = ata_inb(ap->host, ioaddr->lbal_addr);
310
311 if ((nsect == 0x55) && (lbal == 0xaa))
312 return 1; /* we found a device */
313
314 return 0; /* nothing found */
315}
316
317/*
318 * pata_s3c_wait_after_reset - wait for devices to become ready after reset
319 */
320static int pata_s3c_wait_after_reset(struct ata_link *link,
321 unsigned long deadline)
322{
323 int rc;
324
325 msleep(ATA_WAIT_AFTER_RESET);
326
327 /* always check readiness of the master device */
328 rc = ata_sff_wait_ready(link, deadline);
329 /* -ENODEV means the odd clown forgot the D7 pulldown resistor
330 * and TF status is 0xff, bail out on it too.
331 */
332 if (rc)
333 return rc;
334
335 return 0;
336}
337
338/*
339 * pata_s3c_bus_softreset - PATA device software reset
340 */
341static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
342 unsigned long deadline)
343{
344 struct ata_ioports *ioaddr = &ap->ioaddr;
345
346 /* software reset. causes dev0 to be selected */
347 ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
348 udelay(20);
349 ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
350 udelay(20);
351 ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
352 ap->last_ctl = ap->ctl;
353
354 return pata_s3c_wait_after_reset(&ap->link, deadline);
355}
356
357/*
358 * pata_s3c_softreset - reset host port via ATA SRST
359 */
360static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
361 unsigned long deadline)
362{
363 struct ata_port *ap = link->ap;
364 unsigned int devmask = 0;
365 int rc;
366 u8 err;
367
368 /* determine if device 0 is present */
369 if (pata_s3c_devchk(ap, 0))
370 devmask |= (1 << 0);
371
372 /* select device 0 again */
373 pata_s3c_dev_select(ap, 0);
374
375 /* issue bus reset */
376 rc = pata_s3c_bus_softreset(ap, deadline);
377 /* if link is occupied, -ENODEV too is an error */
378 if (rc && rc != -ENODEV) {
379 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
380 return rc;
381 }
382
383 /* determine by signature whether we have ATA or ATAPI devices */
384 classes[0] = ata_sff_dev_classify(&ap->link.device[0],
385 devmask & (1 << 0), &err);
386
387 return 0;
388}
389
390/*
391 * pata_s3c_set_devctl - Write device control register
392 */
393static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
394{
395 ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
396}
397
398static struct scsi_host_template pata_s3c_sht = {
399 ATA_PIO_SHT(DRV_NAME),
400};
401
402static struct ata_port_operations pata_s3c_port_ops = {
403 .inherits = &ata_sff_port_ops,
404 .sff_check_status = pata_s3c_check_status,
405 .sff_check_altstatus = pata_s3c_check_altstatus,
406 .sff_tf_load = pata_s3c_tf_load,
407 .sff_tf_read = pata_s3c_tf_read,
408 .sff_data_xfer = pata_s3c_data_xfer,
409 .sff_exec_command = pata_s3c_exec_command,
410 .sff_dev_select = pata_s3c_dev_select,
411 .sff_set_devctl = pata_s3c_set_devctl,
412 .softreset = pata_s3c_softreset,
413 .set_piomode = pata_s3c_set_piomode,
414};
415
416static struct ata_port_operations pata_s5p_port_ops = {
417 .inherits = &ata_sff_port_ops,
418 .set_piomode = pata_s3c_set_piomode,
419};
420
421static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
422{
423 u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
424 temp = state ? (temp | 1) : (temp & ~1);
425 writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
426}
427
428static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
429{
430 struct ata_host *host = dev_instance;
431 struct s3c_ide_info *info = host->private_data;
432 u32 reg;
433
434 reg = readl(info->ide_addr + S3C_ATA_IRQ);
435 writel(reg, info->ide_addr + S3C_ATA_IRQ);
436
437 return ata_sff_interrupt(irq, dev_instance);
438}
439
440static void pata_s3c_hwinit(struct s3c_ide_info *info,
441 struct s3c_ide_platdata *pdata)
442{
443 switch (info->cpu_type) {
444 case TYPE_S3C64XX:
445 /* Configure as big endian */
446 pata_s3c_cfg_mode(info->sfr_addr);
447 pata_s3c_set_endian(info->ide_addr, 1);
448 pata_s3c_enable(info->ide_addr, true);
449 msleep(100);
450
451 /* Remove IRQ Status */
452 writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
453 writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
454 break;
455
456 case TYPE_S5PC100:
457 pata_s3c_cfg_mode(info->sfr_addr);
458 /* FALLTHROUGH */
459
460 case TYPE_S5PV210:
461 /* Configure as little endian */
462 pata_s3c_set_endian(info->ide_addr, 0);
463 pata_s3c_enable(info->ide_addr, true);
464 msleep(100);
465
466 /* Remove IRQ Status */
467 writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
468 writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
469 break;
470
471 default:
472 BUG();
473 }
474}
475
476static int __init pata_s3c_probe(struct platform_device *pdev)
477{
478 struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
479 struct device *dev = &pdev->dev;
480 struct s3c_ide_info *info;
481 struct resource *res;
482 struct ata_port *ap;
483 struct ata_host *host;
484 enum s3c_cpu_type cpu_type;
485 int ret;
486
487 cpu_type = platform_get_device_id(pdev)->driver_data;
488
489 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
490 if (!info) {
491 dev_err(dev, "failed to allocate memory for device data\n");
492 return -ENOMEM;
493 }
494
495 info->irq = platform_get_irq(pdev, 0);
496
497 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
498 if (res == NULL) {
499 dev_err(dev, "failed to get mem resource\n");
500 return -EINVAL;
501 }
502
503 if (!devm_request_mem_region(dev, res->start,
504 resource_size(res), DRV_NAME)) {
505 dev_err(dev, "error requesting register region\n");
506 return -EBUSY;
507 }
508
509 info->ide_addr = devm_ioremap(dev, res->start, resource_size(res));
510 if (!info->ide_addr) {
511 dev_err(dev, "failed to map IO base address\n");
512 return -ENOMEM;
513 }
514
515 info->clk = clk_get(&pdev->dev, "cfcon");
516 if (IS_ERR(info->clk)) {
517 dev_err(dev, "failed to get access to cf controller clock\n");
518 ret = PTR_ERR(info->clk);
519 info->clk = NULL;
520 return ret;
521 }
522
523 clk_enable(info->clk);
524
525 /* init ata host */
526 host = ata_host_alloc(dev, 1);
527 if (!host) {
528 dev_err(dev, "failed to allocate ide host\n");
529 ret = -ENOMEM;
530 goto stop_clk;
531 }
532
533 ap = host->ports[0];
534 ap->flags |= ATA_FLAG_MMIO;
535 ap->pio_mask = ATA_PIO4;
536
537 if (cpu_type == TYPE_S3C64XX) {
538 ap->ops = &pata_s3c_port_ops;
539 info->sfr_addr = info->ide_addr + 0x1800;
540 info->ide_addr += 0x1900;
541 info->fifo_status_reg = 0x94;
542 } else if (cpu_type == TYPE_S5PC100) {
543 ap->ops = &pata_s5p_port_ops;
544 info->sfr_addr = info->ide_addr + 0x1800;
545 info->ide_addr += 0x1900;
546 info->fifo_status_reg = 0x84;
547 } else {
548 ap->ops = &pata_s5p_port_ops;
549 info->fifo_status_reg = 0x84;
550 }
551
552 info->cpu_type = cpu_type;
553
554 if (info->irq <= 0) {
555 ap->flags |= ATA_FLAG_PIO_POLLING;
556 info->irq = 0;
557 ata_port_desc(ap, "no IRQ, using PIO polling\n");
558 }
559
560 ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
561 ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
562 ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
563 ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
564 ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
565 ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
566 ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
567 ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
568 ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
569 ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
570 ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
571 ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
572 ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
573
574 ata_port_desc(ap, "mmio cmd 0x%llx ",
575 (unsigned long long)res->start);
576
577 host->private_data = info;
578
579 if (pdata && pdata->setup_gpio)
580 pdata->setup_gpio();
581
582 /* Set endianness and enable the interface */
583 pata_s3c_hwinit(info, pdata);
584
585 platform_set_drvdata(pdev, host);
586
587 return ata_host_activate(host, info->irq,
588 info->irq ? pata_s3c_irq : NULL,
589 0, &pata_s3c_sht);
590
591stop_clk:
592 clk_disable(info->clk);
593 clk_put(info->clk);
594 return ret;
595}
596
597static int __exit pata_s3c_remove(struct platform_device *pdev)
598{
599 struct ata_host *host = platform_get_drvdata(pdev);
600 struct s3c_ide_info *info = host->private_data;
601
602 ata_host_detach(host);
603
604 clk_disable(info->clk);
605 clk_put(info->clk);
606
607 return 0;
608}
609
610#ifdef CONFIG_PM
611static int pata_s3c_suspend(struct device *dev)
612{
613 struct platform_device *pdev = to_platform_device(dev);
614 struct ata_host *host = platform_get_drvdata(pdev);
615
616 return ata_host_suspend(host, PMSG_SUSPEND);
617}
618
619static int pata_s3c_resume(struct device *dev)
620{
621 struct platform_device *pdev = to_platform_device(dev);
622 struct ata_host *host = platform_get_drvdata(pdev);
623 struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
624 struct s3c_ide_info *info = host->private_data;
625
626 pata_s3c_hwinit(info, pdata);
627 ata_host_resume(host);
628
629 return 0;
630}
631
632static const struct dev_pm_ops pata_s3c_pm_ops = {
633 .suspend = pata_s3c_suspend,
634 .resume = pata_s3c_resume,
635};
636#endif
637
638/* driver device registration */
639static struct platform_device_id pata_s3c_driver_ids[] = {
640 {
641 .name = "s3c64xx-pata",
642 .driver_data = TYPE_S3C64XX,
643 }, {
644 .name = "s5pc100-pata",
645 .driver_data = TYPE_S5PC100,
646 }, {
647 .name = "s5pv210-pata",
648 .driver_data = TYPE_S5PV210,
649 },
650 { }
651};
652
653MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
654
655static struct platform_driver pata_s3c_driver = {
656 .remove = __exit_p(pata_s3c_remove),
657 .id_table = pata_s3c_driver_ids,
658 .driver = {
659 .name = DRV_NAME,
660 .owner = THIS_MODULE,
661#ifdef CONFIG_PM
662 .pm = &pata_s3c_pm_ops,
663#endif
664 },
665};
666
667static int __init pata_s3c_init(void)
668{
669 return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
670}
671
672static void __exit pata_s3c_exit(void)
673{
674 platform_driver_unregister(&pata_s3c_driver);
675}
676
677module_init(pata_s3c_init);
678module_exit(pata_s3c_exit);
679
680MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
681MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
682MODULE_LICENSE("GPL");
683MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index dfecc6f964b0..e2c18257adff 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
174 sc1200_set_dmamode(ap, adev); 174 sc1200_set_dmamode(ap, adev);
175 } 175 }
176 176
177 return ata_sff_qc_issue(qc); 177 return ata_bmdma_qc_issue(qc);
178} 178}
179 179
180/** 180/**
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
209 209
210static struct ata_port_operations sc1200_port_ops = { 210static struct ata_port_operations sc1200_port_ops = {
211 .inherits = &ata_bmdma_port_ops, 211 .inherits = &ata_bmdma_port_ops,
212 .qc_prep = ata_sff_dumb_qc_prep, 212 .qc_prep = ata_bmdma_dumb_qc_prep,
213 .qc_issue = sc1200_qc_issue, 213 .qc_issue = sc1200_qc_issue,
214 .qc_defer = sc1200_qc_defer, 214 .qc_defer = sc1200_qc_defer,
215 .cable_detect = ata_cable_40wire, 215 .cable_detect = ata_cable_40wire,
@@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
237 }; 237 };
238 const struct ata_port_info *ppi[] = { &info, NULL }; 238 const struct ata_port_info *ppi[] = { &info, NULL };
239 239
240 return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); 240 return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
241} 241}
242 242
243static const struct pci_device_id sc1200[] = { 243static const struct pci_device_id sc1200[] = {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 4257d6b40af4..fe36966f7e34 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -168,8 +168,7 @@ static const unsigned long JCACTSELtbl[2][7] = {
168}; 168};
169 169
170static const struct pci_device_id scc_pci_tbl[] = { 170static const struct pci_device_id scc_pci_tbl[] = {
171 {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA, 171 { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
173 { } /* terminate list */ 172 { } /* terminate list */
174}; 173};
175 174
@@ -265,7 +264,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
265 printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); 264 printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
266 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 265 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
267 } 266 }
268 return ata_bmdma_mode_filter(adev, mask); 267 return mask;
269} 268}
270 269
271/** 270/**
@@ -416,6 +415,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device)
416} 415}
417 416
418/** 417/**
418 * scc_set_devctl - Write device control reg
419 * @ap: port where the device is
420 * @ctl: value to write
421 */
422
423static void scc_set_devctl(struct ata_port *ap, u8 ctl)
424{
425 out_be32(ap->ioaddr.ctl_addr, ctl);
426}
427
428/**
419 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction 429 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
420 * @qc: Info associated with this ATA transaction. 430 * @qc: Info associated with this ATA transaction.
421 * 431 *
@@ -430,7 +440,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc)
430 void __iomem *mmio = ap->ioaddr.bmdma_addr; 440 void __iomem *mmio = ap->ioaddr.bmdma_addr;
431 441
432 /* load PRD table addr */ 442 /* load PRD table addr */
433 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma); 443 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
434 444
435 /* specify data direction, triple-check start bit is clear */ 445 /* specify data direction, triple-check start bit is clear */
436 dmactl = in_be32(mmio + SCC_DMA_CMD); 446 dmactl = in_be32(mmio + SCC_DMA_CMD);
@@ -501,8 +511,8 @@ static unsigned int scc_devchk (struct ata_port *ap,
501 * Note: Original code is ata_sff_wait_after_reset 511 * Note: Original code is ata_sff_wait_after_reset
502 */ 512 */
503 513
504int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, 514static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
505 unsigned long deadline) 515 unsigned long deadline)
506{ 516{
507 struct ata_port *ap = link->ap; 517 struct ata_port *ap = link->ap;
508 struct ata_ioports *ioaddr = &ap->ioaddr; 518 struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -817,54 +827,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
817} 827}
818 828
819/** 829/**
820 * scc_irq_on - Enable interrupts on a port.
821 * @ap: Port on which interrupts are enabled.
822 *
823 * Note: Original code is ata_sff_irq_on().
824 */
825
826static u8 scc_irq_on (struct ata_port *ap)
827{
828 struct ata_ioports *ioaddr = &ap->ioaddr;
829 u8 tmp;
830
831 ap->ctl &= ~ATA_NIEN;
832 ap->last_ctl = ap->ctl;
833
834 out_be32(ioaddr->ctl_addr, ap->ctl);
835 tmp = ata_wait_idle(ap);
836
837 ap->ops->sff_irq_clear(ap);
838
839 return tmp;
840}
841
842/**
843 * scc_freeze - Freeze BMDMA controller port
844 * @ap: port to freeze
845 *
846 * Note: Original code is ata_sff_freeze().
847 */
848
849static void scc_freeze (struct ata_port *ap)
850{
851 struct ata_ioports *ioaddr = &ap->ioaddr;
852
853 ap->ctl |= ATA_NIEN;
854 ap->last_ctl = ap->ctl;
855
856 out_be32(ioaddr->ctl_addr, ap->ctl);
857
858 /* Under certain circumstances, some controllers raise IRQ on
859 * ATA_NIEN manipulation. Also, many controllers fail to mask
860 * previously pending IRQ on ATA_NIEN assertion. Clear it.
861 */
862 ap->ops->sff_check_status(ap);
863
864 ap->ops->sff_irq_clear(ap);
865}
866
867/**
868 * scc_pata_prereset - prepare for reset 830 * scc_pata_prereset - prepare for reset
869 * @ap: ATA port to be reset 831 * @ap: ATA port to be reset
870 * @deadline: deadline jiffies for the operation 832 * @deadline: deadline jiffies for the operation
@@ -903,8 +865,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
903 } 865 }
904 866
905 /* set up device control */ 867 /* set up device control */
906 if (ap->ioaddr.ctl_addr) 868 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
907 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
908 869
909 DPRINTK("EXIT\n"); 870 DPRINTK("EXIT\n");
910} 871}
@@ -913,7 +874,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
913 * scc_irq_clear - Clear PCI IDE BMDMA interrupt. 874 * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
914 * @ap: Port associated with this ATA transaction. 875 * @ap: Port associated with this ATA transaction.
915 * 876 *
916 * Note: Original code is ata_sff_irq_clear(). 877 * Note: Original code is ata_bmdma_irq_clear().
917 */ 878 */
918 879
919static void scc_irq_clear (struct ata_port *ap) 880static void scc_irq_clear (struct ata_port *ap)
@@ -930,7 +891,7 @@ static void scc_irq_clear (struct ata_port *ap)
930 * scc_port_start - Set port up for dma. 891 * scc_port_start - Set port up for dma.
931 * @ap: Port to initialize 892 * @ap: Port to initialize
932 * 893 *
933 * Allocate space for PRD table using ata_port_start(). 894 * Allocate space for PRD table using ata_bmdma_port_start().
934 * Set PRD table address for PTERADD. (PRD Transfer End Read) 895 * Set PRD table address for PTERADD. (PRD Transfer End Read)
935 */ 896 */
936 897
@@ -939,11 +900,11 @@ static int scc_port_start (struct ata_port *ap)
939 void __iomem *mmio = ap->ioaddr.bmdma_addr; 900 void __iomem *mmio = ap->ioaddr.bmdma_addr;
940 int rc; 901 int rc;
941 902
942 rc = ata_port_start(ap); 903 rc = ata_bmdma_port_start(ap);
943 if (rc) 904 if (rc)
944 return rc; 905 return rc;
945 906
946 out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma); 907 out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
947 return 0; 908 return 0;
948} 909}
949 910
@@ -978,6 +939,7 @@ static struct ata_port_operations scc_pata_ops = {
978 .sff_check_status = scc_check_status, 939 .sff_check_status = scc_check_status,
979 .sff_check_altstatus = scc_check_altstatus, 940 .sff_check_altstatus = scc_check_altstatus,
980 .sff_dev_select = scc_dev_select, 941 .sff_dev_select = scc_dev_select,
942 .sff_set_devctl = scc_set_devctl,
981 943
982 .bmdma_setup = scc_bmdma_setup, 944 .bmdma_setup = scc_bmdma_setup,
983 .bmdma_start = scc_bmdma_start, 945 .bmdma_start = scc_bmdma_start,
@@ -985,14 +947,11 @@ static struct ata_port_operations scc_pata_ops = {
985 .bmdma_status = scc_bmdma_status, 947 .bmdma_status = scc_bmdma_status,
986 .sff_data_xfer = scc_data_xfer, 948 .sff_data_xfer = scc_data_xfer,
987 949
988 .freeze = scc_freeze,
989 .prereset = scc_pata_prereset, 950 .prereset = scc_pata_prereset,
990 .softreset = scc_softreset, 951 .softreset = scc_softreset,
991 .postreset = scc_postreset, 952 .postreset = scc_postreset,
992 .post_internal_cmd = scc_bmdma_stop,
993 953
994 .sff_irq_clear = scc_irq_clear, 954 .sff_irq_clear = scc_irq_clear,
995 .sff_irq_on = scc_irq_on,
996 955
997 .port_start = scc_port_start, 956 .port_start = scc_port_start,
998 .port_stop = scc_port_stop, 957 .port_stop = scc_port_stop,
@@ -1145,7 +1104,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1145 if (rc) 1104 if (rc)
1146 return rc; 1105 return rc;
1147 1106
1148 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 1107 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
1149 IRQF_SHARED, &scc_sht); 1108 IRQF_SHARED, &scc_sht);
1150} 1109}
1151 1110
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 99cceb458e2a..e97b32f03a6e 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
174{ 174{
175 static int printed_version; 175 static int printed_version;
176 const struct ata_port_info *ppi[] = { &sch_port_info, NULL }; 176 const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
177 struct ata_host *host;
178 int rc;
179 177
180 if (!printed_version++) 178 if (!printed_version++)
181 dev_printk(KERN_DEBUG, &pdev->dev, 179 dev_printk(KERN_DEBUG, &pdev->dev,
182 "version " DRV_VERSION "\n"); 180 "version " DRV_VERSION "\n");
183 181
184 /* enable device and prepare host */ 182 return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
185 rc = pcim_enable_device(pdev);
186 if (rc)
187 return rc;
188 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
189 if (rc)
190 return rc;
191 pci_set_master(pdev);
192 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
193} 183}
194 184
195static int __init sch_init(void) 185static int __init sch_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9524d54035f7..86dd714e3e1d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
198{ 198{
199 if (adev->class == ATA_DEV_ATA) 199 if (adev->class == ATA_DEV_ATA)
200 mask &= ~ATA_MASK_UDMA; 200 mask &= ~ATA_MASK_UDMA;
201 return ata_bmdma_mode_filter(adev, mask); 201 return mask;
202} 202}
203 203
204 204
@@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
218 218
219 /* Disk, UDMA */ 219 /* Disk, UDMA */
220 if (adev->class != ATA_DEV_ATA) 220 if (adev->class != ATA_DEV_ATA)
221 return ata_bmdma_mode_filter(adev, mask); 221 return mask;
222 222
223 /* Actually do need to check */ 223 /* Actually do need to check */
224 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 224 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
227 if (!strcmp(p, model_num)) 227 if (!strcmp(p, model_num))
228 mask &= ~(0xE0 << ATA_SHIFT_UDMA); 228 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
229 } 229 }
230 return ata_bmdma_mode_filter(adev, mask); 230 return mask;
231} 231}
232 232
233/** 233/**
@@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
460 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) 460 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
461 ata_pci_bmdma_clear_simplex(pdev); 461 ata_pci_bmdma_clear_simplex(pdev);
462 462
463 return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); 463 return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
464} 464}
465 465
466#ifdef CONFIG_PM 466#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c6c589c23ffc..d3190d7ec304 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
190 pci_write_config_word(pdev, ua, ultra); 190 pci_write_config_word(pdev, ua, ultra);
191} 191}
192 192
193/**
194 * sil680_sff_exec_command - issue ATA command to host controller
195 * @ap: port to which command is being issued
196 * @tf: ATA taskfile register set
197 *
198 * Issues ATA command, with proper synchronization with interrupt
199 * handler / other threads. Use our MMIO space for PCI posting to avoid
200 * a hideously slow cycle all the way to the device.
201 *
202 * LOCKING:
203 * spin_lock_irqsave(host lock)
204 */
205void sil680_sff_exec_command(struct ata_port *ap,
206 const struct ata_taskfile *tf)
207{
208 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
209 iowrite8(tf->command, ap->ioaddr.command_addr);
210 ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
211}
212
193static struct scsi_host_template sil680_sht = { 213static struct scsi_host_template sil680_sht = {
194 ATA_BMDMA_SHT(DRV_NAME), 214 ATA_BMDMA_SHT(DRV_NAME),
195}; 215};
196 216
217
197static struct ata_port_operations sil680_port_ops = { 218static struct ata_port_operations sil680_port_ops = {
198 .inherits = &ata_bmdma32_port_ops, 219 .inherits = &ata_bmdma32_port_ops,
199 .cable_detect = sil680_cable_detect, 220 .sff_exec_command = sil680_sff_exec_command,
200 .set_piomode = sil680_set_piomode, 221 .cable_detect = sil680_cable_detect,
201 .set_dmamode = sil680_set_dmamode, 222 .set_piomode = sil680_set_piomode,
223 .set_dmamode = sil680_set_dmamode,
202}; 224};
203 225
204/** 226/**
@@ -352,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
352 ata_sff_std_ports(&host->ports[1]->ioaddr); 374 ata_sff_std_ports(&host->ports[1]->ioaddr);
353 375
354 /* Register & activate */ 376 /* Register & activate */
355 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 377 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
356 IRQF_SHARED, &sil680_sht); 378 IRQF_SHARED, &sil680_sht);
357 379
358use_ioports: 380use_ioports:
359 return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); 381 return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
360} 382}
361 383
362#ifdef CONFIG_PM 384#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b6708032f321..60cea13cccce 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
826 826
827 sis_fixup(pdev, chipset); 827 sis_fixup(pdev, chipset);
828 828
829 return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); 829 return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
830} 830}
831 831
832#ifdef CONFIG_PM 832#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 733b042a7469..98548f640c8e 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
316 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; 316 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
317 pci_write_config_dword(dev, 0x40, val); 317 pci_write_config_dword(dev, 0x40, val);
318 318
319 return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); 319 return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
320} 320}
321 321
322static const struct pci_device_id sl82c105[] = { 322static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 48f50600ed2a..0d1f89e571dd 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
201 if (!printed_version++) 201 if (!printed_version++)
202 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); 202 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
203 203
204 return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); 204 return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
205} 205}
206 206
207static const struct pci_device_id triflex[] = { 207static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 741e7cb69d8c..ac8d7d97e408 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
355 mask &= ~ ATA_MASK_UDMA; 355 mask &= ~ ATA_MASK_UDMA;
356 } 356 }
357 } 357 }
358 return ata_bmdma_mode_filter(dev, mask); 358 return mask;
359} 359}
360 360
361/** 361/**
@@ -426,7 +426,7 @@ static int via_port_start(struct ata_port *ap)
426 struct via_port *vp; 426 struct via_port *vp;
427 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 427 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
428 428
429 int ret = ata_sff_port_start(ap); 429 int ret = ata_bmdma_port_start(ap);
430 if (ret < 0) 430 if (ret < 0)
431 return ret; 431 return ret;
432 432
@@ -629,7 +629,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
629 } 629 }
630 630
631 /* We have established the device type, now fire it up */ 631 /* We have established the device type, now fire it up */
632 return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); 632 return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
633} 633}
634 634
635#ifdef CONFIG_PM 635#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f670..000000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
1/*
2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat
4 *
5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/delay.h>
14#include <scsi/scsi_host.h>
15#include <linux/libata.h>
16#include <linux/platform_device.h>
17
18#define DRV_NAME "pata_winbond"
19#define DRV_VERSION "0.0.3"
20
21#define NR_HOST 4 /* Two winbond controllers, two channels each */
22
23struct winbond_data {
24 unsigned long config;
25 struct platform_device *platform_dev;
26};
27
28static struct ata_host *winbond_host[NR_HOST];
29static struct winbond_data winbond_data[NR_HOST];
30static int nr_winbond_host;
31
32#ifdef MODULE
33static int probe_winbond = 1;
34#else
35static int probe_winbond;
36#endif
37
38static DEFINE_SPINLOCK(winbond_lock);
39
40static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
41{
42 unsigned long flags;
43 spin_lock_irqsave(&winbond_lock, flags);
44 outb(reg, port + 0x01);
45 outb(val, port + 0x02);
46 spin_unlock_irqrestore(&winbond_lock, flags);
47}
48
49static u8 winbond_readcfg(unsigned long port, u8 reg)
50{
51 u8 val;
52
53 unsigned long flags;
54 spin_lock_irqsave(&winbond_lock, flags);
55 outb(reg, port + 0x01);
56 val = inb(port + 0x02);
57 spin_unlock_irqrestore(&winbond_lock, flags);
58
59 return val;
60}
61
62static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
63{
64 struct ata_timing t;
65 struct winbond_data *winbond = ap->host->private_data;
66 int active, recovery;
67 u8 reg;
68 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
69
70 reg = winbond_readcfg(winbond->config, 0x81);
71
72 /* Get the timing data in cycles */
73 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
74 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
75 else
76 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
77
78 active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
79 recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
80 timing = (active << 4) | recovery;
81 winbond_writecfg(winbond->config, timing, reg);
82
83 /* Load the setup timing */
84
85 reg = 0x35;
86 if (adev->class != ATA_DEV_ATA)
87 reg |= 0x08; /* FIFO off */
88 if (!ata_pio_need_iordy(adev))
89 reg |= 0x02; /* IORDY off */
90 reg |= (clamp_val(t.setup, 0, 3) << 6);
91 winbond_writecfg(winbond->config, timing + 1, reg);
92}
93
94
95static unsigned int winbond_data_xfer(struct ata_device *dev,
96 unsigned char *buf, unsigned int buflen, int rw)
97{
98 struct ata_port *ap = dev->link->ap;
99 int slop = buflen & 3;
100
101 if (ata_id_has_dword_io(dev->id)) {
102 if (rw == READ)
103 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
106
107 if (unlikely(slop)) {
108 __le32 pad;
109 if (rw == READ) {
110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
111 memcpy(buf + buflen - slop, &pad, slop);
112 } else {
113 memcpy(&pad, buf + buflen - slop, slop);
114 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
115 }
116 buflen += 4 - slop;
117 }
118 } else
119 buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
120
121 return buflen;
122}
123
124static struct scsi_host_template winbond_sht = {
125 ATA_PIO_SHT(DRV_NAME),
126};
127
128static struct ata_port_operations winbond_port_ops = {
129 .inherits = &ata_sff_port_ops,
130 .sff_data_xfer = winbond_data_xfer,
131 .cable_detect = ata_cable_40wire,
132 .set_piomode = winbond_set_piomode,
133};
134
135/**
136 * winbond_init_one - attach a winbond interface
137 * @type: Type to display
138 * @io: I/O port start
139 * @irq: interrupt line
140 * @fast: True if on a > 33Mhz VLB
141 *
142 * Register a VLB bus IDE interface. Such interfaces are PIO and we
143 * assume do not support IRQ sharing.
144 */
145
146static __init int winbond_init_one(unsigned long port)
147{
148 struct platform_device *pdev;
149 u8 reg;
150 int i, rc;
151
152 reg = winbond_readcfg(port, 0x81);
153 reg |= 0x80; /* jumpered mode off */
154 winbond_writecfg(port, 0x81, reg);
155 reg = winbond_readcfg(port, 0x83);
156 reg |= 0xF0; /* local control */
157 winbond_writecfg(port, 0x83, reg);
158 reg = winbond_readcfg(port, 0x85);
159 reg |= 0xF0; /* programmable timing */
160 winbond_writecfg(port, 0x85, reg);
161
162 reg = winbond_readcfg(port, 0x81);
163
164 if (!(reg & 0x03)) /* Disabled */
165 return -ENODEV;
166
167 for (i = 0; i < 2 ; i ++) {
168 unsigned long cmd_port = 0x1F0 - (0x80 * i);
169 unsigned long ctl_port = cmd_port + 0x206;
170 struct ata_host *host;
171 struct ata_port *ap;
172 void __iomem *cmd_addr, *ctl_addr;
173
174 if (!(reg & (1 << i)))
175 continue;
176
177 pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
178 if (IS_ERR(pdev))
179 return PTR_ERR(pdev);
180
181 rc = -ENOMEM;
182 host = ata_host_alloc(&pdev->dev, 1);
183 if (!host)
184 goto err_unregister;
185 ap = host->ports[0];
186
187 rc = -ENOMEM;
188 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
189 ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
190 if (!cmd_addr || !ctl_addr)
191 goto err_unregister;
192
193 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
194
195 ap->ops = &winbond_port_ops;
196 ap->pio_mask = ATA_PIO4;
197 ap->flags |= ATA_FLAG_SLAVE_POSS;
198 ap->ioaddr.cmd_addr = cmd_addr;
199 ap->ioaddr.altstatus_addr = ctl_addr;
200 ap->ioaddr.ctl_addr = ctl_addr;
201 ata_sff_std_ports(&ap->ioaddr);
202
203 /* hook in a private data structure per channel */
204 host->private_data = &winbond_data[nr_winbond_host];
205 winbond_data[nr_winbond_host].config = port;
206 winbond_data[nr_winbond_host].platform_dev = pdev;
207
208 /* activate */
209 rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
210 &winbond_sht);
211 if (rc)
212 goto err_unregister;
213
214 winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
215 }
216
217 return 0;
218
219 err_unregister:
220 platform_device_unregister(pdev);
221 return rc;
222}
223
224/**
225 * winbond_init - attach winbond interfaces
226 *
227 * Attach winbond IDE interfaces by scanning the ports it may occupy.
228 */
229
230static __init int winbond_init(void)
231{
232 static const unsigned long config[2] = { 0x130, 0x1B0 };
233
234 int ct = 0;
235 int i;
236
237 if (probe_winbond == 0)
238 return -ENODEV;
239
240 /*
241 * Check both base addresses
242 */
243
244 for (i = 0; i < 2; i++) {
245 if (probe_winbond & (1<<i)) {
246 int ret = 0;
247 unsigned long port = config[i];
248
249 if (request_region(port, 2, "pata_winbond")) {
250 ret = winbond_init_one(port);
251 if (ret <= 0)
252 release_region(port, 2);
253 else ct+= ret;
254 }
255 }
256 }
257 if (ct != 0)
258 return 0;
259 return -ENODEV;
260}
261
262static __exit void winbond_exit(void)
263{
264 int i;
265
266 for (i = 0; i < nr_winbond_host; i++) {
267 ata_host_detach(winbond_host[i]);
268 release_region(winbond_data[i].config, 2);
269 platform_device_unregister(winbond_data[i].platform_dev);
270 }
271}
272
273MODULE_AUTHOR("Alan Cox");
274MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
275MODULE_LICENSE("GPL");
276MODULE_VERSION(DRV_VERSION);
277
278module_init(winbond_init);
279module_exit(winbond_exit);
280
281module_param(probe_winbond, int, 0);
282
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5904cfdb8dbe..adbe0426c8f0 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
324 VPRINTK("ENTER\n"); 324 VPRINTK("ENTER\n");
325 325
326 adma_enter_reg_mode(qc->ap); 326 adma_enter_reg_mode(qc->ap);
327 if (qc->tf.protocol != ATA_PROT_DMA) { 327 if (qc->tf.protocol != ATA_PROT_DMA)
328 ata_sff_qc_prep(qc);
329 return; 328 return;
330 }
331 329
332 buf[i++] = 0; /* Response flags */ 330 buf[i++] = 0; /* Response flags */
333 buf[i++] = 0; /* reserved */ 331 buf[i++] = 0; /* reserved */
@@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
442 continue; 440 continue;
443 handled = 1; 441 handled = 1;
444 adma_enter_reg_mode(ap); 442 adma_enter_reg_mode(ap);
445 if (ap->flags & ATA_FLAG_DISABLED)
446 continue;
447 pp = ap->private_data; 443 pp = ap->private_data;
448 if (!pp || pp->state != adma_state_pkt) 444 if (!pp || pp->state != adma_state_pkt)
449 continue; 445 continue;
@@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
484 unsigned int handled = 0, port_no; 480 unsigned int handled = 0, port_no;
485 481
486 for (port_no = 0; port_no < host->n_ports; ++port_no) { 482 for (port_no = 0; port_no < host->n_ports; ++port_no) {
487 struct ata_port *ap; 483 struct ata_port *ap = host->ports[port_no];
488 ap = host->ports[port_no]; 484 struct adma_port_priv *pp = ap->private_data;
489 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { 485 struct ata_queued_cmd *qc;
490 struct ata_queued_cmd *qc; 486
491 struct adma_port_priv *pp = ap->private_data; 487 if (!pp || pp->state != adma_state_mmio)
492 if (!pp || pp->state != adma_state_mmio) 488 continue;
489 qc = ata_qc_from_tag(ap, ap->link.active_tag);
490 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
491
492 /* check main status, clearing INTRQ */
493 u8 status = ata_sff_check_status(ap);
494 if ((status & ATA_BUSY))
493 continue; 495 continue;
494 qc = ata_qc_from_tag(ap, ap->link.active_tag); 496 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
495 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 497 ap->print_id, qc->tf.protocol, status);
496 498
497 /* check main status, clearing INTRQ */ 499 /* complete taskfile transaction */
498 u8 status = ata_sff_check_status(ap); 500 pp->state = adma_state_idle;
499 if ((status & ATA_BUSY)) 501 qc->err_mask |= ac_err_mask(status);
500 continue; 502 if (!qc->err_mask)
501 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 503 ata_qc_complete(qc);
502 ap->print_id, qc->tf.protocol, status); 504 else {
503 505 struct ata_eh_info *ehi = &ap->link.eh_info;
504 /* complete taskfile transaction */ 506 ata_ehi_clear_desc(ehi);
505 pp->state = adma_state_idle; 507 ata_ehi_push_desc(ehi, "status 0x%02X", status);
506 qc->err_mask |= ac_err_mask(status); 508
507 if (!qc->err_mask) 509 if (qc->err_mask == AC_ERR_DEV)
508 ata_qc_complete(qc); 510 ata_port_abort(ap);
509 else { 511 else
510 struct ata_eh_info *ehi = 512 ata_port_freeze(ap);
511 &ap->link.eh_info;
512 ata_ehi_clear_desc(ehi);
513 ata_ehi_push_desc(ehi,
514 "status 0x%02X", status);
515
516 if (qc->err_mask == AC_ERR_DEV)
517 ata_port_abort(ap);
518 else
519 ata_port_freeze(ap);
520 }
521 handled = 1;
522 } 513 }
514 handled = 1;
523 } 515 }
524 } 516 }
525 return handled; 517 return handled;
@@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap)
562{ 554{
563 struct device *dev = ap->host->dev; 555 struct device *dev = ap->host->dev;
564 struct adma_port_priv *pp; 556 struct adma_port_priv *pp;
565 int rc;
566 557
567 rc = ata_port_start(ap);
568 if (rc)
569 return rc;
570 adma_enter_reg_mode(ap); 558 adma_enter_reg_mode(ap);
571 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 559 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
572 if (!pp) 560 if (!pp)
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
new file mode 100644
index 000000000000..6cf57c5c2b5f
--- /dev/null
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -0,0 +1,1756 @@
1/*
2 * drivers/ata/sata_dwc_460ex.c
3 *
4 * Synopsys DesignWare Cores (DWC) SATA host driver
5 *
6 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
7 *
8 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
9 * Copyright 2008 DENX Software Engineering
10 *
11 * Based on versions provided by AMCC and Synopsys which are:
12 * Copyright 2006 Applied Micro Circuits Corporation
13 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21#ifdef CONFIG_SATA_DWC_DEBUG
22#define DEBUG
23#endif
24
25#ifdef CONFIG_SATA_DWC_VDEBUG
26#define VERBOSE_DEBUG
27#define DEBUG_NCQ
28#endif
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/device.h>
34#include <linux/of_platform.h>
35#include <linux/platform_device.h>
36#include <linux/libata.h>
37#include <linux/slab.h>
38#include "libata.h"
39
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42
43#define DRV_NAME "sata-dwc"
44#define DRV_VERSION "1.0"
45
46/* SATA DMA driver Globals */
47#define DMA_NUM_CHANS 1
48#define DMA_NUM_CHAN_REGS 8
49
50/* SATA DMA Register definitions */
51#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
52
53struct dmareg {
54 u32 low; /* Low bits 0-31 */
55 u32 high; /* High bits 32-63 */
56};
57
58/* DMA Per Channel registers */
59struct dma_chan_regs {
60 struct dmareg sar; /* Source Address */
61 struct dmareg dar; /* Destination address */
62 struct dmareg llp; /* Linked List Pointer */
63 struct dmareg ctl; /* Control */
64 struct dmareg sstat; /* Source Status not implemented in core */
65 struct dmareg dstat; /* Destination Status not implemented in core*/
66 struct dmareg sstatar; /* Source Status Address not impl in core */
67 struct dmareg dstatar; /* Destination Status Address not implemente */
68 struct dmareg cfg; /* Config */
69 struct dmareg sgr; /* Source Gather */
70 struct dmareg dsr; /* Destination Scatter */
71};
72
73/* Generic Interrupt Registers */
74struct dma_interrupt_regs {
75 struct dmareg tfr; /* Transfer Interrupt */
76 struct dmareg block; /* Block Interrupt */
77 struct dmareg srctran; /* Source Transfer Interrupt */
78 struct dmareg dsttran; /* Dest Transfer Interrupt */
79 struct dmareg error; /* Error */
80};
81
82struct ahb_dma_regs {
83 struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
84 struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
85 struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
86 struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
87 struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
88 struct dmareg statusInt; /* Interrupt combined*/
89 struct dmareg rq_srcreg; /* Src Trans Req */
90 struct dmareg rq_dstreg; /* Dst Trans Req */
91 struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/
92 struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/
93 struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/
94 struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/
95 struct dmareg dma_cfg; /* DMA Config */
96 struct dmareg dma_chan_en; /* DMA Channel Enable*/
97 struct dmareg dma_id; /* DMA ID */
98 struct dmareg dma_test; /* DMA Test */
99 struct dmareg res1; /* reserved */
100 struct dmareg res2; /* reserved */
101 /*
102 * DMA Comp Params
103 * Param 6 = dma_param[0], Param 5 = dma_param[1],
104 * Param 4 = dma_param[2] ...
105 */
106 struct dmareg dma_params[6];
107};
108
109/* Data structure for linked list item */
110struct lli {
111 u32 sar; /* Source Address */
112 u32 dar; /* Destination address */
113 u32 llp; /* Linked List Pointer */
114 struct dmareg ctl; /* Control */
115 struct dmareg dstat; /* Destination Status */
116};
117
118enum {
119 SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)),
120 SATA_DWC_DMAC_LLI_NUM = 256,
121 SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
122 SATA_DWC_DMAC_LLI_NUM),
123 SATA_DWC_DMAC_TWIDTH_BYTES = 4,
124 SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
125 SATA_DWC_DMAC_TWIDTH_BYTES),
126};
127
128/* DMA Register Operation Bits */
129enum {
130 DMA_EN = 0x00000001, /* Enable AHB DMA */
131 DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */
132 DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */
133};
134
135#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
136#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
137 /* Enable channel */
138#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
139 ((0x000000001 << (ch)) << 8))
140 /* Disable channel */
141#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
142 /* Transfer Type & Flow Controller */
143#define DMA_CTL_TTFC(type) (((type) & 0x7) << 20)
144#define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */
145#define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */
146 /* Src Burst Transaction Length */
147#define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
148 /* Dst Burst Transaction Length */
149#define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
150 /* Source Transfer Width */
151#define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
152 /* Destination Transfer Width */
153#define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
154
155/* Assign HW handshaking interface (x) to destination / source peripheral */
156#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
157#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
158#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
159
160/*
161 * This define is used to set block chaining disabled in the control low
162 * register. It is already in little endian format so it can be &'d dirctly.
163 * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
164 */
165enum {
166 DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
167 DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
168 DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */
169 DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */
170 DMA_CTL_SINC_DEC = 0x00000200,
171 DMA_CTL_SINC_NOCHANGE = 0x00000400,
172 DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */
173 DMA_CTL_DINC_DEC = 0x00000080,
174 DMA_CTL_DINC_NOCHANGE = 0x00000100,
175 DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */
176
177/* Channel Configuration Register high bits */
178 DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */
179 DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */
180
181/* Channel Configuration Register low bits */
182 DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */
183 DMA_CFG_RELD_SRC = 0x40000000,
184 DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */
185 DMA_CFG_HS_SELDST = 0x00000400,
186 DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */
187
188/* Channel Linked List Pointer Register */
189 DMA_LLP_AHBMASTER1 = 0, /* List Master Select */
190 DMA_LLP_AHBMASTER2 = 1,
191
192 SATA_DWC_MAX_PORTS = 1,
193
194 SATA_DWC_SCR_OFFSET = 0x24,
195 SATA_DWC_REG_OFFSET = 0x64,
196};
197
198/* DWC SATA Registers */
199struct sata_dwc_regs {
200 u32 fptagr; /* 1st party DMA tag */
201 u32 fpbor; /* 1st party DMA buffer offset */
202 u32 fptcr; /* 1st party DMA Xfr count */
203 u32 dmacr; /* DMA Control */
204 u32 dbtsr; /* DMA Burst Transac size */
205 u32 intpr; /* Interrupt Pending */
206 u32 intmr; /* Interrupt Mask */
207 u32 errmr; /* Error Mask */
208 u32 llcr; /* Link Layer Control */
209 u32 phycr; /* PHY Control */
210 u32 physr; /* PHY Status */
211 u32 rxbistpd; /* Recvd BIST pattern def register */
212 u32 rxbistpd1; /* Recvd BIST data dword1 */
213 u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
214 u32 txbistpd; /* Trans BIST pattern def register */
215 u32 txbistpd1; /* Trans BIST data dword1 */
216 u32 txbistpd2; /* Trans BIST data dword2 */
217 u32 bistcr; /* BIST Control Register */
218 u32 bistfctr; /* BIST FIS Count Register */
219 u32 bistsr; /* BIST Status Register */
220 u32 bistdecr; /* BIST Dword Error count register */
221 u32 res[15]; /* Reserved locations */
222 u32 testr; /* Test Register */
223 u32 versionr; /* Version Register */
224 u32 idr; /* ID Register */
225 u32 unimpl[192]; /* Unimplemented */
226 u32 dmadr[256]; /* FIFO Locations in DMA Mode */
227};
228
229enum {
230 SCR_SCONTROL_DET_ENABLE = 0x00000001,
231 SCR_SSTATUS_DET_PRESENT = 0x00000001,
232 SCR_SERROR_DIAG_X = 0x04000000,
233/* DWC SATA Register Operations */
234 SATA_DWC_TXFIFO_DEPTH = 0x01FF,
235 SATA_DWC_RXFIFO_DEPTH = 0x01FF,
236 SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
237 SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
238 SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
239 SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
240 SATA_DWC_INTPR_DMAT = 0x00000001,
241 SATA_DWC_INTPR_NEWFP = 0x00000002,
242 SATA_DWC_INTPR_PMABRT = 0x00000004,
243 SATA_DWC_INTPR_ERR = 0x00000008,
244 SATA_DWC_INTPR_NEWBIST = 0x00000010,
245 SATA_DWC_INTPR_IPF = 0x10000000,
246 SATA_DWC_INTMR_DMATM = 0x00000001,
247 SATA_DWC_INTMR_NEWFPM = 0x00000002,
248 SATA_DWC_INTMR_PMABRTM = 0x00000004,
249 SATA_DWC_INTMR_ERRM = 0x00000008,
250 SATA_DWC_INTMR_NEWBISTM = 0x00000010,
251 SATA_DWC_LLCR_SCRAMEN = 0x00000001,
252 SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
253 SATA_DWC_LLCR_RPDEN = 0x00000004,
254/* This is all error bits, zero's are reserved fields. */
255 SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
256};
257
258#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
259#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
260 SATA_DWC_DMACR_TMOD_TXCHEN)
261#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
262 SATA_DWC_DMACR_TMOD_TXCHEN)
263#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
264#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
265 << 16)
266struct sata_dwc_device {
267 struct device *dev; /* generic device struct */
268 struct ata_probe_ent *pe; /* ptr to probe-ent */
269 struct ata_host *host;
270 u8 *reg_base;
271 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
272 int irq_dma;
273};
274
275#define SATA_DWC_QCMD_MAX 32
276
277struct sata_dwc_device_port {
278 struct sata_dwc_device *hsdev;
279 int cmd_issued[SATA_DWC_QCMD_MAX];
280 struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */
281 dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
282 u32 dma_chan[SATA_DWC_QCMD_MAX];
283 int dma_pending[SATA_DWC_QCMD_MAX];
284};
285
286/*
287 * Commonly used DWC SATA driver Macros
288 */
289#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
290 (host)->private_data)
291#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
292 (ap)->host->private_data)
293#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
294 (ap)->private_data)
295#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
296 (qc)->ap->host->private_data)
297#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
298 (hsdevp)->hsdev)
299
300enum {
301 SATA_DWC_CMD_ISSUED_NOT = 0,
302 SATA_DWC_CMD_ISSUED_PEND = 1,
303 SATA_DWC_CMD_ISSUED_EXEC = 2,
304 SATA_DWC_CMD_ISSUED_NODATA = 3,
305
306 SATA_DWC_DMA_PENDING_NONE = 0,
307 SATA_DWC_DMA_PENDING_TX = 1,
308 SATA_DWC_DMA_PENDING_RX = 2,
309};
310
311struct sata_dwc_host_priv {
312 void __iomem *scr_addr_sstatus;
313 u32 sata_dwc_sactive_issued ;
314 u32 sata_dwc_sactive_queued ;
315 u32 dma_interrupt_count;
316 struct ahb_dma_regs *sata_dma_regs;
317 struct device *dwc_dev;
318};
319struct sata_dwc_host_priv host_pvt;
320/*
321 * Prototypes
322 */
323static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
324static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
325 u32 check_status);
326static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
327static void sata_dwc_port_stop(struct ata_port *ap);
328static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
329static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
330static void dma_dwc_exit(struct sata_dwc_device *hsdev);
331static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
332 struct lli *lli, dma_addr_t dma_lli,
333 void __iomem *addr, int dir);
334static void dma_dwc_xfer_start(int dma_ch);
335
336static void sata_dwc_tf_dump(struct ata_taskfile *tf)
337{
338 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
339 "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\
340 (tf->protocol), tf->flags, tf->device);
341 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
342 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
343 tf->lbam, tf->lbah);
344 dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x "
345 "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
346 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
347 tf->hob_lbah);
348}
349
350/*
351 * Function: get_burst_length_encode
352 * arguments: datalength: length in bytes of data
353 * returns value to be programmed in register corrresponding to data length
354 * This value is effectively the log(base 2) of the length
355 */
356static int get_burst_length_encode(int datalength)
357{
358 int items = datalength >> 2; /* div by 4 to get lword count */
359
360 if (items >= 64)
361 return 5;
362
363 if (items >= 32)
364 return 4;
365
366 if (items >= 16)
367 return 3;
368
369 if (items >= 8)
370 return 2;
371
372 if (items >= 4)
373 return 1;
374
375 return 0;
376}
377
378static void clear_chan_interrupts(int c)
379{
380 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
381 DMA_CHANNEL(c));
382 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
383 DMA_CHANNEL(c));
384 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
385 DMA_CHANNEL(c));
386 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
387 DMA_CHANNEL(c));
388 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
389 DMA_CHANNEL(c));
390}
391
392/*
393 * Function: dma_request_channel
394 * arguments: None
395 * returns channel number if available else -1
396 * This function assigns the next available DMA channel from the list to the
397 * requester
398 */
399static int dma_request_channel(void)
400{
401 int i;
402
403 for (i = 0; i < DMA_NUM_CHANS; i++) {
404 if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\
405 DMA_CHANNEL(i)))
406 return i;
407 }
408 dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__,
409 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)));
410 return -1;
411}
412
413/*
414 * Function: dma_dwc_interrupt
415 * arguments: irq, dev_id, pt_regs
416 * returns channel number if available else -1
417 * Interrupt Handler for DW AHB SATA DMA
418 */
419static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
420{
421 int chan;
422 u32 tfr_reg, err_reg;
423 unsigned long flags;
424 struct sata_dwc_device *hsdev =
425 (struct sata_dwc_device *)hsdev_instance;
426 struct ata_host *host = (struct ata_host *)hsdev->host;
427 struct ata_port *ap;
428 struct sata_dwc_device_port *hsdevp;
429 u8 tag = 0;
430 unsigned int port = 0;
431
432 spin_lock_irqsave(&host->lock, flags);
433 ap = host->ports[port];
434 hsdevp = HSDEVP_FROM_AP(ap);
435 tag = ap->link.active_tag;
436
437 tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
438 .low));
439 err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
440 .low));
441
442 dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
443 tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
444
445 for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
446 /* Check for end-of-transfer interrupt. */
447 if (tfr_reg & DMA_CHANNEL(chan)) {
448 /*
449 * Each DMA command produces 2 interrupts. Only
450 * complete the command after both interrupts have been
451 * seen. (See sata_dwc_isr())
452 */
453 host_pvt.dma_interrupt_count++;
454 sata_dwc_clear_dmacr(hsdevp, tag);
455
456 if (hsdevp->dma_pending[tag] ==
457 SATA_DWC_DMA_PENDING_NONE) {
458 dev_err(ap->dev, "DMA not pending eot=0x%08x "
459 "err=0x%08x tag=0x%02x pending=%d\n",
460 tfr_reg, err_reg, tag,
461 hsdevp->dma_pending[tag]);
462 }
463
464 if ((host_pvt.dma_interrupt_count % 2) == 0)
465 sata_dwc_dma_xfer_complete(ap, 1);
466
467 /* Clear the interrupt */
468 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
469 .tfr.low),
470 DMA_CHANNEL(chan));
471 }
472
473 /* Check for error interrupt. */
474 if (err_reg & DMA_CHANNEL(chan)) {
475 /* TODO Need error handler ! */
476 dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
477 err_reg);
478
479 /* Clear the interrupt. */
480 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
481 .error.low),
482 DMA_CHANNEL(chan));
483 }
484 }
485 spin_unlock_irqrestore(&host->lock, flags);
486 return IRQ_HANDLED;
487}
488
489/*
490 * Function: dma_request_interrupts
491 * arguments: hsdev
492 * returns status
493 * This function registers ISR for a particular DMA channel interrupt
494 */
495static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
496{
497 int retval = 0;
498 int chan;
499
500 for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
501 /* Unmask error interrupt */
502 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
503 DMA_ENABLE_CHAN(chan));
504
505 /* Unmask end-of-transfer interrupt */
506 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
507 DMA_ENABLE_CHAN(chan));
508 }
509
510 retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
511 if (retval) {
512 dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
513 __func__, irq);
514 return -ENODEV;
515 }
516
517 /* Mark this interrupt as requested */
518 hsdev->irq_dma = irq;
519 return 0;
520}
521
522/*
523 * Function: map_sg_to_lli
524 * The Synopsis driver has a comment proposing that better performance
525 * is possible by only enabling interrupts on the last item in the linked list.
526 * However, it seems that could be a problem if an error happened on one of the
527 * first items. The transfer would halt, but no error interrupt would occur.
528 * Currently this function sets interrupts enabled for each linked list item:
529 * DMA_CTL_INT_EN.
530 */
531static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
532 struct lli *lli, dma_addr_t dma_lli,
533 void __iomem *dmadr_addr, int dir)
534{
535 int i, idx = 0;
536 int fis_len = 0;
537 dma_addr_t next_llp;
538 int bl;
539
540 dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
541 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
542 (u32)dmadr_addr);
543
544 bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
545
546 for (i = 0; i < num_elems; i++, sg++) {
547 u32 addr, offset;
548 u32 sg_len, len;
549
550 addr = (u32) sg_dma_address(sg);
551 sg_len = sg_dma_len(sg);
552
553 dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
554 "=%d\n", __func__, i, addr, sg_len);
555
556 while (sg_len) {
557 if (idx >= SATA_DWC_DMAC_LLI_NUM) {
558 /* The LLI table is not large enough. */
559 dev_err(host_pvt.dwc_dev, "LLI table overrun "
560 "(idx=%d)\n", idx);
561 break;
562 }
563 len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
564 SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
565
566 offset = addr & 0xffff;
567 if ((offset + sg_len) > 0x10000)
568 len = 0x10000 - offset;
569
570 /*
571 * Make sure a LLI block is not created that will span
572 * 8K max FIS boundary. If the block spans such a FIS
573 * boundary, there is a chance that a DMA burst will
574 * cross that boundary -- this results in an error in
575 * the host controller.
576 */
577 if (fis_len + len > 8192) {
578 dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
579 "%d(0x%x) len=%d(0x%x)\n", fis_len,
580 fis_len, len, len);
581 len = 8192 - fis_len;
582 fis_len = 0;
583 } else {
584 fis_len += len;
585 }
586 if (fis_len == 8192)
587 fis_len = 0;
588
589 /*
590 * Set DMA addresses and lower half of control register
591 * based on direction.
592 */
593 if (dir == DMA_FROM_DEVICE) {
594 lli[idx].dar = cpu_to_le32(addr);
595 lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
596
597 lli[idx].ctl.low = cpu_to_le32(
598 DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
599 DMA_CTL_SMS(0) |
600 DMA_CTL_DMS(1) |
601 DMA_CTL_SRC_MSIZE(bl) |
602 DMA_CTL_DST_MSIZE(bl) |
603 DMA_CTL_SINC_NOCHANGE |
604 DMA_CTL_SRC_TRWID(2) |
605 DMA_CTL_DST_TRWID(2) |
606 DMA_CTL_INT_EN |
607 DMA_CTL_LLP_SRCEN |
608 DMA_CTL_LLP_DSTEN);
609 } else { /* DMA_TO_DEVICE */
610 lli[idx].sar = cpu_to_le32(addr);
611 lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
612
613 lli[idx].ctl.low = cpu_to_le32(
614 DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
615 DMA_CTL_SMS(1) |
616 DMA_CTL_DMS(0) |
617 DMA_CTL_SRC_MSIZE(bl) |
618 DMA_CTL_DST_MSIZE(bl) |
619 DMA_CTL_DINC_NOCHANGE |
620 DMA_CTL_SRC_TRWID(2) |
621 DMA_CTL_DST_TRWID(2) |
622 DMA_CTL_INT_EN |
623 DMA_CTL_LLP_SRCEN |
624 DMA_CTL_LLP_DSTEN);
625 }
626
627 dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
628 "0x%08x val: 0x%08x\n", __func__,
629 len, DMA_CTL_BLK_TS(len / 4));
630
631 /* Program the LLI CTL high register */
632 lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
633 (len / 4));
634
635 /* Program the next pointer. The next pointer must be
636 * the physical address, not the virtual address.
637 */
638 next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
639 lli)));
640
641 /* The last 2 bits encode the list master select. */
642 next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
643
644 lli[idx].llp = cpu_to_le32(next_llp);
645 idx++;
646 sg_len -= len;
647 addr += len;
648 }
649 }
650
651 /*
652 * The last next ptr has to be zero and the last control low register
653 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
654 * and destination enable) set back to 0 (disabled.) This is what tells
655 * the core that this is the last item in the linked list.
656 */
657 if (idx) {
658 lli[idx-1].llp = 0x00000000;
659 lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
660
661 /* Flush cache to memory */
662 dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
663 DMA_BIDIRECTIONAL);
664 }
665
666 return idx;
667}
668
669/*
670 * Function: dma_dwc_xfer_start
671 * arguments: Channel number
672 * Return : None
673 * Enables the DMA channel
674 */
675static void dma_dwc_xfer_start(int dma_ch)
676{
677 /* Enable the DMA channel */
678 out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
679 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
680 DMA_ENABLE_CHAN(dma_ch));
681}
682
683static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
684 struct lli *lli, dma_addr_t dma_lli,
685 void __iomem *addr, int dir)
686{
687 int dma_ch;
688 int num_lli;
689 /* Acquire DMA channel */
690 dma_ch = dma_request_channel();
691 if (dma_ch == -1) {
692 dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
693 __func__);
694 return -EAGAIN;
695 }
696
697 /* Convert SG list to linked list of items (LLIs) for AHB DMA */
698 num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
699
700 dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
701 " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems,
702 lli, (u32)dma_lli, addr, num_lli);
703
704 clear_chan_interrupts(dma_ch);
705
706 /* Program the CFG register. */
707 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
708 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
709 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
710
711 /* Program the address of the linked list */
712 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
713 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
714
715 /* Program the CTL register with src enable / dst enable */
716 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
717 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
718 return 0;
719}
720
721/*
722 * Function: dma_dwc_exit
723 * arguments: None
724 * returns status
725 * This function exits the SATA DMA driver
726 */
727static void dma_dwc_exit(struct sata_dwc_device *hsdev)
728{
729 dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
730 if (host_pvt.sata_dma_regs)
731 iounmap(host_pvt.sata_dma_regs);
732
733 if (hsdev->irq_dma)
734 free_irq(hsdev->irq_dma, hsdev);
735}
736
737/*
738 * Function: dma_dwc_init
739 * arguments: hsdev
740 * returns status
741 * This function initializes the SATA DMA driver
742 */
743static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
744{
745 int err;
746
747 err = dma_request_interrupts(hsdev, irq);
748 if (err) {
749 dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
750 " %d\n", __func__, err);
751 goto error_out;
752 }
753
754 /* Enabe DMA */
755 out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
756
757 dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
758 dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\
759 sata_dma_regs);
760
761 return 0;
762
763error_out:
764 dma_dwc_exit(hsdev);
765
766 return err;
767}
768
769static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
770{
771 if (scr > SCR_NOTIFICATION) {
772 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
773 __func__, scr);
774 return -EINVAL;
775 }
776
777 *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
778 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
779 __func__, link->ap->print_id, scr, *val);
780
781 return 0;
782}
783
784static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
785{
786 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
787 __func__, link->ap->print_id, scr, val);
788 if (scr > SCR_NOTIFICATION) {
789 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
790 __func__, scr);
791 return -EINVAL;
792 }
793 out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
794
795 return 0;
796}
797
/* Read SCR register @scr via the globally-saved SStatus base address. */
static u32 core_scr_read(unsigned int scr)
{
	return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\
			(scr * 4));
}
803
/* Write @val to SCR register @scr via the globally-saved SStatus base. */
static void core_scr_write(unsigned int scr, u32 val)
{
	out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
		val);
}
809
810static void clear_serror(void)
811{
812 u32 val;
813 val = core_scr_read(SCR_ERROR);
814 core_scr_write(SCR_ERROR, val);
815
816}
817
/*
 * Acknowledge pending interrupts by writing intpr back to itself.
 * NOTE(review): the @bit parameter is unused -- this write-back clears
 * every pending bit in intpr, not just @bit. Confirm that is intended.
 */
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	out_le32(&hsdev->sata_dwc_regs->intpr,
		 in_le32(&hsdev->sata_dwc_regs->intpr));
}
823
824static u32 qcmd_tag_to_mask(u8 tag)
825{
826 return 0x00000001 << (tag & 0x1f);
827}
828
/*
 * Error-interrupt handler (modeled on ahci.c): capture SError, ATA status
 * and the DMA error register for diagnostics, clear the error state, then
 * hand the failure to libata EH via ata_port_abort().
 */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;
	u32 err_reg;

	ata_ehi_clear_desc(ehi);

	serror = core_scr_read(SCR_ERROR);
	status = ap->ops->sff_check_status(ap);

	err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\
			low));
	tag = ap->link.active_tag;

	dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
		"dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
		__func__, serror, intpr, status, host_pvt.dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);

	/* Clear error register and interrupt bit */
	clear_serror();
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now.  TODO check for exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	/* Attach the error to the active command if there is one. */
	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
876
877/*
878 * Function : sata_dwc_isr
879 * arguments : irq, void *dev_instance, struct pt_regs *regs
880 * Return value : irqreturn_t - status of IRQ
881 * This Interrupt handler called via port ops registered function.
882 * .irq_handler = sata_dwc_isr
883 */
884static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
885{
886 struct ata_host *host = (struct ata_host *)dev_instance;
887 struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
888 struct ata_port *ap;
889 struct ata_queued_cmd *qc;
890 unsigned long flags;
891 u8 status, tag;
892 int handled, num_processed, port = 0;
893 uint intpr, sactive, sactive2, tag_mask;
894 struct sata_dwc_device_port *hsdevp;
895 host_pvt.sata_dwc_sactive_issued = 0;
896
897 spin_lock_irqsave(&host->lock, flags);
898
899 /* Read the interrupt register */
900 intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
901
902 ap = host->ports[port];
903 hsdevp = HSDEVP_FROM_AP(ap);
904
905 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
906 ap->link.active_tag);
907
908 /* Check for error interrupt */
909 if (intpr & SATA_DWC_INTPR_ERR) {
910 sata_dwc_error_intr(ap, hsdev, intpr);
911 handled = 1;
912 goto DONE;
913 }
914
915 /* Check for DMA SETUP FIS (FP DMA) interrupt */
916 if (intpr & SATA_DWC_INTPR_NEWFP) {
917 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
918
919 tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
920 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
921 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
922 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
923
924 host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
925
926 qc = ata_qc_from_tag(ap, tag);
927 /*
928 * Start FP DMA for NCQ command. At this point the tag is the
929 * active tag. It is the tag that matches the command about to
930 * be completed.
931 */
932 qc->ap->link.active_tag = tag;
933 sata_dwc_bmdma_start_by_tag(qc, tag);
934
935 handled = 1;
936 goto DONE;
937 }
938 sactive = core_scr_read(SCR_ACTIVE);
939 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
940
941 /* If no sactive issued and tag_mask is zero then this is not NCQ */
942 if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
943 if (ap->link.active_tag == ATA_TAG_POISON)
944 tag = 0;
945 else
946 tag = ap->link.active_tag;
947 qc = ata_qc_from_tag(ap, tag);
948
949 /* DEV interrupt w/ no active qc? */
950 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
951 dev_err(ap->dev, "%s interrupt with no active qc "
952 "qc=%p\n", __func__, qc);
953 ap->ops->sff_check_status(ap);
954 handled = 1;
955 goto DONE;
956 }
957 status = ap->ops->sff_check_status(ap);
958
959 qc->ap->link.active_tag = tag;
960 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
961
962 if (status & ATA_ERR) {
963 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
964 sata_dwc_qc_complete(ap, qc, 1);
965 handled = 1;
966 goto DONE;
967 }
968
969 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
970 __func__, ata_get_cmd_descript(qc->tf.protocol));
971DRVSTILLBUSY:
972 if (ata_is_dma(qc->tf.protocol)) {
973 /*
974 * Each DMA transaction produces 2 interrupts. The DMAC
975 * transfer complete interrupt and the SATA controller
976 * operation done interrupt. The command should be
977 * completed only after both interrupts are seen.
978 */
979 host_pvt.dma_interrupt_count++;
980 if (hsdevp->dma_pending[tag] == \
981 SATA_DWC_DMA_PENDING_NONE) {
982 dev_err(ap->dev, "%s: DMA not pending "
983 "intpr=0x%08x status=0x%08x pending"
984 "=%d\n", __func__, intpr, status,
985 hsdevp->dma_pending[tag]);
986 }
987
988 if ((host_pvt.dma_interrupt_count % 2) == 0)
989 sata_dwc_dma_xfer_complete(ap, 1);
990 } else if (ata_is_pio(qc->tf.protocol)) {
991 ata_sff_hsm_move(ap, qc, status, 0);
992 handled = 1;
993 goto DONE;
994 } else {
995 if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
996 goto DRVSTILLBUSY;
997 }
998
999 handled = 1;
1000 goto DONE;
1001 }
1002
1003 /*
1004 * This is a NCQ command. At this point we need to figure out for which
1005 * tags we have gotten a completion interrupt. One interrupt may serve
1006 * as completion for more than one operation when commands are queued
1007 * (NCQ). We need to process each completed command.
1008 */
1009
1010 /* process completed commands */
1011 sactive = core_scr_read(SCR_ACTIVE);
1012 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
1013
1014 if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
1015 tag_mask > 1) {
1016 dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x"
1017 "tag_mask=0x%08x\n", __func__, sactive,
1018 host_pvt.sata_dwc_sactive_issued, tag_mask);
1019 }
1020
1021 if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
1022 (host_pvt.sata_dwc_sactive_issued)) {
1023 dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
1024 "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask"
1025 "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued,
1026 tag_mask);
1027 }
1028
1029 /* read just to clear ... not bad if currently still busy */
1030 status = ap->ops->sff_check_status(ap);
1031 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
1032
1033 tag = 0;
1034 num_processed = 0;
1035 while (tag_mask) {
1036 num_processed++;
1037 while (!(tag_mask & 0x00000001)) {
1038 tag++;
1039 tag_mask <<= 1;
1040 }
1041
1042 tag_mask &= (~0x00000001);
1043 qc = ata_qc_from_tag(ap, tag);
1044
1045 /* To be picked up by completion functions */
1046 qc->ap->link.active_tag = tag;
1047 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1048
1049 /* Let libata/scsi layers handle error */
1050 if (status & ATA_ERR) {
1051 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
1052 status);
1053 sata_dwc_qc_complete(ap, qc, 1);
1054 handled = 1;
1055 goto DONE;
1056 }
1057
1058 /* Process completed command */
1059 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
1060 ata_get_cmd_descript(qc->tf.protocol));
1061 if (ata_is_dma(qc->tf.protocol)) {
1062 host_pvt.dma_interrupt_count++;
1063 if (hsdevp->dma_pending[tag] == \
1064 SATA_DWC_DMA_PENDING_NONE)
1065 dev_warn(ap->dev, "%s: DMA not pending?\n",
1066 __func__);
1067 if ((host_pvt.dma_interrupt_count % 2) == 0)
1068 sata_dwc_dma_xfer_complete(ap, 1);
1069 } else {
1070 if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1071 goto STILLBUSY;
1072 }
1073 continue;
1074
1075STILLBUSY:
1076 ap->stats.idle_irq++;
1077 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
1078 ap->print_id);
1079 } /* while tag_mask */
1080
1081 /*
1082 * Check to see if any commands completed while we were processing our
1083 * initial set of completed commands (read status clears interrupts,
1084 * so we might miss a completed command interrupt if one came in while
1085 * we were processing --we read status as part of processing a completed
1086 * command).
1087 */
1088 sactive2 = core_scr_read(SCR_ACTIVE);
1089 if (sactive2 != sactive) {
1090 dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2"
1091 "=0x%x\n", sactive, sactive2);
1092 }
1093 handled = 1;
1094
1095DONE:
1096 spin_unlock_irqrestore(&host->lock, flags);
1097 return IRQ_RETVAL(handled);
1098}
1099
/*
 * Clear the RX or TX channel-enable bits in the SATA controller's DMA
 * control register (dmacr), according to which direction is recorded as
 * pending for @tag.
 */
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			 SATA_DWC_DMACR_RX_CLEAR(
				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			 SATA_DWC_DMACR_TX_CLEAR(
				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync.  If it does happen, clear dmacr anyway.
		 */
		dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and"
			"TX DMA not pending tag=0x%02x pending=%d"
			" dmacr: 0x%08x\n", __func__, tag,
			hsdevp->dma_pending[tag],
			in_le32(&(hsdev->sata_dwc_regs->dmacr)));
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}
1126
/*
 * Complete the DMA transfer for the port's active tag: clear the pending
 * state and hand the qc back to libata. For non-DMA protocols the qc is
 * completed directly.
 */
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

#ifdef DEBUG_NCQ
	if (tag > 0) {
		dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
			 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
			 ata_get_cmd_descript(qc->dma_dir),
			 ata_get_cmd_descript(qc->tf.protocol),
			 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
	}
#endif

	if (ata_is_dma(qc->tf.protocol)) {
		/* A completed transfer should no longer be marked pending. */
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
				"pending dmacr: 0x%08x\n", __func__,
				in_le32(&(hsdev->sata_dwc_regs->dmacr)));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc, check_status);
		/* No active command any more. */
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc, check_status);
	}
}
1165
/*
 * Clear the tag's bit in the driver's queued/issued bookkeeping masks and
 * complete the qc with libata. Always returns 0.
 */
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->tag;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	host_pvt.sata_dwc_sactive_queued = 0;
	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	/*
	 * NOTE(review): 'status' is never assigned after its initializer,
	 * so this log always prints status=0x00 -- confirm whether a real
	 * status read was intended here.
	 */
	dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:"
		" protocol=%d\n", qc->tf.command, status, ap->print_id,
		qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
						& mask;
	host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
						& mask;
	ata_qc_complete(qc);
	return 0;
}
1193
/*
 * Enable the controller's interrupt sources (error, new-FP-DMA-setup,
 * PM abort, DMA) via the interrupt mask register, and select which SError
 * bits may raise the error interrupt via the error mask register.
 */
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt maskregister*/
	out_le32(&hsdev->sata_dwc_regs->intmr,
		 SATA_DWC_INTMR_ERRM |
		 SATA_DWC_INTMR_NEWFPM |
		 SATA_DWC_INTMR_PMABRTM |
		 SATA_DWC_INTMR_DMATM);
	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		 __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
		in_le32(&hsdev->sata_dwc_regs->errmr));
}
1212
/*
 * Populate the taskfile register addresses for a port. The DWC shadow
 * registers sit at 4-byte strides from @base; paired registers (data/cmd,
 * error/feature, command/status, altstatus/ctl) share the same offset.
 */
static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr = (void *)base + 0x00;
	port->data_addr = (void *)base + 0x00;

	port->error_addr = (void *)base + 0x04;
	port->feature_addr = (void *)base + 0x04;

	port->nsect_addr = (void *)base + 0x08;

	port->lbal_addr = (void *)base + 0x0c;
	port->lbam_addr = (void *)base + 0x10;
	port->lbah_addr = (void *)base + 0x14;

	port->device_addr = (void *)base + 0x18;
	port->command_addr = (void *)base + 0x1c;
	port->status_addr = (void *)base + 0x1c;

	port->altstatus_addr = (void *)base + 0x20;
	port->ctl_addr = (void *)base + 0x20;
}
1234
1235/*
1236 * Function : sata_dwc_port_start
1237 * arguments : struct ata_ioports *port
1238 * Return value : returns 0 if success, error code otherwise
1239 * This function allocates the scatter gather LLI table for AHB DMA
1240 */
1241static int sata_dwc_port_start(struct ata_port *ap)
1242{
1243 int err = 0;
1244 struct sata_dwc_device *hsdev;
1245 struct sata_dwc_device_port *hsdevp = NULL;
1246 struct device *pdev;
1247 int i;
1248
1249 hsdev = HSDEV_FROM_AP(ap);
1250
1251 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
1252
1253 hsdev->host = ap->host;
1254 pdev = ap->host->dev;
1255 if (!pdev) {
1256 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
1257 err = -ENODEV;
1258 goto CLEANUP;
1259 }
1260
1261 /* Allocate Port Struct */
1262 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
1263 if (!hsdevp) {
1264 dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
1265 err = -ENOMEM;
1266 goto CLEANUP;
1267 }
1268 hsdevp->hsdev = hsdev;
1269
1270 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
1271 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
1272
1273 ap->bmdma_prd = 0; /* set these so libata doesn't use them */
1274 ap->bmdma_prd_dma = 0;
1275
1276 /*
1277 * DMA - Assign scatter gather LLI table. We can't use the libata
1278 * version since it's PRD is IDE PCI specific.
1279 */
1280 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1281 hsdevp->llit[i] = dma_alloc_coherent(pdev,
1282 SATA_DWC_DMAC_LLI_TBL_SZ,
1283 &(hsdevp->llit_dma[i]),
1284 GFP_ATOMIC);
1285 if (!hsdevp->llit[i]) {
1286 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1287 __func__);
1288 err = -ENOMEM;
1289 goto CLEANUP;
1290 }
1291 }
1292
1293 if (ap->port_no == 0) {
1294 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
1295 __func__);
1296 out_le32(&hsdev->sata_dwc_regs->dmacr,
1297 SATA_DWC_DMACR_TXRXCH_CLEAR);
1298
1299 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
1300 __func__);
1301 out_le32(&hsdev->sata_dwc_regs->dbtsr,
1302 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1303 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
1304 }
1305
1306 /* Clear any error bits before libata starts issuing commands */
1307 clear_serror();
1308 ap->private_data = hsdevp;
1309
1310CLEANUP:
1311 if (err) {
1312 sata_dwc_port_stop(ap);
1313 dev_dbg(ap->dev, "%s: fail\n", __func__);
1314 } else {
1315 dev_dbg(ap->dev, "%s: done\n", __func__);
1316 }
1317
1318 return err;
1319}
1320
1321static void sata_dwc_port_stop(struct ata_port *ap)
1322{
1323 int i;
1324 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1325 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1326
1327 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
1328
1329 if (hsdevp && hsdev) {
1330 /* deallocate LLI table */
1331 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1332 dma_free_coherent(ap->host->dev,
1333 SATA_DWC_DMAC_LLI_TBL_SZ,
1334 hsdevp->llit[i], hsdevp->llit_dma[i]);
1335 }
1336
1337 kfree(hsdevp);
1338 }
1339 ap->private_data = NULL;
1340}
1341
1342/*
1343 * Function : sata_dwc_exec_command_by_tag
1344 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
1345 * Return value : None
1346 * This function keeps track of individual command tag ids and calls
1347 * ata_exec_command in libata
1348 */
1349static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1350 struct ata_taskfile *tf,
1351 u8 tag, u32 cmd_issued)
1352{
1353 unsigned long flags;
1354 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1355
1356 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
1357 ata_get_cmd_descript(tf), tag);
1358
1359 spin_lock_irqsave(&ap->host->lock, flags);
1360 hsdevp->cmd_issued[tag] = cmd_issued;
1361 spin_unlock_irqrestore(&ap->host->lock, flags);
1362 /*
1363 * Clear SError before executing a new command.
1364 * sata_dwc_scr_write and read can not be used here. Clearing the PM
1365 * managed SError register for the disk needs to be done before the
1366 * task file is loaded.
1367 */
1368 clear_serror();
1369 ata_sff_exec_command(ap, tf);
1370}
1371
/* Issue the command for @tag, marking it pending for DMA start later. */
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}
1377
1378static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
1379{
1380 u8 tag = qc->tag;
1381
1382 if (ata_is_ncq(qc->tf.protocol)) {
1383 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1384 __func__, qc->ap->link.sactive, tag);
1385 } else {
1386 tag = 0;
1387 }
1388 sata_dwc_bmdma_setup_by_tag(qc, tag);
1389}
1390
/*
 * Start the previously set-up DMA for @tag: record the pending direction,
 * program the controller's dmacr for TX or RX, then enable the AHB DMA
 * channel that was assigned at qc_prep time. DMA is not started if the
 * command was never marked issued.
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg, dma_chan;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	int dir = qc->dma_dir;
	dma_chan = hsdevp->dma_chan[tag];

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
			"(tag=%d) DMA NOT started\n", __func__,
			hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
		"start_dma? %x\n", __func__, qc, tag, qc->tf.command,
		ata_get_cmd_descript(qc->dma_dir), start_dma);
	sata_dwc_tf_dump(&(qc->tf));

	if (start_dma) {
		/* Warn if SError already shows an error before we start. */
		reg = core_scr_read(SCR_ERROR);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			out_le32(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXCHEN);
		else
			out_le32(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dma_dwc_xfer_start(dma_chan);
	}
}
1437
1438static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
1439{
1440 u8 tag = qc->tag;
1441
1442 if (ata_is_ncq(qc->tf.protocol)) {
1443 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1444 __func__, qc->ap->link.sactive, tag);
1445 } else {
1446 tag = 0;
1447 }
1448 dev_dbg(qc->ap->dev, "%s\n", __func__);
1449 sata_dwc_bmdma_start_by_tag(qc, tag);
1450}
1451
1452/*
1453 * Function : sata_dwc_qc_prep_by_tag
1454 * arguments : ata_queued_cmd *qc, u8 tag
1455 * Return value : None
1456 * qc_prep for a particular queued command based on tag
1457 */
1458static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1459{
1460 struct scatterlist *sg = qc->sg;
1461 struct ata_port *ap = qc->ap;
1462 int dma_chan;
1463 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1464 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1465 int err;
1466
1467 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
1468 __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir),
1469 qc->n_elem);
1470
1471 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
1472 hsdevp->llit_dma[tag],
1473 (void *__iomem)(&hsdev->sata_dwc_regs->\
1474 dmadr), qc->dma_dir);
1475 if (dma_chan < 0) {
1476 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1477 __func__, err);
1478 return;
1479 }
1480 hsdevp->dma_chan[tag] = dma_chan;
1481}
1482
1483static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1484{
1485 u32 sactive;
1486 u8 tag = qc->tag;
1487 struct ata_port *ap = qc->ap;
1488
1489#ifdef DEBUG_NCQ
1490 if (qc->tag > 0 || ap->link.sactive > 1)
1491 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
1492 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
1493 __func__, ap->print_id, qc->tf.command,
1494 ata_get_cmd_descript(&qc->tf),
1495 qc->tag, ata_get_cmd_descript(qc->tf.protocol),
1496 ap->link.active_tag, ap->link.sactive);
1497#endif
1498
1499 if (!ata_is_ncq(qc->tf.protocol))
1500 tag = 0;
1501 sata_dwc_qc_prep_by_tag(qc, tag);
1502
1503 if (ata_is_ncq(qc->tf.protocol)) {
1504 sactive = core_scr_read(SCR_ACTIVE);
1505 sactive |= (0x00000001 << tag);
1506 core_scr_write(SCR_ACTIVE, sactive);
1507
1508 dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
1509 "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
1510 sactive);
1511
1512 ap->ops->sff_tf_load(ap, &qc->tf);
1513 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
1514 SATA_DWC_CMD_ISSUED_PEND);
1515 } else {
1516 ata_sff_qc_issue(qc);
1517 }
1518 return 0;
1519}
1520
1521/*
1522 * Function : sata_dwc_qc_prep
1523 * arguments : ata_queued_cmd *qc
1524 * Return value : None
1525 * qc_prep for a particular queued command
1526 */
1527
1528static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1529{
1530 if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
1531 return;
1532
1533#ifdef DEBUG_NCQ
1534 if (qc->tag > 0)
1535 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
1536 __func__, tag, qc->ap->link.active_tag);
1537
1538 return ;
1539#endif
1540}
1541
/*
 * sata_dwc_error_handler - libata error-handler hook
 * @ap: port undergoing error recovery
 *
 * Sets ATA_LFLAG_NO_HRST so the EH core skips hardreset on this link
 * (NOTE(review): presumably hardreset is unsupported or unreliable on
 * this controller — confirm against the DWC SATA documentation), then
 * delegates to the generic SFF error handler.
 */
static void sata_dwc_error_handler(struct ata_port *ap)
{
	ap->link.flags |= ATA_LFLAG_NO_HRST;
	ata_sff_error_handler(ap);
}
1547
1548/*
1549 * scsi mid-layer and libata interface structures
1550 */
1551static struct scsi_host_template sata_dwc_sht = {
1552 ATA_NCQ_SHT(DRV_NAME),
1553 /*
1554 * test-only: Currently this driver doesn't handle NCQ
1555 * correctly. We enable NCQ but set the queue depth to a
1556 * max of 1. This will get fixed in in a future release.
1557 */
1558 .sg_tablesize = LIBATA_MAX_PRD,
1559 .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
1560 .dma_boundary = ATA_DMA_BOUNDARY,
1561};
1562
/*
 * libata port operations: inherit the generic SFF behaviour and
 * override only the hooks that must route through the DWC-specific
 * DMA engine, SCR access, and per-port setup/teardown.
 */
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,

	.qc_prep		= sata_dwc_qc_prep,
	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};
1580
/* Port template: MMIO SATA with NCQ, PIO modes 0-4 and up to UDMA/133 */
static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio 0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};
1590
1591static int sata_dwc_probe(struct platform_device *ofdev,
1592 const struct of_device_id *match)
1593{
1594 struct sata_dwc_device *hsdev;
1595 u32 idr, versionr;
1596 char *ver = (char *)&versionr;
1597 u8 *base = NULL;
1598 int err = 0;
1599 int irq, rc;
1600 struct ata_host *host;
1601 struct ata_port_info pi = sata_dwc_port_info[0];
1602 const struct ata_port_info *ppi[] = { &pi, NULL };
1603
1604 /* Allocate DWC SATA device */
1605 hsdev = kmalloc(sizeof(*hsdev), GFP_KERNEL);
1606 if (hsdev == NULL) {
1607 dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
1608 err = -ENOMEM;
1609 goto error_out;
1610 }
1611 memset(hsdev, 0, sizeof(*hsdev));
1612
1613 /* Ioremap SATA registers */
1614 base = of_iomap(ofdev->dev.of_node, 0);
1615 if (!base) {
1616 dev_err(&ofdev->dev, "ioremap failed for SATA register"
1617 " address\n");
1618 err = -ENODEV;
1619 goto error_out;
1620 }
1621 hsdev->reg_base = base;
1622 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
1623
1624 /* Synopsys DWC SATA specific Registers */
1625 hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
1626
1627 /* Allocate and fill host */
1628 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1629 if (!host) {
1630 dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
1631 err = -ENOMEM;
1632 goto error_out;
1633 }
1634
1635 host->private_data = hsdev;
1636
1637 /* Setup port */
1638 host->ports[0]->ioaddr.cmd_addr = base;
1639 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
1640 host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
1641 sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
1642
1643 /* Read the ID and Version Registers */
1644 idr = in_le32(&hsdev->sata_dwc_regs->idr);
1645 versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
1646 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
1647 idr, ver[0], ver[1], ver[2]);
1648
1649 /* Get SATA DMA interrupt number */
1650 irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
1651 if (irq == NO_IRQ) {
1652 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1653 err = -ENODEV;
1654 goto error_out;
1655 }
1656
1657 /* Get physical SATA DMA register base address */
1658 host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
1659 if (!(host_pvt.sata_dma_regs)) {
1660 dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1661 " address\n");
1662 err = -ENODEV;
1663 goto error_out;
1664 }
1665
1666 /* Save dev for later use in dev_xxx() routines */
1667 host_pvt.dwc_dev = &ofdev->dev;
1668
1669 /* Initialize AHB DMAC */
1670 dma_dwc_init(hsdev, irq);
1671
1672 /* Enable SATA Interrupts */
1673 sata_dwc_enable_interrupts(hsdev);
1674
1675 /* Get SATA interrupt number */
1676 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1677 if (irq == NO_IRQ) {
1678 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1679 err = -ENODEV;
1680 goto error_out;
1681 }
1682
1683 /*
1684 * Now, register with libATA core, this will also initiate the
1685 * device discovery process, invoking our port_start() handler &
1686 * error_handler() to execute a dummy Softreset EH session
1687 */
1688 rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1689
1690 if (rc != 0)
1691 dev_err(&ofdev->dev, "failed to activate host");
1692
1693 dev_set_drvdata(&ofdev->dev, host);
1694 return 0;
1695
1696error_out:
1697 /* Free SATA DMA resources */
1698 dma_dwc_exit(hsdev);
1699
1700 if (base)
1701 iounmap(base);
1702 return err;
1703}
1704
1705static int sata_dwc_remove(struct platform_device *ofdev)
1706{
1707 struct device *dev = &ofdev->dev;
1708 struct ata_host *host = dev_get_drvdata(dev);
1709 struct sata_dwc_device *hsdev = host->private_data;
1710
1711 ata_host_detach(host);
1712 dev_set_drvdata(dev, NULL);
1713
1714 /* Free SATA DMA resources */
1715 dma_dwc_exit(hsdev);
1716
1717 iounmap(hsdev->reg_base);
1718 kfree(hsdev);
1719 kfree(host);
1720 dev_dbg(&ofdev->dev, "done\n");
1721 return 0;
1722}
1723
/* Device-tree match table: binds to the AMCC 460EX on-chip SATA core */
static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
1729
/* of_platform driver glue wiring the DT match table to probe/remove */
static struct of_platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};
1739
/* Module entry point: register the of_platform driver */
static int __init sata_dwc_init(void)
{
	return of_register_platform_driver(&sata_dwc_driver);
}
1744
/* Module exit point: unregister the of_platform driver */
static void __exit sata_dwc_exit(void)
{
	of_unregister_platform_driver(&sata_dwc_driver);
}
1749
1750module_init(sata_dwc_init);
1751module_exit(sata_dwc_exit);
1752
1753MODULE_LICENSE("GPL");
1754MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
1755MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver");
1756MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a69192b38b43..7325f77480dc 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1096,7 +1096,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1096{ 1096{
1097 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 1097 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
1098 void __iomem *hcr_base = host_priv->hcr_base; 1098 void __iomem *hcr_base = host_priv->hcr_base;
1099 u32 hstatus, qc_active = 0; 1099 u32 hstatus, done_mask = 0;
1100 struct ata_queued_cmd *qc; 1100 struct ata_queued_cmd *qc;
1101 u32 SError; 1101 u32 SError;
1102 1102
@@ -1116,28 +1116,28 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1116 } 1116 }
1117 1117
1118 /* Read command completed register */ 1118 /* Read command completed register */
1119 qc_active = ioread32(hcr_base + CC); 1119 done_mask = ioread32(hcr_base + CC);
1120 1120
1121 VPRINTK("Status of all queues :\n"); 1121 VPRINTK("Status of all queues :\n");
1122 VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n", 1122 VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
1123 qc_active, 1123 done_mask,
1124 ioread32(hcr_base + CA), 1124 ioread32(hcr_base + CA),
1125 ioread32(hcr_base + CE), 1125 ioread32(hcr_base + CE),
1126 ioread32(hcr_base + CQ), 1126 ioread32(hcr_base + CQ),
1127 ap->qc_active); 1127 ap->qc_active);
1128 1128
1129 if (qc_active & ap->qc_active) { 1129 if (done_mask & ap->qc_active) {
1130 int i; 1130 int i;
1131 /* clear CC bit, this will also complete the interrupt */ 1131 /* clear CC bit, this will also complete the interrupt */
1132 iowrite32(qc_active, hcr_base + CC); 1132 iowrite32(done_mask, hcr_base + CC);
1133 1133
1134 DPRINTK("Status of all queues :\n"); 1134 DPRINTK("Status of all queues :\n");
1135 DPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x\n", 1135 DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
1136 qc_active, ioread32(hcr_base + CA), 1136 done_mask, ioread32(hcr_base + CA),
1137 ioread32(hcr_base + CE)); 1137 ioread32(hcr_base + CE));
1138 1138
1139 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { 1139 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
1140 if (qc_active & (1 << i)) { 1140 if (done_mask & (1 << i)) {
1141 qc = ata_qc_from_tag(ap, i); 1141 qc = ata_qc_from_tag(ap, i);
1142 if (qc) { 1142 if (qc) {
1143 ata_qc_complete(qc); 1143 ata_qc_complete(qc);
@@ -1164,7 +1164,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1164 /* Spurious Interrupt!! */ 1164 /* Spurious Interrupt!! */
1165 DPRINTK("spurious interrupt!!, CC = 0x%x\n", 1165 DPRINTK("spurious interrupt!!, CC = 0x%x\n",
1166 ioread32(hcr_base + CC)); 1166 ioread32(hcr_base + CC));
1167 iowrite32(qc_active, hcr_base + CC); 1167 iowrite32(done_mask, hcr_base + CC);
1168 return; 1168 return;
1169 } 1169 }
1170} 1170}
@@ -1296,7 +1296,7 @@ static const struct ata_port_info sata_fsl_port_info[] = {
1296 }, 1296 },
1297}; 1297};
1298 1298
1299static int sata_fsl_probe(struct of_device *ofdev, 1299static int sata_fsl_probe(struct platform_device *ofdev,
1300 const struct of_device_id *match) 1300 const struct of_device_id *match)
1301{ 1301{
1302 int retval = -ENXIO; 1302 int retval = -ENXIO;
@@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
1313 dev_printk(KERN_INFO, &ofdev->dev, 1313 dev_printk(KERN_INFO, &ofdev->dev,
1314 "Sata FSL Platform/CSB Driver init\n"); 1314 "Sata FSL Platform/CSB Driver init\n");
1315 1315
1316 hcr_base = of_iomap(ofdev->node, 0); 1316 hcr_base = of_iomap(ofdev->dev.of_node, 0);
1317 if (!hcr_base) 1317 if (!hcr_base)
1318 goto error_exit_with_cleanup; 1318 goto error_exit_with_cleanup;
1319 1319
@@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
1332 host_priv->ssr_base = ssr_base; 1332 host_priv->ssr_base = ssr_base;
1333 host_priv->csr_base = csr_base; 1333 host_priv->csr_base = csr_base;
1334 1334
1335 irq = irq_of_parse_and_map(ofdev->node, 0); 1335 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1336 if (irq < 0) { 1336 if (irq < 0) {
1337 dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n"); 1337 dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
1338 goto error_exit_with_cleanup; 1338 goto error_exit_with_cleanup;
@@ -1370,7 +1370,7 @@ error_exit_with_cleanup:
1370 return retval; 1370 return retval;
1371} 1371}
1372 1372
1373static int sata_fsl_remove(struct of_device *ofdev) 1373static int sata_fsl_remove(struct platform_device *ofdev)
1374{ 1374{
1375 struct ata_host *host = dev_get_drvdata(&ofdev->dev); 1375 struct ata_host *host = dev_get_drvdata(&ofdev->dev);
1376 struct sata_fsl_host_priv *host_priv = host->private_data; 1376 struct sata_fsl_host_priv *host_priv = host->private_data;
@@ -1387,13 +1387,13 @@ static int sata_fsl_remove(struct of_device *ofdev)
1387} 1387}
1388 1388
1389#ifdef CONFIG_PM 1389#ifdef CONFIG_PM
1390static int sata_fsl_suspend(struct of_device *op, pm_message_t state) 1390static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
1391{ 1391{
1392 struct ata_host *host = dev_get_drvdata(&op->dev); 1392 struct ata_host *host = dev_get_drvdata(&op->dev);
1393 return ata_host_suspend(host, state); 1393 return ata_host_suspend(host, state);
1394} 1394}
1395 1395
1396static int sata_fsl_resume(struct of_device *op) 1396static int sata_fsl_resume(struct platform_device *op)
1397{ 1397{
1398 struct ata_host *host = dev_get_drvdata(&op->dev); 1398 struct ata_host *host = dev_get_drvdata(&op->dev);
1399 struct sata_fsl_host_priv *host_priv = host->private_data; 1399 struct sata_fsl_host_priv *host_priv = host->private_data;
@@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = {
1427MODULE_DEVICE_TABLE(of, fsl_sata_match); 1427MODULE_DEVICE_TABLE(of, fsl_sata_match);
1428 1428
1429static struct of_platform_driver fsl_sata_driver = { 1429static struct of_platform_driver fsl_sata_driver = {
1430 .name = "fsl-sata", 1430 .driver = {
1431 .match_table = fsl_sata_match, 1431 .name = "fsl-sata",
1432 .owner = THIS_MODULE,
1433 .of_match_table = fsl_sata_match,
1434 },
1432 .probe = sata_fsl_probe, 1435 .probe = sata_fsl_probe,
1433 .remove = sata_fsl_remove, 1436 .remove = sata_fsl_remove,
1434#ifdef CONFIG_PM 1437#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 27dc6c86a4cd..a36149ebf4a2 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
415 415
416 spin_lock(&host->lock); 416 spin_lock(&host->lock);
417 417
418 for (i = 0; i < NR_PORTS; i++) { 418 for (i = 0; i < NR_PORTS; i++)
419 struct ata_port *ap = host->ports[i]; 419 if (host_irq_stat & (HIRQ_PORT0 << i)) {
420 420 inic_host_intr(host->ports[i]);
421 if (!(host_irq_stat & (HIRQ_PORT0 << i)))
422 continue;
423
424 if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
425 inic_host_intr(ap);
426 handled++; 421 handled++;
427 } else {
428 if (ata_ratelimit())
429 dev_printk(KERN_ERR, host->dev, "interrupt "
430 "from disabled port %d (0x%x)\n",
431 i, host_irq_stat);
432 } 422 }
433 }
434 423
435 spin_unlock(&host->lock); 424 spin_unlock(&host->lock);
436 425
@@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap)
679 memset(pp->pkt, 0, sizeof(struct inic_pkt)); 668 memset(pp->pkt, 0, sizeof(struct inic_pkt));
680 memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); 669 memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
681 670
682 /* setup PRD and CPB lookup table addresses */ 671 /* setup CPB lookup table addresses */
683 writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
684 writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); 672 writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
685} 673}
686 674
@@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
694{ 682{
695 struct device *dev = ap->host->dev; 683 struct device *dev = ap->host->dev;
696 struct inic_port_priv *pp; 684 struct inic_port_priv *pp;
697 int rc;
698 685
699 /* alloc and initialize private data */ 686 /* alloc and initialize private data */
700 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 687 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
703 ap->private_data = pp; 690 ap->private_data = pp;
704 691
705 /* Alloc resources */ 692 /* Alloc resources */
706 rc = ata_port_start(ap);
707 if (rc)
708 return rc;
709
710 pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt), 693 pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
711 &pp->pkt_dma, GFP_KERNEL); 694 &pp->pkt_dma, GFP_KERNEL);
712 if (!pp->pkt) 695 if (!pp->pkt)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 71cc0d42f9e1..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = {
675 .freeze = mv_eh_freeze, 675 .freeze = mv_eh_freeze,
676 .thaw = mv_eh_thaw, 676 .thaw = mv_eh_thaw,
677 .hardreset = mv_hardreset, 677 .hardreset = mv_hardreset,
678 .error_handler = ata_std_error_handler, /* avoid SFF EH */
679 .post_internal_cmd = ATA_OP_NULL,
680 678
681 .scr_read = mv5_scr_read, 679 .scr_read = mv5_scr_read,
682 .scr_write = mv5_scr_write, 680 .scr_write = mv5_scr_write,
@@ -686,16 +684,27 @@ static struct ata_port_operations mv5_ops = {
686}; 684};
687 685
688static struct ata_port_operations mv6_ops = { 686static struct ata_port_operations mv6_ops = {
689 .inherits = &mv5_ops, 687 .inherits = &ata_bmdma_port_ops,
688
689 .lost_interrupt = ATA_OP_NULL,
690
691 .qc_defer = mv_qc_defer,
692 .qc_prep = mv_qc_prep,
693 .qc_issue = mv_qc_issue,
694
690 .dev_config = mv6_dev_config, 695 .dev_config = mv6_dev_config,
691 .scr_read = mv_scr_read,
692 .scr_write = mv_scr_write,
693 696
697 .freeze = mv_eh_freeze,
698 .thaw = mv_eh_thaw,
699 .hardreset = mv_hardreset,
700 .softreset = mv_softreset,
694 .pmp_hardreset = mv_pmp_hardreset, 701 .pmp_hardreset = mv_pmp_hardreset,
695 .pmp_softreset = mv_softreset, 702 .pmp_softreset = mv_softreset,
696 .softreset = mv_softreset,
697 .error_handler = mv_pmp_error_handler, 703 .error_handler = mv_pmp_error_handler,
698 704
705 .scr_read = mv_scr_read,
706 .scr_write = mv_scr_write,
707
699 .sff_check_status = mv_sff_check_status, 708 .sff_check_status = mv_sff_check_status,
700 .sff_irq_clear = mv_sff_irq_clear, 709 .sff_irq_clear = mv_sff_irq_clear,
701 .check_atapi_dma = mv_check_atapi_dma, 710 .check_atapi_dma = mv_check_atapi_dma,
@@ -703,6 +712,9 @@ static struct ata_port_operations mv6_ops = {
703 .bmdma_start = mv_bmdma_start, 712 .bmdma_start = mv_bmdma_start,
704 .bmdma_stop = mv_bmdma_stop, 713 .bmdma_stop = mv_bmdma_stop,
705 .bmdma_status = mv_bmdma_status, 714 .bmdma_status = mv_bmdma_status,
715
716 .port_start = mv_port_start,
717 .port_stop = mv_port_stop,
706}; 718};
707 719
708static struct ata_port_operations mv_iie_ops = { 720static struct ata_port_operations mv_iie_ops = {
@@ -1886,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
1886 * LOCKING: 1898 * LOCKING:
1887 * Inherited from caller. 1899 * Inherited from caller.
1888 */ 1900 */
1889static void mv_bmdma_stop(struct ata_queued_cmd *qc) 1901static void mv_bmdma_stop_ap(struct ata_port *ap)
1890{ 1902{
1891 struct ata_port *ap = qc->ap;
1892 void __iomem *port_mmio = mv_ap_base(ap); 1903 void __iomem *port_mmio = mv_ap_base(ap);
1893 u32 cmd; 1904 u32 cmd;
1894 1905
1895 /* clear start/stop bit */ 1906 /* clear start/stop bit */
1896 cmd = readl(port_mmio + BMDMA_CMD); 1907 cmd = readl(port_mmio + BMDMA_CMD);
1897 cmd &= ~ATA_DMA_START; 1908 if (cmd & ATA_DMA_START) {
1898 writelfl(cmd, port_mmio + BMDMA_CMD); 1909 cmd &= ~ATA_DMA_START;
1910 writelfl(cmd, port_mmio + BMDMA_CMD);
1911
1912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1913 ata_sff_dma_pause(ap);
1914 }
1915}
1899 1916
1900 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 1917static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1901 ata_sff_dma_pause(ap); 1918{
1919 mv_bmdma_stop_ap(qc->ap);
1902} 1920}
1903 1921
1904/** 1922/**
@@ -1922,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
1922 reg = readl(port_mmio + BMDMA_STATUS); 1940 reg = readl(port_mmio + BMDMA_STATUS);
1923 if (reg & ATA_DMA_ACTIVE) 1941 if (reg & ATA_DMA_ACTIVE)
1924 status = ATA_DMA_ACTIVE; 1942 status = ATA_DMA_ACTIVE;
1925 else 1943 else if (reg & ATA_DMA_ERR)
1926 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; 1944 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1945 else {
1946 /*
1947 * Just because DMA_ACTIVE is 0 (DMA completed),
1948 * this does _not_ mean the device is "done".
1949 * So we should not yet be signalling ATA_DMA_INTR
1950 * in some cases. Eg. DSM/TRIM, and perhaps others.
1951 */
1952 mv_bmdma_stop_ap(ap);
1953 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1954 status = 0;
1955 else
1956 status = ATA_DMA_INTR;
1957 }
1927 return status; 1958 return status;
1928} 1959}
1929 1960
@@ -1983,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1983 2014
1984 switch (tf->protocol) { 2015 switch (tf->protocol) {
1985 case ATA_PROT_DMA: 2016 case ATA_PROT_DMA:
2017 if (tf->command == ATA_CMD_DSM)
2018 return;
2019 /* fall-thru */
1986 case ATA_PROT_NCQ: 2020 case ATA_PROT_NCQ:
1987 break; /* continue below */ 2021 break; /* continue below */
1988 case ATA_PROT_PIO: 2022 case ATA_PROT_PIO:
@@ -2082,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2082 if ((tf->protocol != ATA_PROT_DMA) && 2116 if ((tf->protocol != ATA_PROT_DMA) &&
2083 (tf->protocol != ATA_PROT_NCQ)) 2117 (tf->protocol != ATA_PROT_NCQ))
2084 return; 2118 return;
2119 if (tf->command == ATA_CMD_DSM)
2120 return; /* use bmdma for this */
2085 2121
2086 /* Fill in Gen IIE command request block */ 2122 /* Fill in Gen IIE command request block */
2087 if (!(tf->flags & ATA_TFLAG_WRITE)) 2123 if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2248,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2248 } 2284 }
2249 2285
2250 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2251 ata_pio_queue_task(ap, qc, 0); 2287 ata_sff_queue_pio_task(link, 0);
2252 return 0; 2288 return 0;
2253} 2289}
2254 2290
@@ -2277,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2277 2313
2278 switch (qc->tf.protocol) { 2314 switch (qc->tf.protocol) {
2279 case ATA_PROT_DMA: 2315 case ATA_PROT_DMA:
2316 if (qc->tf.command == ATA_CMD_DSM) {
2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2318 return AC_ERR_OTHER;
2319 break; /* use bmdma for this */
2320 }
2321 /* fall thru */
2280 case ATA_PROT_NCQ: 2322 case ATA_PROT_NCQ:
2281 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2282 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
@@ -2344,7 +2386,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2344 if (IS_GEN_II(hpriv)) 2386 if (IS_GEN_II(hpriv))
2345 return mv_qc_issue_fis(qc); 2387 return mv_qc_issue_fis(qc);
2346 } 2388 }
2347 return ata_sff_qc_issue(qc); 2389 return ata_bmdma_qc_issue(qc);
2348} 2390}
2349 2391
2350static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 2392static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
@@ -2355,13 +2397,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2355 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 2397 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2356 return NULL; 2398 return NULL;
2357 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2399 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2358 if (qc) { 2400 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2359 if (qc->tf.flags & ATA_TFLAG_POLLING) 2401 return qc;
2360 qc = NULL; 2402 return NULL;
2361 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2362 qc = NULL;
2363 }
2364 return qc;
2365} 2403}
2366 2404
2367static void mv_pmp_error_handler(struct ata_port *ap) 2405static void mv_pmp_error_handler(struct ata_port *ap)
@@ -2546,9 +2584,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2546 char *when = "idle"; 2584 char *when = "idle";
2547 2585
2548 ata_ehi_clear_desc(ehi); 2586 ata_ehi_clear_desc(ehi);
2549 if (ap->flags & ATA_FLAG_DISABLED) { 2587 if (edma_was_enabled) {
2550 when = "disabled";
2551 } else if (edma_was_enabled) {
2552 when = "EDMA enabled"; 2588 when = "EDMA enabled";
2553 } else { 2589 } else {
2554 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 2590 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -2710,34 +2746,35 @@ static void mv_err_intr(struct ata_port *ap)
2710static void mv_process_crpb_response(struct ata_port *ap, 2746static void mv_process_crpb_response(struct ata_port *ap,
2711 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2712{ 2748{
2749 u8 ata_status;
2750 u16 edma_status = le16_to_cpu(response->flags);
2713 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 2751 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2714 2752
2715 if (qc) { 2753 if (unlikely(!qc)) {
2716 u8 ata_status;
2717 u16 edma_status = le16_to_cpu(response->flags);
2718 /*
2719 * edma_status from a response queue entry:
2720 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2721 * MSB is saved ATA status from command completion.
2722 */
2723 if (!ncq_enabled) {
2724 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2725 if (err_cause) {
2726 /*
2727 * Error will be seen/handled by mv_err_intr().
2728 * So do nothing at all here.
2729 */
2730 return;
2731 }
2732 }
2733 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2734 if (!ac_err_mask(ata_status))
2735 ata_qc_complete(qc);
2736 /* else: leave it for mv_err_intr() */
2737 } else {
2738 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 2754 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2739 __func__, tag); 2755 __func__, tag);
2756 return;
2740 } 2757 }
2758
2759 /*
2760 * edma_status from a response queue entry:
2761 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2762 * MSB is saved ATA status from command completion.
2763 */
2764 if (!ncq_enabled) {
2765 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2766 if (err_cause) {
2767 /*
2768 * Error will be seen/handled by
2769 * mv_err_intr(). So do nothing at all here.
2770 */
2771 return;
2772 }
2773 }
2774 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2775 if (!ac_err_mask(ata_status))
2776 ata_qc_complete(qc);
2777 /* else: leave it for mv_err_intr() */
2741} 2778}
2742 2779
2743static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 2780static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
@@ -2782,10 +2819,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2782 struct mv_port_priv *pp; 2819 struct mv_port_priv *pp;
2783 int edma_was_enabled; 2820 int edma_was_enabled;
2784 2821
2785 if (ap->flags & ATA_FLAG_DISABLED) {
2786 mv_unexpected_intr(ap, 0);
2787 return;
2788 }
2789 /* 2822 /*
2790 * Grab a snapshot of the EDMA_EN flag setting, 2823 * Grab a snapshot of the EDMA_EN flag setting,
2791 * so that we have a consistent view for this port, 2824 * so that we have a consistent view for this port,
@@ -2809,7 +2842,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2809 } else if (!edma_was_enabled) { 2842 } else if (!edma_was_enabled) {
2810 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2843 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2811 if (qc) 2844 if (qc)
2812 ata_sff_host_intr(ap, qc); 2845 ata_bmdma_port_intr(ap, qc);
2813 else 2846 else
2814 mv_unexpected_intr(ap, edma_was_enabled); 2847 mv_unexpected_intr(ap, edma_was_enabled);
2815 } 2848 }
@@ -3656,9 +3689,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3656 /* special case: control/altstatus doesn't have ATA_REG_ address */ 3689 /* special case: control/altstatus doesn't have ATA_REG_ address */
3657 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; 3690 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3658 3691
3659 /* unused: */
3660 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
3661
3662 /* Clear any currently outstanding port interrupt conditions */ 3692 /* Clear any currently outstanding port interrupt conditions */
3663 serr = port_mmio + mv_scr_offset(SCR_ERROR); 3693 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3664 writelfl(readl(serr), serr); 3694 writelfl(readl(serr), serr);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2a98b09ab735..cb89ef8d99d9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -272,7 +272,7 @@ enum ncq_saw_flag_list {
272}; 272};
273 273
274struct nv_swncq_port_priv { 274struct nv_swncq_port_priv {
275 struct ata_prd *prd; /* our SG list */ 275 struct ata_bmdma_prd *prd; /* our SG list */
276 dma_addr_t prd_dma; /* and its DMA mapping */ 276 dma_addr_t prd_dma; /* and its DMA mapping */
277 void __iomem *sactive_block; 277 void __iomem *sactive_block;
278 void __iomem *irq_block; 278 void __iomem *irq_block;
@@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
920 } 920 }
921 921
922 /* handle interrupt */ 922 /* handle interrupt */
923 return ata_sff_host_intr(ap, qc); 923 return ata_bmdma_port_intr(ap, qc);
924} 924}
925 925
926static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) 926static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -933,107 +933,110 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
933 933
934 for (i = 0; i < host->n_ports; i++) { 934 for (i = 0; i < host->n_ports; i++) {
935 struct ata_port *ap = host->ports[i]; 935 struct ata_port *ap = host->ports[i];
936 struct nv_adma_port_priv *pp = ap->private_data;
937 void __iomem *mmio = pp->ctl_block;
938 u16 status;
939 u32 gen_ctl;
940 u32 notifier, notifier_error;
941
936 notifier_clears[i] = 0; 942 notifier_clears[i] = 0;
937 943
938 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 944 /* if ADMA is disabled, use standard ata interrupt handler */
939 struct nv_adma_port_priv *pp = ap->private_data; 945 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
940 void __iomem *mmio = pp->ctl_block; 946 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
941 u16 status; 947 >> (NV_INT_PORT_SHIFT * i);
942 u32 gen_ctl; 948 handled += nv_host_intr(ap, irq_stat);
943 u32 notifier, notifier_error; 949 continue;
944 950 }
945 /* if ADMA is disabled, use standard ata interrupt handler */
946 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
947 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
948 >> (NV_INT_PORT_SHIFT * i);
949 handled += nv_host_intr(ap, irq_stat);
950 continue;
951 }
952 951
953 /* if in ATA register mode, check for standard interrupts */ 952 /* if in ATA register mode, check for standard interrupts */
954 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 953 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
955 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 954 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
956 >> (NV_INT_PORT_SHIFT * i); 955 >> (NV_INT_PORT_SHIFT * i);
957 if (ata_tag_valid(ap->link.active_tag)) 956 if (ata_tag_valid(ap->link.active_tag))
958 /** NV_INT_DEV indication seems unreliable at times 957 /** NV_INT_DEV indication seems unreliable
959 at least in ADMA mode. Force it on always when a 958 at times at least in ADMA mode. Force it
960 command is active, to prevent losing interrupts. */ 959 on always when a command is active, to
961 irq_stat |= NV_INT_DEV; 960 prevent losing interrupts. */
962 handled += nv_host_intr(ap, irq_stat); 961 irq_stat |= NV_INT_DEV;
963 } 962 handled += nv_host_intr(ap, irq_stat);
963 }
964
965 notifier = readl(mmio + NV_ADMA_NOTIFIER);
966 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
967 notifier_clears[i] = notifier | notifier_error;
968
969 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
964 970
965 notifier = readl(mmio + NV_ADMA_NOTIFIER); 971 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
966 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 972 !notifier_error)
967 notifier_clears[i] = notifier | notifier_error; 973 /* Nothing to do */
968 974 continue;
969 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 975
970 976 status = readw(mmio + NV_ADMA_STAT);
971 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 977
972 !notifier_error) 978 /*
973 /* Nothing to do */ 979 * Clear status. Ensure the controller sees the
974 continue; 980 * clearing before we start looking at any of the CPB
975 981 * statuses, so that any CPB completions after this
976 status = readw(mmio + NV_ADMA_STAT); 982 * point in the handler will raise another interrupt.
977 983 */
978 /* Clear status. Ensure the controller sees the clearing before we start 984 writew(status, mmio + NV_ADMA_STAT);
979 looking at any of the CPB statuses, so that any CPB completions after 985 readw(mmio + NV_ADMA_STAT); /* flush posted write */
980 this point in the handler will raise another interrupt. */ 986 rmb();
981 writew(status, mmio + NV_ADMA_STAT); 987
982 readw(mmio + NV_ADMA_STAT); /* flush posted write */ 988 handled++; /* irq handled if we got here */
983 rmb(); 989
984 990 /* freeze if hotplugged or controller error */
985 handled++; /* irq handled if we got here */ 991 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
986 992 NV_ADMA_STAT_HOTUNPLUG |
987 /* freeze if hotplugged or controller error */ 993 NV_ADMA_STAT_TIMEOUT |
988 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | 994 NV_ADMA_STAT_SERROR))) {
989 NV_ADMA_STAT_HOTUNPLUG | 995 struct ata_eh_info *ehi = &ap->link.eh_info;
990 NV_ADMA_STAT_TIMEOUT | 996
991 NV_ADMA_STAT_SERROR))) { 997 ata_ehi_clear_desc(ehi);
992 struct ata_eh_info *ehi = &ap->link.eh_info; 998 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
993 999 if (status & NV_ADMA_STAT_TIMEOUT) {
994 ata_ehi_clear_desc(ehi); 1000 ehi->err_mask |= AC_ERR_SYSTEM;
995 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); 1001 ata_ehi_push_desc(ehi, "timeout");
996 if (status & NV_ADMA_STAT_TIMEOUT) { 1002 } else if (status & NV_ADMA_STAT_HOTPLUG) {
997 ehi->err_mask |= AC_ERR_SYSTEM; 1003 ata_ehi_hotplugged(ehi);
998 ata_ehi_push_desc(ehi, "timeout"); 1004 ata_ehi_push_desc(ehi, "hotplug");
999 } else if (status & NV_ADMA_STAT_HOTPLUG) { 1005 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
1000 ata_ehi_hotplugged(ehi); 1006 ata_ehi_hotplugged(ehi);
1001 ata_ehi_push_desc(ehi, "hotplug"); 1007 ata_ehi_push_desc(ehi, "hot unplug");
1002 } else if (status & NV_ADMA_STAT_HOTUNPLUG) { 1008 } else if (status & NV_ADMA_STAT_SERROR) {
1003 ata_ehi_hotplugged(ehi); 1009 /* let EH analyze SError and figure out cause */
1004 ata_ehi_push_desc(ehi, "hot unplug"); 1010 ata_ehi_push_desc(ehi, "SError");
1005 } else if (status & NV_ADMA_STAT_SERROR) { 1011 } else
1006 /* let libata analyze SError and figure out the cause */ 1012 ata_ehi_push_desc(ehi, "unknown");
1007 ata_ehi_push_desc(ehi, "SError"); 1013 ata_port_freeze(ap);
1008 } else 1014 continue;
1009 ata_ehi_push_desc(ehi, "unknown"); 1015 }
1010 ata_port_freeze(ap); 1016
1011 continue; 1017 if (status & (NV_ADMA_STAT_DONE |
1018 NV_ADMA_STAT_CPBERR |
1019 NV_ADMA_STAT_CMD_COMPLETE)) {
1020 u32 check_commands = notifier_clears[i];
1021 int pos, rc;
1022
1023 if (status & NV_ADMA_STAT_CPBERR) {
1024 /* check all active commands */
1025 if (ata_tag_valid(ap->link.active_tag))
1026 check_commands = 1 <<
1027 ap->link.active_tag;
1028 else
1029 check_commands = ap->link.sactive;
1012 } 1030 }
1013 1031
1014 if (status & (NV_ADMA_STAT_DONE | 1032 /* check CPBs for completed commands */
1015 NV_ADMA_STAT_CPBERR | 1033 while ((pos = ffs(check_commands))) {
1016 NV_ADMA_STAT_CMD_COMPLETE)) { 1034 pos--;
1017 u32 check_commands = notifier_clears[i]; 1035 rc = nv_adma_check_cpb(ap, pos,
1018 int pos, error = 0;
1019
1020 if (status & NV_ADMA_STAT_CPBERR) {
1021 /* Check all active commands */
1022 if (ata_tag_valid(ap->link.active_tag))
1023 check_commands = 1 <<
1024 ap->link.active_tag;
1025 else
1026 check_commands = ap->
1027 link.sactive;
1028 }
1029
1030 /** Check CPBs for completed commands */
1031 while ((pos = ffs(check_commands)) && !error) {
1032 pos--;
1033 error = nv_adma_check_cpb(ap, pos,
1034 notifier_error & (1 << pos)); 1036 notifier_error & (1 << pos));
1035 check_commands &= ~(1 << pos); 1037 if (unlikely(rc))
1036 } 1038 check_commands = 0;
1039 check_commands &= ~(1 << pos);
1037 } 1040 }
1038 } 1041 }
1039 } 1042 }
@@ -1099,7 +1102,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
1099 u32 notifier_clears[2]; 1102 u32 notifier_clears[2];
1100 1103
1101 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { 1104 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1102 ata_sff_irq_clear(ap); 1105 ata_bmdma_irq_clear(ap);
1103 return; 1106 return;
1104 } 1107 }
1105 1108
@@ -1130,7 +1133,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1130 struct nv_adma_port_priv *pp = qc->ap->private_data; 1133 struct nv_adma_port_priv *pp = qc->ap->private_data;
1131 1134
1132 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) 1135 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1133 ata_sff_post_internal_cmd(qc); 1136 ata_bmdma_post_internal_cmd(qc);
1134} 1137}
1135 1138
1136static int nv_adma_port_start(struct ata_port *ap) 1139static int nv_adma_port_start(struct ata_port *ap)
@@ -1155,7 +1158,8 @@ static int nv_adma_port_start(struct ata_port *ap)
1155 if (rc) 1158 if (rc)
1156 return rc; 1159 return rc;
1157 1160
1158 rc = ata_port_start(ap); 1161 /* we might fallback to bmdma, allocate bmdma resources */
1162 rc = ata_bmdma_port_start(ap);
1159 if (rc) 1163 if (rc)
1160 return rc; 1164 return rc;
1161 1165
@@ -1407,7 +1411,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1407 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && 1411 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1408 (qc->flags & ATA_QCFLAG_DMAMAP)); 1412 (qc->flags & ATA_QCFLAG_DMAMAP));
1409 nv_adma_register_mode(qc->ap); 1413 nv_adma_register_mode(qc->ap);
1410 ata_sff_qc_prep(qc); 1414 ata_bmdma_qc_prep(qc);
1411 return; 1415 return;
1412 } 1416 }
1413 1417
@@ -1466,7 +1470,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1466 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && 1470 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1467 (qc->flags & ATA_QCFLAG_DMAMAP)); 1471 (qc->flags & ATA_QCFLAG_DMAMAP));
1468 nv_adma_register_mode(qc->ap); 1472 nv_adma_register_mode(qc->ap);
1469 return ata_sff_qc_issue(qc); 1473 return ata_bmdma_qc_issue(qc);
1470 } else 1474 } else
1471 nv_adma_mode(qc->ap); 1475 nv_adma_mode(qc->ap);
1472 1476
@@ -1498,22 +1502,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1498 spin_lock_irqsave(&host->lock, flags); 1502 spin_lock_irqsave(&host->lock, flags);
1499 1503
1500 for (i = 0; i < host->n_ports; i++) { 1504 for (i = 0; i < host->n_ports; i++) {
1501 struct ata_port *ap; 1505 struct ata_port *ap = host->ports[i];
1502 1506 struct ata_queued_cmd *qc;
1503 ap = host->ports[i];
1504 if (ap &&
1505 !(ap->flags & ATA_FLAG_DISABLED)) {
1506 struct ata_queued_cmd *qc;
1507 1507
1508 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1508 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1509 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 1509 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1510 handled += ata_sff_host_intr(ap, qc); 1510 handled += ata_bmdma_port_intr(ap, qc);
1511 else 1511 } else {
1512 // No request pending? Clear interrupt status 1512 /*
1513 // anyway, in case there's one pending. 1513 * No request pending? Clear interrupt status
1514 ap->ops->sff_check_status(ap); 1514 * anyway, in case there's one pending.
1515 */
1516 ap->ops->sff_check_status(ap);
1515 } 1517 }
1516
1517 } 1518 }
1518 1519
1519 spin_unlock_irqrestore(&host->lock, flags); 1520 spin_unlock_irqrestore(&host->lock, flags);
@@ -1526,11 +1527,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1526 int i, handled = 0; 1527 int i, handled = 0;
1527 1528
1528 for (i = 0; i < host->n_ports; i++) { 1529 for (i = 0; i < host->n_ports; i++) {
1529 struct ata_port *ap = host->ports[i]; 1530 handled += nv_host_intr(host->ports[i], irq_stat);
1530
1531 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1532 handled += nv_host_intr(ap, irq_stat);
1533
1534 irq_stat >>= NV_INT_PORT_SHIFT; 1531 irq_stat >>= NV_INT_PORT_SHIFT;
1535 } 1532 }
1536 1533
@@ -1674,7 +1671,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
1674 mask = readl(mmio_base + NV_INT_ENABLE_MCP55); 1671 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1675 mask &= ~(NV_INT_ALL_MCP55 << shift); 1672 mask &= ~(NV_INT_ALL_MCP55 << shift);
1676 writel(mask, mmio_base + NV_INT_ENABLE_MCP55); 1673 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1677 ata_sff_freeze(ap);
1678} 1674}
1679 1675
1680static void nv_mcp55_thaw(struct ata_port *ap) 1676static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1688,7 +1684,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1688 mask = readl(mmio_base + NV_INT_ENABLE_MCP55); 1684 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1689 mask |= (NV_INT_MASK_MCP55 << shift); 1685 mask |= (NV_INT_MASK_MCP55 << shift);
1690 writel(mask, mmio_base + NV_INT_ENABLE_MCP55); 1686 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1691 ata_sff_thaw(ap);
1692} 1687}
1693 1688
1694static void nv_adma_error_handler(struct ata_port *ap) 1689static void nv_adma_error_handler(struct ata_port *ap)
@@ -1744,7 +1739,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
1744 readw(mmio + NV_ADMA_CTL); /* flush posted write */ 1739 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1745 } 1740 }
1746 1741
1747 ata_sff_error_handler(ap); 1742 ata_bmdma_error_handler(ap);
1748} 1743}
1749 1744
1750static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) 1745static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1870,7 +1865,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
1870 ehc->i.action |= ATA_EH_RESET; 1865 ehc->i.action |= ATA_EH_RESET;
1871 } 1866 }
1872 1867
1873 ata_sff_error_handler(ap); 1868 ata_bmdma_error_handler(ap);
1874} 1869}
1875 1870
1876#ifdef CONFIG_PM 1871#ifdef CONFIG_PM
@@ -1991,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
1991 struct nv_swncq_port_priv *pp; 1986 struct nv_swncq_port_priv *pp;
1992 int rc; 1987 int rc;
1993 1988
1994 rc = ata_port_start(ap); 1989 /* we might fallback to bmdma, allocate bmdma resources */
1990 rc = ata_bmdma_port_start(ap);
1995 if (rc) 1991 if (rc)
1996 return rc; 1992 return rc;
1997 1993
@@ -2016,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
2016static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) 2012static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2017{ 2013{
2018 if (qc->tf.protocol != ATA_PROT_NCQ) { 2014 if (qc->tf.protocol != ATA_PROT_NCQ) {
2019 ata_sff_qc_prep(qc); 2015 ata_bmdma_qc_prep(qc);
2020 return; 2016 return;
2021 } 2017 }
2022 2018
@@ -2031,7 +2027,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2031 struct ata_port *ap = qc->ap; 2027 struct ata_port *ap = qc->ap;
2032 struct scatterlist *sg; 2028 struct scatterlist *sg;
2033 struct nv_swncq_port_priv *pp = ap->private_data; 2029 struct nv_swncq_port_priv *pp = ap->private_data;
2034 struct ata_prd *prd; 2030 struct ata_bmdma_prd *prd;
2035 unsigned int si, idx; 2031 unsigned int si, idx;
2036 2032
2037 prd = pp->prd + ATA_MAX_PRD * qc->tag; 2033 prd = pp->prd + ATA_MAX_PRD * qc->tag;
@@ -2092,7 +2088,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2092 struct nv_swncq_port_priv *pp = ap->private_data; 2088 struct nv_swncq_port_priv *pp = ap->private_data;
2093 2089
2094 if (qc->tf.protocol != ATA_PROT_NCQ) 2090 if (qc->tf.protocol != ATA_PROT_NCQ)
2095 return ata_sff_qc_issue(qc); 2091 return ata_bmdma_qc_issue(qc);
2096 2092
2097 DPRINTK("Enter\n"); 2093 DPRINTK("Enter\n");
2098 2094
@@ -2135,7 +2131,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2135 struct nv_swncq_port_priv *pp = ap->private_data; 2131 struct nv_swncq_port_priv *pp = ap->private_data;
2136 struct ata_eh_info *ehi = &ap->link.eh_info; 2132 struct ata_eh_info *ehi = &ap->link.eh_info;
2137 u32 sactive; 2133 u32 sactive;
2138 int nr_done = 0;
2139 u32 done_mask; 2134 u32 done_mask;
2140 int i; 2135 int i;
2141 u8 host_stat; 2136 u8 host_stat;
@@ -2176,22 +2171,21 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2176 pp->dhfis_bits &= ~(1 << i); 2171 pp->dhfis_bits &= ~(1 << i);
2177 pp->dmafis_bits &= ~(1 << i); 2172 pp->dmafis_bits &= ~(1 << i);
2178 pp->sdbfis_bits |= (1 << i); 2173 pp->sdbfis_bits |= (1 << i);
2179 nr_done++;
2180 } 2174 }
2181 } 2175 }
2182 2176
2183 if (!ap->qc_active) { 2177 if (!ap->qc_active) {
2184 DPRINTK("over\n"); 2178 DPRINTK("over\n");
2185 nv_swncq_pp_reinit(ap); 2179 nv_swncq_pp_reinit(ap);
2186 return nr_done; 2180 return 0;
2187 } 2181 }
2188 2182
2189 if (pp->qc_active & pp->dhfis_bits) 2183 if (pp->qc_active & pp->dhfis_bits)
2190 return nr_done; 2184 return 0;
2191 2185
2192 if ((pp->ncq_flags & ncq_saw_backout) || 2186 if ((pp->ncq_flags & ncq_saw_backout) ||
2193 (pp->qc_active ^ pp->dhfis_bits)) 2187 (pp->qc_active ^ pp->dhfis_bits))
2194 /* if the controller cann't get a device to host register FIS, 2188 /* if the controller can't get a device to host register FIS,
2195 * The driver needs to reissue the new command. 2189 * The driver needs to reissue the new command.
2196 */ 2190 */
2197 lack_dhfis = 1; 2191 lack_dhfis = 1;
@@ -2208,7 +2202,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2208 if (lack_dhfis) { 2202 if (lack_dhfis) {
2209 qc = ata_qc_from_tag(ap, pp->last_issue_tag); 2203 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2210 nv_swncq_issue_atacmd(ap, qc); 2204 nv_swncq_issue_atacmd(ap, qc);
2211 return nr_done; 2205 return 0;
2212 } 2206 }
2213 2207
2214 if (pp->defer_queue.defer_bits) { 2208 if (pp->defer_queue.defer_bits) {
@@ -2218,7 +2212,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2218 nv_swncq_issue_atacmd(ap, qc); 2212 nv_swncq_issue_atacmd(ap, qc);
2219 } 2213 }
2220 2214
2221 return nr_done; 2215 return 0;
2222} 2216}
2223 2217
2224static inline u32 nv_swncq_tag(struct ata_port *ap) 2218static inline u32 nv_swncq_tag(struct ata_port *ap)
@@ -2230,7 +2224,7 @@ static inline u32 nv_swncq_tag(struct ata_port *ap)
2230 return (tag & 0x1f); 2224 return (tag & 0x1f);
2231} 2225}
2232 2226
2233static int nv_swncq_dmafis(struct ata_port *ap) 2227static void nv_swncq_dmafis(struct ata_port *ap)
2234{ 2228{
2235 struct ata_queued_cmd *qc; 2229 struct ata_queued_cmd *qc;
2236 unsigned int rw; 2230 unsigned int rw;
@@ -2245,7 +2239,7 @@ static int nv_swncq_dmafis(struct ata_port *ap)
2245 qc = ata_qc_from_tag(ap, tag); 2239 qc = ata_qc_from_tag(ap, tag);
2246 2240
2247 if (unlikely(!qc)) 2241 if (unlikely(!qc))
2248 return 0; 2242 return;
2249 2243
2250 rw = qc->tf.flags & ATA_TFLAG_WRITE; 2244 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2251 2245
@@ -2260,8 +2254,6 @@ static int nv_swncq_dmafis(struct ata_port *ap)
2260 dmactl |= ATA_DMA_WR; 2254 dmactl |= ATA_DMA_WR;
2261 2255
2262 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2256 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2263
2264 return 1;
2265} 2257}
2266 2258
2267static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) 2259static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
@@ -2271,7 +2263,6 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2271 struct ata_eh_info *ehi = &ap->link.eh_info; 2263 struct ata_eh_info *ehi = &ap->link.eh_info;
2272 u32 serror; 2264 u32 serror;
2273 u8 ata_stat; 2265 u8 ata_stat;
2274 int rc = 0;
2275 2266
2276 ata_stat = ap->ops->sff_check_status(ap); 2267 ata_stat = ap->ops->sff_check_status(ap);
2277 nv_swncq_irq_clear(ap, fis); 2268 nv_swncq_irq_clear(ap, fis);
@@ -2316,8 +2307,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2316 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n", 2307 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2317 ap->print_id, pp->qc_active, pp->dhfis_bits, 2308 ap->print_id, pp->qc_active, pp->dhfis_bits,
2318 pp->dmafis_bits, readl(pp->sactive_block)); 2309 pp->dmafis_bits, readl(pp->sactive_block));
2319 rc = nv_swncq_sdbfis(ap); 2310 if (nv_swncq_sdbfis(ap) < 0)
2320 if (rc < 0)
2321 goto irq_error; 2311 goto irq_error;
2322 } 2312 }
2323 2313
@@ -2354,7 +2344,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2354 */ 2344 */
2355 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); 2345 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2356 pp->ncq_flags |= ncq_saw_dmas; 2346 pp->ncq_flags |= ncq_saw_dmas;
2357 rc = nv_swncq_dmafis(ap); 2347 nv_swncq_dmafis(ap);
2358 } 2348 }
2359 2349
2360irq_exit: 2350irq_exit:
@@ -2380,16 +2370,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2380 for (i = 0; i < host->n_ports; i++) { 2370 for (i = 0; i < host->n_ports; i++) {
2381 struct ata_port *ap = host->ports[i]; 2371 struct ata_port *ap = host->ports[i];
2382 2372
2383 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 2373 if (ap->link.sactive) {
2384 if (ap->link.sactive) { 2374 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2385 nv_swncq_host_interrupt(ap, (u16)irq_stat); 2375 handled = 1;
2386 handled = 1; 2376 } else {
2387 } else { 2377 if (irq_stat) /* reserve Hotplug */
2388 if (irq_stat) /* reserve Hotplug */ 2378 nv_swncq_irq_clear(ap, 0xfff0);
2389 nv_swncq_irq_clear(ap, 0xfff0);
2390 2379
2391 handled += nv_host_intr(ap, (u8)irq_stat); 2380 handled += nv_host_intr(ap, (u8)irq_stat);
2392 }
2393 } 2381 }
2394 irq_stat >>= NV_INT_PORT_SHIFT_MCP55; 2382 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2395 } 2383 }
@@ -2436,7 +2424,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2436 2424
2437 ppi[0] = &nv_port_info[type]; 2425 ppi[0] = &nv_port_info[type];
2438 ipriv = ppi[0]->private_data; 2426 ipriv = ppi[0]->private_data;
2439 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2427 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2440 if (rc) 2428 if (rc)
2441 return rc; 2429 return rc;
2442 2430
@@ -2479,8 +2467,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2479 } 2467 }
2480 2468
2481 pci_set_master(pdev); 2469 pci_set_master(pdev);
2482 return ata_host_activate(host, pdev->irq, ipriv->irq_handler, 2470 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2483 IRQF_SHARED, ipriv->sht);
2484} 2471}
2485 2472
2486#ifdef CONFIG_PM 2473#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5356ec00d2b4..f03ad48273ff 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
333 struct pdc_port_priv *pp; 333 struct pdc_port_priv *pp;
334 int rc; 334 int rc;
335 335
336 rc = ata_port_start(ap); 336 /* we use the same prd table as bmdma, allocate it */
337 rc = ata_bmdma_port_start(ap);
337 if (rc) 338 if (rc)
338 return rc; 339 return rc;
339 340
@@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
499static void pdc_atapi_pkt(struct ata_queued_cmd *qc) 500static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
500{ 501{
501 struct ata_port *ap = qc->ap; 502 struct ata_port *ap = qc->ap;
502 dma_addr_t sg_table = ap->prd_dma; 503 dma_addr_t sg_table = ap->bmdma_prd_dma;
503 unsigned int cdb_len = qc->dev->cdb_len; 504 unsigned int cdb_len = qc->dev->cdb_len;
504 u8 *cdb = qc->cdb; 505 u8 *cdb = qc->cdb;
505 struct pdc_port_priv *pp = ap->private_data; 506 struct pdc_port_priv *pp = ap->private_data;
@@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
587static void pdc_fill_sg(struct ata_queued_cmd *qc) 588static void pdc_fill_sg(struct ata_queued_cmd *qc)
588{ 589{
589 struct ata_port *ap = qc->ap; 590 struct ata_port *ap = qc->ap;
591 struct ata_bmdma_prd *prd = ap->bmdma_prd;
590 struct scatterlist *sg; 592 struct scatterlist *sg;
591 const u32 SG_COUNT_ASIC_BUG = 41*4; 593 const u32 SG_COUNT_ASIC_BUG = 41*4;
592 unsigned int si, idx; 594 unsigned int si, idx;
@@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
613 if ((offset + sg_len) > 0x10000) 615 if ((offset + sg_len) > 0x10000)
614 len = 0x10000 - offset; 616 len = 0x10000 - offset;
615 617
616 ap->prd[idx].addr = cpu_to_le32(addr); 618 prd[idx].addr = cpu_to_le32(addr);
617 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 619 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
618 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 620 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
619 621
620 idx++; 622 idx++;
@@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
623 } 625 }
624 } 626 }
625 627
626 len = le32_to_cpu(ap->prd[idx - 1].flags_len); 628 len = le32_to_cpu(prd[idx - 1].flags_len);
627 629
628 if (len > SG_COUNT_ASIC_BUG) { 630 if (len > SG_COUNT_ASIC_BUG) {
629 u32 addr; 631 u32 addr;
630 632
631 VPRINTK("Splitting last PRD.\n"); 633 VPRINTK("Splitting last PRD.\n");
632 634
633 addr = le32_to_cpu(ap->prd[idx - 1].addr); 635 addr = le32_to_cpu(prd[idx - 1].addr);
634 ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); 636 prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
635 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); 637 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
636 638
637 addr = addr + len - SG_COUNT_ASIC_BUG; 639 addr = addr + len - SG_COUNT_ASIC_BUG;
638 len = SG_COUNT_ASIC_BUG; 640 len = SG_COUNT_ASIC_BUG;
639 ap->prd[idx].addr = cpu_to_le32(addr); 641 prd[idx].addr = cpu_to_le32(addr);
640 ap->prd[idx].flags_len = cpu_to_le32(len); 642 prd[idx].flags_len = cpu_to_le32(len);
641 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 643 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
642 644
643 idx++; 645 idx++;
644 } 646 }
645 647
646 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 648 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
647} 649}
648 650
649static void pdc_qc_prep(struct ata_queued_cmd *qc) 651static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
658 pdc_fill_sg(qc); 660 pdc_fill_sg(qc);
659 /*FALLTHROUGH*/ 661 /*FALLTHROUGH*/
660 case ATA_PROT_NODATA: 662 case ATA_PROT_NODATA:
661 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, 663 i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
662 qc->dev->devno, pp->pkt); 664 qc->dev->devno, pp->pkt);
663 if (qc->tf.flags & ATA_TFLAG_LBA48) 665 if (qc->tf.flags & ATA_TFLAG_LBA48)
664 i = pdc_prep_lba48(&qc->tf, pp->pkt, i); 666 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
@@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
838 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 840 if (!(ap->pflags & ATA_PFLAG_FROZEN))
839 pdc_reset_port(ap); 841 pdc_reset_port(ap);
840 842
841 ata_std_error_handler(ap); 843 ata_sff_error_handler(ap);
842} 844}
843 845
844static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) 846static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
984 /* check for a plug or unplug event */ 986 /* check for a plug or unplug event */
985 ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); 987 ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
986 tmp = hotplug_status & (0x11 << ata_no); 988 tmp = hotplug_status & (0x11 << ata_no);
987 if (tmp && ap && 989 if (tmp) {
988 !(ap->flags & ATA_FLAG_DISABLED)) {
989 struct ata_eh_info *ehi = &ap->link.eh_info; 990 struct ata_eh_info *ehi = &ap->link.eh_info;
990 ata_ehi_clear_desc(ehi); 991 ata_ehi_clear_desc(ehi);
991 ata_ehi_hotplugged(ehi); 992 ata_ehi_hotplugged(ehi);
@@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
997 998
998 /* check for a packet interrupt */ 999 /* check for a packet interrupt */
999 tmp = mask & (1 << (i + 1)); 1000 tmp = mask & (1 << (i + 1));
1000 if (tmp && ap && 1001 if (tmp) {
1001 !(ap->flags & ATA_FLAG_DISABLED)) {
1002 struct ata_queued_cmd *qc; 1002 struct ata_queued_cmd *qc;
1003 1003
1004 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1004 qc = ata_qc_from_tag(ap, ap->link.active_tag);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 92ba45e6689b..daeebf19a6a9 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
120static void qs_qc_prep(struct ata_queued_cmd *qc); 120static void qs_qc_prep(struct ata_queued_cmd *qc);
121static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); 121static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
122static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 122static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
123static void qs_bmdma_stop(struct ata_queued_cmd *qc);
124static u8 qs_bmdma_status(struct ata_port *ap);
125static void qs_freeze(struct ata_port *ap); 123static void qs_freeze(struct ata_port *ap);
126static void qs_thaw(struct ata_port *ap); 124static void qs_thaw(struct ata_port *ap);
127static int qs_prereset(struct ata_link *link, unsigned long deadline); 125static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
137 .inherits = &ata_sff_port_ops, 135 .inherits = &ata_sff_port_ops,
138 136
139 .check_atapi_dma = qs_check_atapi_dma, 137 .check_atapi_dma = qs_check_atapi_dma,
140 .bmdma_stop = qs_bmdma_stop,
141 .bmdma_status = qs_bmdma_status,
142 .qc_prep = qs_qc_prep, 138 .qc_prep = qs_qc_prep,
143 .qc_issue = qs_qc_issue, 139 .qc_issue = qs_qc_issue,
144 140
@@ -147,7 +143,6 @@ static struct ata_port_operations qs_ata_ops = {
147 .prereset = qs_prereset, 143 .prereset = qs_prereset,
148 .softreset = ATA_OP_NULL, 144 .softreset = ATA_OP_NULL,
149 .error_handler = qs_error_handler, 145 .error_handler = qs_error_handler,
150 .post_internal_cmd = ATA_OP_NULL,
151 .lost_interrupt = ATA_OP_NULL, 146 .lost_interrupt = ATA_OP_NULL,
152 147
153 .scr_read = qs_scr_read, 148 .scr_read = qs_scr_read,
@@ -191,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
191 return 1; /* ATAPI DMA not supported */ 186 return 1; /* ATAPI DMA not supported */
192} 187}
193 188
194static void qs_bmdma_stop(struct ata_queued_cmd *qc)
195{
196 /* nothing */
197}
198
199static u8 qs_bmdma_status(struct ata_port *ap)
200{
201 return 0;
202}
203
204static inline void qs_enter_reg_mode(struct ata_port *ap) 189static inline void qs_enter_reg_mode(struct ata_port *ap)
205{ 190{
206 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); 191 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -255,7 +240,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
255static void qs_error_handler(struct ata_port *ap) 240static void qs_error_handler(struct ata_port *ap)
256{ 241{
257 qs_enter_reg_mode(ap); 242 qs_enter_reg_mode(ap);
258 ata_std_error_handler(ap); 243 ata_sff_error_handler(ap);
259} 244}
260 245
261static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) 246static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
@@ -304,10 +289,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
304 VPRINTK("ENTER\n"); 289 VPRINTK("ENTER\n");
305 290
306 qs_enter_reg_mode(qc->ap); 291 qs_enter_reg_mode(qc->ap);
307 if (qc->tf.protocol != ATA_PROT_DMA) { 292 if (qc->tf.protocol != ATA_PROT_DMA)
308 ata_sff_qc_prep(qc);
309 return; 293 return;
310 }
311 294
312 nelem = qs_fill_sg(qc); 295 nelem = qs_fill_sg(qc);
313 296
@@ -404,26 +387,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
404 u8 sHST = sff1 & 0x3f; /* host status */ 387 u8 sHST = sff1 & 0x3f; /* host status */
405 unsigned int port_no = (sff1 >> 8) & 0x03; 388 unsigned int port_no = (sff1 >> 8) & 0x03;
406 struct ata_port *ap = host->ports[port_no]; 389 struct ata_port *ap = host->ports[port_no];
390 struct qs_port_priv *pp = ap->private_data;
391 struct ata_queued_cmd *qc;
407 392
408 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 393 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
409 sff1, sff0, port_no, sHST, sDST); 394 sff1, sff0, port_no, sHST, sDST);
410 handled = 1; 395 handled = 1;
411 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 396 if (!pp || pp->state != qs_state_pkt)
412 struct ata_queued_cmd *qc; 397 continue;
413 struct qs_port_priv *pp = ap->private_data; 398 qc = ata_qc_from_tag(ap, ap->link.active_tag);
414 if (!pp || pp->state != qs_state_pkt) 399 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
415 continue; 400 switch (sHST) {
416 qc = ata_qc_from_tag(ap, ap->link.active_tag); 401 case 0: /* successful CPB */
417 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 402 case 3: /* device error */
418 switch (sHST) { 403 qs_enter_reg_mode(qc->ap);
419 case 0: /* successful CPB */ 404 qs_do_or_die(qc, sDST);
420 case 3: /* device error */ 405 break;
421 qs_enter_reg_mode(qc->ap); 406 default:
422 qs_do_or_die(qc, sDST); 407 break;
423 break;
424 default:
425 break;
426 }
427 } 408 }
428 } 409 }
429 } 410 }
@@ -436,33 +417,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
436 unsigned int handled = 0, port_no; 417 unsigned int handled = 0, port_no;
437 418
438 for (port_no = 0; port_no < host->n_ports; ++port_no) { 419 for (port_no = 0; port_no < host->n_ports; ++port_no) {
439 struct ata_port *ap; 420 struct ata_port *ap = host->ports[port_no];
440 ap = host->ports[port_no]; 421 struct qs_port_priv *pp = ap->private_data;
441 if (ap && 422 struct ata_queued_cmd *qc;
442 !(ap->flags & ATA_FLAG_DISABLED)) { 423
443 struct ata_queued_cmd *qc; 424 qc = ata_qc_from_tag(ap, ap->link.active_tag);
444 struct qs_port_priv *pp; 425 if (!qc) {
445 qc = ata_qc_from_tag(ap, ap->link.active_tag); 426 /*
446 if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) { 427 * The qstor hardware generates spurious
447 /* 428 * interrupts from time to time when switching
448 * The qstor hardware generates spurious 429 * in and out of packet mode. There's no
449 * interrupts from time to time when switching 430 * obvious way to know if we're here now due
450 * in and out of packet mode. 431 * to that, so just ack the irq and pretend we
451 * There's no obvious way to know if we're 432 * knew it was ours.. (ugh). This does not
452 * here now due to that, so just ack the irq 433 * affect packet mode.
453 * and pretend we knew it was ours.. (ugh). 434 */
454 * This does not affect packet mode. 435 ata_sff_check_status(ap);
455 */ 436 handled = 1;
456 ata_sff_check_status(ap); 437 continue;
457 handled = 1;
458 continue;
459 }
460 pp = ap->private_data;
461 if (!pp || pp->state != qs_state_mmio)
462 continue;
463 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
464 handled |= ata_sff_host_intr(ap, qc);
465 } 438 }
439
440 if (!pp || pp->state != qs_state_mmio)
441 continue;
442 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
443 handled |= ata_sff_port_intr(ap, qc);
466 } 444 }
467 return handled; 445 return handled;
468} 446}
@@ -509,11 +487,7 @@ static int qs_port_start(struct ata_port *ap)
509 void __iomem *mmio_base = qs_mmio_base(ap->host); 487 void __iomem *mmio_base = qs_mmio_base(ap->host);
510 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); 488 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
511 u64 addr; 489 u64 addr;
512 int rc;
513 490
514 rc = ata_port_start(ap);
515 if (rc)
516 return rc;
517 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 491 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
518 if (!pp) 492 if (!pp)
519 return -ENOMEM; 493 return -ENOMEM;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3cb69d5fb817..3a4f84219719 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
284 void __iomem *bmdma = ap->ioaddr.bmdma_addr; 284 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
285 285
286 /* load PRD table addr. */ 286 /* load PRD table addr. */
287 iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS); 287 iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
288 288
289 /* issue r/w command */ 289 /* issue r/w command */
290 ap->ops->sff_exec_command(ap, &qc->tf); 290 ap->ops->sff_exec_command(ap, &qc->tf);
@@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
311{ 311{
312 struct scatterlist *sg; 312 struct scatterlist *sg;
313 struct ata_port *ap = qc->ap; 313 struct ata_port *ap = qc->ap;
314 struct ata_prd *prd, *last_prd = NULL; 314 struct ata_bmdma_prd *prd, *last_prd = NULL;
315 unsigned int si; 315 unsigned int si;
316 316
317 prd = &ap->prd[0]; 317 prd = &ap->bmdma_prd[0];
318 for_each_sg(qc->sg, sg, qc->n_elem, si) { 318 for_each_sg(qc->sg, sg, qc->n_elem, si) {
319 /* Note h/w doesn't support 64-bit, so we unconditionally 319 /* Note h/w doesn't support 64-bit, so we unconditionally
320 * truncate dma_addr_t to u32. 320 * truncate dma_addr_t to u32.
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
503 goto err_hsm; 503 goto err_hsm;
504 504
505 /* ack bmdma irq events */ 505 /* ack bmdma irq events */
506 ata_sff_irq_clear(ap); 506 ata_bmdma_irq_clear(ap);
507 507
508 /* kick HSM in the ass */ 508 /* kick HSM in the ass */
509 ata_sff_hsm_move(ap, qc, status, 0); 509 ata_sff_hsm_move(ap, qc, status, 0);
@@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
532 struct ata_port *ap = host->ports[i]; 532 struct ata_port *ap = host->ports[i];
533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
534 534
535 if (unlikely(ap->flags & ATA_FLAG_DISABLED))
536 continue;
537
538 /* turn off SATA_IRQ if not supported */ 535 /* turn off SATA_IRQ if not supported */
539 if (ap->flags & SIL_FLAG_NO_SATA_IRQ) 536 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
540 bmdma2 &= ~SIL_DMA_SATA_IRQ; 537 bmdma2 &= ~SIL_DMA_SATA_IRQ;
@@ -587,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
587 584
588 /* clear IRQ */ 585 /* clear IRQ */
589 ap->ops->sff_check_status(ap); 586 ap->ops->sff_check_status(ap);
590 ata_sff_irq_clear(ap); 587 ata_bmdma_irq_clear(ap);
591 588
592 /* turn on SATA IRQ if supported */ 589 /* turn on SATA IRQ if supported */
593 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) 590 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 433b6b89c795..be7726d7686d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -539,12 +539,12 @@ static void sil24_config_port(struct ata_port *ap)
539 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); 539 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
540 540
541 /* zero error counters. */ 541 /* zero error counters. */
542 writel(0x8000, port + PORT_DECODE_ERR_THRESH); 542 writew(0x8000, port + PORT_DECODE_ERR_THRESH);
543 writel(0x8000, port + PORT_CRC_ERR_THRESH); 543 writew(0x8000, port + PORT_CRC_ERR_THRESH);
544 writel(0x8000, port + PORT_HSHK_ERR_THRESH); 544 writew(0x8000, port + PORT_HSHK_ERR_THRESH);
545 writel(0x0000, port + PORT_DECODE_ERR_CNT); 545 writew(0x0000, port + PORT_DECODE_ERR_CNT);
546 writel(0x0000, port + PORT_CRC_ERR_CNT); 546 writew(0x0000, port + PORT_CRC_ERR_CNT);
547 writel(0x0000, port + PORT_HSHK_ERR_CNT); 547 writew(0x0000, port + PORT_HSHK_ERR_CNT);
548 548
549 /* always use 64bit activation */ 549 /* always use 64bit activation */
550 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 550 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
@@ -622,6 +622,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
622 irq_enabled = readl(port + PORT_IRQ_ENABLE_SET); 622 irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
623 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR); 623 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
624 624
625 /*
626 * The barrier is required to ensure that writes to cmd_block reach
627 * the memory before the write to PORT_CMD_ACTIVATE.
628 */
629 wmb();
625 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 630 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
626 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 631 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
627 632
@@ -865,7 +870,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
865 } else { 870 } else {
866 prb = &cb->atapi.prb; 871 prb = &cb->atapi.prb;
867 sge = cb->atapi.sge; 872 sge = cb->atapi.sge;
868 memset(cb->atapi.cdb, 0, 32); 873 memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
869 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len); 874 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
870 875
871 if (ata_is_data(qc->tf.protocol)) { 876 if (ata_is_data(qc->tf.protocol)) {
@@ -895,6 +900,11 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
895 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block); 900 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
896 activate = port + PORT_CMD_ACTIVATE + tag * 8; 901 activate = port + PORT_CMD_ACTIVATE + tag * 8;
897 902
903 /*
904 * The barrier is required to ensure that writes to cmd_block reach
905 * the memory before the write to PORT_CMD_ACTIVATE.
906 */
907 wmb();
898 writel((u32)paddr, activate); 908 writel((u32)paddr, activate);
899 writel((u64)paddr >> 32, activate + 4); 909 writel((u64)paddr >> 32, activate + 4);
900 910
@@ -1160,13 +1170,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
1160 1170
1161 for (i = 0; i < host->n_ports; i++) 1171 for (i = 0; i < host->n_ports; i++)
1162 if (status & (1 << i)) { 1172 if (status & (1 << i)) {
1163 struct ata_port *ap = host->ports[i]; 1173 sil24_host_intr(host->ports[i]);
1164 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 1174 handled++;
1165 sil24_host_intr(ap);
1166 handled++;
1167 } else
1168 printk(KERN_ERR DRV_NAME
1169 ": interrupt from disabled port %d\n", i);
1170 } 1175 }
1171 1176
1172 spin_unlock(&host->lock); 1177 spin_unlock(&host->lock);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a8..2bfe3ae03976 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
279 break; 279 break;
280 } 280 }
281 281
282 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 282 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
283 if (rc) 283 if (rc)
284 return rc; 284 return rc;
285 285
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
308 308
309 pci_set_master(pdev); 309 pci_set_master(pdev);
310 pci_intx(pdev, 1); 310 pci_intx(pdev, 1);
311 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 311 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
312 IRQF_SHARED, &sis_sht); 312 IRQF_SHARED, &sis_sht);
313} 313}
314 314
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7257f2d5c52c..7d9db4aaf07e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
224 224
225 /* load PRD table addr. */ 225 /* load PRD table addr. */
226 mb(); /* make sure PRD table writes are visible to controller */ 226 mb(); /* make sure PRD table writes are visible to controller */
227 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); 227 writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
228 228
229 /* specify data direction, triple-check start bit is clear */ 229 /* specify data direction, triple-check start bit is clear */
230 dmactl = readb(mmio + ATA_DMA_CMD); 230 dmactl = readb(mmio + ATA_DMA_CMD);
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
502 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); 502 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
503 503
504 pci_set_master(pdev); 504 pci_set_master(pdev);
505 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 505 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
506 IRQF_SHARED, &k2_sata_sht); 506 IRQF_SHARED, &k2_sata_sht);
507} 507}
508 508
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 232468f2ea90..bedd5188e5b0 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
302{ 302{
303 struct device *dev = ap->host->dev; 303 struct device *dev = ap->host->dev;
304 struct pdc_port_priv *pp; 304 struct pdc_port_priv *pp;
305 int rc;
306
307 rc = ata_port_start(ap);
308 if (rc)
309 return rc;
310 305
311 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 306 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
312 if (!pp) 307 if (!pp)
@@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
840 ap = host->ports[port_no]; 835 ap = host->ports[port_no];
841 tmp = mask & (1 << i); 836 tmp = mask & (1 << i);
842 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 837 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
843 if (tmp && ap && 838 if (tmp && ap) {
844 !(ap->flags & ATA_FLAG_DISABLED)) {
845 struct ata_queued_cmd *qc; 839 struct ata_queued_cmd *qc;
846 840
847 qc = ata_qc_from_tag(ap, ap->link.active_tag); 841 qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
927 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 921 if (!(ap->pflags & ATA_PFLAG_FROZEN))
928 pdc_reset_port(ap); 922 pdc_reset_port(ap);
929 923
930 ata_std_error_handler(ap); 924 ata_sff_error_handler(ap);
931} 925}
932 926
933static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) 927static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 011e098590d1..b8578c32d344 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
181 if (rc) 181 if (rc)
182 return rc; 182 return rc;
183 183
184 rc = ata_pci_bmdma_init(host); 184 ata_pci_bmdma_init(host);
185 if (rc)
186 return rc;
187 185
188 iomap = host->iomap; 186 iomap = host->iomap;
189 187
@@ -244,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
244 242
245 pci_set_master(pdev); 243 pci_set_master(pdev);
246 pci_intx(pdev, 1); 244 pci_intx(pdev, 1);
247 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 245 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
248 IRQF_SHARED, &uli_sht); 246 IRQF_SHARED, &uli_sht);
249} 247}
250 248
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc81..4730c42a5ee5 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
308 * certain way. Leave it alone and just clear pending IRQ. 308 * certain way. Leave it alone and just clear pending IRQ.
309 */ 309 */
310 ap->ops->sff_check_status(ap); 310 ap->ops->sff_check_status(ap);
311 ata_sff_irq_clear(ap); 311 ata_bmdma_irq_clear(ap);
312} 312}
313 313
314/** 314/**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
463 struct ata_host *host; 463 struct ata_host *host;
464 int rc; 464 int rc;
465 465
466 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 466 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
467 if (rc) 467 if (rc)
468 return rc; 468 return rc;
469 *r_host = host; 469 *r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
520 struct ata_host *host; 520 struct ata_host *host;
521 int i, rc; 521 int i, rc;
522 522
523 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 523 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
524 if (rc) 524 if (rc)
525 return rc; 525 return rc;
526 *r_host = host; 526 *r_host = host;
@@ -575,6 +575,33 @@ static void svia_configure(struct pci_dev *pdev)
575 tmp8 |= NATIVE_MODE_ALL; 575 tmp8 |= NATIVE_MODE_ALL;
576 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 576 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
577 } 577 }
578
579 /*
580 * vt6421 has problems talking to some drives. The following
581 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
582 *
583 * When host issues HOLD, device may send up to 20DW of data
584 * before acknowledging it with HOLDA and the host should be
585 * able to buffer them in FIFO. Unfortunately, some WD drives
586 * send upto 40DW before acknowledging HOLD and, in the
587 * default configuration, this ends up overflowing vt6421's
588 * FIFO, making the controller abort the transaction with
589 * R_ERR.
590 *
591 * Rx52[2] is the internal 128DW FIFO Flow control watermark
592 * adjusting mechanism enable bit and the default value 0
593 * means host will issue HOLD to device when the left FIFO
594 * size goes below 32DW. Setting it to 1 makes the watermark
595 * 64DW.
596 *
597 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
598 * http://article.gmane.org/gmane.linux.ide/46352
599 */
600 if (pdev->device == 0x3249) {
601 pci_read_config_byte(pdev, 0x52, &tmp8);
602 tmp8 |= 1 << 2;
603 pci_write_config_byte(pdev, 0x52, tmp8);
604 }
578} 605}
579 606
580static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 607static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -628,7 +655,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
628 svia_configure(pdev); 655 svia_configure(pdev);
629 656
630 pci_set_master(pdev); 657 pci_set_master(pdev);
631 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 658 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
632 IRQF_SHARED, &svia_sht); 659 IRQF_SHARED, &svia_sht);
633} 660}
634 661
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8b2a278b2547..b777176ff494 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
245 245
246 qc = ata_qc_from_tag(ap, ap->link.active_tag); 246 qc = ata_qc_from_tag(ap, ap->link.active_tag);
247 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) 247 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
248 handled = ata_sff_host_intr(ap, qc); 248 handled = ata_bmdma_port_intr(ap, qc);
249 249
250 /* We received an interrupt during a polled command, 250 /* We received an interrupt during a polled command,
251 * or some other spurious condition. Interrupt reporting 251 * or some other spurious condition. Interrupt reporting
@@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
284 for (i = 0; i < host->n_ports; i++) { 284 for (i = 0; i < host->n_ports; i++) {
285 u8 port_status = (status >> (8 * i)) & 0xff; 285 u8 port_status = (status >> (8 * i)) & 0xff;
286 if (port_status) { 286 if (port_status) {
287 struct ata_port *ap = host->ports[i]; 287 vsc_port_intr(port_status, host->ports[i]);
288 288 handled++;
289 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
290 vsc_port_intr(port_status, ap);
291 handled++;
292 } else
293 dev_printk(KERN_ERR, host->dev,
294 "interrupt from disabled port %d\n", i);
295 } 289 }
296 } 290 }
297 291