Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/Kconfig              |   35
-rw-r--r--  drivers/ata/Makefile             |    4
-rw-r--r--  drivers/ata/ahci.c               |  229
-rw-r--r--  drivers/ata/ata_generic.c        |   11
-rw-r--r--  drivers/ata/ata_piix.c           |  194
-rw-r--r--  drivers/ata/libata-core.c        |  536
-rw-r--r--  drivers/ata/libata-eh.c          |  108
-rw-r--r--  drivers/ata/libata-scsi.c        |  328
-rw-r--r--  drivers/ata/libata-sff.c         |   43
-rw-r--r--  drivers/ata/libata.h             |   24
-rw-r--r--  drivers/ata/pata_ali.c           |  140
-rw-r--r--  drivers/ata/pata_amd.c           |   26
-rw-r--r--  drivers/ata/pata_artop.c         |    1
-rw-r--r--  drivers/ata/pata_atiixp.c        |    9
-rw-r--r--  drivers/ata/pata_cmd64x.c        |   23
-rw-r--r--  drivers/ata/pata_cs5520.c        |   25
-rw-r--r--  drivers/ata/pata_cs5530.c        |  100
-rw-r--r--  drivers/ata/pata_cs5535.c        |    9
-rw-r--r--  drivers/ata/pata_cypress.c       |    9
-rw-r--r--  drivers/ata/pata_efar.c          |    7
-rw-r--r--  drivers/ata/pata_hpt366.c        |   58
-rw-r--r--  drivers/ata/pata_hpt37x.c        |    1
-rw-r--r--  drivers/ata/pata_hpt3x2n.c       |    1
-rw-r--r--  drivers/ata/pata_hpt3x3.c        |   47
-rw-r--r--  drivers/ata/pata_isapnp.c        |    1
-rw-r--r--  drivers/ata/pata_it821x.c        |   19
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c     |  271
-rw-r--r--  drivers/ata/pata_jmicron.c       |   36
-rw-r--r--  drivers/ata/pata_legacy.c        |    1
-rw-r--r--  drivers/ata/pata_marvell.c       |  224
-rw-r--r--  drivers/ata/pata_mpiix.c         |    9
-rw-r--r--  drivers/ata/pata_netcell.c       |    8
-rw-r--r--  drivers/ata/pata_ns87410.c       |    9
-rw-r--r--  drivers/ata/pata_oldpiix.c       |    5
-rw-r--r--  drivers/ata/pata_opti.c          |   34
-rw-r--r--  drivers/ata/pata_optidma.c       |    9
-rw-r--r--  drivers/ata/pata_pcmcia.c        |    1
-rw-r--r--  drivers/ata/pata_pdc2027x.c      |    3
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c  |   65
-rw-r--r--  drivers/ata/pata_platform.c      |  295
-rw-r--r--  drivers/ata/pata_qdi.c           |    1
-rw-r--r--  drivers/ata/pata_radisys.c       |    5
-rw-r--r--  drivers/ata/pata_rz1000.c        |   49
-rw-r--r--  drivers/ata/pata_sc1200.c        |    9
-rw-r--r--  drivers/ata/pata_serverworks.c   |   33
-rw-r--r--  drivers/ata/pata_sil680.c        |   83
-rw-r--r--  drivers/ata/pata_sis.c           |    7
-rw-r--r--  drivers/ata/pata_sl82c105.c      |    1
-rw-r--r--  drivers/ata/pata_triflex.c       |    9
-rw-r--r--  drivers/ata/pata_via.c           |  109
-rw-r--r--  drivers/ata/pata_winbond.c       |  306
-rw-r--r--  drivers/ata/sata_nv.c            | 1044
-rw-r--r--  drivers/ata/sata_promise.c       |   46
-rw-r--r--  drivers/ata/sata_sil.c           |   16
-rw-r--r--  drivers/ata/sata_sil24.c         |   18
-rw-r--r--  drivers/ata/sata_sis.c           |   17
56 files changed, 3769 insertions, 942 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 03f6338acc8..984ab284382 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -328,6 +328,15 @@ config PATA_TRIFLEX
328 328
329 If unsure, say N. 329 If unsure, say N.
330 330
331config PATA_MARVELL
332 tristate "Marvell PATA support via legacy mode"
333 depends on PCI
334 help
335 This option enables limited support for the Marvell 88SE6145 ATA
336 controller.
337
338 If unsure, say N.
339
331config PATA_MPIIX 340config PATA_MPIIX
332 tristate "Intel PATA MPIIX support" 341 tristate "Intel PATA MPIIX support"
333 depends on PCI 342 depends on PCI
@@ -483,6 +492,32 @@ config PATA_WINBOND
483 492
484 If unsure, say N. 493 If unsure, say N.
485 494
495config PATA_WINBOND_VLB
496 tristate "Winbond W83759A VLB PATA support (Experimental)"
497 depends on ISA && EXPERIMENTAL
498 help
499 Support for the Winbond W83759A controller on Vesa Local Bus
500 systems.
501
502config PATA_PLATFORM
503 tristate "Generic platform device PATA support"
504 depends on EMBEDDED
505 help
506 This option enables support for generic directly connected ATA
507 devices commonly found on embedded systems.
508
509 If unsure, say N.
510
511config PATA_IXP4XX_CF
512 tristate "IXP4XX Compact Flash support"
513 depends on ARCH_IXP4XX
514 help
515 This option enables support for a Compact Flash connected on
516 the ixp4xx expansion bus. This driver had been written for
517 Loft/Avila boards in mind but can work with others.
518
519 If unsure, say N.
520
486endif 521endif
487endmenu 522endmenu
488 523
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 72243a677f9..bc3d81ae757 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
38obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o 38obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
39obj-$(CONFIG_PATA_OPTI) += pata_opti.o 39obj-$(CONFIG_PATA_OPTI) += pata_opti.o
40obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o 40obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
41obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
41obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o 42obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
42obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o 43obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
43obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o 44obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
@@ -51,8 +52,11 @@ obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
51obj-$(CONFIG_PATA_SIL680) += pata_sil680.o 52obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
52obj-$(CONFIG_PATA_VIA) += pata_via.o 53obj-$(CONFIG_PATA_VIA) += pata_via.o
53obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o 54obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
55obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
54obj-$(CONFIG_PATA_SIS) += pata_sis.o 56obj-$(CONFIG_PATA_SIS) += pata_sis.o
55obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o 57obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
58obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
59obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
56# Should be last but one libata driver 60# Should be last but one libata driver
57obj-$(CONFIG_ATA_GENERIC) += ata_generic.o 61obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
58# Should be last libata driver 62# Should be last libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index bddb14e91d3..f36da488a2c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
53 53
54enum { 54enum {
55 AHCI_PCI_BAR = 5, 55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_PORTS = 32,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */ 57 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff, 58 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0, 59 AHCI_USE_CLUSTERING = 0,
@@ -77,8 +78,9 @@ enum {
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ 78 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78 79
79 board_ahci = 0, 80 board_ahci = 0,
80 board_ahci_vt8251 = 1, 81 board_ahci_pi = 1,
81 board_ahci_ign_iferr = 2, 82 board_ahci_vt8251 = 2,
83 board_ahci_ign_iferr = 3,
82 84
83 /* global controller registers */ 85 /* global controller registers */
84 HOST_CAP = 0x00, /* host capabilities */ 86 HOST_CAP = 0x00, /* host capabilities */
@@ -167,9 +169,9 @@ enum {
167 AHCI_FLAG_MSI = (1 << 0), 169 AHCI_FLAG_MSI = (1 << 0),
168 170
169 /* ap->flags bits */ 171 /* ap->flags bits */
170 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24), 172 AHCI_FLAG_NO_NCQ = (1 << 24),
171 AHCI_FLAG_NO_NCQ = (1 << 25), 173 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
172 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 26), /* ignore IRQ_IF_ERR */ 174 AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */
173}; 175};
174 176
175struct ahci_cmd_hdr { 177struct ahci_cmd_hdr {
@@ -216,6 +218,7 @@ static u8 ahci_check_status(struct ata_port *ap);
216static void ahci_freeze(struct ata_port *ap); 218static void ahci_freeze(struct ata_port *ap);
217static void ahci_thaw(struct ata_port *ap); 219static void ahci_thaw(struct ata_port *ap);
218static void ahci_error_handler(struct ata_port *ap); 220static void ahci_error_handler(struct ata_port *ap);
221static void ahci_vt8251_error_handler(struct ata_port *ap);
219static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 222static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
220static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 223static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
221static int ahci_port_resume(struct ata_port *ap); 224static int ahci_port_resume(struct ata_port *ap);
@@ -275,6 +278,37 @@ static const struct ata_port_operations ahci_ops = {
275 .port_stop = ahci_port_stop, 278 .port_stop = ahci_port_stop,
276}; 279};
277 280
281static const struct ata_port_operations ahci_vt8251_ops = {
282 .port_disable = ata_port_disable,
283
284 .check_status = ahci_check_status,
285 .check_altstatus = ahci_check_status,
286 .dev_select = ata_noop_dev_select,
287
288 .tf_read = ahci_tf_read,
289
290 .qc_prep = ahci_qc_prep,
291 .qc_issue = ahci_qc_issue,
292
293 .irq_handler = ahci_interrupt,
294 .irq_clear = ahci_irq_clear,
295
296 .scr_read = ahci_scr_read,
297 .scr_write = ahci_scr_write,
298
299 .freeze = ahci_freeze,
300 .thaw = ahci_thaw,
301
302 .error_handler = ahci_vt8251_error_handler,
303 .post_internal_cmd = ahci_post_internal_cmd,
304
305 .port_suspend = ahci_port_suspend,
306 .port_resume = ahci_port_resume,
307
308 .port_start = ahci_port_start,
309 .port_stop = ahci_port_stop,
310};
311
278static const struct ata_port_info ahci_port_info[] = { 312static const struct ata_port_info ahci_port_info[] = {
279 /* board_ahci */ 313 /* board_ahci */
280 { 314 {
@@ -286,16 +320,26 @@ static const struct ata_port_info ahci_port_info[] = {
286 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 320 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
287 .port_ops = &ahci_ops, 321 .port_ops = &ahci_ops,
288 }, 322 },
323 /* board_ahci_pi */
324 {
325 .sht = &ahci_sht,
326 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
327 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
328 ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI,
329 .pio_mask = 0x1f, /* pio0-4 */
330 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
331 .port_ops = &ahci_ops,
332 },
289 /* board_ahci_vt8251 */ 333 /* board_ahci_vt8251 */
290 { 334 {
291 .sht = &ahci_sht, 335 .sht = &ahci_sht,
292 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 336 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
293 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 337 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
294 ATA_FLAG_SKIP_D2H_BSY | 338 ATA_FLAG_SKIP_D2H_BSY |
295 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ, 339 ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
296 .pio_mask = 0x1f, /* pio0-4 */ 340 .pio_mask = 0x1f, /* pio0-4 */
297 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 341 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
298 .port_ops = &ahci_ops, 342 .port_ops = &ahci_vt8251_ops,
299 }, 343 },
300 /* board_ahci_ign_iferr */ 344 /* board_ahci_ign_iferr */
301 { 345 {
@@ -322,22 +366,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
322 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ 366 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
323 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ 367 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
324 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ 368 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
325 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ 369 { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
326 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ 370 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
327 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ 371 { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
328 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ 372 { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
329 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ 373 { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
330 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ 374 { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
331 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ 375 { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
332 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ 376 { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
333 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ 377 { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
334 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ 378 { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
335 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ 379 { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
336 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ 380 { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
337 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ 381 { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
338 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ 382 { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
339 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ 383 { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
340 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ 384 { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
341 385
342 /* JMicron */ 386 /* JMicron */
343 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */ 387 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
@@ -372,6 +416,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
372 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */ 416 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
373 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ 417 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
374 418
419 /* Generic, PCI class code for AHCI */
420 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
421 0x010601, 0xffffff, board_ahci },
422
375 { } /* terminate list */ 423 { } /* terminate list */
376}; 424};
377 425
@@ -386,6 +434,11 @@ static struct pci_driver ahci_pci_driver = {
386}; 434};
387 435
388 436
437static inline int ahci_nr_ports(u32 cap)
438{
439 return (cap & 0x1f) + 1;
440}
441
389static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port) 442static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
390{ 443{
391 return base + 0x100 + (port * 0x80); 444 return base + 0x100 + (port * 0x80);
@@ -559,9 +612,6 @@ static void ahci_power_down(void __iomem *port_mmio, u32 cap)
559static void ahci_init_port(void __iomem *port_mmio, u32 cap, 612static void ahci_init_port(void __iomem *port_mmio, u32 cap,
560 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma) 613 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
561{ 614{
562 /* power up */
563 ahci_power_up(port_mmio, cap);
564
565 /* enable FIS reception */ 615 /* enable FIS reception */
566 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma); 616 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
567 617
@@ -587,19 +637,17 @@ static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
587 return rc; 637 return rc;
588 } 638 }
589 639
590 /* put device into slumber mode */
591 ahci_power_down(port_mmio, cap);
592
593 return 0; 640 return 0;
594} 641}
595 642
596static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev) 643static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
597{ 644{
598 u32 cap_save, tmp; 645 u32 cap_save, impl_save, tmp;
599 646
600 cap_save = readl(mmio + HOST_CAP); 647 cap_save = readl(mmio + HOST_CAP);
601 cap_save &= ( (1<<28) | (1<<17) ); 648 cap_save &= ( (1<<28) | (1<<17) );
602 cap_save |= (1 << 27); 649 cap_save |= (1 << 27);
650 impl_save = readl(mmio + HOST_PORTS_IMPL);
603 651
604 /* global controller reset */ 652 /* global controller reset */
605 tmp = readl(mmio + HOST_CTL); 653 tmp = readl(mmio + HOST_CTL);
@@ -620,10 +668,21 @@ static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
620 return -EIO; 668 return -EIO;
621 } 669 }
622 670
671 /* turn on AHCI mode */
623 writel(HOST_AHCI_EN, mmio + HOST_CTL); 672 writel(HOST_AHCI_EN, mmio + HOST_CTL);
624 (void) readl(mmio + HOST_CTL); /* flush */ 673 (void) readl(mmio + HOST_CTL); /* flush */
674
675 /* These write-once registers are normally cleared on reset.
676 * Restore BIOS values... which we HOPE were present before
677 * reset.
678 */
679 if (!impl_save) {
680 impl_save = (1 << ahci_nr_ports(cap_save)) - 1;
681 dev_printk(KERN_WARNING, &pdev->dev,
682 "PORTS_IMPL is zero, forcing 0x%x\n", impl_save);
683 }
625 writel(cap_save, mmio + HOST_CAP); 684 writel(cap_save, mmio + HOST_CAP);
626 writel(0xf, mmio + HOST_PORTS_IMPL); 685 writel(impl_save, mmio + HOST_PORTS_IMPL);
627 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ 686 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
628 687
629 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 688 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
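[Editor's note: the hunk above preserves HOST_PORTS_IMPL across the global controller reset and, when the write-once register reads back as zero, synthesizes a mask covering the first CAP.NP+1 ports. Below is a minimal user-space sketch of just that bit arithmetic; the register values are hypothetical and only the helper mirrors ahci_nr_ports() from the patch.]

#include <stdint.h>
#include <stdio.h>

/* Mirrors ahci_nr_ports() added above: CAP bits 4:0 hold "number of ports - 1". */
static unsigned int nr_ports_from_cap(uint32_t cap)
{
        return (cap & 0x1f) + 1;
}

int main(void)
{
        uint32_t cap = 0x3;        /* hypothetical: NP field = 3 -> 4 ports */
        uint32_t impl_save = 0;    /* write-once PORTS_IMPL value lost across reset */

        if (!impl_save) {
                /* same fallback as the patch: assume the low N ports exist */
                impl_save = (1u << nr_ports_from_cap(cap)) - 1;
                printf("PORTS_IMPL is zero, forcing 0x%x\n", impl_save);   /* 0xf */
        }
        return 0;
}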
@@ -639,7 +698,8 @@ static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
639} 698}
640 699
641static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev, 700static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
642 int n_ports, u32 cap) 701 int n_ports, unsigned int port_flags,
702 struct ahci_host_priv *hpriv)
643{ 703{
644 int i, rc; 704 int i, rc;
645 u32 tmp; 705 u32 tmp;
@@ -648,13 +708,12 @@ static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
648 void __iomem *port_mmio = ahci_port_base(mmio, i); 708 void __iomem *port_mmio = ahci_port_base(mmio, i);
649 const char *emsg = NULL; 709 const char *emsg = NULL;
650 710
651#if 0 /* BIOSen initialize this incorrectly */ 711 if ((port_flags & AHCI_FLAG_HONOR_PI) &&
652 if (!(hpriv->port_map & (1 << i))) 712 !(hpriv->port_map & (1 << i)))
653 continue; 713 continue;
654#endif
655 714
656 /* make sure port is not active */ 715 /* make sure port is not active */
657 rc = ahci_deinit_port(port_mmio, cap, &emsg); 716 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
658 if (rc) 717 if (rc)
659 dev_printk(KERN_WARNING, &pdev->dev, 718 dev_printk(KERN_WARNING, &pdev->dev,
660 "%s (%d)\n", emsg, rc); 719 "%s (%d)\n", emsg, rc);
@@ -729,17 +788,6 @@ static int ahci_clo(struct ata_port *ap)
729 return 0; 788 return 0;
730} 789}
731 790
732static int ahci_prereset(struct ata_port *ap)
733{
734 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
735 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
736 /* ATA_BUSY hasn't cleared, so send a CLO */
737 ahci_clo(ap);
738 }
739
740 return ata_std_prereset(ap);
741}
742
743static int ahci_softreset(struct ata_port *ap, unsigned int *class) 791static int ahci_softreset(struct ata_port *ap, unsigned int *class)
744{ 792{
745 struct ahci_port_priv *pp = ap->private_data; 793 struct ahci_port_priv *pp = ap->private_data;
@@ -877,6 +925,31 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
877 return rc; 925 return rc;
878} 926}
879 927
928static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
929{
930 void __iomem *mmio = ap->host->mmio_base;
931 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
932 int rc;
933
934 DPRINTK("ENTER\n");
935
936 ahci_stop_engine(port_mmio);
937
938 rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));
939
940 /* vt8251 needs SError cleared for the port to operate */
941 ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
942
943 ahci_start_engine(port_mmio);
944
945 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
946
947 /* vt8251 doesn't clear BSY on signature FIS reception,
948 * request follow-up softreset.
949 */
950 return rc ?: -EAGAIN;
951}
952
880static void ahci_postreset(struct ata_port *ap, unsigned int *class) 953static void ahci_postreset(struct ata_port *ap, unsigned int *class)
881{ 954{
882 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 955 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -1196,7 +1269,23 @@ static void ahci_error_handler(struct ata_port *ap)
1196 } 1269 }
1197 1270
1198 /* perform recovery */ 1271 /* perform recovery */
1199 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset, 1272 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
1273 ahci_postreset);
1274}
1275
1276static void ahci_vt8251_error_handler(struct ata_port *ap)
1277{
1278 void __iomem *mmio = ap->host->mmio_base;
1279 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1280
1281 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1282 /* restart engine */
1283 ahci_stop_engine(port_mmio);
1284 ahci_start_engine(port_mmio);
1285 }
1286
1287 /* perform recovery */
1288 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
1200 ahci_postreset); 1289 ahci_postreset);
1201} 1290}
1202 1291
@@ -1226,7 +1315,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1226 int rc; 1315 int rc;
1227 1316
1228 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg); 1317 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1229 if (rc) { 1318 if (rc == 0)
1319 ahci_power_down(port_mmio, hpriv->cap);
1320 else {
1230 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); 1321 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1231 ahci_init_port(port_mmio, hpriv->cap, 1322 ahci_init_port(port_mmio, hpriv->cap,
1232 pp->cmd_slot_dma, pp->rx_fis_dma); 1323 pp->cmd_slot_dma, pp->rx_fis_dma);
@@ -1242,6 +1333,7 @@ static int ahci_port_resume(struct ata_port *ap)
1242 void __iomem *mmio = ap->host->mmio_base; 1333 void __iomem *mmio = ap->host->mmio_base;
1243 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1334 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1244 1335
1336 ahci_power_up(port_mmio, hpriv->cap);
1245 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma); 1337 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1246 1338
1247 return 0; 1339 return 0;
@@ -1281,7 +1373,8 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
1281 if (rc) 1373 if (rc)
1282 return rc; 1374 return rc;
1283 1375
1284 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap); 1376 ahci_init_controller(mmio, pdev, host->n_ports,
1377 host->ports[0]->flags, hpriv);
1285 } 1378 }
1286 1379
1287 ata_host_resume(host); 1380 ata_host_resume(host);
@@ -1347,6 +1440,9 @@ static int ahci_port_start(struct ata_port *ap)
1347 1440
1348 ap->private_data = pp; 1441 ap->private_data = pp;
1349 1442
1443 /* power up port */
1444 ahci_power_up(port_mmio, hpriv->cap);
1445
1350 /* initialize port */ 1446 /* initialize port */
1351 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma); 1447 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1352 1448
@@ -1393,7 +1489,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1393 struct ahci_host_priv *hpriv = probe_ent->private_data; 1489 struct ahci_host_priv *hpriv = probe_ent->private_data;
1394 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 1490 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1395 void __iomem *mmio = probe_ent->mmio_base; 1491 void __iomem *mmio = probe_ent->mmio_base;
1396 unsigned int i, using_dac; 1492 unsigned int i, cap_n_ports, using_dac;
1397 int rc; 1493 int rc;
1398 1494
1399 rc = ahci_reset_controller(mmio, pdev); 1495 rc = ahci_reset_controller(mmio, pdev);
@@ -1402,10 +1498,34 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1402 1498
1403 hpriv->cap = readl(mmio + HOST_CAP); 1499 hpriv->cap = readl(mmio + HOST_CAP);
1404 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL); 1500 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1405 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1; 1501 cap_n_ports = ahci_nr_ports(hpriv->cap);
1406 1502
1407 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n", 1503 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1408 hpriv->cap, hpriv->port_map, probe_ent->n_ports); 1504 hpriv->cap, hpriv->port_map, cap_n_ports);
1505
1506 if (probe_ent->port_flags & AHCI_FLAG_HONOR_PI) {
1507 unsigned int n_ports = cap_n_ports;
1508 u32 port_map = hpriv->port_map;
1509 int max_port = 0;
1510
1511 for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
1512 if (port_map & (1 << i)) {
1513 n_ports--;
1514 port_map &= ~(1 << i);
1515 max_port = i;
1516 } else
1517 probe_ent->dummy_port_mask |= 1 << i;
1518 }
1519
1520 if (n_ports || port_map)
1521 dev_printk(KERN_WARNING, &pdev->dev,
1522 "nr_ports (%u) and implemented port map "
1523 "(0x%x) don't match\n",
1524 cap_n_ports, hpriv->port_map);
1525
1526 probe_ent->n_ports = max_port + 1;
1527 } else
1528 probe_ent->n_ports = cap_n_ports;
1409 1529
1410 using_dac = hpriv->cap & HOST_CAP_64; 1530 using_dac = hpriv->cap & HOST_CAP_64;
1411 if (using_dac && 1531 if (using_dac &&
@@ -1437,7 +1557,8 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1437 for (i = 0; i < probe_ent->n_ports; i++) 1557 for (i = 0; i < probe_ent->n_ports; i++)
1438 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i); 1558 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1439 1559
1440 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap); 1560 ahci_init_controller(mmio, pdev, probe_ent->n_ports,
1561 probe_ent->port_flags, hpriv);
1441 1562
1442 pci_set_master(pdev); 1563 pci_set_master(pdev);
1443 1564
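[Editor's note: to make the AHCI_FLAG_HONOR_PI path added to ahci_host_init() above easier to follow, here is a stand-alone sketch of the PORTS_IMPL walk that derives how many port slots to register and which of them become dummies. The CAP/PORTS_IMPL sample values are invented for illustration and do not come from real hardware.]

#include <stdint.h>
#include <stdio.h>

#define AHCI_MAX_PORTS 32

int main(void)
{
        unsigned int cap_n_ports = 2;    /* hypothetical: (cap & 0x1f) + 1 */
        uint32_t port_map = 0x5;         /* hypothetical PI: ports 0 and 2 implemented */
        uint32_t dummy_port_mask = 0;
        unsigned int n_ports = cap_n_ports;
        unsigned int i, max_port = 0;

        for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
                if (port_map & (1u << i)) {
                        n_ports--;
                        port_map &= ~(1u << i);
                        max_port = i;
                } else
                        dummy_port_mask |= 1u << i;
        }

        if (n_ports || port_map)
                printf("nr_ports and implemented port map don't match\n");

        /* registers max_port + 1 = 3 slots; slot 1 becomes a dummy port */
        printf("n_ports=%u dummy_port_mask=0x%x\n", max_port + 1, dummy_port_mask);
        return 0;
}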
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 4a80ff9312b..908751d27e7 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
26#include <linux/libata.h> 26#include <linux/libata.h>
27 27
28#define DRV_NAME "ata_generic" 28#define DRV_NAME "ata_generic"
29#define DRV_VERSION "0.2.6" 29#define DRV_VERSION "0.2.10"
30 30
31/* 31/*
32 * A generic parallel ATA driver using libata 32 * A generic parallel ATA driver using libata
@@ -109,7 +109,6 @@ static struct scsi_host_template generic_sht = {
109 .can_queue = ATA_DEF_QUEUE, 109 .can_queue = ATA_DEF_QUEUE,
110 .this_id = ATA_SHT_THIS_ID, 110 .this_id = ATA_SHT_THIS_ID,
111 .sg_tablesize = LIBATA_MAX_PRD, 111 .sg_tablesize = LIBATA_MAX_PRD,
112 .max_sectors = ATA_MAX_SECTORS,
113 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 112 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
114 .emulated = ATA_SHT_EMULATED, 113 .emulated = ATA_SHT_EMULATED,
115 .use_clustering = ATA_SHT_USE_CLUSTERING, 114 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -118,6 +117,8 @@ static struct scsi_host_template generic_sht = {
118 .slave_configure = ata_scsi_slave_config, 117 .slave_configure = ata_scsi_slave_config,
119 .slave_destroy = ata_scsi_slave_destroy, 118 .slave_destroy = ata_scsi_slave_destroy,
120 .bios_param = ata_std_bios_param, 119 .bios_param = ata_std_bios_param,
120 .resume = ata_scsi_device_resume,
121 .suspend = ata_scsi_device_suspend,
121}; 122};
122 123
123static struct ata_port_operations generic_port_ops = { 124static struct ata_port_operations generic_port_ops = {
@@ -226,12 +227,14 @@ static struct pci_driver ata_generic_pci_driver = {
226 .name = DRV_NAME, 227 .name = DRV_NAME,
227 .id_table = ata_generic, 228 .id_table = ata_generic,
228 .probe = ata_generic_init_one, 229 .probe = ata_generic_init_one,
229 .remove = ata_pci_remove_one 230 .remove = ata_pci_remove_one,
231 .suspend = ata_pci_device_suspend,
232 .resume = ata_pci_device_resume,
230}; 233};
231 234
232static int __init ata_generic_init(void) 235static int __init ata_generic_init(void)
233{ 236{
234 return pci_module_init(&ata_generic_pci_driver); 237 return pci_register_driver(&ata_generic_pci_driver);
235} 238}
236 239
237 240
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 720174d628f..c7de0bb1591 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -40,7 +40,7 @@
40 * Documentation 40 * Documentation
41 * Publically available from Intel web site. Errata documentation 41 * Publically available from Intel web site. Errata documentation
42 * is also publically available. As an aide to anyone hacking on this 42 * is also publically available. As an aide to anyone hacking on this
43 * driver the list of errata that are relevant is below.going back to 43 * driver the list of errata that are relevant is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find. 44 * PIIX4. Older device documentation is now a bit tricky to find.
45 * 45 *
46 * The chipsets all follow very much the same design. The orginal Triton 46 * The chipsets all follow very much the same design. The orginal Triton
@@ -93,7 +93,7 @@
93#include <linux/libata.h> 93#include <linux/libata.h>
94 94
95#define DRV_NAME "ata_piix" 95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.00ac6" 96#define DRV_VERSION "2.00ac7"
97 97
98enum { 98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ 99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@@ -101,11 +101,13 @@ enum {
101 ICH5_PCS = 0x92, /* port control and status */ 101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */ 102 PIIX_SCC = 0x0A, /* sub-class code register */
103 103
104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */ 104 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ 105 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ 106 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 107
108 PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
109 PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
110
109 /* combined mode. if set, PATA is channel 0. 111 /* combined mode. if set, PATA is channel 0.
110 * if clear, PATA is channel 1. 112 * if clear, PATA is channel 1.
111 */ 113 */
@@ -122,11 +124,10 @@ enum {
122 ich_pata_100 = 3, /* ICH up to UDMA 100 */ 124 ich_pata_100 = 3, /* ICH up to UDMA 100 */
123 ich_pata_133 = 4, /* ICH up to UDMA 133 */ 125 ich_pata_133 = 4, /* ICH up to UDMA 133 */
124 ich5_sata = 5, 126 ich5_sata = 5,
125 esb_sata = 6, 127 ich6_sata = 6,
126 ich6_sata = 7, 128 ich6_sata_ahci = 7,
127 ich6_sata_ahci = 8, 129 ich6m_sata_ahci = 8,
128 ich6m_sata_ahci = 9, 130 ich8_sata_ahci = 9,
129 ich8_sata_ahci = 10,
130 131
131 /* constants for mapping table */ 132 /* constants for mapping table */
132 P0 = 0, /* port 0 */ 133 P0 = 0, /* port 0 */
@@ -143,13 +144,11 @@ enum {
143struct piix_map_db { 144struct piix_map_db {
144 const u32 mask; 145 const u32 mask;
145 const u16 port_enable; 146 const u16 port_enable;
146 const int present_shift;
147 const int map[][4]; 147 const int map[][4];
148}; 148};
149 149
150struct piix_host_priv { 150struct piix_host_priv {
151 const int *map; 151 const int *map;
152 const struct piix_map_db *map_db;
153}; 152};
154 153
155static int piix_init_one (struct pci_dev *pdev, 154static int piix_init_one (struct pci_dev *pdev,
@@ -214,9 +213,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
214 /* 82801EB (ICH5) */ 213 /* 82801EB (ICH5) */
215 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 214 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
216 /* 6300ESB (ICH5 variant with broken PCS present bits) */ 215 /* 6300ESB (ICH5 variant with broken PCS present bits) */
217 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, 216 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
218 /* 6300ESB pretending RAID */ 217 /* 6300ESB pretending RAID */
219 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, 218 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
220 /* 82801FB/FW (ICH6/ICH6W) */ 219 /* 82801FB/FW (ICH6/ICH6W) */
221 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 220 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
222 /* 82801FR/FRW (ICH6R/ICH6RW) */ 221 /* 82801FR/FRW (ICH6R/ICH6RW) */
@@ -367,7 +366,6 @@ static const struct ata_port_operations piix_sata_ops = {
367static const struct piix_map_db ich5_map_db = { 366static const struct piix_map_db ich5_map_db = {
368 .mask = 0x7, 367 .mask = 0x7,
369 .port_enable = 0x3, 368 .port_enable = 0x3,
370 .present_shift = 4,
371 .map = { 369 .map = {
372 /* PM PS SM SS MAP */ 370 /* PM PS SM SS MAP */
373 { P0, NA, P1, NA }, /* 000b */ 371 { P0, NA, P1, NA }, /* 000b */
@@ -384,7 +382,6 @@ static const struct piix_map_db ich5_map_db = {
384static const struct piix_map_db ich6_map_db = { 382static const struct piix_map_db ich6_map_db = {
385 .mask = 0x3, 383 .mask = 0x3,
386 .port_enable = 0xf, 384 .port_enable = 0xf,
387 .present_shift = 4,
388 .map = { 385 .map = {
389 /* PM PS SM SS MAP */ 386 /* PM PS SM SS MAP */
390 { P0, P2, P1, P3 }, /* 00b */ 387 { P0, P2, P1, P3 }, /* 00b */
@@ -397,7 +394,6 @@ static const struct piix_map_db ich6_map_db = {
397static const struct piix_map_db ich6m_map_db = { 394static const struct piix_map_db ich6m_map_db = {
398 .mask = 0x3, 395 .mask = 0x3,
399 .port_enable = 0x5, 396 .port_enable = 0x5,
400 .present_shift = 4,
401 397
402 /* Map 01b isn't specified in the doc but some notebooks use 398 /* Map 01b isn't specified in the doc but some notebooks use
403 * it anyway. MAP 01b have been spotted on both ICH6M and 399 * it anyway. MAP 01b have been spotted on both ICH6M and
@@ -415,7 +411,6 @@ static const struct piix_map_db ich6m_map_db = {
415static const struct piix_map_db ich8_map_db = { 411static const struct piix_map_db ich8_map_db = {
416 .mask = 0x3, 412 .mask = 0x3,
417 .port_enable = 0x3, 413 .port_enable = 0x3,
418 .present_shift = 8,
419 .map = { 414 .map = {
420 /* PM PS SM SS MAP */ 415 /* PM PS SM SS MAP */
421 { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */ 416 { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
@@ -427,7 +422,6 @@ static const struct piix_map_db ich8_map_db = {
427 422
428static const struct piix_map_db *piix_map_db_table[] = { 423static const struct piix_map_db *piix_map_db_table[] = {
429 [ich5_sata] = &ich5_map_db, 424 [ich5_sata] = &ich5_map_db,
430 [esb_sata] = &ich5_map_db,
431 [ich6_sata] = &ich6_map_db, 425 [ich6_sata] = &ich6_map_db,
432 [ich6_sata_ahci] = &ich6_map_db, 426 [ich6_sata_ahci] = &ich6_map_db,
433 [ich6m_sata_ahci] = &ich6m_map_db, 427 [ich6m_sata_ahci] = &ich6m_map_db,
@@ -438,7 +432,7 @@ static struct ata_port_info piix_port_info[] = {
438 /* piix_pata_33: 0: PIIX3 or 4 at 33MHz */ 432 /* piix_pata_33: 0: PIIX3 or 4 at 33MHz */
439 { 433 {
440 .sht = &piix_sht, 434 .sht = &piix_sht,
441 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 435 .flags = PIIX_PATA_FLAGS,
442 .pio_mask = 0x1f, /* pio0-4 */ 436 .pio_mask = 0x1f, /* pio0-4 */
443 .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */ 437 .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
444 .udma_mask = ATA_UDMA_MASK_40C, 438 .udma_mask = ATA_UDMA_MASK_40C,
@@ -448,7 +442,7 @@ static struct ata_port_info piix_port_info[] = {
448 /* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/ 442 /* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/
449 { 443 {
450 .sht = &piix_sht, 444 .sht = &piix_sht,
451 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS, 445 .flags = PIIX_PATA_FLAGS,
452 .pio_mask = 0x1f, /* pio 0-4 */ 446 .pio_mask = 0x1f, /* pio 0-4 */
453 .mwdma_mask = 0x06, /* Check: maybe 0x07 */ 447 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
454 .udma_mask = ATA_UDMA2, /* UDMA33 */ 448 .udma_mask = ATA_UDMA2, /* UDMA33 */
@@ -457,7 +451,7 @@ static struct ata_port_info piix_port_info[] = {
457 /* ich_pata_66: 2 ICH controllers up to 66MHz */ 451 /* ich_pata_66: 2 ICH controllers up to 66MHz */
458 { 452 {
459 .sht = &piix_sht, 453 .sht = &piix_sht,
460 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS, 454 .flags = PIIX_PATA_FLAGS,
461 .pio_mask = 0x1f, /* pio 0-4 */ 455 .pio_mask = 0x1f, /* pio 0-4 */
462 .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */ 456 .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
463 .udma_mask = ATA_UDMA4, 457 .udma_mask = ATA_UDMA4,
@@ -467,7 +461,7 @@ static struct ata_port_info piix_port_info[] = {
467 /* ich_pata_100: 3 */ 461 /* ich_pata_100: 3 */
468 { 462 {
469 .sht = &piix_sht, 463 .sht = &piix_sht,
470 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, 464 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
471 .pio_mask = 0x1f, /* pio0-4 */ 465 .pio_mask = 0x1f, /* pio0-4 */
472 .mwdma_mask = 0x06, /* mwdma1-2 */ 466 .mwdma_mask = 0x06, /* mwdma1-2 */
473 .udma_mask = ATA_UDMA5, /* udma0-5 */ 467 .udma_mask = ATA_UDMA5, /* udma0-5 */
@@ -477,7 +471,7 @@ static struct ata_port_info piix_port_info[] = {
477 /* ich_pata_133: 4 ICH with full UDMA6 */ 471 /* ich_pata_133: 4 ICH with full UDMA6 */
478 { 472 {
479 .sht = &piix_sht, 473 .sht = &piix_sht,
480 .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, 474 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
481 .pio_mask = 0x1f, /* pio 0-4 */ 475 .pio_mask = 0x1f, /* pio 0-4 */
482 .mwdma_mask = 0x06, /* Check: maybe 0x07 */ 476 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
483 .udma_mask = ATA_UDMA6, /* UDMA133 */ 477 .udma_mask = ATA_UDMA6, /* UDMA133 */
@@ -487,41 +481,27 @@ static struct ata_port_info piix_port_info[] = {
487 /* ich5_sata: 5 */ 481 /* ich5_sata: 5 */
488 { 482 {
489 .sht = &piix_sht, 483 .sht = &piix_sht,
490 .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | 484 .flags = PIIX_SATA_FLAGS,
491 PIIX_FLAG_IGNORE_PCS,
492 .pio_mask = 0x1f, /* pio0-4 */
493 .mwdma_mask = 0x07, /* mwdma0-2 */
494 .udma_mask = 0x7f, /* udma0-6 */
495 .port_ops = &piix_sata_ops,
496 },
497
498 /* i6300esb_sata: 6 */
499 {
500 .sht = &piix_sht,
501 .flags = ATA_FLAG_SATA |
502 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
503 .pio_mask = 0x1f, /* pio0-4 */ 485 .pio_mask = 0x1f, /* pio0-4 */
504 .mwdma_mask = 0x07, /* mwdma0-2 */ 486 .mwdma_mask = 0x07, /* mwdma0-2 */
505 .udma_mask = 0x7f, /* udma0-6 */ 487 .udma_mask = 0x7f, /* udma0-6 */
506 .port_ops = &piix_sata_ops, 488 .port_ops = &piix_sata_ops,
507 }, 489 },
508 490
509 /* ich6_sata: 7 */ 491 /* ich6_sata: 6 */
510 { 492 {
511 .sht = &piix_sht, 493 .sht = &piix_sht,
512 .flags = ATA_FLAG_SATA | 494 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
513 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
514 .pio_mask = 0x1f, /* pio0-4 */ 495 .pio_mask = 0x1f, /* pio0-4 */
515 .mwdma_mask = 0x07, /* mwdma0-2 */ 496 .mwdma_mask = 0x07, /* mwdma0-2 */
516 .udma_mask = 0x7f, /* udma0-6 */ 497 .udma_mask = 0x7f, /* udma0-6 */
517 .port_ops = &piix_sata_ops, 498 .port_ops = &piix_sata_ops,
518 }, 499 },
519 500
520 /* ich6_sata_ahci: 8 */ 501 /* ich6_sata_ahci: 7 */
521 { 502 {
522 .sht = &piix_sht, 503 .sht = &piix_sht,
523 .flags = ATA_FLAG_SATA | 504 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
524 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
525 PIIX_FLAG_AHCI, 505 PIIX_FLAG_AHCI,
526 .pio_mask = 0x1f, /* pio0-4 */ 506 .pio_mask = 0x1f, /* pio0-4 */
527 .mwdma_mask = 0x07, /* mwdma0-2 */ 507 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -529,11 +509,10 @@ static struct ata_port_info piix_port_info[] = {
529 .port_ops = &piix_sata_ops, 509 .port_ops = &piix_sata_ops,
530 }, 510 },
531 511
532 /* ich6m_sata_ahci: 9 */ 512 /* ich6m_sata_ahci: 8 */
533 { 513 {
534 .sht = &piix_sht, 514 .sht = &piix_sht,
535 .flags = ATA_FLAG_SATA | 515 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
536 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
537 PIIX_FLAG_AHCI, 516 PIIX_FLAG_AHCI,
538 .pio_mask = 0x1f, /* pio0-4 */ 517 .pio_mask = 0x1f, /* pio0-4 */
539 .mwdma_mask = 0x07, /* mwdma0-2 */ 518 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -541,11 +520,10 @@ static struct ata_port_info piix_port_info[] = {
541 .port_ops = &piix_sata_ops, 520 .port_ops = &piix_sata_ops,
542 }, 521 },
543 522
544 /* ich8_sata_ahci: 10 */ 523 /* ich8_sata_ahci: 9 */
545 { 524 {
546 .sht = &piix_sht, 525 .sht = &piix_sht,
547 .flags = ATA_FLAG_SATA | 526 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
548 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
549 PIIX_FLAG_AHCI, 527 PIIX_FLAG_AHCI,
550 .pio_mask = 0x1f, /* pio0-4 */ 528 .pio_mask = 0x1f, /* pio0-4 */
551 .mwdma_mask = 0x07, /* mwdma0-2 */ 529 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -566,10 +544,22 @@ MODULE_LICENSE("GPL");
566MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 544MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
567MODULE_VERSION(DRV_VERSION); 545MODULE_VERSION(DRV_VERSION);
568 546
569static int force_pcs = 0; 547struct ich_laptop {
570module_param(force_pcs, int, 0444); 548 u16 device;
571MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " 549 u16 subvendor;
572 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); 550 u16 subdevice;
551};
552
553/*
554 * List of laptops that use short cables rather than 80 wire
555 */
556
557static const struct ich_laptop ich_laptop[] = {
558 /* devid, subvendor, subdev */
559 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
560 /* end marker */
561 { 0, }
562};
573 563
574/** 564/**
575 * piix_pata_cbl_detect - Probe host controller cable detect info 565 * piix_pata_cbl_detect - Probe host controller cable detect info
@@ -585,12 +575,24 @@ MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
585static void ich_pata_cbl_detect(struct ata_port *ap) 575static void ich_pata_cbl_detect(struct ata_port *ap)
586{ 576{
587 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 577 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
578 const struct ich_laptop *lap = &ich_laptop[0];
588 u8 tmp, mask; 579 u8 tmp, mask;
589 580
590 /* no 80c support in host controller? */ 581 /* no 80c support in host controller? */
591 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0) 582 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
592 goto cbl40; 583 goto cbl40;
593 584
585 /* Check for specials - Acer Aspire 5602WLMi */
586 while (lap->device) {
587 if (lap->device == pdev->device &&
588 lap->subvendor == pdev->subsystem_vendor &&
589 lap->subdevice == pdev->subsystem_device) {
590 ap->cbl = ATA_CBL_PATA40_SHORT;
591 return;
592 }
593 lap++;
594 }
595
594 /* check BIOS cable detect results */ 596 /* check BIOS cable detect results */
595 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; 597 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
596 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); 598 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
@@ -659,84 +661,9 @@ static void ich_pata_error_handler(struct ata_port *ap)
659 ata_std_postreset); 661 ata_std_postreset);
660} 662}
661 663
662/**
663 * piix_sata_present_mask - determine present mask for SATA host controller
664 * @ap: Target port
665 *
666 * Reads SATA PCI device's PCI config register Port Configuration
667 * and Status (PCS) to determine port and device availability.
668 *
669 * LOCKING:
670 * None (inherited from caller).
671 *
672 * RETURNS:
673 * determined present_mask
674 */
675static unsigned int piix_sata_present_mask(struct ata_port *ap)
676{
677 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
678 struct piix_host_priv *hpriv = ap->host->private_data;
679 const unsigned int *map = hpriv->map;
680 int base = 2 * ap->port_no;
681 unsigned int present_mask = 0;
682 int port, i;
683 u16 pcs;
684
685 pci_read_config_word(pdev, ICH5_PCS, &pcs);
686 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
687
688 for (i = 0; i < 2; i++) {
689 port = map[base + i];
690 if (port < 0)
691 continue;
692 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
693 (pcs & 1 << (hpriv->map_db->present_shift + port)))
694 present_mask |= 1 << i;
695 }
696
697 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
698 ap->id, pcs, present_mask);
699
700 return present_mask;
701}
702
703/**
704 * piix_sata_softreset - reset SATA host port via ATA SRST
705 * @ap: port to reset
706 * @classes: resulting classes of attached devices
707 *
708 * Reset SATA host port via ATA SRST. On controllers with
709 * reliable PCS present bits, the bits are used to determine
710 * device presence.
711 *
712 * LOCKING:
713 * Kernel thread context (may sleep)
714 *
715 * RETURNS:
716 * 0 on success, -errno otherwise.
717 */
718static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
719{
720 unsigned int present_mask;
721 int i, rc;
722
723 present_mask = piix_sata_present_mask(ap);
724
725 rc = ata_std_softreset(ap, classes);
726 if (rc)
727 return rc;
728
729 for (i = 0; i < ATA_MAX_DEVICES; i++) {
730 if (!(present_mask & (1 << i)))
731 classes[i] = ATA_DEV_NONE;
732 }
733
734 return 0;
735}
736
737static void piix_sata_error_handler(struct ata_port *ap) 664static void piix_sata_error_handler(struct ata_port *ap)
738{ 665{
739 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, 666 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
740 ata_std_postreset); 667 ata_std_postreset);
741} 668}
742 669
@@ -1051,18 +978,6 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
1051 pci_write_config_word(pdev, ICH5_PCS, new_pcs); 978 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
1052 msleep(150); 979 msleep(150);
1053 } 980 }
1054
1055 if (force_pcs == 1) {
1056 dev_printk(KERN_INFO, &pdev->dev,
1057 "force ignoring PCS (0x%x)\n", new_pcs);
1058 pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
1059 pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
1060 } else if (force_pcs == 2) {
1061 dev_printk(KERN_INFO, &pdev->dev,
1062 "force honoring PCS (0x%x)\n", new_pcs);
1063 pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
1064 pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
1065 }
1066} 981}
1067 982
1068static void __devinit piix_init_sata_map(struct pci_dev *pdev, 983static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -1112,7 +1027,6 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
1112 "invalid MAP value %u\n", map_value); 1027 "invalid MAP value %u\n", map_value);
1113 1028
1114 hpriv->map = map; 1029 hpriv->map = map;
1115 hpriv->map_db = map_db;
1116} 1030}
1117 1031
1118/** 1032/**
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 915a55a6cc1..f8ec3896b79 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -199,7 +199,8 @@ static const u8 ata_rw_cmds[] = {
199 199
200/** 200/**
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure 202 * @tf: command to examine and configure
203 * @dev: device tf belongs to
203 * 204 *
204 * Examine the device configuration and tf->flags to calculate 205 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use. 206 * the proper read/write commands and protocol to use.
@@ -207,10 +208,8 @@ static const u8 ata_rw_cmds[] = {
207 * LOCKING: 208 * LOCKING:
208 * caller. 209 * caller.
209 */ 210 */
210int ata_rwcmd_protocol(struct ata_queued_cmd *qc) 211static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
211{ 212{
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
214 u8 cmd; 213 u8 cmd;
215 214
216 int index, fua, lba48, write; 215 int index, fua, lba48, write;
@@ -222,7 +221,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
222 if (dev->flags & ATA_DFLAG_PIO) { 221 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO; 222 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8; 223 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { 224 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */ 225 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO; 226 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8; 227 index = dev->multi_count ? 0 : 8;
@@ -240,6 +239,174 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
240} 239}
241 240
242/** 241/**
242 * ata_tf_read_block - Read block address from ATA taskfile
243 * @tf: ATA taskfile of interest
244 * @dev: ATA device @tf belongs to
245 *
246 * LOCKING:
247 * None.
248 *
249 * Read block address from @tf. This function can handle all
250 * three address formats - LBA, LBA48 and CHS. tf->protocol and
251 * flags select the address format to use.
252 *
253 * RETURNS:
254 * Block address read from @tf.
255 */
256u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
257{
258 u64 block = 0;
259
260 if (tf->flags & ATA_TFLAG_LBA) {
261 if (tf->flags & ATA_TFLAG_LBA48) {
262 block |= (u64)tf->hob_lbah << 40;
263 block |= (u64)tf->hob_lbam << 32;
264 block |= tf->hob_lbal << 24;
265 } else
266 block |= (tf->device & 0xf) << 24;
267
268 block |= tf->lbah << 16;
269 block |= tf->lbam << 8;
270 block |= tf->lbal;
271 } else {
272 u32 cyl, head, sect;
273
274 cyl = tf->lbam | (tf->lbah << 8);
275 head = tf->device & 0xf;
276 sect = tf->lbal;
277
278 block = (cyl * dev->heads + head) * dev->sectors + sect;
279 }
280
281 return block;
282}
283
284/**
285 * ata_build_rw_tf - Build ATA taskfile for given read/write request
286 * @tf: Target ATA taskfile
287 * @dev: ATA device @tf belongs to
288 * @block: Block address
289 * @n_block: Number of blocks
290 * @tf_flags: RW/FUA etc...
291 * @tag: tag
292 *
293 * LOCKING:
294 * None.
295 *
296 * Build ATA taskfile @tf for read/write request described by
297 * @block, @n_block, @tf_flags and @tag on @dev.
298 *
299 * RETURNS:
300 *
301 * 0 on success, -ERANGE if the request is too large for @dev,
302 * -EINVAL if the request is invalid.
303 */
304int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
305 u64 block, u32 n_block, unsigned int tf_flags,
306 unsigned int tag)
307{
308 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
309 tf->flags |= tf_flags;
310
311 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
312 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
313 likely(tag != ATA_TAG_INTERNAL)) {
314 /* yay, NCQ */
315 if (!lba_48_ok(block, n_block))
316 return -ERANGE;
317
318 tf->protocol = ATA_PROT_NCQ;
319 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
320
321 if (tf->flags & ATA_TFLAG_WRITE)
322 tf->command = ATA_CMD_FPDMA_WRITE;
323 else
324 tf->command = ATA_CMD_FPDMA_READ;
325
326 tf->nsect = tag << 3;
327 tf->hob_feature = (n_block >> 8) & 0xff;
328 tf->feature = n_block & 0xff;
329
330 tf->hob_lbah = (block >> 40) & 0xff;
331 tf->hob_lbam = (block >> 32) & 0xff;
332 tf->hob_lbal = (block >> 24) & 0xff;
333 tf->lbah = (block >> 16) & 0xff;
334 tf->lbam = (block >> 8) & 0xff;
335 tf->lbal = block & 0xff;
336
337 tf->device = 1 << 6;
338 if (tf->flags & ATA_TFLAG_FUA)
339 tf->device |= 1 << 7;
340 } else if (dev->flags & ATA_DFLAG_LBA) {
341 tf->flags |= ATA_TFLAG_LBA;
342
343 if (lba_28_ok(block, n_block)) {
344 /* use LBA28 */
345 tf->device |= (block >> 24) & 0xf;
346 } else if (lba_48_ok(block, n_block)) {
347 if (!(dev->flags & ATA_DFLAG_LBA48))
348 return -ERANGE;
349
350 /* use LBA48 */
351 tf->flags |= ATA_TFLAG_LBA48;
352
353 tf->hob_nsect = (n_block >> 8) & 0xff;
354
355 tf->hob_lbah = (block >> 40) & 0xff;
356 tf->hob_lbam = (block >> 32) & 0xff;
357 tf->hob_lbal = (block >> 24) & 0xff;
358 } else
359 /* request too large even for LBA48 */
360 return -ERANGE;
361
362 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
363 return -EINVAL;
364
365 tf->nsect = n_block & 0xff;
366
367 tf->lbah = (block >> 16) & 0xff;
368 tf->lbam = (block >> 8) & 0xff;
369 tf->lbal = block & 0xff;
370
371 tf->device |= ATA_LBA;
372 } else {
373 /* CHS */
374 u32 sect, head, cyl, track;
375
376 /* The request -may- be too large for CHS addressing. */
377 if (!lba_28_ok(block, n_block))
378 return -ERANGE;
379
380 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
381 return -EINVAL;
382
383 /* Convert LBA to CHS */
384 track = (u32)block / dev->sectors;
385 cyl = track / dev->heads;
386 head = track % dev->heads;
387 sect = (u32)block % dev->sectors + 1;
388
389 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
390 (u32)block, track, cyl, head, sect);
391
392 /* Check whether the converted CHS can fit.
393 Cylinder: 0-65535
394 Head: 0-15
395 Sector: 1-255*/
396 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
397 return -ERANGE;
398
399 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
400 tf->lbal = sect;
401 tf->lbam = cyl;
402 tf->lbah = cyl >> 8;
403 tf->device |= head;
404 }
405
406 return 0;
407}
408
409/**
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 410 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask 411 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask 412 * @mwdma_mask: mwdma_mask
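[Editor's note: the CHS branch of ata_build_rw_tf() in the hunk above converts an LBA into cylinder/head/sector values before packing the taskfile. A small worked example of that arithmetic, using a made-up drive geometry:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t heads = 16, sectors = 63;    /* hypothetical drive geometry */
        uint32_t block = 123456;              /* LBA to be converted */
        uint32_t track, cyl, head, sect;

        track = block / sectors;              /* 1959 */
        cyl   = track / heads;                /* 122 */
        head  = track % heads;                /* 7 */
        sect  = block % sectors + 1;          /* 40 */

        /* same range check as the patch: cyl 0-65535, head 0-15, sect 1-255 */
        if ((cyl >> 16) || (head >> 4) || (sect >> 8) || !sect)
                printf("request too large for CHS addressing\n");
        else
                printf("LBA %u -> C/H/S %u/%u/%u\n", block, cyl, head, sect);
        return 0;
}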
@@ -999,13 +1166,13 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
999} 1166}
1000 1167
1001/** 1168/**
1002 * ata_exec_internal - execute libata internal command 1169 * ata_exec_internal_sg - execute libata internal command
1003 * @dev: Device to which the command is sent 1170 * @dev: Device to which the command is sent
1004 * @tf: Taskfile registers for the command and the result 1171 * @tf: Taskfile registers for the command and the result
1005 * @cdb: CDB for packet command 1172 * @cdb: CDB for packet command
1006 * @dma_dir: Data tranfer direction of the command 1173 * @dma_dir: Data tranfer direction of the command
1007 * @buf: Data buffer of the command 1174 * @sg: sg list for the data buffer of the command
1008 * @buflen: Length of data buffer 1175 * @n_elem: Number of sg entries
1009 * 1176 *
1010 * Executes libata internal command with timeout. @tf contains 1177 * Executes libata internal command with timeout. @tf contains
1011 * command on entry and result on return. Timeout and error 1178 * command on entry and result on return. Timeout and error
@@ -1019,9 +1186,10 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1019 * RETURNS: 1186 * RETURNS:
1020 * Zero on success, AC_ERR_* mask on failure 1187 * Zero on success, AC_ERR_* mask on failure
1021 */ 1188 */
1022unsigned ata_exec_internal(struct ata_device *dev, 1189unsigned ata_exec_internal_sg(struct ata_device *dev,
1023 struct ata_taskfile *tf, const u8 *cdb, 1190 struct ata_taskfile *tf, const u8 *cdb,
1024 int dma_dir, void *buf, unsigned int buflen) 1191 int dma_dir, struct scatterlist *sg,
1192 unsigned int n_elem)
1025{ 1193{
1026 struct ata_port *ap = dev->ap; 1194 struct ata_port *ap = dev->ap;
1027 u8 command = tf->command; 1195 u8 command = tf->command;
@@ -1077,7 +1245,12 @@ unsigned ata_exec_internal(struct ata_device *dev,
1077 qc->flags |= ATA_QCFLAG_RESULT_TF; 1245 qc->flags |= ATA_QCFLAG_RESULT_TF;
1078 qc->dma_dir = dma_dir; 1246 qc->dma_dir = dma_dir;
1079 if (dma_dir != DMA_NONE) { 1247 if (dma_dir != DMA_NONE) {
1080 ata_sg_init_one(qc, buf, buflen); 1248 unsigned int i, buflen = 0;
1249
1250 for (i = 0; i < n_elem; i++)
1251 buflen += sg[i].length;
1252
1253 ata_sg_init(qc, sg, n_elem);
1081 qc->nsect = buflen / ATA_SECT_SIZE; 1254 qc->nsect = buflen / ATA_SECT_SIZE;
1082 } 1255 }
1083 1256
@@ -1161,6 +1334,35 @@ unsigned ata_exec_internal(struct ata_device *dev,
1161} 1334}
1162 1335
1163/** 1336/**
1337 * ata_exec_internal_sg - execute libata internal command
1338 * @dev: Device to which the command is sent
1339 * @tf: Taskfile registers for the command and the result
1340 * @cdb: CDB for packet command
1341 * @dma_dir: Data tranfer direction of the command
1342 * @buf: Data buffer of the command
1343 * @buflen: Length of data buffer
1344 *
1345 * Wrapper around ata_exec_internal_sg() which takes simple
1346 * buffer instead of sg list.
1347 *
1348 * LOCKING:
1349 * None. Should be called with kernel context, might sleep.
1350 *
1351 * RETURNS:
1352 * Zero on success, AC_ERR_* mask on failure
1353 */
1354unsigned ata_exec_internal(struct ata_device *dev,
1355 struct ata_taskfile *tf, const u8 *cdb,
1356 int dma_dir, void *buf, unsigned int buflen)
1357{
1358 struct scatterlist sg;
1359
1360 sg_init_one(&sg, buf, buflen);
1361
1362 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
1363}
1364
1365/**
1164 * ata_do_simple_cmd - execute simple internal command 1366 * ata_do_simple_cmd - execute simple internal command
1165 * @dev: Device to which the command is sent 1367 * @dev: Device to which the command is sent
1166 * @cmd: Opcode to execute 1368 * @cmd: Opcode to execute
@@ -1224,7 +1426,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1224 * ata_dev_read_id - Read ID data from the specified device 1426 * ata_dev_read_id - Read ID data from the specified device
1225 * @dev: target device 1427 * @dev: target device
1226 * @p_class: pointer to class of the target device (may be changed) 1428 * @p_class: pointer to class of the target device (may be changed)
1227 * @post_reset: is this read ID post-reset? 1429 * @flags: ATA_READID_* flags
1228 * @id: buffer to read IDENTIFY data into 1430 * @id: buffer to read IDENTIFY data into
1229 * 1431 *
1230 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1432 * Read ID data from the specified device. ATA_CMD_ID_ATA is
@@ -1239,7 +1441,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1239 * 0 on success, -errno otherwise. 1441 * 0 on success, -errno otherwise.
1240 */ 1442 */
1241int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1443int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1242 int post_reset, u16 *id) 1444 unsigned int flags, u16 *id)
1243{ 1445{
1244 struct ata_port *ap = dev->ap; 1446 struct ata_port *ap = dev->ap;
1245 unsigned int class = *p_class; 1447 unsigned int class = *p_class;
@@ -1271,10 +1473,17 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1271 } 1473 }
1272 1474
1273 tf.protocol = ATA_PROT_PIO; 1475 tf.protocol = ATA_PROT_PIO;
1476 tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
1274 1477
1275 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1478 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1276 id, sizeof(id[0]) * ATA_ID_WORDS); 1479 id, sizeof(id[0]) * ATA_ID_WORDS);
1277 if (err_mask) { 1480 if (err_mask) {
1481 if (err_mask & AC_ERR_NODEV_HINT) {
1482 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1483 ap->id, dev->devno);
1484 return -ENOENT;
1485 }
1486
1278 rc = -EIO; 1487 rc = -EIO;
1279 reason = "I/O error"; 1488 reason = "I/O error";
1280 goto err_out; 1489 goto err_out;
@@ -1294,7 +1503,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1294 goto err_out; 1503 goto err_out;
1295 } 1504 }
1296 1505
1297 if (post_reset && class == ATA_DEV_ATA) { 1506 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1298 /* 1507 /*
1299 * The exact sequence expected by certain pre-ATA4 drives is: 1508 * The exact sequence expected by certain pre-ATA4 drives is:
1300 * SRST RESET 1509 * SRST RESET
@@ -1314,7 +1523,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1314 /* current CHS translation info (id[53-58]) might be 1523 /* current CHS translation info (id[53-58]) might be
1315 * changed. reread the identify device info. 1524 * changed. reread the identify device info.
1316 */ 1525 */
1317 post_reset = 0; 1526 flags &= ~ATA_READID_POSTRESET;
1318 goto retry; 1527 goto retry;
1319 } 1528 }
1320 } 1529 }
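
The post_reset integer becomes a flags word, and the retry path clears ATA_READID_POSTRESET before jumping back, so the pre-ATA4 fixup can run only once. A small stand-alone sketch of that flag-driven retry, with hypothetical helpers identify() and apply_pre_ata4_fixup() standing in for the real IDENTIFY and INIT_DEV_PARAMS/SET_MAX steps:

#include <stdio.h>

#define READID_POSTRESET (1u << 0)	/* mirrors ATA_READID_POSTRESET */

/* hypothetical stand-ins for the real IDENTIFY / fixup steps */
static int identify(unsigned short *id) { id[53] = 1; return 0; }
static void apply_pre_ata4_fixup(void) { puts("INIT_DEV_PARAMS/SET_MAX fixup"); }

static int read_id(unsigned int flags, unsigned short *id)
{
retry:
	if (identify(id))
		return -5;			/* -EIO */

	if (flags & READID_POSTRESET) {
		apply_pre_ata4_fixup();		/* may change CHS words 53-58 */
		flags &= ~READID_POSTRESET;	/* guarantee a single retry */
		goto retry;
	}
	return 0;
}

int main(void)
{
	unsigned short id[256];

	return read_id(READID_POSTRESET, id);
}
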
@@ -1345,7 +1554,10 @@ static void ata_dev_config_ncq(struct ata_device *dev,
1345 desc[0] = '\0'; 1554 desc[0] = '\0';
1346 return; 1555 return;
1347 } 1556 }
1348 1557 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1558 snprintf(desc, desc_sz, "NCQ (not used)");
1559 return;
1560 }
1349 if (ap->flags & ATA_FLAG_NCQ) { 1561 if (ap->flags & ATA_FLAG_NCQ) {
1350 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 1562 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1351 dev->flags |= ATA_DFLAG_NCQ; 1563 dev->flags |= ATA_DFLAG_NCQ;
@@ -1374,7 +1586,6 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
1374/** 1586/**
1375 * ata_dev_configure - Configure the specified ATA/ATAPI device 1587 * ata_dev_configure - Configure the specified ATA/ATAPI device
1376 * @dev: Target device to configure 1588 * @dev: Target device to configure
1377 * @print_info: Enable device info printout
1378 * 1589 *
1379 * Configure @dev according to @dev->id. Generic and low-level 1590 * Configure @dev according to @dev->id. Generic and low-level
1380 * driver specific fixups are also applied. 1591 * driver specific fixups are also applied.
@@ -1385,9 +1596,10 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
1385 * RETURNS: 1596 * RETURNS:
1386 * 0 on success, -errno otherwise 1597 * 0 on success, -errno otherwise
1387 */ 1598 */
1388int ata_dev_configure(struct ata_device *dev, int print_info) 1599int ata_dev_configure(struct ata_device *dev)
1389{ 1600{
1390 struct ata_port *ap = dev->ap; 1601 struct ata_port *ap = dev->ap;
1602 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1391 const u16 *id = dev->id; 1603 const u16 *id = dev->id;
1392 unsigned int xfer_mask; 1604 unsigned int xfer_mask;
1393 char revbuf[7]; /* XYZ-99\0 */ 1605 char revbuf[7]; /* XYZ-99\0 */
@@ -1454,6 +1666,10 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1454 if (ata_id_has_lba48(id)) { 1666 if (ata_id_has_lba48(id)) {
1455 dev->flags |= ATA_DFLAG_LBA48; 1667 dev->flags |= ATA_DFLAG_LBA48;
1456 lba_desc = "LBA48"; 1668 lba_desc = "LBA48";
1669
1670 if (dev->n_sectors >= (1UL << 28) &&
1671 ata_id_has_flush_ext(id))
1672 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1457 } 1673 }
1458 1674
1459 /* config NCQ */ 1675 /* config NCQ */
@@ -1530,6 +1746,11 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1530 cdb_intr_string); 1746 cdb_intr_string);
1531 } 1747 }
1532 1748
1749 /* determine max_sectors */
1750 dev->max_sectors = ATA_MAX_SECTORS;
1751 if (dev->flags & ATA_DFLAG_LBA48)
1752 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1753
1533 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 1754 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1534 /* Let the user know. We don't want to disallow opens for 1755 /* Let the user know. We don't want to disallow opens for
1535 rescue purposes, or in case the vendor is just a blithering 1756 rescue purposes, or in case the vendor is just a blithering
@@ -1631,11 +1852,14 @@ int ata_bus_probe(struct ata_port *ap)
1631 if (!ata_dev_enabled(dev)) 1852 if (!ata_dev_enabled(dev))
1632 continue; 1853 continue;
1633 1854
1634 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); 1855 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1856 dev->id);
1635 if (rc) 1857 if (rc)
1636 goto fail; 1858 goto fail;
1637 1859
1638 rc = ata_dev_configure(dev, 1); 1860 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1861 rc = ata_dev_configure(dev);
1862 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
1639 if (rc) 1863 if (rc)
1640 goto fail; 1864 goto fail;
1641 } 1865 }
@@ -2153,6 +2377,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2153 2377
2154static int ata_dev_set_mode(struct ata_device *dev) 2378static int ata_dev_set_mode(struct ata_device *dev)
2155{ 2379{
2380 struct ata_eh_context *ehc = &dev->ap->eh_context;
2156 unsigned int err_mask; 2381 unsigned int err_mask;
2157 int rc; 2382 int rc;
2158 2383
@@ -2167,7 +2392,9 @@ static int ata_dev_set_mode(struct ata_device *dev)
2167 return -EIO; 2392 return -EIO;
2168 } 2393 }
2169 2394
2395 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2170 rc = ata_dev_revalidate(dev, 0); 2396 rc = ata_dev_revalidate(dev, 0);
2397 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2171 if (rc) 2398 if (rc)
2172 return rc; 2399 return rc;
2173 2400
@@ -2325,11 +2552,14 @@ static inline void ata_tf_to_host(struct ata_port *ap,
2325 * Sleep until ATA Status register bit BSY clears, 2552 * Sleep until ATA Status register bit BSY clears,
2326 * or a timeout occurs. 2553 * or a timeout occurs.
2327 * 2554 *
2328 * LOCKING: None. 2555 * LOCKING:
2556 * Kernel thread context (may sleep).
2557 *
2558 * RETURNS:
2559 * 0 on success, -errno otherwise.
2329 */ 2560 */
2330 2561int ata_busy_sleep(struct ata_port *ap,
2331unsigned int ata_busy_sleep (struct ata_port *ap, 2562 unsigned long tmout_pat, unsigned long tmout)
2332 unsigned long tmout_pat, unsigned long tmout)
2333{ 2563{
2334 unsigned long timer_start, timeout; 2564 unsigned long timer_start, timeout;
2335 u8 status; 2565 u8 status;
@@ -2337,27 +2567,32 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2337 status = ata_busy_wait(ap, ATA_BUSY, 300); 2567 status = ata_busy_wait(ap, ATA_BUSY, 300);
2338 timer_start = jiffies; 2568 timer_start = jiffies;
2339 timeout = timer_start + tmout_pat; 2569 timeout = timer_start + tmout_pat;
2340 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2570 while (status != 0xff && (status & ATA_BUSY) &&
2571 time_before(jiffies, timeout)) {
2341 msleep(50); 2572 msleep(50);
2342 status = ata_busy_wait(ap, ATA_BUSY, 3); 2573 status = ata_busy_wait(ap, ATA_BUSY, 3);
2343 } 2574 }
2344 2575
2345 if (status & ATA_BUSY) 2576 if (status != 0xff && (status & ATA_BUSY))
2346 ata_port_printk(ap, KERN_WARNING, 2577 ata_port_printk(ap, KERN_WARNING,
2347 "port is slow to respond, please be patient " 2578 "port is slow to respond, please be patient "
2348 "(Status 0x%x)\n", status); 2579 "(Status 0x%x)\n", status);
2349 2580
2350 timeout = timer_start + tmout; 2581 timeout = timer_start + tmout;
2351 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2582 while (status != 0xff && (status & ATA_BUSY) &&
2583 time_before(jiffies, timeout)) {
2352 msleep(50); 2584 msleep(50);
2353 status = ata_chk_status(ap); 2585 status = ata_chk_status(ap);
2354 } 2586 }
2355 2587
2588 if (status == 0xff)
2589 return -ENODEV;
2590
2356 if (status & ATA_BUSY) { 2591 if (status & ATA_BUSY) {
2357 ata_port_printk(ap, KERN_ERR, "port failed to respond " 2592 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2358 "(%lu secs, Status 0x%x)\n", 2593 "(%lu secs, Status 0x%x)\n",
2359 tmout / HZ, status); 2594 tmout / HZ, status);
2360 return 1; 2595 return -EBUSY;
2361 } 2596 }
2362 2597
2363 return 0; 2598 return 0;
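
The rewritten wait loop treats a status of 0xff as a floating bus (nothing driving D7) and bails out early with -ENODEV instead of polling until the long timeout expires; genuine timeouts now return -EBUSY rather than 1. A stand-alone model of the same polling structure, with read_status() as a hypothetical stand-in for ata_chk_status():

#include <stdio.h>
#include <errno.h>

/* Hypothetical status source: returns 0xff to emulate a floating bus. */
static unsigned char read_status(void) { return 0xff; }

#define ST_BUSY 0x80

/* Poll until BUSY clears, a floating bus (0xff) is seen, or we give up. */
static int wait_not_busy(unsigned int max_polls)
{
	unsigned char status = read_status();

	while (status != 0xff && (status & ST_BUSY) && max_polls--)
		status = read_status();

	if (status == 0xff)
		return -ENODEV;		/* no device present */
	if (status & ST_BUSY)
		return -EBUSY;		/* device still busy: timeout */
	return 0;
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy(10));
	return 0;
}
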
@@ -2448,10 +2683,8 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2448 * the bus shows 0xFF because the odd clown forgets the D7 2683 * the bus shows 0xFF because the odd clown forgets the D7
2449 * pulldown resistor. 2684 * pulldown resistor.
2450 */ 2685 */
2451 if (ata_check_status(ap) == 0xFF) { 2686 if (ata_check_status(ap) == 0xFF)
2452 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n"); 2687 return 0;
2453 return AC_ERR_OTHER;
2454 }
2455 2688
2456 ata_bus_post_reset(ap, devmask); 2689 ata_bus_post_reset(ap, devmask);
2457 2690
@@ -2777,9 +3010,9 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2777} 3010}
2778 3011
2779/** 3012/**
2780 * sata_std_hardreset - reset host port via SATA phy reset 3013 * sata_port_hardreset - reset port via SATA phy reset
2781 * @ap: port to reset 3014 * @ap: port to reset
2782 * @class: resulting class of attached device 3015 * @timing: timing parameters { interval, duration, timeout } in msec
2783 * 3016 *
2784 * SATA phy-reset host port using DET bits of SControl register. 3017 * SATA phy-reset host port using DET bits of SControl register.
2785 * 3018 *
@@ -2789,10 +3022,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2789 * RETURNS: 3022 * RETURNS:
2790 * 0 on success, -errno otherwise. 3023 * 0 on success, -errno otherwise.
2791 */ 3024 */
2792int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 3025int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
2793{ 3026{
2794 struct ata_eh_context *ehc = &ap->eh_context;
2795 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2796 u32 scontrol; 3027 u32 scontrol;
2797 int rc; 3028 int rc;
2798 3029
@@ -2805,24 +3036,24 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2805 * and Sil3124. 3036 * and Sil3124.
2806 */ 3037 */
2807 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3038 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2808 return rc; 3039 goto out;
2809 3040
2810 scontrol = (scontrol & 0x0f0) | 0x304; 3041 scontrol = (scontrol & 0x0f0) | 0x304;
2811 3042
2812 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 3043 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2813 return rc; 3044 goto out;
2814 3045
2815 sata_set_spd(ap); 3046 sata_set_spd(ap);
2816 } 3047 }
2817 3048
2818 /* issue phy wake/reset */ 3049 /* issue phy wake/reset */
2819 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3050 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2820 return rc; 3051 goto out;
2821 3052
2822 scontrol = (scontrol & 0x0f0) | 0x301; 3053 scontrol = (scontrol & 0x0f0) | 0x301;
2823 3054
2824 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol))) 3055 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2825 return rc; 3056 goto out;
2826 3057
2827 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3058 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2828 * 10.4.2 says at least 1 ms. 3059 * 10.4.2 says at least 1 ms.
@@ -2830,7 +3061,40 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2830 msleep(1); 3061 msleep(1);
2831 3062
2832 /* bring phy back */ 3063 /* bring phy back */
2833 sata_phy_resume(ap, timing); 3064 rc = sata_phy_resume(ap, timing);
3065 out:
3066 DPRINTK("EXIT, rc=%d\n", rc);
3067 return rc;
3068}
3069
3070/**
3071 * sata_std_hardreset - reset host port via SATA phy reset
3072 * @ap: port to reset
3073 * @class: resulting class of attached device
3074 *
3075 * SATA phy-reset host port using DET bits of SControl register,
3076 * wait for !BSY and classify the attached device.
3077 *
3078 * LOCKING:
3079 * Kernel thread context (may sleep)
3080 *
3081 * RETURNS:
3082 * 0 on success, -errno otherwise.
3083 */
3084int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3085{
3086 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3087 int rc;
3088
3089 DPRINTK("ENTER\n");
3090
3091 /* do hardreset */
3092 rc = sata_port_hardreset(ap, timing);
3093 if (rc) {
3094 ata_port_printk(ap, KERN_ERR,
3095 "COMRESET failed (errno=%d)\n", rc);
3096 return rc;
3097 }
2834 3098
2835 /* TODO: phy layer with polling, timeouts, etc. */ 3099 /* TODO: phy layer with polling, timeouts, etc. */
2836 if (ata_port_offline(ap)) { 3100 if (ata_port_offline(ap)) {
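
The COMRESET write above, (scontrol & 0x0f0) | 0x301, keeps the current speed-limit nibble and sets DET=1 (start interface initialization) and IPM=3 (disallow partial/slumber transitions); SControl packs DET in bits 3:0, SPD in bits 7:4 and IPM in bits 11:8. A small sketch of that bit layout (illustrative only, not the kernel helpers):

#include <stdio.h>

/* SControl layout (SATA spec): DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8 */
#define SCTL_DET(v)	((v) & 0x00f)
#define SCTL_SPD(v)	(((v) >> 4) & 0xf)
#define SCTL_IPM(v)	(((v) >> 8) & 0xf)

static unsigned int comreset(unsigned int scontrol)
{
	/* keep the SPD limit nibble, set DET=1 (start COMRESET) and IPM=3
	 * (no partial/slumber) -- the 0x301 write in the hunk above */
	return (scontrol & 0x0f0) | 0x301;
}

int main(void)
{
	unsigned int sc = comreset(0x010);	/* SPD nibble 1: hypothetical Gen1 limit */

	printf("DET=%x SPD=%x IPM=%x\n", SCTL_DET(sc), SCTL_SPD(sc), SCTL_IPM(sc));
	return 0;
}
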
@@ -2969,7 +3233,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2969/** 3233/**
2970 * ata_dev_revalidate - Revalidate ATA device 3234 * ata_dev_revalidate - Revalidate ATA device
2971 * @dev: device to revalidate 3235 * @dev: device to revalidate
2972 * @post_reset: is this revalidation after reset? 3236 * @readid_flags: read ID flags
2973 * 3237 *
2974 * Re-read IDENTIFY page and make sure @dev is still attached to 3238 * Re-read IDENTIFY page and make sure @dev is still attached to
2975 * the port. 3239 * the port.
@@ -2980,7 +3244,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2980 * RETURNS: 3244 * RETURNS:
2981 * 0 on success, negative errno otherwise 3245 * 0 on success, negative errno otherwise
2982 */ 3246 */
2983int ata_dev_revalidate(struct ata_device *dev, int post_reset) 3247int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
2984{ 3248{
2985 unsigned int class = dev->class; 3249 unsigned int class = dev->class;
2986 u16 *id = (void *)dev->ap->sector_buf; 3250 u16 *id = (void *)dev->ap->sector_buf;
@@ -2992,7 +3256,7 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2992 } 3256 }
2993 3257
2994 /* read ID data */ 3258 /* read ID data */
2995 rc = ata_dev_read_id(dev, &class, post_reset, id); 3259 rc = ata_dev_read_id(dev, &class, readid_flags, id);
2996 if (rc) 3260 if (rc)
2997 goto fail; 3261 goto fail;
2998 3262
@@ -3005,7 +3269,7 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
3005 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3269 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3006 3270
3007 /* configure device according to the new ID */ 3271 /* configure device according to the new ID */
3008 rc = ata_dev_configure(dev, 0); 3272 rc = ata_dev_configure(dev);
3009 if (rc == 0) 3273 if (rc == 0)
3010 return 0; 3274 return 0;
3011 3275
@@ -3014,37 +3278,55 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
3014 return rc; 3278 return rc;
3015} 3279}
3016 3280
3017static const char * const ata_dma_blacklist [] = { 3281struct ata_blacklist_entry {
3018 "WDC AC11000H", NULL, 3282 const char *model_num;
3019 "WDC AC22100H", NULL, 3283 const char *model_rev;
3020 "WDC AC32500H", NULL, 3284 unsigned long horkage;
3021 "WDC AC33100H", NULL, 3285};
3022 "WDC AC31600H", NULL, 3286
3023 "WDC AC32100H", "24.09P07", 3287static const struct ata_blacklist_entry ata_device_blacklist [] = {
3024 "WDC AC23200L", "21.10N21", 3288 /* Devices with DMA related problems under Linux */
3025 "Compaq CRD-8241B", NULL, 3289 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3026 "CRD-8400B", NULL, 3290 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3027 "CRD-8480B", NULL, 3291 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3028 "CRD-8482B", NULL, 3292 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3029 "CRD-84", NULL, 3293 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3030 "SanDisk SDP3B", NULL, 3294 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3031 "SanDisk SDP3B-64", NULL, 3295 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3032 "SANYO CD-ROM CRD", NULL, 3296 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3033 "HITACHI CDR-8", NULL, 3297 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3034 "HITACHI CDR-8335", NULL, 3298 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3035 "HITACHI CDR-8435", NULL, 3299 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3036 "Toshiba CD-ROM XM-6202B", NULL, 3300 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3037 "TOSHIBA CD-ROM XM-1702BC", NULL, 3301 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3038 "CD-532E-A", NULL, 3302 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3039 "E-IDE CD-ROM CR-840", NULL, 3303 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3040 "CD-ROM Drive/F5A", NULL, 3304 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3041 "WPI CDD-820", NULL, 3305 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3042 "SAMSUNG CD-ROM SC-148C", NULL, 3306 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3043 "SAMSUNG CD-ROM SC", NULL, 3307 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3044 "SanDisk SDP3B-64", NULL, 3308 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3045 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, 3309 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3046 "_NEC DV5800A", NULL, 3310 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3047 "SAMSUNG CD-ROM SN-124", "N001" 3311 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3312 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3313 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3314 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3315 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3316 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3317 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3318 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3319
3320 /* Devices we expect to fail diagnostics */
3321
3322 /* Devices where NCQ should be avoided */
3323 /* NCQ is slow */
3324 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3325
3326 /* Devices with NCQ limits */
3327
3328 /* End Marker */
3329 { }
3048}; 3330};
3049 3331
3050static int ata_strim(char *s, size_t len) 3332static int ata_strim(char *s, size_t len)
@@ -3059,20 +3341,12 @@ static int ata_strim(char *s, size_t len)
3059 return len; 3341 return len;
3060} 3342}
3061 3343
3062static int ata_dma_blacklisted(const struct ata_device *dev) 3344unsigned long ata_device_blacklisted(const struct ata_device *dev)
3063{ 3345{
3064 unsigned char model_num[40]; 3346 unsigned char model_num[40];
3065 unsigned char model_rev[16]; 3347 unsigned char model_rev[16];
3066 unsigned int nlen, rlen; 3348 unsigned int nlen, rlen;
3067 int i; 3349 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3068
3069 /* We don't support polling DMA.
3070 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3071 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3072 */
3073 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3074 (dev->flags & ATA_DFLAG_CDB_INTR))
3075 return 1;
3076 3350
3077 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 3351 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3078 sizeof(model_num)); 3352 sizeof(model_num));
@@ -3081,17 +3355,30 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
3081 nlen = ata_strim(model_num, sizeof(model_num)); 3355 nlen = ata_strim(model_num, sizeof(model_num));
3082 rlen = ata_strim(model_rev, sizeof(model_rev)); 3356 rlen = ata_strim(model_rev, sizeof(model_rev));
3083 3357
3084 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) { 3358 while (ad->model_num) {
3085 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) { 3359 if (!strncmp(ad->model_num, model_num, nlen)) {
3086 if (ata_dma_blacklist[i+1] == NULL) 3360 if (ad->model_rev == NULL)
3087 return 1; 3361 return ad->horkage;
3088 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen)) 3362 if (!strncmp(ad->model_rev, model_rev, rlen))
3089 return 1; 3363 return ad->horkage;
3090 } 3364 }
3365 ad++;
3091 } 3366 }
3092 return 0; 3367 return 0;
3093} 3368}
3094 3369
3370static int ata_dma_blacklisted(const struct ata_device *dev)
3371{
3372 /* We don't support polling DMA.
3373 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3374 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3375 */
3376 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3377 (dev->flags & ATA_DFLAG_CDB_INTR))
3378 return 1;
3379 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3380}
3381
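
Because each blacklist entry now carries a horkage bitmask, a single lookup can drive several quirks: the NODMA bit feeds ata_dma_blacklisted() above, while the NONCQ bit is tested in ata_dev_config_ncq(). A stand-alone sketch of the same table-plus-lookup pattern, with hypothetical names (quirk_entry, lookup_quirks) and entries borrowed from the table above:

#include <stdio.h>
#include <string.h>

#define HORKAGE_NODMA	(1u << 0)	/* bit values here are illustrative */
#define HORKAGE_NONCQ	(1u << 1)

struct quirk_entry {
	const char *model;
	const char *rev;		/* NULL matches any revision */
	unsigned long horkage;
};

static const struct quirk_entry quirks[] = {
	{ "WDC WD740ADFD-00", NULL,       HORKAGE_NONCQ },
	{ "WDC AC32100H",     "24.09P07", HORKAGE_NODMA },
	{ }				/* end marker, as in the kernel table */
};

static unsigned long lookup_quirks(const char *model, const char *rev)
{
	const struct quirk_entry *q;

	for (q = quirks; q->model; q++)
		if (!strcmp(q->model, model) && (!q->rev || !strcmp(q->rev, rev)))
			return q->horkage;
	return 0;
}

int main(void)
{
	unsigned long h = lookup_quirks("WDC WD740ADFD-00", "21.0");

	if (h & HORKAGE_NONCQ)
		puts("NCQ disabled by quirk table");
	if (h & HORKAGE_NODMA)
		puts("DMA disabled by quirk table");
	return 0;
}
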
3095/** 3382/**
3096 * ata_dev_xfermask - Compute supported xfermask of the given device 3383 * ata_dev_xfermask - Compute supported xfermask of the given device
3097 * @dev: Device to compute xfermask for 3384 * @dev: Device to compute xfermask for
@@ -3119,6 +3406,13 @@ static void ata_dev_xfermask(struct ata_device *dev)
3119 */ 3406 */
3120 if (ap->cbl == ATA_CBL_PATA40) 3407 if (ap->cbl == ATA_CBL_PATA40)
3121 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 3408 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 3409 /* Apply the drive-side cable rule. Unknown or 80-pin cables reported
 3410 * on the host side are checked on the drive side as well. Cases where
 3411 * a 40-wire cable is known to be safe at 80-wire speeds are not checked here.
3412 */
3413 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3414 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3415
3122 3416
3123 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 3417 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3124 dev->mwdma_mask, dev->udma_mask); 3418 dev->mwdma_mask, dev->udma_mask);
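
Both cable checks clear the same bits: in the packed xfer mask, bit N of the UDMA field means UDMA mode N, so masking off 0xF8 << ATA_SHIFT_UDMA removes UDMA3-UDMA7 and caps a suspect cable at UDMA2 (ATA/33), the fastest mode that is safe without an 80-conductor cable. A tiny sketch of the bit math (SHIFT_UDMA below is an illustrative value, not the kernel's ATA_SHIFT_UDMA):

#include <stdio.h>

#define SHIFT_UDMA 16	/* illustrative position of the UDMA field */

int main(void)
{
	/* device claims UDMA0-UDMA5 (bits 0-5 of the UDMA field) */
	unsigned int xfer_mask = 0x3f << SHIFT_UDMA;

	/* 0xF8 = bits 3-7: drop UDMA3 and above when the cable is suspect */
	xfer_mask &= ~(0xF8u << SHIFT_UDMA);

	printf("remaining UDMA modes: 0x%02x (UDMA0-UDMA2 only)\n",
	       (xfer_mask >> SHIFT_UDMA) & 0xff);
	return 0;
}
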
@@ -3236,8 +3530,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
3236 * LOCKING: 3530 * LOCKING:
3237 * spin_lock_irqsave(host lock) 3531 * spin_lock_irqsave(host lock)
3238 */ 3532 */
3239 3533void ata_sg_clean(struct ata_queued_cmd *qc)
3240static void ata_sg_clean(struct ata_queued_cmd *qc)
3241{ 3534{
3242 struct ata_port *ap = qc->ap; 3535 struct ata_port *ap = qc->ap;
3243 struct scatterlist *sg = qc->__sg; 3536 struct scatterlist *sg = qc->__sg;
@@ -3395,19 +3688,15 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3395 3688
3396void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 3689void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3397{ 3690{
3398 struct scatterlist *sg;
3399
3400 qc->flags |= ATA_QCFLAG_SINGLE; 3691 qc->flags |= ATA_QCFLAG_SINGLE;
3401 3692
3402 memset(&qc->sgent, 0, sizeof(qc->sgent));
3403 qc->__sg = &qc->sgent; 3693 qc->__sg = &qc->sgent;
3404 qc->n_elem = 1; 3694 qc->n_elem = 1;
3405 qc->orig_n_elem = 1; 3695 qc->orig_n_elem = 1;
3406 qc->buf_virt = buf; 3696 qc->buf_virt = buf;
3407 qc->nbytes = buflen; 3697 qc->nbytes = buflen;
3408 3698
3409 sg = qc->__sg; 3699 sg_init_one(&qc->sgent, buf, buflen);
3410 sg_init_one(sg, buf, buflen);
3411} 3700}
3412 3701
3413/** 3702/**
@@ -4200,8 +4489,12 @@ fsm_start:
4200 /* device stops HSM for abort/error */ 4489 /* device stops HSM for abort/error */
4201 qc->err_mask |= AC_ERR_DEV; 4490 qc->err_mask |= AC_ERR_DEV;
4202 else 4491 else
4203 /* HSM violation. Let EH handle this */ 4492 /* HSM violation. Let EH handle this.
4204 qc->err_mask |= AC_ERR_HSM; 4493 * Phantom devices also trigger this
4494 * condition. Mark hint.
4495 */
4496 qc->err_mask |= AC_ERR_HSM |
4497 AC_ERR_NODEV_HINT;
4205 4498
4206 ap->hsm_task_state = HSM_ST_ERR; 4499 ap->hsm_task_state = HSM_ST_ERR;
4207 goto fsm_start; 4500 goto fsm_start;
@@ -4440,6 +4733,14 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4440 qc->complete_fn(qc); 4733 qc->complete_fn(qc);
4441} 4734}
4442 4735
4736static void fill_result_tf(struct ata_queued_cmd *qc)
4737{
4738 struct ata_port *ap = qc->ap;
4739
4740 ap->ops->tf_read(ap, &qc->result_tf);
4741 qc->result_tf.flags = qc->tf.flags;
4742}
4743
4443/** 4744/**
4444 * ata_qc_complete - Complete an active ATA command 4745 * ata_qc_complete - Complete an active ATA command
4445 * @qc: Command to complete 4746 * @qc: Command to complete
@@ -4477,7 +4778,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4477 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4778 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4478 if (!ata_tag_internal(qc->tag)) { 4779 if (!ata_tag_internal(qc->tag)) {
4479 /* always fill result TF for failed qc */ 4780 /* always fill result TF for failed qc */
4480 ap->ops->tf_read(ap, &qc->result_tf); 4781 fill_result_tf(qc);
4481 ata_qc_schedule_eh(qc); 4782 ata_qc_schedule_eh(qc);
4482 return; 4783 return;
4483 } 4784 }
@@ -4485,7 +4786,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4485 4786
4486 /* read result TF if requested */ 4787 /* read result TF if requested */
4487 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4788 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4488 ap->ops->tf_read(ap, &qc->result_tf); 4789 fill_result_tf(qc);
4489 4790
4490 __ata_qc_complete(qc); 4791 __ata_qc_complete(qc);
4491 } else { 4792 } else {
@@ -4494,7 +4795,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4494 4795
4495 /* read result TF if failed or requested */ 4796 /* read result TF if failed or requested */
4496 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4797 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4497 ap->ops->tf_read(ap, &qc->result_tf); 4798 fill_result_tf(qc);
4498 4799
4499 __ata_qc_complete(qc); 4800 __ata_qc_complete(qc);
4500 } 4801 }
@@ -4674,6 +4975,14 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4674 } 4975 }
4675 } 4976 }
4676 4977
4978 /* Some controllers show flaky interrupt behavior after
4979 * setting xfer mode. Use polling instead.
4980 */
4981 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4982 qc->tf.feature == SETFEATURES_XFER) &&
4983 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4984 qc->tf.flags |= ATA_TFLAG_POLLING;
4985
4677 /* select the device */ 4986 /* select the device */
4678 ata_dev_select(ap, qc->dev->devno, 1, 0); 4987 ata_dev_select(ap, qc->dev->devno, 1, 0);
4679 4988
@@ -4782,6 +5091,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4782inline unsigned int ata_host_intr (struct ata_port *ap, 5091inline unsigned int ata_host_intr (struct ata_port *ap,
4783 struct ata_queued_cmd *qc) 5092 struct ata_queued_cmd *qc)
4784{ 5093{
5094 struct ata_eh_info *ehi = &ap->eh_info;
4785 u8 status, host_stat = 0; 5095 u8 status, host_stat = 0;
4786 5096
4787 VPRINTK("ata%u: protocol %d task_state %d\n", 5097 VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -4842,6 +5152,11 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
4842 ap->ops->irq_clear(ap); 5152 ap->ops->irq_clear(ap);
4843 5153
4844 ata_hsm_move(ap, qc, status, 0); 5154 ata_hsm_move(ap, qc, status, 0);
5155
5156 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5157 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5158 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5159
4845 return 1; /* irq handled */ 5160 return 1; /* irq handled */
4846 5161
4847idle_irq: 5162idle_irq:
@@ -5048,7 +5363,7 @@ int ata_flush_cache(struct ata_device *dev)
5048 if (!ata_try_flush_cache(dev)) 5363 if (!ata_try_flush_cache(dev))
5049 return 0; 5364 return 0;
5050 5365
5051 if (ata_id_has_flush_ext(dev->id)) 5366 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5052 cmd = ATA_CMD_FLUSH_EXT; 5367 cmd = ATA_CMD_FLUSH_EXT;
5053 else 5368 else
5054 cmd = ATA_CMD_FLUSH; 5369 cmd = ATA_CMD_FLUSH;
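
ATA_DFLAG_FLUSH_EXT is now computed once at configure time (LBA48 drive, more than 2^28 sectors, FLUSH CACHE EXT advertised), so this call site and the SCSI flush translation simply test the flag. A small sketch of that decide-once, use-later split (configure() is a hypothetical name; 0xE7/0xEA are the standard FLUSH CACHE / FLUSH CACHE EXT opcodes):

#include <stdio.h>

#define CMD_FLUSH	0xE7	/* FLUSH CACHE */
#define CMD_FLUSH_EXT	0xEA	/* FLUSH CACHE EXT */

#define DFLAG_LBA48	(1u << 0)
#define DFLAG_FLUSH_EXT	(1u << 1)

/* Decide the flags once, as the configure-time hunk above does. */
static unsigned int configure(unsigned long long n_sectors, int has_flush_ext)
{
	unsigned int flags = DFLAG_LBA48;

	if (n_sectors >= (1ULL << 28) && has_flush_ext)
		flags |= DFLAG_FLUSH_EXT;
	return flags;
}

int main(void)
{
	unsigned int flags = configure(1ULL << 29, 1);
	unsigned char cmd = (flags & DFLAG_FLUSH_EXT) ? CMD_FLUSH_EXT : CMD_FLUSH;

	printf("flush opcode: 0x%02x\n", cmd);
	return 0;
}
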
@@ -5520,9 +5835,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
5520 ap->ioaddr.bmdma_addr, 5835 ap->ioaddr.bmdma_addr,
5521 irq_line); 5836 irq_line);
5522 5837
5523 ata_chk_status(ap); 5838 /* freeze port before requesting IRQ */
5524 host->ops->irq_clear(ap); 5839 ata_eh_freeze_port(ap);
5525 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5526 } 5840 }
5527 5841
5528 /* obtain irq, that may be shared between channels */ 5842 /* obtain irq, that may be shared between channels */
@@ -6120,6 +6434,7 @@ EXPORT_SYMBOL_GPL(__sata_phy_reset);
6120EXPORT_SYMBOL_GPL(ata_bus_reset); 6434EXPORT_SYMBOL_GPL(ata_bus_reset);
6121EXPORT_SYMBOL_GPL(ata_std_prereset); 6435EXPORT_SYMBOL_GPL(ata_std_prereset);
6122EXPORT_SYMBOL_GPL(ata_std_softreset); 6436EXPORT_SYMBOL_GPL(ata_std_softreset);
6437EXPORT_SYMBOL_GPL(sata_port_hardreset);
6123EXPORT_SYMBOL_GPL(sata_std_hardreset); 6438EXPORT_SYMBOL_GPL(sata_std_hardreset);
6124EXPORT_SYMBOL_GPL(ata_std_postreset); 6439EXPORT_SYMBOL_GPL(ata_std_postreset);
6125EXPORT_SYMBOL_GPL(ata_dev_classify); 6440EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -6146,6 +6461,7 @@ EXPORT_SYMBOL_GPL(ata_host_suspend);
6146EXPORT_SYMBOL_GPL(ata_host_resume); 6461EXPORT_SYMBOL_GPL(ata_host_resume);
6147EXPORT_SYMBOL_GPL(ata_id_string); 6462EXPORT_SYMBOL_GPL(ata_id_string);
6148EXPORT_SYMBOL_GPL(ata_id_c_string); 6463EXPORT_SYMBOL_GPL(ata_id_c_string);
6464EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6149EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6465EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6150 6466
6151EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6467EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 02b2b2787d9..76a85dfb730 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1136,19 +1136,21 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1136 break; 1136 break;
1137 1137
1138 case ATA_DEV_ATAPI: 1138 case ATA_DEV_ATAPI:
1139 tmp = atapi_eh_request_sense(qc->dev, 1139 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1140 qc->scsicmd->sense_buffer); 1140 tmp = atapi_eh_request_sense(qc->dev,
1141 if (!tmp) { 1141 qc->scsicmd->sense_buffer);
1142 /* ATA_QCFLAG_SENSE_VALID is used to tell 1142 if (!tmp) {
1143 * atapi_qc_complete() that sense data is 1143 /* ATA_QCFLAG_SENSE_VALID is used to
1144 * already valid. 1144 * tell atapi_qc_complete() that sense
1145 * 1145 * data is already valid.
1146 * TODO: interpret sense data and set 1146 *
1147 * appropriate err_mask. 1147 * TODO: interpret sense data and set
1148 */ 1148 * appropriate err_mask.
1149 qc->flags |= ATA_QCFLAG_SENSE_VALID; 1149 */
1150 } else 1150 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1151 qc->err_mask |= tmp; 1151 } else
1152 qc->err_mask |= tmp;
1153 }
1152 } 1154 }
1153 1155
1154 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1156 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
@@ -1433,16 +1435,39 @@ static void ata_eh_report(struct ata_port *ap)
1433 } 1435 }
1434 1436
1435 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1437 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1438 static const char *dma_str[] = {
1439 [DMA_BIDIRECTIONAL] = "bidi",
1440 [DMA_TO_DEVICE] = "out",
1441 [DMA_FROM_DEVICE] = "in",
1442 [DMA_NONE] = "",
1443 };
1436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1444 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1445 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
1446 unsigned int nbytes;
1437 1447
1438 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1448 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1439 continue; 1449 continue;
1440 1450
1441 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x " 1451 nbytes = qc->nbytes;
1442 "Emask 0x%x stat 0x%x err 0x%x (%s)\n", 1452 if (!nbytes)
1443 qc->tag, qc->tf.command, qc->err_mask, 1453 nbytes = qc->nsect << 9;
1444 qc->result_tf.command, qc->result_tf.feature, 1454
1445 ata_err_string(qc->err_mask)); 1455 ata_dev_printk(qc->dev, KERN_ERR,
1456 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
1457 "tag %d cdb 0x%x data %u %s\n "
1458 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
1459 "Emask 0x%x (%s)\n",
1460 cmd->command, cmd->feature, cmd->nsect,
1461 cmd->lbal, cmd->lbam, cmd->lbah,
1462 cmd->hob_feature, cmd->hob_nsect,
1463 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
1464 cmd->device, qc->tag, qc->cdb[0], nbytes,
1465 dma_str[qc->dma_dir],
1466 res->command, res->feature, res->nsect,
1467 res->lbal, res->lbam, res->lbah,
1468 res->hob_feature, res->hob_nsect,
1469 res->hob_lbal, res->hob_lbam, res->hob_lbah,
1470 res->device, qc->err_mask, ata_err_string(qc->err_mask));
1446 } 1471 }
1447} 1472}
1448 1473
@@ -1634,11 +1659,14 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1634 DPRINTK("ENTER\n"); 1659 DPRINTK("ENTER\n");
1635 1660
1636 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1661 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1637 unsigned int action; 1662 unsigned int action, readid_flags = 0;
1638 1663
1639 dev = &ap->device[i]; 1664 dev = &ap->device[i];
1640 action = ata_eh_dev_action(dev); 1665 action = ata_eh_dev_action(dev);
1641 1666
1667 if (ehc->i.flags & ATA_EHI_DID_RESET)
1668 readid_flags |= ATA_READID_POSTRESET;
1669
1642 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { 1670 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1643 if (ata_port_offline(ap)) { 1671 if (ata_port_offline(ap)) {
1644 rc = -EIO; 1672 rc = -EIO;
@@ -1646,13 +1674,17 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1646 } 1674 }
1647 1675
1648 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); 1676 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1649 rc = ata_dev_revalidate(dev, 1677 rc = ata_dev_revalidate(dev, readid_flags);
1650 ehc->i.flags & ATA_EHI_DID_RESET);
1651 if (rc) 1678 if (rc)
1652 break; 1679 break;
1653 1680
1654 ata_eh_done(ap, dev, ATA_EH_REVALIDATE); 1681 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1655 1682
1683 /* Configuration may have changed, reconfigure
1684 * transfer mode.
1685 */
1686 ehc->i.flags |= ATA_EHI_SETMODE;
1687
1656 /* schedule the scsi_rescan_device() here */ 1688 /* schedule the scsi_rescan_device() here */
1657 queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 1689 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1658 } else if (dev->class == ATA_DEV_UNKNOWN && 1690 } else if (dev->class == ATA_DEV_UNKNOWN &&
@@ -1660,18 +1692,35 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1660 ata_class_enabled(ehc->classes[dev->devno])) { 1692 ata_class_enabled(ehc->classes[dev->devno])) {
1661 dev->class = ehc->classes[dev->devno]; 1693 dev->class = ehc->classes[dev->devno];
1662 1694
1663 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); 1695 rc = ata_dev_read_id(dev, &dev->class, readid_flags,
1664 if (rc == 0) 1696 dev->id);
1665 rc = ata_dev_configure(dev, 1); 1697 if (rc == 0) {
1698 ehc->i.flags |= ATA_EHI_PRINTINFO;
1699 rc = ata_dev_configure(dev);
1700 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
1701 } else if (rc == -ENOENT) {
1702 /* IDENTIFY was issued to non-existent
1703 * device. No need to reset. Just
1704 * thaw and kill the device.
1705 */
1706 ata_eh_thaw_port(ap);
1707 dev->class = ATA_DEV_UNKNOWN;
1708 rc = 0;
1709 }
1666 1710
1667 if (rc) { 1711 if (rc) {
1668 dev->class = ATA_DEV_UNKNOWN; 1712 dev->class = ATA_DEV_UNKNOWN;
1669 break; 1713 break;
1670 } 1714 }
1671 1715
1672 spin_lock_irqsave(ap->lock, flags); 1716 if (ata_dev_enabled(dev)) {
1673 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1717 spin_lock_irqsave(ap->lock, flags);
1674 spin_unlock_irqrestore(ap->lock, flags); 1718 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1719 spin_unlock_irqrestore(ap->lock, flags);
1720
1721 /* new device discovered, configure xfermode */
1722 ehc->i.flags |= ATA_EHI_SETMODE;
1723 }
1675 } 1724 }
1676 } 1725 }
1677 1726
@@ -1987,13 +2036,14 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1987 if (rc) 2036 if (rc)
1988 goto dev_fail; 2037 goto dev_fail;
1989 2038
1990 /* configure transfer mode if the port has been reset */ 2039 /* configure transfer mode if necessary */
1991 if (ehc->i.flags & ATA_EHI_DID_RESET) { 2040 if (ehc->i.flags & ATA_EHI_SETMODE) {
1992 rc = ata_set_mode(ap, &dev); 2041 rc = ata_set_mode(ap, &dev);
1993 if (rc) { 2042 if (rc) {
1994 down_xfermask = 1; 2043 down_xfermask = 1;
1995 goto dev_fail; 2044 goto dev_fail;
1996 } 2045 }
2046 ehc->i.flags &= ~ATA_EHI_SETMODE;
1997 } 2047 }
1998 2048
1999 /* suspend devices */ 2049 /* suspend devices */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 47ea111d5ac..8eaace94d96 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -671,7 +671,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
671} 671}
672 672
673/* 673/*
674 * ata_gen_ata_desc_sense - Generate check condition sense block. 674 * ata_gen_passthru_sense - Generate check condition sense block.
675 * @qc: Command that completed. 675 * @qc: Command that completed.
676 * 676 *
677 * This function is specific to the ATA descriptor format sense 677 * This function is specific to the ATA descriptor format sense
@@ -681,9 +681,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
681 * block. Clear sense key, ASC & ASCQ if there is no error. 681 * block. Clear sense key, ASC & ASCQ if there is no error.
682 * 682 *
683 * LOCKING: 683 * LOCKING:
684 * spin_lock_irqsave(host lock) 684 * None.
685 */ 685 */
686void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 686static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
687{ 687{
688 struct scsi_cmnd *cmd = qc->scsicmd; 688 struct scsi_cmnd *cmd = qc->scsicmd;
689 struct ata_taskfile *tf = &qc->result_tf; 689 struct ata_taskfile *tf = &qc->result_tf;
@@ -713,12 +713,9 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
713 713
714 desc[0] = 0x09; 714 desc[0] = 0x09;
715 715
716 /* 716 /* set length of additional sense data */
717 * Set length of additional sense data. 717 sb[7] = 14;
718 * Since we only populate descriptor 0, the total 718 desc[1] = 12;
719 * length is the same (fixed) length as descriptor 0.
720 */
721 desc[1] = sb[7] = 14;
722 719
723 /* 720 /*
724 * Copy registers into sense buffer. 721 * Copy registers into sense buffer.
@@ -746,56 +743,56 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
746} 743}
747 744
748/** 745/**
749 * ata_gen_fixed_sense - generate a SCSI fixed sense block 746 * ata_gen_ata_sense - generate a SCSI fixed sense block
750 * @qc: Command that we are erroring out 747 * @qc: Command that we are erroring out
751 * 748 *
752 * Leverage ata_to_sense_error() to give us the codes. Fit our 749 * Generate sense block for a failed ATA command @qc. Descriptor
753 * LBA in here if there's room. 750 * format is used to accommodate the LBA48 block address.
754 * 751 *
755 * LOCKING: 752 * LOCKING:
756 * inherited from caller 753 * None.
757 */ 754 */
758void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 755static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
759{ 756{
757 struct ata_device *dev = qc->dev;
760 struct scsi_cmnd *cmd = qc->scsicmd; 758 struct scsi_cmnd *cmd = qc->scsicmd;
761 struct ata_taskfile *tf = &qc->result_tf; 759 struct ata_taskfile *tf = &qc->result_tf;
762 unsigned char *sb = cmd->sense_buffer; 760 unsigned char *sb = cmd->sense_buffer;
761 unsigned char *desc = sb + 8;
763 int verbose = qc->ap->ops->error_handler == NULL; 762 int verbose = qc->ap->ops->error_handler == NULL;
763 u64 block;
764 764
765 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 765 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
766 766
767 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 767 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
768 768
769 /* 769 /* sense data is current and format is descriptor */
770 * Use ata_to_sense_error() to map status register bits 770 sb[0] = 0x72;
771
772 /* Use ata_to_sense_error() to map status register bits
771 * onto sense key, asc & ascq. 773 * onto sense key, asc & ascq.
772 */ 774 */
773 if (qc->err_mask || 775 if (qc->err_mask ||
774 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 776 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
775 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 777 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
776 &sb[2], &sb[12], &sb[13], verbose); 778 &sb[1], &sb[2], &sb[3], verbose);
777 sb[2] &= 0x0f; 779 sb[1] &= 0x0f;
778 } 780 }
779 781
780 sb[0] = 0x70; 782 block = ata_tf_read_block(&qc->result_tf, dev);
781 sb[7] = 0x0a;
782
783 if (tf->flags & ATA_TFLAG_LBA48) {
784 /* TODO: find solution for LBA48 descriptors */
785 }
786 783
787 else if (tf->flags & ATA_TFLAG_LBA) { 784 /* information sense data descriptor */
788 /* A small (28b) LBA will fit in the 32b info field */ 785 sb[7] = 12;
789 sb[0] |= 0x80; /* set valid bit */ 786 desc[0] = 0x00;
790 sb[3] = tf->device & 0x0f; 787 desc[1] = 10;
791 sb[4] = tf->lbah;
792 sb[5] = tf->lbam;
793 sb[6] = tf->lbal;
794 }
795 788
796 else { 789 desc[2] |= 0x80; /* valid */
797 /* TODO: C/H/S */ 790 desc[6] = block >> 40;
798 } 791 desc[7] = block >> 32;
792 desc[8] = block >> 24;
793 desc[9] = block >> 16;
794 desc[10] = block >> 8;
795 desc[11] = block;
799} 796}
800 797
801static void ata_scsi_sdev_config(struct scsi_device *sdev) 798static void ata_scsi_sdev_config(struct scsi_device *sdev)
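
Fixed-format sense (response code 0x70) only has a 32-bit information field, so it cannot carry an LBA48 address; the rewritten ata_gen_ata_sense() emits descriptor-format sense (0x72) with one information descriptor (type 0x00, additional length 10) whose last six bytes hold the failed LBA big-endian. A stand-alone sketch that builds such a buffer (build_sense() and the example ASC/ASCQ values are illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

static void build_sense(unsigned char *sb, unsigned char key,
			unsigned char asc, unsigned char ascq,
			unsigned long long lba)
{
	unsigned char *desc = sb + 8;
	int i;

	memset(sb, 0, 32);
	sb[0] = 0x72;		/* current errors, descriptor format */
	sb[1] = key & 0x0f;
	sb[2] = asc;
	sb[3] = ascq;
	sb[7] = 12;		/* additional sense length: one 12-byte descriptor */

	desc[0] = 0x00;		/* information descriptor */
	desc[1] = 10;
	desc[2] = 0x80;		/* VALID */
	for (i = 0; i < 6; i++)	/* 48-bit LBA, big-endian, in descriptor bytes 6-11 */
		desc[6 + i] = lba >> (8 * (5 - i));
}

int main(void)
{
	unsigned char sb[32];
	int i;

	build_sense(sb, 0x3, 0x11, 0x04, 0x123456789aULL);
	for (i = 0; i < 20; i++)
		printf("%02x ", sb[i]);
	printf("\n");
	return 0;
}
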
@@ -807,23 +804,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
807static void ata_scsi_dev_config(struct scsi_device *sdev, 804static void ata_scsi_dev_config(struct scsi_device *sdev,
808 struct ata_device *dev) 805 struct ata_device *dev)
809{ 806{
810 unsigned int max_sectors; 807 /* configure max sectors */
811 808 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
812 /* TODO: 2048 is an arbitrary number, not the
813 * hardware maximum. This should be increased to
814 * 65534 when Jens Axboe's patch for dynamically
815 * determining max_sectors is merged.
816 */
817 max_sectors = ATA_MAX_SECTORS;
818 if (dev->flags & ATA_DFLAG_LBA48)
819 max_sectors = ATA_MAX_SECTORS_LBA48;
820 if (dev->max_sectors)
821 max_sectors = dev->max_sectors;
822 809
823 blk_queue_max_sectors(sdev->request_queue, max_sectors); 810 /* SATA DMA transfers must be multiples of 4 bytes, so
824
825 /*
826 * SATA DMA transfers must be multiples of 4 byte, so
827 * we need to pad ATAPI transfers using an extra sg. 811 * we need to pad ATAPI transfers using an extra sg.
828 * Decrement max hw segments accordingly. 812 * Decrement max hw segments accordingly.
829 */ 813 */
@@ -1040,8 +1024,7 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scs
1040 tf->flags |= ATA_TFLAG_DEVICE; 1024 tf->flags |= ATA_TFLAG_DEVICE;
1041 tf->protocol = ATA_PROT_NODATA; 1025 tf->protocol = ATA_PROT_NODATA;
1042 1026
1043 if ((qc->dev->flags & ATA_DFLAG_LBA48) && 1027 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1044 (ata_id_has_flush_ext(qc->dev->id)))
1045 tf->command = ATA_CMD_FLUSH_EXT; 1028 tf->command = ATA_CMD_FLUSH_EXT;
1046 else 1029 else
1047 tf->command = ATA_CMD_FLUSH; 1030 tf->command = ATA_CMD_FLUSH;
@@ -1282,17 +1265,14 @@ nothing_to_do:
1282 1265
1283static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) 1266static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1284{ 1267{
1285 struct ata_taskfile *tf = &qc->tf; 1268 unsigned int tf_flags = 0;
1286 struct ata_device *dev = qc->dev;
1287 u64 block; 1269 u64 block;
1288 u32 n_block; 1270 u32 n_block;
1289 1271 int rc;
1290 qc->flags |= ATA_QCFLAG_IO;
1291 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1292 1272
1293 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || 1273 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1294 scsicmd[0] == WRITE_16) 1274 scsicmd[0] == WRITE_16)
1295 tf->flags |= ATA_TFLAG_WRITE; 1275 tf_flags |= ATA_TFLAG_WRITE;
1296 1276
1297 /* Calculate the SCSI LBA, transfer length and FUA. */ 1277 /* Calculate the SCSI LBA, transfer length and FUA. */
1298 switch (scsicmd[0]) { 1278 switch (scsicmd[0]) {
@@ -1300,7 +1280,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1300 case WRITE_10: 1280 case WRITE_10:
1301 scsi_10_lba_len(scsicmd, &block, &n_block); 1281 scsi_10_lba_len(scsicmd, &block, &n_block);
1302 if (unlikely(scsicmd[1] & (1 << 3))) 1282 if (unlikely(scsicmd[1] & (1 << 3)))
1303 tf->flags |= ATA_TFLAG_FUA; 1283 tf_flags |= ATA_TFLAG_FUA;
1304 break; 1284 break;
1305 case READ_6: 1285 case READ_6:
1306 case WRITE_6: 1286 case WRITE_6:
@@ -1316,7 +1296,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1316 case WRITE_16: 1296 case WRITE_16:
1317 scsi_16_lba_len(scsicmd, &block, &n_block); 1297 scsi_16_lba_len(scsicmd, &block, &n_block);
1318 if (unlikely(scsicmd[1] & (1 << 3))) 1298 if (unlikely(scsicmd[1] & (1 << 3)))
1319 tf->flags |= ATA_TFLAG_FUA; 1299 tf_flags |= ATA_TFLAG_FUA;
1320 break; 1300 break;
1321 default: 1301 default:
1322 DPRINTK("no-byte command\n"); 1302 DPRINTK("no-byte command\n");
@@ -1334,106 +1314,17 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1334 */ 1314 */
1335 goto nothing_to_do; 1315 goto nothing_to_do;
1336 1316
1337 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | 1317 qc->flags |= ATA_QCFLAG_IO;
1338 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) { 1318 qc->nsect = n_block;
1339 /* yay, NCQ */
1340 if (!lba_48_ok(block, n_block))
1341 goto out_of_range;
1342
1343 tf->protocol = ATA_PROT_NCQ;
1344 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1345
1346 if (tf->flags & ATA_TFLAG_WRITE)
1347 tf->command = ATA_CMD_FPDMA_WRITE;
1348 else
1349 tf->command = ATA_CMD_FPDMA_READ;
1350
1351 qc->nsect = n_block;
1352
1353 tf->nsect = qc->tag << 3;
1354 tf->hob_feature = (n_block >> 8) & 0xff;
1355 tf->feature = n_block & 0xff;
1356
1357 tf->hob_lbah = (block >> 40) & 0xff;
1358 tf->hob_lbam = (block >> 32) & 0xff;
1359 tf->hob_lbal = (block >> 24) & 0xff;
1360 tf->lbah = (block >> 16) & 0xff;
1361 tf->lbam = (block >> 8) & 0xff;
1362 tf->lbal = block & 0xff;
1363
1364 tf->device = 1 << 6;
1365 if (tf->flags & ATA_TFLAG_FUA)
1366 tf->device |= 1 << 7;
1367 } else if (dev->flags & ATA_DFLAG_LBA) {
1368 tf->flags |= ATA_TFLAG_LBA;
1369
1370 if (lba_28_ok(block, n_block)) {
1371 /* use LBA28 */
1372 tf->device |= (block >> 24) & 0xf;
1373 } else if (lba_48_ok(block, n_block)) {
1374 if (!(dev->flags & ATA_DFLAG_LBA48))
1375 goto out_of_range;
1376
1377 /* use LBA48 */
1378 tf->flags |= ATA_TFLAG_LBA48;
1379
1380 tf->hob_nsect = (n_block >> 8) & 0xff;
1381
1382 tf->hob_lbah = (block >> 40) & 0xff;
1383 tf->hob_lbam = (block >> 32) & 0xff;
1384 tf->hob_lbal = (block >> 24) & 0xff;
1385 } else
1386 /* request too large even for LBA48 */
1387 goto out_of_range;
1388
1389 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1390 goto invalid_fld;
1391
1392 qc->nsect = n_block;
1393 tf->nsect = n_block & 0xff;
1394
1395 tf->lbah = (block >> 16) & 0xff;
1396 tf->lbam = (block >> 8) & 0xff;
1397 tf->lbal = block & 0xff;
1398
1399 tf->device |= ATA_LBA;
1400 } else {
1401 /* CHS */
1402 u32 sect, head, cyl, track;
1403
1404 /* The request -may- be too large for CHS addressing. */
1405 if (!lba_28_ok(block, n_block))
1406 goto out_of_range;
1407
1408 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1409 goto invalid_fld;
1410
1411 /* Convert LBA to CHS */
1412 track = (u32)block / dev->sectors;
1413 cyl = track / dev->heads;
1414 head = track % dev->heads;
1415 sect = (u32)block % dev->sectors + 1;
1416
1417 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1418 (u32)block, track, cyl, head, sect);
1419
1420 /* Check whether the converted CHS can fit.
1421 Cylinder: 0-65535
1422 Head: 0-15
1423 Sector: 1-255*/
1424 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1425 goto out_of_range;
1426
1427 qc->nsect = n_block;
1428 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1429 tf->lbal = sect;
1430 tf->lbam = cyl;
1431 tf->lbah = cyl >> 8;
1432 tf->device |= head;
1433 }
1434 1319
1435 return 0; 1320 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1321 qc->tag);
1322 if (likely(rc == 0))
1323 return 0;
1436 1324
1325 if (rc == -ERANGE)
1326 goto out_of_range;
1327 /* treat all other errors as -EINVAL, fall through */
1437invalid_fld: 1328invalid_fld:
1438 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); 1329 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1439 /* "Invalid field in cbd" */ 1330 /* "Invalid field in cbd" */
@@ -1477,7 +1368,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1477 */ 1368 */
1478 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1369 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1479 ((cdb[2] & 0x20) || need_sense)) { 1370 ((cdb[2] & 0x20) || need_sense)) {
1480 ata_gen_ata_desc_sense(qc); 1371 ata_gen_passthru_sense(qc);
1481 } else { 1372 } else {
1482 if (!need_sense) { 1373 if (!need_sense) {
1483 cmd->result = SAM_STAT_GOOD; 1374 cmd->result = SAM_STAT_GOOD;
@@ -1488,7 +1379,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1488 * good for smaller LBA (and maybe CHS?) 1379 * good for smaller LBA (and maybe CHS?)
1489 * devices. 1380 * devices.
1490 */ 1381 */
1491 ata_gen_fixed_sense(qc); 1382 ata_gen_ata_sense(qc);
1492 } 1383 }
1493 } 1384 }
1494 1385
@@ -1715,6 +1606,22 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1715} 1606}
1716 1607
1717/** 1608/**
1609 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
1610 * @idx: byte index into SCSI response buffer
1611 * @val: value to set
1612 *
 1613 * To be used by SCSI command simulator functions. This macro
 1614 * expects two local variables, u8 *rbuf and unsigned int buflen,
 1615 * to be in scope.
1616 *
1617 * LOCKING:
1618 * None.
1619 */
1620#define ATA_SCSI_RBUF_SET(idx, val) do { \
1621 if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
1622 } while (0)
1623
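
The macro bounds-checks every byte against the caller-supplied buflen, so a short allocation length from the initiator simply truncates the simulated response instead of overrunning the buffer. A user-space model of the guard (RBUF_SET and fill_response are hypothetical names):

#include <stdio.h>

/* Local model of the macro: silently drop writes past the buffer end. */
#define RBUF_SET(idx, val) do { \
		if ((idx) < buflen) rbuf[(idx)] = (unsigned char)(val); \
	} while (0)

static void fill_response(unsigned char *rbuf, unsigned int buflen)
{
	RBUF_SET(0, 0x12);	/* within a short buffer: stored */
	RBUF_SET(7, 0x34);	/* beyond it: quietly skipped */
}

int main(void)
{
	unsigned char rbuf[4] = { 0 };

	fill_response(rbuf, sizeof(rbuf));
	printf("%02x %02x %02x %02x\n", rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
	return 0;
}
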
1624/**
1718 * ata_scsiop_inq_std - Simulate INQUIRY command 1625 * ata_scsiop_inq_std - Simulate INQUIRY command
1719 * @args: device IDENTIFY data / SCSI command of interest. 1626 * @args: device IDENTIFY data / SCSI command of interest.
1720 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1627 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -2173,67 +2080,42 @@ saving_not_supp:
2173 * Simulate READ CAPACITY commands. 2080 * Simulate READ CAPACITY commands.
2174 * 2081 *
2175 * LOCKING: 2082 * LOCKING:
2176 * spin_lock_irqsave(host lock) 2083 * None.
2177 */ 2084 */
2178
2179unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2085unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2180 unsigned int buflen) 2086 unsigned int buflen)
2181{ 2087{
2182 u64 n_sectors; 2088 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
2183 u32 tmp;
2184 2089
2185 VPRINTK("ENTER\n"); 2090 VPRINTK("ENTER\n");
2186 2091
2187 if (ata_id_has_lba(args->id)) {
2188 if (ata_id_has_lba48(args->id))
2189 n_sectors = ata_id_u64(args->id, 100);
2190 else
2191 n_sectors = ata_id_u32(args->id, 60);
2192 } else {
2193 /* CHS default translation */
2194 n_sectors = args->id[1] * args->id[3] * args->id[6];
2195
2196 if (ata_id_current_chs_valid(args->id))
2197 /* CHS current translation */
2198 n_sectors = ata_id_u32(args->id, 57);
2199 }
2200
2201 n_sectors--; /* ATA TotalUserSectors - 1 */
2202
2203 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2092 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2204 if( n_sectors >= 0xffffffffULL ) 2093 if (last_lba >= 0xffffffffULL)
2205 tmp = 0xffffffff ; /* Return max count on overflow */ 2094 last_lba = 0xffffffff;
2206 else
2207 tmp = n_sectors ;
2208 2095
2209 /* sector count, 32-bit */ 2096 /* sector count, 32-bit */
2210 rbuf[0] = tmp >> (8 * 3); 2097 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3));
2211 rbuf[1] = tmp >> (8 * 2); 2098 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2));
2212 rbuf[2] = tmp >> (8 * 1); 2099 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1));
2213 rbuf[3] = tmp; 2100 ATA_SCSI_RBUF_SET(3, last_lba);
2214 2101
2215 /* sector size */ 2102 /* sector size */
2216 tmp = ATA_SECT_SIZE; 2103 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
2217 rbuf[6] = tmp >> 8; 2104 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
2218 rbuf[7] = tmp;
2219
2220 } else { 2105 } else {
2221 /* sector count, 64-bit */ 2106 /* sector count, 64-bit */
2222 tmp = n_sectors >> (8 * 4); 2107 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
2223 rbuf[2] = tmp >> (8 * 3); 2108 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6));
2224 rbuf[3] = tmp >> (8 * 2); 2109 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5));
2225 rbuf[4] = tmp >> (8 * 1); 2110 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4));
2226 rbuf[5] = tmp; 2111 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3));
2227 tmp = n_sectors; 2112 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2));
2228 rbuf[6] = tmp >> (8 * 3); 2113 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1));
2229 rbuf[7] = tmp >> (8 * 2); 2114 ATA_SCSI_RBUF_SET(7, last_lba);
2230 rbuf[8] = tmp >> (8 * 1);
2231 rbuf[9] = tmp;
2232 2115
2233 /* sector size */ 2116 /* sector size */
2234 tmp = ATA_SECT_SIZE; 2117 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
2235 rbuf[12] = tmp >> 8; 2118 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
2236 rbuf[13] = tmp;
2237 } 2119 }
2238 2120
2239 return 0; 2121 return 0;
@@ -2319,7 +2201,7 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc)
2319 * a sense descriptor, since that's only 2201
2320 * correct for ATA, not ATAPI 2202 * correct for ATA, not ATAPI
2321 */ 2203 */
2322 ata_gen_ata_desc_sense(qc); 2204 ata_gen_passthru_sense(qc);
2323 } 2205 }
2324 2206
2325 qc->scsidone(qc->scsicmd); 2207 qc->scsidone(qc->scsicmd);
@@ -2394,7 +2276,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2394 * sense descriptors, since that's only 2276 * sense descriptors, since that's only
2395 * correct for ATA, not ATAPI 2277 * correct for ATA, not ATAPI
2396 */ 2278 */
2397 ata_gen_ata_desc_sense(qc); 2279 ata_gen_passthru_sense(qc);
2398 } 2280 }
2399 2281
2400 /* SCSI EH automatically locks door if sdev->locked is 2282 /* SCSI EH automatically locks door if sdev->locked is
@@ -2427,7 +2309,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2427 * a sense descriptor, since that's only 2309
2428 * correct for ATA, not ATAPI 2310 * correct for ATA, not ATAPI
2429 */ 2311 */
2430 ata_gen_ata_desc_sense(qc); 2312 ata_gen_passthru_sense(qc);
2431 } else { 2313 } else {
2432 u8 *scsicmd = cmd->cmnd; 2314 u8 *scsicmd = cmd->cmnd;
2433 2315
@@ -3182,10 +3064,12 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3182 rc = -EINVAL; 3064 rc = -EINVAL;
3183 } 3065 }
3184 3066
3185 if (rc == 0) 3067 if (rc == 0) {
3186 ata_port_schedule_eh(ap); 3068 ata_port_schedule_eh(ap);
3187 3069 spin_unlock_irqrestore(ap->lock, flags);
3188 spin_unlock_irqrestore(ap->lock, flags); 3070 ata_port_wait_eh(ap);
3071 } else
3072 spin_unlock_irqrestore(ap->lock, flags);
3189 3073
3190 return rc; 3074 return rc;
3191} 3075}
@@ -3205,15 +3089,27 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3205void ata_scsi_dev_rescan(void *data) 3089void ata_scsi_dev_rescan(void *data)
3206{ 3090{
3207 struct ata_port *ap = data; 3091 struct ata_port *ap = data;
3208 struct ata_device *dev; 3092 unsigned long flags;
3209 unsigned int i; 3093 unsigned int i;
3210 3094
3095 spin_lock_irqsave(ap->lock, flags);
3096
3211 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3097 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3212 dev = &ap->device[i]; 3098 struct ata_device *dev = &ap->device[i];
3099 struct scsi_device *sdev = dev->sdev;
3213 3100
3214 if (ata_dev_enabled(dev) && dev->sdev) 3101 if (!ata_dev_enabled(dev) || !sdev)
3215 scsi_rescan_device(&(dev->sdev->sdev_gendev)); 3102 continue;
3103 if (scsi_device_get(sdev))
3104 continue;
3105
3106 spin_unlock_irqrestore(ap->lock, flags);
3107 scsi_rescan_device(&(sdev->sdev_gendev));
3108 scsi_device_put(sdev);
3109 spin_lock_irqsave(ap->lock, flags);
3216 } 3110 }
3111
3112 spin_unlock_irqrestore(ap->lock, flags);
3217} 3113}
3218 3114
3219/** 3115/**
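
The rescan loop now pins each scsi_device with scsi_device_get(), drops ap->lock around scsi_rescan_device() (which may sleep), and retakes the lock before touching the device table again. A stand-alone sketch of that lock-drop-with-refcount pattern using a pthread mutex; get(), put() and rescan() are hypothetical stand-ins, not the SCSI API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct dev { int present; int refs; };

static int get(struct dev *d)  { if (!d->present) return -1; d->refs++; return 0; }
static void put(struct dev *d) { d->refs--; }
static void rescan(struct dev *d) { printf("rescan (refs=%d)\n", d->refs); }

static void rescan_all(struct dev *devs, int n)
{
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < n; i++) {
		struct dev *d = &devs[i];

		if (!d->present || get(d))
			continue;		/* skipped while still under the lock */

		pthread_mutex_unlock(&lock);	/* rescan may sleep: no lock held */
		rescan(d);
		put(d);
		pthread_mutex_lock(&lock);	/* retake before touching the table */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct dev devs[2] = { { 1, 0 }, { 0, 0 } };

	rescan_all(devs, 2);
	return 0;
}
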
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7645f2b30cc..10ee22ae5c1 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -39,6 +39,35 @@
39#include "libata.h" 39#include "libata.h"
40 40
41/** 41/**
42 * ata_irq_on - Enable interrupts on a port.
43 * @ap: Port on which interrupts are enabled.
44 *
45 * Enable interrupts on a legacy IDE device using MMIO or PIO,
46 * wait for idle, clear any pending interrupts.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51u8 ata_irq_on(struct ata_port *ap)
52{
53 struct ata_ioports *ioaddr = &ap->ioaddr;
54 u8 tmp;
55
56 ap->ctl &= ~ATA_NIEN;
57 ap->last_ctl = ap->ctl;
58
59 if (ap->flags & ATA_FLAG_MMIO)
60 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
61 else
62 outb(ap->ctl, ioaddr->ctl_addr);
63 tmp = ata_wait_idle(ap);
64
65 ap->ops->irq_clear(ap);
66
67 return tmp;
68}
69
70/**
42 * ata_tf_load_pio - send taskfile registers to host controller 71 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent 72 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set 73 * @tf: ATA taskfile register set
@@ -671,6 +700,14 @@ void ata_bmdma_freeze(struct ata_port *ap)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr); 700 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else 701 else
673 outb(ap->ctl, ioaddr->ctl_addr); 702 outb(ap->ctl, ioaddr->ctl_addr);
703
704 /* Under certain circumstances, some controllers raise IRQ on
705 * ATA_NIEN manipulation. Also, many controllers fail to mask
706 * previously pending IRQ on ATA_NIEN assertion. Clear it.
707 */
708 ata_chk_status(ap);
709
710 ap->ops->irq_clear(ap);
674} 711}
675 712
676/** 713/**
@@ -714,7 +751,6 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 751 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset) 752 ata_postreset_fn_t postreset)
716{ 753{
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc; 754 struct ata_queued_cmd *qc;
719 unsigned long flags; 755 unsigned long flags;
720 int thaw = 0; 756 int thaw = 0;
@@ -732,9 +768,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) { 768 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat; 769 u8 host_stat;
734 770
735 host_stat = ata_bmdma_status(ap); 771 host_stat = ap->ops->bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738 772
739 /* BMDMA controllers indicate host bus error by 773 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't 774 * setting DMA_ERR bit and timing out. As it wasn't
@@ -877,6 +911,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
877 return NULL; 911 return NULL;
878 912
879 probe_ent->n_ports = 2; 913 probe_ent->n_ports = 2;
914 probe_ent->irq_flags = IRQF_SHARED;
880 915
881 if (port_mask & ATA_PORT_PRIMARY) { 916 if (port_mask & ATA_PORT_PRIMARY) {
882 probe_ent->irq = ATA_PRIMARY_IRQ; 917 probe_ent->irq = ATA_PRIMARY_IRQ;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0ed263be652..107b2b56522 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -39,26 +39,39 @@ struct ata_scsi_args {
39}; 39};
40 40
41/* libata-core.c */ 41/* libata-core.c */
42enum {
43 /* flags for ata_dev_read_id() */
44 ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */
45};
46
42extern struct workqueue_struct *ata_aux_wq; 47extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled; 48extern int atapi_enabled;
44extern int atapi_dmadir; 49extern int atapi_dmadir;
45extern int libata_fua; 50extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); 51extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 52extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
53 u64 block, u32 n_block, unsigned int tf_flags,
54 unsigned int tag);
55extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
48extern void ata_dev_disable(struct ata_device *dev); 56extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap); 57extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev, 58extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb, 59 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen); 60 int dma_dir, void *buf, unsigned int buflen);
61extern unsigned ata_exec_internal_sg(struct ata_device *dev,
62 struct ata_taskfile *tf, const u8 *cdb,
63 int dma_dir, struct scatterlist *sg,
64 unsigned int n_elem);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); 65extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 66extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id); 67 unsigned int flags, u16 *id);
56extern int ata_dev_revalidate(struct ata_device *dev, int post_reset); 68extern int ata_dev_revalidate(struct ata_device *dev, unsigned int flags);
57extern int ata_dev_configure(struct ata_device *dev, int print_info); 69extern int ata_dev_configure(struct ata_device *dev);
58extern int sata_down_spd_limit(struct ata_port *ap); 70extern int sata_down_spd_limit(struct ata_port *ap);
59extern int sata_set_spd_needed(struct ata_port *ap); 71extern int sata_set_spd_needed(struct ata_port *ap);
60extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0); 72extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
61extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 73extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
74extern void ata_sg_clean(struct ata_queued_cmd *qc);
62extern void ata_qc_free(struct ata_queued_cmd *qc); 75extern void ata_qc_free(struct ata_queued_cmd *qc);
63extern void ata_qc_issue(struct ata_queued_cmd *qc); 76extern void ata_qc_issue(struct ata_queued_cmd *qc);
64extern void __ata_qc_complete(struct ata_queued_cmd *qc); 77extern void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -120,4 +133,7 @@ extern void ata_scsi_error(struct Scsi_Host *host);
120extern void ata_port_wait_eh(struct ata_port *ap); 133extern void ata_port_wait_eh(struct ata_port *ap);
121extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); 134extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
122 135
136/* libata-sff.c */
137extern u8 ata_irq_on(struct ata_port *ap);
138
123#endif /* __LIBATA_H__ */ 139#endif /* __LIBATA_H__ */
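
The libata.h hunk above replaces the old "int post_reset" argument of ata_dev_read_id() and ata_dev_revalidate() with an "unsigned int flags" word whose only defined bit so far is ATA_READID_POSTRESET. A hedged sketch of how a call site changes; the wrapper function is hypothetical, while the prototype and flag are the ones declared above.

/* Hypothetical wrapper showing the call-site change for the new flags
 * argument; ata_dev_read_id() and ATA_READID_POSTRESET are taken from
 * the declarations above.
 */
static int reread_id_after_reset(struct ata_device *dev, u16 *id)
{
	unsigned int class = dev->class;

	/* old style: ata_dev_read_id(dev, &class, 1, id);  */
	/* new style: pass a flag word instead of a bare int */
	return ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, id);
}
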
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 64eed99f681..c5d61d1911a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
34#include <linux/dmi.h> 34#include <linux/dmi.h>
35 35
36#define DRV_NAME "pata_ali" 36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.6.6" 37#define DRV_VERSION "0.7.2"
38 38
39/* 39/*
40 * Cable special cases 40 * Cable special cases
@@ -78,7 +78,7 @@ static int ali_c2_cable_detect(struct ata_port *ap)
78 implement the detect logic */ 78 implement the detect logic */
79 79
80 if (ali_cable_override(pdev)) 80 if (ali_cable_override(pdev))
81 return ATA_CBL_PATA80; 81 return ATA_CBL_PATA40_SHORT;
82 82
83 /* Host view cable detect 0x4A bit 0 primary bit 1 secondary 83 /* Host view cable detect 0x4A bit 0 primary bit 1 secondary
84 Bit set for 40 pin */ 84 Bit set for 40 pin */
@@ -337,9 +337,6 @@ static struct scsi_host_template ali_sht = {
337 .can_queue = ATA_DEF_QUEUE, 337 .can_queue = ATA_DEF_QUEUE,
338 .this_id = ATA_SHT_THIS_ID, 338 .this_id = ATA_SHT_THIS_ID,
339 .sg_tablesize = LIBATA_MAX_PRD, 339 .sg_tablesize = LIBATA_MAX_PRD,
340 /* Keep LBA28 counts so large I/O's don't turn LBA48 and PIO
341 with older controllers. Not locked so will grow on C5 or later */
342 .max_sectors = 255,
343 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 340 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
344 .emulated = ATA_SHT_EMULATED, 341 .emulated = ATA_SHT_EMULATED,
345 .use_clustering = ATA_SHT_USE_CLUSTERING, 342 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -348,6 +345,8 @@ static struct scsi_host_template ali_sht = {
348 .slave_configure = ata_scsi_slave_config, 345 .slave_configure = ata_scsi_slave_config,
349 .slave_destroy = ata_scsi_slave_destroy, 346 .slave_destroy = ata_scsi_slave_destroy,
350 .bios_param = ata_std_bios_param, 347 .bios_param = ata_std_bios_param,
348 .resume = ata_scsi_device_resume,
349 .suspend = ata_scsi_device_suspend,
351}; 350};
352 351
353/* 352/*
@@ -497,6 +496,69 @@ static struct ata_port_operations ali_c5_port_ops = {
497 .host_stop = ata_host_stop 496 .host_stop = ata_host_stop
498}; 497};
499 498
499
500/**
501 * ali_init_chipset - chip setup function
502 * @pdev: PCI device of ATA controller
503 *
504 * Perform the setup on the device that must be done both at boot
505 * and at resume time.
506 */
507
508static void ali_init_chipset(struct pci_dev *pdev)
509{
510 u8 rev, tmp;
511 struct pci_dev *north, *isa_bridge;
512
513 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
514
515 /*
516 * The chipset revision selects the driver operations and
517 * mode data.
518 */
519
520 if (rev >= 0x20 && rev < 0xC2) {
521 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
522 pci_read_config_byte(pdev, 0x4B, &tmp);
523 /* Clear CD-ROM DMA write bit */
524 tmp &= 0x7F;
525 pci_write_config_byte(pdev, 0x4B, tmp);
526 } else if (rev >= 0xC2) {
527 /* Enable cable detection logic */
528 pci_read_config_byte(pdev, 0x4B, &tmp);
529 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
530 }
531 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
532 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
533
534 if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
535 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
536 Set the south bridge enable bit */
537 pci_read_config_byte(isa_bridge, 0x79, &tmp);
538 if (rev == 0xC2)
539 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
540 else if (rev > 0xC2 && rev < 0xC5)
541 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
542 }
543 if (rev >= 0x20) {
544 /*
545 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
546 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
547 * via 0x54/55.
548 */
549 pci_read_config_byte(pdev, 0x53, &tmp);
550 if (rev <= 0x20)
551 tmp &= ~0x02;
552 if (rev >= 0xc7)
553 tmp |= 0x03;
554 else
555 tmp |= 0x01; /* CD_ROM enable for DMA */
556 pci_write_config_byte(pdev, 0x53, tmp);
557 }
558 pci_dev_put(isa_bridge);
559 pci_dev_put(north);
560 ata_pci_clear_simplex(pdev);
561}
500/** 562/**
501 * ali_init_one - discovery callback 563 * ali_init_one - discovery callback
502 * @pdev: PCI device ID 564 * @pdev: PCI device ID
@@ -570,7 +632,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
570 632
571 static struct ata_port_info *port_info[2]; 633 static struct ata_port_info *port_info[2];
572 u8 rev, tmp; 634 u8 rev, tmp;
573 struct pci_dev *north, *isa_bridge; 635 struct pci_dev *isa_bridge;
574 636
575 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 637 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
576 638
@@ -582,11 +644,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
582 if (rev < 0x20) { 644 if (rev < 0x20) {
583 port_info[0] = port_info[1] = &info_early; 645 port_info[0] = port_info[1] = &info_early;
584 } else if (rev < 0xC2) { 646 } else if (rev < 0xC2) {
585 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
586 pci_read_config_byte(pdev, 0x4B, &tmp);
587 /* Clear CD-ROM DMA write bit */
588 tmp &= 0x7F;
589 pci_write_config_byte(pdev, 0x4B, tmp);
590 port_info[0] = port_info[1] = &info_20; 647 port_info[0] = port_info[1] = &info_20;
591 } else if (rev == 0xC2) { 648 } else if (rev == 0xC2) {
592 port_info[0] = port_info[1] = &info_c2; 649 port_info[0] = port_info[1] = &info_c2;
@@ -597,54 +654,25 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
597 } else 654 } else
598 port_info[0] = port_info[1] = &info_c5; 655 port_info[0] = port_info[1] = &info_c5;
599 656
600 if (rev >= 0xC2) { 657 ali_init_chipset(pdev);
601 /* Enable cable detection logic */ 658
602 pci_read_config_byte(pdev, 0x4B, &tmp);
603 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
604 }
605
606 north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
607 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 659 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
608 660 if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
609 if (north && north->vendor == PCI_VENDOR_ID_AL) { 661 /* Are we paired with a UDMA capable chip */
610 /* Configure the ALi bridge logic. For non ALi rely on BIOS. 662 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
611 Set the south bridge enable bit */ 663 if ((tmp & 0x1E) == 0x12)
612 pci_read_config_byte(isa_bridge, 0x79, &tmp); 664 port_info[0] = port_info[1] = &info_20_udma;
613 if (rev == 0xC2) 665 pci_dev_put(isa_bridge);
614 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
615 else if (rev > 0xC2)
616 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
617 }
618
619 if (rev >= 0x20) {
620 if (rev < 0xC2) {
621 /* Are we paired with a UDMA capable chip */
622 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
623 if ((tmp & 0x1E) == 0x12)
624 port_info[0] = port_info[1] = &info_20_udma;
625 }
626 /*
627 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
628 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
629 * via 0x54/55.
630 */
631 pci_read_config_byte(pdev, 0x53, &tmp);
632 if (rev <= 0x20)
633 tmp &= ~0x02;
634 if (rev >= 0xc7)
635 tmp |= 0x03;
636 else
637 tmp |= 0x01; /* CD_ROM enable for DMA */
638 pci_write_config_byte(pdev, 0x53, tmp);
639 } 666 }
640
641 pci_dev_put(isa_bridge);
642 pci_dev_put(north);
643
644 ata_pci_clear_simplex(pdev);
645 return ata_pci_init_one(pdev, port_info, 2); 667 return ata_pci_init_one(pdev, port_info, 2);
646} 668}
647 669
670static int ali_reinit_one(struct pci_dev *pdev)
671{
672 ali_init_chipset(pdev);
673 return ata_pci_device_resume(pdev);
674}
675
648static const struct pci_device_id ali[] = { 676static const struct pci_device_id ali[] = {
649 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), }, 677 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
650 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), }, 678 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
@@ -656,7 +684,9 @@ static struct pci_driver ali_pci_driver = {
656 .name = DRV_NAME, 684 .name = DRV_NAME,
657 .id_table = ali, 685 .id_table = ali,
658 .probe = ali_init_one, 686 .probe = ali_init_one,
659 .remove = ata_pci_remove_one 687 .remove = ata_pci_remove_one,
688 .suspend = ata_pci_device_suspend,
689 .resume = ali_reinit_one,
660}; 690};
661 691
662static int __init ali_init(void) 692static int __init ali_init(void)
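
The pata_ali changes above follow the pattern repeated across most drivers in this diff: one-off chipset programming is factored into an *_init_chipset() helper shared by probe and resume, a small *_reinit_one() redoes it before handing off to ata_pci_device_resume(), and the pci_driver gains .suspend/.resume hooks. A condensed sketch of that wiring; the "example" names and the 0x53 register write are placeholders, while the ata_pci_* helpers are the ones the drivers actually call.

/* Placeholder driver name and a made-up config register; only the
 * ata_pci_* helpers are taken from the drivers above.
 */
static void example_init_chipset(struct pci_dev *pdev)
{
	u8 tmp;

	/* redo the chip setup that a suspend-to-RAM cycle loses */
	pci_read_config_byte(pdev, 0x53, &tmp);
	pci_write_config_byte(pdev, 0x53, tmp | 0x01);
}

static int example_reinit_one(struct pci_dev *pdev)
{
	/* probe called example_init_chipset() once; resume repeats it,
	 * then lets libata restore the port and device state.
	 */
	example_init_chipset(pdev);
	return ata_pci_device_resume(pdev);
}

/* ...and the pci_driver grows two fields:
 *	.suspend = ata_pci_device_suspend,
 *	.resume  = example_reinit_one,
 */
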
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8be46a63af7..a6b330089f2 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_amd" 27#define DRV_NAME "pata_amd"
28#define DRV_VERSION "0.2.4" 28#define DRV_VERSION "0.2.7"
29 29
30/** 30/**
31 * timing_setup - shared timing computation and load 31 * timing_setup - shared timing computation and load
@@ -326,7 +326,6 @@ static struct scsi_host_template amd_sht = {
326 .can_queue = ATA_DEF_QUEUE, 326 .can_queue = ATA_DEF_QUEUE,
327 .this_id = ATA_SHT_THIS_ID, 327 .this_id = ATA_SHT_THIS_ID,
328 .sg_tablesize = LIBATA_MAX_PRD, 328 .sg_tablesize = LIBATA_MAX_PRD,
329 .max_sectors = ATA_MAX_SECTORS,
330 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 329 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
331 .emulated = ATA_SHT_EMULATED, 330 .emulated = ATA_SHT_EMULATED,
332 .use_clustering = ATA_SHT_USE_CLUSTERING, 331 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -335,6 +334,8 @@ static struct scsi_host_template amd_sht = {
335 .slave_configure = ata_scsi_slave_config, 334 .slave_configure = ata_scsi_slave_config,
336 .slave_destroy = ata_scsi_slave_destroy, 335 .slave_destroy = ata_scsi_slave_destroy,
337 .bios_param = ata_std_bios_param, 336 .bios_param = ata_std_bios_param,
337 .resume = ata_scsi_device_resume,
338 .suspend = ata_scsi_device_suspend,
338}; 339};
339 340
340static struct ata_port_operations amd33_port_ops = { 341static struct ata_port_operations amd33_port_ops = {
@@ -662,6 +663,23 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
662 return ata_pci_init_one(pdev, port_info, 2); 663 return ata_pci_init_one(pdev, port_info, 2);
663} 664}
664 665
666static int amd_reinit_one(struct pci_dev *pdev)
667{
668 if (pdev->vendor == PCI_VENDOR_ID_AMD) {
669 u8 fifo;
670 pci_read_config_byte(pdev, 0x41, &fifo);
671 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
672 /* FIFO is broken */
673 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
674 else
675 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
676 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
677 pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
678 ata_pci_clear_simplex(pdev);
679 }
680 return ata_pci_device_resume(pdev);
681}
682
665static const struct pci_device_id amd[] = { 683static const struct pci_device_id amd[] = {
666 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 }, 684 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
667 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 }, 685 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
@@ -689,7 +707,9 @@ static struct pci_driver amd_pci_driver = {
689 .name = DRV_NAME, 707 .name = DRV_NAME,
690 .id_table = amd, 708 .id_table = amd,
691 .probe = amd_init_one, 709 .probe = amd_init_one,
692 .remove = ata_pci_remove_one 710 .remove = ata_pci_remove_one,
711 .suspend = ata_pci_device_suspend,
712 .resume = amd_reinit_one,
693}; 713};
694 714
695static int __init amd_init(void) 715static int __init amd_init(void)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 2cd30761ca1..37bc1323bda 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -307,7 +307,6 @@ static struct scsi_host_template artop_sht = {
307 .can_queue = ATA_DEF_QUEUE, 307 .can_queue = ATA_DEF_QUEUE,
308 .this_id = ATA_SHT_THIS_ID, 308 .this_id = ATA_SHT_THIS_ID,
309 .sg_tablesize = LIBATA_MAX_PRD, 309 .sg_tablesize = LIBATA_MAX_PRD,
310 .max_sectors = ATA_MAX_SECTORS,
311 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 310 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
312 .emulated = ATA_SHT_EMULATED, 311 .emulated = ATA_SHT_EMULATED,
313 .use_clustering = ATA_SHT_USE_CLUSTERING, 312 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 4e1d3b59adb..6f6672c5513 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -22,7 +22,7 @@
22#include <linux/libata.h> 22#include <linux/libata.h>
23 23
24#define DRV_NAME "pata_atiixp" 24#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.3" 25#define DRV_VERSION "0.4.4"
26 26
27enum { 27enum {
28 ATIIXP_IDE_PIO_TIMING = 0x40, 28 ATIIXP_IDE_PIO_TIMING = 0x40,
@@ -209,7 +209,6 @@ static struct scsi_host_template atiixp_sht = {
209 .can_queue = ATA_DEF_QUEUE, 209 .can_queue = ATA_DEF_QUEUE,
210 .this_id = ATA_SHT_THIS_ID, 210 .this_id = ATA_SHT_THIS_ID,
211 .sg_tablesize = LIBATA_MAX_PRD, 211 .sg_tablesize = LIBATA_MAX_PRD,
212 .max_sectors = ATA_MAX_SECTORS,
213 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 212 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
214 .emulated = ATA_SHT_EMULATED, 213 .emulated = ATA_SHT_EMULATED,
215 .use_clustering = ATA_SHT_USE_CLUSTERING, 214 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -218,6 +217,8 @@ static struct scsi_host_template atiixp_sht = {
218 .slave_configure = ata_scsi_slave_config, 217 .slave_configure = ata_scsi_slave_config,
219 .slave_destroy = ata_scsi_slave_destroy, 218 .slave_destroy = ata_scsi_slave_destroy,
220 .bios_param = ata_std_bios_param, 219 .bios_param = ata_std_bios_param,
220 .resume = ata_scsi_device_resume,
221 .suspend = ata_scsi_device_suspend,
221}; 222};
222 223
223static struct ata_port_operations atiixp_port_ops = { 224static struct ata_port_operations atiixp_port_ops = {
@@ -281,7 +282,9 @@ static struct pci_driver atiixp_pci_driver = {
281 .name = DRV_NAME, 282 .name = DRV_NAME,
282 .id_table = atiixp, 283 .id_table = atiixp,
283 .probe = atiixp_init_one, 284 .probe = atiixp_init_one,
284 .remove = ata_pci_remove_one 285 .remove = ata_pci_remove_one,
286 .resume = ata_pci_device_resume,
287 .suspend = ata_pci_device_suspend,
285}; 288};
286 289
287static int __init atiixp_init(void) 290static int __init atiixp_init(void)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 29a60df465d..15841a56369 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
31#include <linux/libata.h> 31#include <linux/libata.h>
32 32
33#define DRV_NAME "pata_cmd64x" 33#define DRV_NAME "pata_cmd64x"
34#define DRV_VERSION "0.2.1" 34#define DRV_VERSION "0.2.2"
35 35
36/* 36/*
37 * CMD64x specific registers definition. 37 * CMD64x specific registers definition.
@@ -268,7 +268,6 @@ static struct scsi_host_template cmd64x_sht = {
268 .can_queue = ATA_DEF_QUEUE, 268 .can_queue = ATA_DEF_QUEUE,
269 .this_id = ATA_SHT_THIS_ID, 269 .this_id = ATA_SHT_THIS_ID,
270 .sg_tablesize = LIBATA_MAX_PRD, 270 .sg_tablesize = LIBATA_MAX_PRD,
271 .max_sectors = ATA_MAX_SECTORS,
272 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 271 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
273 .emulated = ATA_SHT_EMULATED, 272 .emulated = ATA_SHT_EMULATED,
274 .use_clustering = ATA_SHT_USE_CLUSTERING, 273 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -277,6 +276,8 @@ static struct scsi_host_template cmd64x_sht = {
277 .slave_configure = ata_scsi_slave_config, 276 .slave_configure = ata_scsi_slave_config,
278 .slave_destroy = ata_scsi_slave_destroy, 277 .slave_destroy = ata_scsi_slave_destroy,
279 .bios_param = ata_std_bios_param, 278 .bios_param = ata_std_bios_param,
279 .resume = ata_scsi_device_resume,
280 .suspend = ata_scsi_device_suspend,
280}; 281};
281 282
282static struct ata_port_operations cmd64x_port_ops = { 283static struct ata_port_operations cmd64x_port_ops = {
@@ -469,6 +470,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
469 return ata_pci_init_one(pdev, port_info, 2); 470 return ata_pci_init_one(pdev, port_info, 2);
470} 471}
471 472
473static int cmd64x_reinit_one(struct pci_dev *pdev)
474{
475 u8 mrdmode;
476 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
477 pci_read_config_byte(pdev, MRDMODE, &mrdmode);
478 mrdmode &= ~ 0x30; /* IRQ set up */
479 mrdmode |= 0x02; /* Memory read line enable */
480 pci_write_config_byte(pdev, MRDMODE, mrdmode);
481#ifdef CONFIG_PPC
482 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
483#endif
484 return ata_pci_device_resume(pdev);
485}
486
472static const struct pci_device_id cmd64x[] = { 487static const struct pci_device_id cmd64x[] = {
473 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 }, 488 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
474 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 }, 489 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
@@ -482,7 +497,9 @@ static struct pci_driver cmd64x_pci_driver = {
482 .name = DRV_NAME, 497 .name = DRV_NAME,
483 .id_table = cmd64x, 498 .id_table = cmd64x,
484 .probe = cmd64x_init_one, 499 .probe = cmd64x_init_one,
485 .remove = ata_pci_remove_one 500 .remove = ata_pci_remove_one,
501 .suspend = ata_pci_device_suspend,
502 .resume = cmd64x_reinit_one,
486}; 503};
487 504
488static int __init cmd64x_init(void) 505static int __init cmd64x_init(void)
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 33d2b88f9c7..9f165a8e032 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_cs5520" 43#define DRV_NAME "pata_cs5520"
44#define DRV_VERSION "0.6.2" 44#define DRV_VERSION "0.6.3"
45 45
46struct pio_clocks 46struct pio_clocks
47{ 47{
@@ -159,7 +159,6 @@ static struct scsi_host_template cs5520_sht = {
159 .can_queue = ATA_DEF_QUEUE, 159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID, 160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD, 161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 162 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED, 163 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING, 164 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -168,6 +167,8 @@ static struct scsi_host_template cs5520_sht = {
168 .slave_configure = ata_scsi_slave_config, 167 .slave_configure = ata_scsi_slave_config,
169 .slave_destroy = ata_scsi_slave_destroy, 168 .slave_destroy = ata_scsi_slave_destroy,
170 .bios_param = ata_std_bios_param, 169 .bios_param = ata_std_bios_param,
170 .resume = ata_scsi_device_resume,
171 .suspend = ata_scsi_device_suspend,
171}; 172};
172 173
173static struct ata_port_operations cs5520_port_ops = { 174static struct ata_port_operations cs5520_port_ops = {
@@ -297,6 +298,22 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
297 dev_set_drvdata(dev, NULL); 298 dev_set_drvdata(dev, NULL);
298} 299}
299 300
301/**
302 * cs5520_reinit_one - device resume
303 * @pdev: PCI device
304 *
305 * Do any reconfiguration work needed by a resume from RAM. We need
306 * to restore DMA mode support on BIOSen which disabled it
307 */
308
309static int cs5520_reinit_one(struct pci_dev *pdev)
310{
311 u8 pcicfg;
312 pci_read_config_byte(pdev, 0x60, &pcicfg);
313 if ((pcicfg & 0x40) == 0)
314 pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
315 return ata_pci_device_resume(pdev);
316}
300/* For now keep DMA off. We can set it for all but A rev CS5510 once the 317/* For now keep DMA off. We can set it for all but A rev CS5510 once the
301 core ATA code can handle it */ 318 core ATA code can handle it */
302 319
@@ -311,7 +328,9 @@ static struct pci_driver cs5520_pci_driver = {
311 .name = DRV_NAME, 328 .name = DRV_NAME,
312 .id_table = pata_cs5520, 329 .id_table = pata_cs5520,
313 .probe = cs5520_init_one, 330 .probe = cs5520_init_one,
314 .remove = cs5520_remove_one 331 .remove = cs5520_remove_one,
332 .suspend = ata_pci_device_suspend,
333 .resume = cs5520_reinit_one,
315}; 334};
316 335
317static int __init cs5520_init(void) 336static int __init cs5520_init(void)
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 981f4922346..1c628014dae 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -35,7 +35,7 @@
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36 36
37#define DRV_NAME "pata_cs5530" 37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.6" 38#define DRV_VERSION "0.7.1"
39 39
40/** 40/**
41 * cs5530_set_piomode - PIO setup 41 * cs5530_set_piomode - PIO setup
@@ -173,7 +173,6 @@ static struct scsi_host_template cs5530_sht = {
173 .can_queue = ATA_DEF_QUEUE, 173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID, 174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD, 175 .sg_tablesize = LIBATA_MAX_PRD,
176 .max_sectors = ATA_MAX_SECTORS,
177 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 176 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
178 .emulated = ATA_SHT_EMULATED, 177 .emulated = ATA_SHT_EMULATED,
179 .use_clustering = ATA_SHT_USE_CLUSTERING, 178 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -182,6 +181,8 @@ static struct scsi_host_template cs5530_sht = {
182 .slave_configure = ata_scsi_slave_config, 181 .slave_configure = ata_scsi_slave_config,
183 .slave_destroy = ata_scsi_slave_destroy, 182 .slave_destroy = ata_scsi_slave_destroy,
184 .bios_param = ata_std_bios_param, 183 .bios_param = ata_std_bios_param,
184 .resume = ata_scsi_device_resume,
185 .suspend = ata_scsi_device_suspend,
185}; 186};
186 187
187static struct ata_port_operations cs5530_port_ops = { 188static struct ata_port_operations cs5530_port_ops = {
@@ -239,38 +240,18 @@ static int cs5530_is_palmax(void)
239 return 0; 240 return 0;
240} 241}
241 242
243
242/** 244/**
243 * cs5530_init_one - Initialise a CS5530 245 * cs5530_init_chip - Chipset init
244 * @dev: PCI device
245 * @id: Entry in match table
246 * 246 *
247 * Install a driver for the newly found CS5530 companion chip. Most of 247 * Perform the chip initialisation work that is shared between both
248 * this is just housekeeping. We have to set the chip up correctly and 248 * setup and resume paths
249 * turn off various bits of emulation magic.
250 */ 249 */
251 250
252static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) 251static int cs5530_init_chip(void)
253{ 252{
254 int compiler_warning_pointless_fix; 253 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
255 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
256 static struct ata_port_info info = {
257 .sht = &cs5530_sht,
258 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
259 .pio_mask = 0x1f,
260 .mwdma_mask = 0x07,
261 .udma_mask = 0x07,
262 .port_ops = &cs5530_port_ops
263 };
264 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
265 static struct ata_port_info info_palmax_secondary = {
266 .sht = &cs5530_sht,
267 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
268 .pio_mask = 0x1f,
269 .port_ops = &cs5530_port_ops
270 };
271 static struct ata_port_info *port_info[2] = { &info, &info };
272 254
273 dev = NULL;
274 while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) { 255 while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
275 switch (dev->device) { 256 switch (dev->device) {
276 case PCI_DEVICE_ID_CYRIX_PCI_MASTER: 257 case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
@@ -291,7 +272,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
291 } 272 }
292 273
293 pci_set_master(cs5530_0); 274 pci_set_master(cs5530_0);
294 compiler_warning_pointless_fix = pci_set_mwi(cs5530_0); 275 pci_set_mwi(cs5530_0);
295 276
296 /* 277 /*
297 * Set PCI CacheLineSize to 16-bytes: 278 * Set PCI CacheLineSize to 16-bytes:
@@ -339,13 +320,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
339 320
340 pci_dev_put(master_0); 321 pci_dev_put(master_0);
341 pci_dev_put(cs5530_0); 322 pci_dev_put(cs5530_0);
342 323 return 0;
343 if (cs5530_is_palmax())
344 port_info[1] = &info_palmax_secondary;
345
346 /* Now kick off ATA set up */
347 return ata_pci_init_one(dev, port_info, 2);
348
349fail_put: 324fail_put:
350 if (master_0) 325 if (master_0)
351 pci_dev_put(master_0); 326 pci_dev_put(master_0);
@@ -354,6 +329,53 @@ fail_put:
354 return -ENODEV; 329 return -ENODEV;
355} 330}
356 331
332/**
333 * cs5530_init_one - Initialise a CS5530
334 * @dev: PCI device
335 * @id: Entry in match table
336 *
337 * Install a driver for the newly found CS5530 companion chip. Most of
338 * this is just housekeeping. We have to set the chip up correctly and
339 * turn off various bits of emulation magic.
340 */
341
342static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
343{
344 static struct ata_port_info info = {
345 .sht = &cs5530_sht,
346 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
347 .pio_mask = 0x1f,
348 .mwdma_mask = 0x07,
349 .udma_mask = 0x07,
350 .port_ops = &cs5530_port_ops
351 };
352 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
353 static struct ata_port_info info_palmax_secondary = {
354 .sht = &cs5530_sht,
355 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
356 .pio_mask = 0x1f,
357 .port_ops = &cs5530_port_ops
358 };
359 static struct ata_port_info *port_info[2] = { &info, &info };
360
361 /* Chip initialisation */
362 if (cs5530_init_chip())
363 return -ENODEV;
364
365 if (cs5530_is_palmax())
366 port_info[1] = &info_palmax_secondary;
367
368 /* Now kick off ATA set up */
369 return ata_pci_init_one(pdev, port_info, 2);
370}
371
372static int cs5530_reinit_one(struct pci_dev *pdev)
373{
374 /* If we fail on resume we are doomed */
375 BUG_ON(cs5530_init_chip());
376 return ata_pci_device_resume(pdev);
377}
378
357static const struct pci_device_id cs5530[] = { 379static const struct pci_device_id cs5530[] = {
358 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, 380 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
359 381
@@ -364,7 +386,9 @@ static struct pci_driver cs5530_pci_driver = {
364 .name = DRV_NAME, 386 .name = DRV_NAME,
365 .id_table = cs5530, 387 .id_table = cs5530,
366 .probe = cs5530_init_one, 388 .probe = cs5530_init_one,
367 .remove = ata_pci_remove_one 389 .remove = ata_pci_remove_one,
390 .suspend = ata_pci_device_suspend,
391 .resume = cs5530_reinit_one,
368}; 392};
369 393
370static int __init cs5530_init(void) 394static int __init cs5530_init(void)
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 8dafa4a49fd..e3efec4ffc7 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -39,7 +39,7 @@
39#include <asm/msr.h> 39#include <asm/msr.h>
40 40
41#define DRV_NAME "cs5535" 41#define DRV_NAME "cs5535"
42#define DRV_VERSION "0.2.10" 42#define DRV_VERSION "0.2.11"
43 43
44/* 44/*
45 * The Geode (Aka Athlon GX now) uses an internal MSR based 45 * The Geode (Aka Athlon GX now) uses an internal MSR based
@@ -177,7 +177,6 @@ static struct scsi_host_template cs5535_sht = {
177 .can_queue = ATA_DEF_QUEUE, 177 .can_queue = ATA_DEF_QUEUE,
178 .this_id = ATA_SHT_THIS_ID, 178 .this_id = ATA_SHT_THIS_ID,
179 .sg_tablesize = LIBATA_MAX_PRD, 179 .sg_tablesize = LIBATA_MAX_PRD,
180 .max_sectors = ATA_MAX_SECTORS,
181 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 180 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
182 .emulated = ATA_SHT_EMULATED, 181 .emulated = ATA_SHT_EMULATED,
183 .use_clustering = ATA_SHT_USE_CLUSTERING, 182 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -186,6 +185,8 @@ static struct scsi_host_template cs5535_sht = {
186 .slave_configure = ata_scsi_slave_config, 185 .slave_configure = ata_scsi_slave_config,
187 .slave_destroy = ata_scsi_slave_destroy, 186 .slave_destroy = ata_scsi_slave_destroy,
188 .bios_param = ata_std_bios_param, 187 .bios_param = ata_std_bios_param,
188 .resume = ata_scsi_device_resume,
189 .suspend = ata_scsi_device_suspend,
189}; 190};
190 191
191static struct ata_port_operations cs5535_port_ops = { 192static struct ata_port_operations cs5535_port_ops = {
@@ -268,7 +269,9 @@ static struct pci_driver cs5535_pci_driver = {
268 .name = DRV_NAME, 269 .name = DRV_NAME,
269 .id_table = cs5535, 270 .id_table = cs5535,
270 .probe = cs5535_init_one, 271 .probe = cs5535_init_one,
271 .remove = ata_pci_remove_one 272 .remove = ata_pci_remove_one,
273 .suspend = ata_pci_device_suspend,
274 .resume = ata_pci_device_resume,
272}; 275};
273 276
274static int __init cs5535_init(void) 277static int __init cs5535_init(void)
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 5a0b811907e..e2a95699bae 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -18,7 +18,7 @@
18#include <linux/libata.h> 18#include <linux/libata.h>
19 19
20#define DRV_NAME "pata_cypress" 20#define DRV_NAME "pata_cypress"
21#define DRV_VERSION "0.1.2" 21#define DRV_VERSION "0.1.4"
22 22
23/* here are the offset definitions for the registers */ 23/* here are the offset definitions for the registers */
24 24
@@ -128,7 +128,6 @@ static struct scsi_host_template cy82c693_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,6 +136,8 @@ static struct scsi_host_template cy82c693_sht = {
137 .slave_configure = ata_scsi_slave_config, 136 .slave_configure = ata_scsi_slave_config,
138 .slave_destroy = ata_scsi_slave_destroy, 137 .slave_destroy = ata_scsi_slave_destroy,
139 .bios_param = ata_std_bios_param, 138 .bios_param = ata_std_bios_param,
139 .resume = ata_scsi_device_resume,
140 .suspend = ata_scsi_device_suspend,
140}; 141};
141 142
142static struct ata_port_operations cy82c693_port_ops = { 143static struct ata_port_operations cy82c693_port_ops = {
@@ -204,7 +205,9 @@ static struct pci_driver cy82c693_pci_driver = {
204 .name = DRV_NAME, 205 .name = DRV_NAME,
205 .id_table = cy82c693, 206 .id_table = cy82c693,
206 .probe = cy82c693_init_one, 207 .probe = cy82c693_init_one,
207 .remove = ata_pci_remove_one 208 .remove = ata_pci_remove_one,
209 .suspend = ata_pci_device_suspend,
210 .resume = ata_pci_device_resume,
208}; 211};
209 212
210static int __init cy82c693_init(void) 213static int __init cy82c693_init(void)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 755f79279de..edf8a63f50a 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
22#include <linux/ata.h> 22#include <linux/ata.h>
23 23
24#define DRV_NAME "pata_efar" 24#define DRV_NAME "pata_efar"
25#define DRV_VERSION "0.4.2" 25#define DRV_VERSION "0.4.3"
26 26
27/** 27/**
28 * efar_pre_reset - check for 40/80 pin 28 * efar_pre_reset - check for 40/80 pin
@@ -226,7 +226,6 @@ static struct scsi_host_template efar_sht = {
226 .can_queue = ATA_DEF_QUEUE, 226 .can_queue = ATA_DEF_QUEUE,
227 .this_id = ATA_SHT_THIS_ID, 227 .this_id = ATA_SHT_THIS_ID,
228 .sg_tablesize = LIBATA_MAX_PRD, 228 .sg_tablesize = LIBATA_MAX_PRD,
229 .max_sectors = ATA_MAX_SECTORS,
230 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 229 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
231 .emulated = ATA_SHT_EMULATED, 230 .emulated = ATA_SHT_EMULATED,
232 .use_clustering = ATA_SHT_USE_CLUSTERING, 231 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -235,6 +234,8 @@ static struct scsi_host_template efar_sht = {
235 .slave_configure = ata_scsi_slave_config, 234 .slave_configure = ata_scsi_slave_config,
236 .slave_destroy = ata_scsi_slave_destroy, 235 .slave_destroy = ata_scsi_slave_destroy,
237 .bios_param = ata_std_bios_param, 236 .bios_param = ata_std_bios_param,
237 .resume = ata_scsi_device_resume,
238 .suspend = ata_scsi_device_suspend,
238}; 239};
239 240
240static const struct ata_port_operations efar_ops = { 241static const struct ata_port_operations efar_ops = {
@@ -316,6 +317,8 @@ static struct pci_driver efar_pci_driver = {
316 .id_table = efar_pci_tbl, 317 .id_table = efar_pci_tbl,
317 .probe = efar_init_one, 318 .probe = efar_init_one,
318 .remove = ata_pci_remove_one, 319 .remove = ata_pci_remove_one,
320 .suspend = ata_pci_device_suspend,
321 .resume = ata_pci_device_resume,
319}; 322};
320 323
321static int __init efar_init(void) 324static int __init efar_init(void)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index c0e150a9586..2663599a7c0 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@
27#include <linux/libata.h> 27#include <linux/libata.h>
28 28
29#define DRV_NAME "pata_hpt366" 29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.5" 30#define DRV_VERSION "0.5.3"
31 31
32struct hpt_clock { 32struct hpt_clock {
33 u8 xfer_speed; 33 u8 xfer_speed;
@@ -222,9 +222,17 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
222 222
223static int hpt36x_pre_reset(struct ata_port *ap) 223static int hpt36x_pre_reset(struct ata_port *ap)
224{ 224{
225 static const struct pci_bits hpt36x_enable_bits[] = {
226 { 0x50, 1, 0x04, 0x04 },
227 { 0x54, 1, 0x04, 0x04 }
228 };
229
225 u8 ata66; 230 u8 ata66;
226 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 231 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
227 232
233 if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no]))
234 return -ENOENT;
235
228 pci_read_config_byte(pdev, 0x5A, &ata66); 236 pci_read_config_byte(pdev, 0x5A, &ata66);
229 if (ata66 & (1 << ap->port_no)) 237 if (ata66 & (1 << ap->port_no))
230 ap->cbl = ATA_CBL_PATA40; 238 ap->cbl = ATA_CBL_PATA40;
@@ -322,7 +330,6 @@ static struct scsi_host_template hpt36x_sht = {
322 .can_queue = ATA_DEF_QUEUE, 330 .can_queue = ATA_DEF_QUEUE,
323 .this_id = ATA_SHT_THIS_ID, 331 .this_id = ATA_SHT_THIS_ID,
324 .sg_tablesize = LIBATA_MAX_PRD, 332 .sg_tablesize = LIBATA_MAX_PRD,
325 .max_sectors = ATA_MAX_SECTORS,
326 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 333 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
327 .emulated = ATA_SHT_EMULATED, 334 .emulated = ATA_SHT_EMULATED,
328 .use_clustering = ATA_SHT_USE_CLUSTERING, 335 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -331,6 +338,8 @@ static struct scsi_host_template hpt36x_sht = {
331 .slave_configure = ata_scsi_slave_config, 338 .slave_configure = ata_scsi_slave_config,
332 .slave_destroy = ata_scsi_slave_destroy, 339 .slave_destroy = ata_scsi_slave_destroy,
333 .bios_param = ata_std_bios_param, 340 .bios_param = ata_std_bios_param,
341 .resume = ata_scsi_device_resume,
342 .suspend = ata_scsi_device_suspend,
334}; 343};
335 344
336/* 345/*
@@ -373,6 +382,27 @@ static struct ata_port_operations hpt366_port_ops = {
373}; 382};
374 383
375/** 384/**
385 * hpt36x_init_chipset - common chip setup
386 * @dev: PCI device
387 *
388 * Perform the chip setup work that must be done at both init and
389 * resume time
390 */
391
392static void hpt36x_init_chipset(struct pci_dev *dev)
393{
394 u8 drive_fast;
395 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
396 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
397 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
398 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
399
400 pci_read_config_byte(dev, 0x51, &drive_fast);
401 if (drive_fast & 0x80)
402 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
403}
404
405/**
376 * hpt36x_init_one - Initialise an HPT366/368 406 * hpt36x_init_one - Initialise an HPT366/368
377 * @dev: PCI device 407 * @dev: PCI device
378 * @id: Entry in match table 408 * @id: Entry in match table
@@ -407,7 +437,6 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
407 437
408 u32 class_rev; 438 u32 class_rev;
409 u32 reg1; 439 u32 reg1;
410 u8 drive_fast;
411 440
412 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 441 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
413 class_rev &= 0xFF; 442 class_rev &= 0xFF;
@@ -417,14 +446,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
417 if (class_rev > 2) 446 if (class_rev > 2)
418 return -ENODEV; 447 return -ENODEV;
419 448
420 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 449 hpt36x_init_chipset(dev);
421 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
422 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
423 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
424
425 pci_read_config_byte(dev, 0x51, &drive_fast);
426 if (drive_fast & 0x80)
427 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
428 450
429 pci_read_config_dword(dev, 0x40, &reg1); 451 pci_read_config_dword(dev, 0x40, &reg1);
430 452
@@ -445,9 +467,15 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
445 return ata_pci_init_one(dev, port_info, 2); 467 return ata_pci_init_one(dev, port_info, 2);
446} 468}
447 469
470static int hpt36x_reinit_one(struct pci_dev *dev)
471{
472 hpt36x_init_chipset(dev);
473 return ata_pci_device_resume(dev);
474}
475
476
448static const struct pci_device_id hpt36x[] = { 477static const struct pci_device_id hpt36x[] = {
449 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, 478 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
450
451 { }, 479 { },
452}; 480};
453 481
@@ -455,7 +483,9 @@ static struct pci_driver hpt36x_pci_driver = {
455 .name = DRV_NAME, 483 .name = DRV_NAME,
456 .id_table = hpt36x, 484 .id_table = hpt36x,
457 .probe = hpt36x_init_one, 485 .probe = hpt36x_init_one,
458 .remove = ata_pci_remove_one 486 .remove = ata_pci_remove_one,
487 .suspend = ata_pci_device_suspend,
488 .resume = hpt36x_reinit_one,
459}; 489};
460 490
461static int __init hpt36x_init(void) 491static int __init hpt36x_init(void)
@@ -463,13 +493,11 @@ static int __init hpt36x_init(void)
463 return pci_register_driver(&hpt36x_pci_driver); 493 return pci_register_driver(&hpt36x_pci_driver);
464} 494}
465 495
466
467static void __exit hpt36x_exit(void) 496static void __exit hpt36x_exit(void)
468{ 497{
469 pci_unregister_driver(&hpt36x_pci_driver); 498 pci_unregister_driver(&hpt36x_pci_driver);
470} 499}
471 500
472
473MODULE_AUTHOR("Alan Cox"); 501MODULE_AUTHOR("Alan Cox");
474MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368"); 502MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
475MODULE_LICENSE("GPL"); 503MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 1eeb16f0fb0..47082df7199 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -768,7 +768,6 @@ static struct scsi_host_template hpt37x_sht = {
768 .can_queue = ATA_DEF_QUEUE, 768 .can_queue = ATA_DEF_QUEUE,
769 .this_id = ATA_SHT_THIS_ID, 769 .this_id = ATA_SHT_THIS_ID,
770 .sg_tablesize = LIBATA_MAX_PRD, 770 .sg_tablesize = LIBATA_MAX_PRD,
771 .max_sectors = ATA_MAX_SECTORS,
772 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 771 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
773 .emulated = ATA_SHT_EMULATED, 772 .emulated = ATA_SHT_EMULATED,
774 .use_clustering = ATA_SHT_USE_CLUSTERING, 773 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 47d7664e9ee..f6817b4093a 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -334,7 +334,6 @@ static struct scsi_host_template hpt3x2n_sht = {
334 .can_queue = ATA_DEF_QUEUE, 334 .can_queue = ATA_DEF_QUEUE,
335 .this_id = ATA_SHT_THIS_ID, 335 .this_id = ATA_SHT_THIS_ID,
336 .sg_tablesize = LIBATA_MAX_PRD, 336 .sg_tablesize = LIBATA_MAX_PRD,
337 .max_sectors = ATA_MAX_SECTORS,
338 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 337 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
339 .emulated = ATA_SHT_EMULATED, 338 .emulated = ATA_SHT_EMULATED,
340 .use_clustering = ATA_SHT_USE_CLUSTERING, 339 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index d216cc564b5..5f1d385eb59 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
23#include <linux/libata.h> 23#include <linux/libata.h>
24 24
25#define DRV_NAME "pata_hpt3x3" 25#define DRV_NAME "pata_hpt3x3"
26#define DRV_VERSION "0.4.1" 26#define DRV_VERSION "0.4.2"
27 27
28static int hpt3x3_probe_init(struct ata_port *ap) 28static int hpt3x3_probe_init(struct ata_port *ap)
29{ 29{
@@ -111,7 +111,6 @@ static struct scsi_host_template hpt3x3_sht = {
111 .can_queue = ATA_DEF_QUEUE, 111 .can_queue = ATA_DEF_QUEUE,
112 .this_id = ATA_SHT_THIS_ID, 112 .this_id = ATA_SHT_THIS_ID,
113 .sg_tablesize = LIBATA_MAX_PRD, 113 .sg_tablesize = LIBATA_MAX_PRD,
114 .max_sectors = ATA_MAX_SECTORS,
115 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 114 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
116 .emulated = ATA_SHT_EMULATED, 115 .emulated = ATA_SHT_EMULATED,
117 .use_clustering = ATA_SHT_USE_CLUSTERING, 116 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -120,6 +119,8 @@ static struct scsi_host_template hpt3x3_sht = {
120 .slave_configure = ata_scsi_slave_config, 119 .slave_configure = ata_scsi_slave_config,
121 .slave_destroy = ata_scsi_slave_destroy, 120 .slave_destroy = ata_scsi_slave_destroy,
122 .bios_param = ata_std_bios_param, 121 .bios_param = ata_std_bios_param,
122 .resume = ata_scsi_device_resume,
123 .suspend = ata_scsi_device_suspend,
123}; 124};
124 125
125static struct ata_port_operations hpt3x3_port_ops = { 126static struct ata_port_operations hpt3x3_port_ops = {
@@ -158,6 +159,27 @@ static struct ata_port_operations hpt3x3_port_ops = {
158}; 159};
159 160
160/** 161/**
162 * hpt3x3_init_chipset - chip setup
163 * @dev: PCI device
164 *
165 * Perform the setup required at boot and on resume.
166 */
167
168static void hpt3x3_init_chipset(struct pci_dev *dev)
169{
170 u16 cmd;
171 /* Initialize the board */
172 pci_write_config_word(dev, 0x80, 0x00);
173 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
174 pci_read_config_word(dev, PCI_COMMAND, &cmd);
175 if (cmd & PCI_COMMAND_MEMORY)
176 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
177 else
178 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
179}
180
181
182/**
161 * hpt3x3_init_one - Initialise an HPT343/363 183 * hpt3x3_init_one - Initialise an HPT343/363
162 * @dev: PCI device 184 * @dev: PCI device
163 * @id: Entry in match table 185 * @id: Entry in match table
@@ -178,21 +200,18 @@ static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
178 .port_ops = &hpt3x3_port_ops 200 .port_ops = &hpt3x3_port_ops
179 }; 201 };
180 static struct ata_port_info *port_info[2] = { &info, &info }; 202 static struct ata_port_info *port_info[2] = { &info, &info };
181 u16 cmd;
182
183 /* Initialize the board */
184 pci_write_config_word(dev, 0x80, 0x00);
185 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
186 pci_read_config_word(dev, PCI_COMMAND, &cmd);
187 if (cmd & PCI_COMMAND_MEMORY)
188 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
189 else
190 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
191 203
204 hpt3x3_init_chipset(dev);
192 /* Now kick off ATA set up */ 205 /* Now kick off ATA set up */
193 return ata_pci_init_one(dev, port_info, 2); 206 return ata_pci_init_one(dev, port_info, 2);
194} 207}
195 208
209static int hpt3x3_reinit_one(struct pci_dev *dev)
210{
211 hpt3x3_init_chipset(dev);
212 return ata_pci_device_resume(dev);
213}
214
196static const struct pci_device_id hpt3x3[] = { 215static const struct pci_device_id hpt3x3[] = {
197 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), }, 216 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
198 217
@@ -203,7 +222,9 @@ static struct pci_driver hpt3x3_pci_driver = {
203 .name = DRV_NAME, 222 .name = DRV_NAME,
204 .id_table = hpt3x3, 223 .id_table = hpt3x3,
205 .probe = hpt3x3_init_one, 224 .probe = hpt3x3_init_one,
206 .remove = ata_pci_remove_one 225 .remove = ata_pci_remove_one,
226 .suspend = ata_pci_device_suspend,
227 .resume = hpt3x3_reinit_one,
207}; 228};
208 229
209static int __init hpt3x3_init(void) 230static int __init hpt3x3_init(void)
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 40ca2b82b7f..a97d55ae95c 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -27,7 +27,6 @@ static struct scsi_host_template isapnp_sht = {
27 .can_queue = ATA_DEF_QUEUE, 27 .can_queue = ATA_DEF_QUEUE,
28 .this_id = ATA_SHT_THIS_ID, 28 .this_id = ATA_SHT_THIS_ID,
29 .sg_tablesize = LIBATA_MAX_PRD, 29 .sg_tablesize = LIBATA_MAX_PRD,
30 .max_sectors = ATA_MAX_SECTORS,
31 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 30 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
32 .emulated = ATA_SHT_EMULATED, 31 .emulated = ATA_SHT_EMULATED,
33 .use_clustering = ATA_SHT_USE_CLUSTERING, 32 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7f68f14be6f..0b56ff3d1cf 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
80 80
81 81
82#define DRV_NAME "pata_it821x" 82#define DRV_NAME "pata_it821x"
83#define DRV_VERSION "0.3.2" 83#define DRV_VERSION "0.3.3"
84 84
85struct it821x_dev 85struct it821x_dev
86{ 86{
@@ -666,9 +666,6 @@ static struct scsi_host_template it821x_sht = {
666 .can_queue = ATA_DEF_QUEUE, 666 .can_queue = ATA_DEF_QUEUE,
667 .this_id = ATA_SHT_THIS_ID, 667 .this_id = ATA_SHT_THIS_ID,
668 .sg_tablesize = LIBATA_MAX_PRD, 668 .sg_tablesize = LIBATA_MAX_PRD,
669 /* 255 sectors to begin with. This is locked in smart mode but not
670 in pass through */
671 .max_sectors = 255,
672 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 669 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
673 .emulated = ATA_SHT_EMULATED, 670 .emulated = ATA_SHT_EMULATED,
674 .use_clustering = ATA_SHT_USE_CLUSTERING, 671 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -677,6 +674,8 @@ static struct scsi_host_template it821x_sht = {
677 .slave_configure = ata_scsi_slave_config, 674 .slave_configure = ata_scsi_slave_config,
678 .slave_destroy = ata_scsi_slave_destroy, 675 .slave_destroy = ata_scsi_slave_destroy,
679 .bios_param = ata_std_bios_param, 676 .bios_param = ata_std_bios_param,
677 .resume = ata_scsi_device_resume,
678 .suspend = ata_scsi_device_suspend,
680}; 679};
681 680
682static struct ata_port_operations it821x_smart_port_ops = { 681static struct ata_port_operations it821x_smart_port_ops = {
@@ -809,6 +808,14 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
809 return ata_pci_init_one(pdev, port_info, 2); 808 return ata_pci_init_one(pdev, port_info, 2);
810} 809}
811 810
811static int it821x_reinit_one(struct pci_dev *pdev)
812{
813 /* Resume - turn raid back off if need be */
814 if (it8212_noraid)
815 it821x_disable_raid(pdev);
816 return ata_pci_device_resume(pdev);
817}
818
812static const struct pci_device_id it821x[] = { 819static const struct pci_device_id it821x[] = {
813 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, 820 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
814 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, 821 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
@@ -820,7 +827,9 @@ static struct pci_driver it821x_pci_driver = {
820 .name = DRV_NAME, 827 .name = DRV_NAME,
821 .id_table = it821x, 828 .id_table = it821x,
822 .probe = it821x_init_one, 829 .probe = it821x_init_one,
823 .remove = ata_pci_remove_one 830 .remove = ata_pci_remove_one,
831 .suspend = ata_pci_device_suspend,
832 .resume = it821x_reinit_one,
824}; 833};
825 834
826static int __init it821x_init(void) 835static int __init it821x_init(void)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
new file mode 100644
index 00000000000..cb8924109f5
--- /dev/null
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -0,0 +1,271 @@
1/*
2 * ixp4xx PATA/Compact Flash driver
3 * Copyright (c) 2006 Tower Technologies
4 * Author: Alessandro Zummo <a.zummo@towertech.it>
5 *
6 * An ATA driver to handle a Compact Flash connected
7 * to the ixp4xx expansion bus in TrueIDE mode. The CF
8 * must have its chip selects connected to two CS lines
9 * on the ixp4xx. The interrupt line is optional; if it is not
10 * specified, the driver will run in polling mode.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/libata.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <scsi/scsi_host.h>
24
25#define DRV_NAME "pata_ixp4xx_cf"
26#define DRV_VERSION "0.1.1"
27
28static void ixp4xx_set_mode(struct ata_port *ap)
29{
30 int i;
31
32 for (i = 0; i < ATA_MAX_DEVICES; i++) {
33 struct ata_device *dev = &ap->device[i];
34 if (ata_dev_enabled(dev)) {
35 dev->pio_mode = XFER_PIO_0;
36 dev->xfer_mode = XFER_PIO_0;
37 dev->xfer_shift = ATA_SHIFT_PIO;
38 dev->flags |= ATA_DFLAG_PIO;
39 }
40 }
41}
42
43static void ixp4xx_phy_reset(struct ata_port *ap)
44{
45 ap->cbl = ATA_CBL_PATA40;
46 ata_port_probe(ap);
47 ata_bus_reset(ap);
48}
49
50static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
51 unsigned int buflen, int write_data)
52{
53 unsigned int i;
54 unsigned int words = buflen >> 1;
55 u16 *buf16 = (u16 *) buf;
56 struct ata_port *ap = adev->ap;
57 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
58 struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
59
60 /* set the expansion bus in 16bit mode and restore
61 * 8 bit mode after the transaction.
62 */
63 *data->cs0_cfg &= ~(0x01);
64 udelay(100);
65
66 /* Transfer multiple of 2 bytes */
67 if (write_data) {
68 for (i = 0; i < words; i++)
69 writew(buf16[i], mmio);
70 } else {
71 for (i = 0; i < words; i++)
72 buf16[i] = readw(mmio);
73 }
74
75 /* Transfer trailing 1 byte, if any. */
76 if (unlikely(buflen & 0x01)) {
77 u16 align_buf[1] = { 0 };
78 unsigned char *trailing_buf = buf + buflen - 1;
79
80 if (write_data) {
81 memcpy(align_buf, trailing_buf, 1);
82 writew(align_buf[0], mmio);
83 } else {
84 align_buf[0] = readw(mmio);
85 memcpy(trailing_buf, align_buf, 1);
86 }
87 }
88
89 udelay(100);
90 *data->cs0_cfg |= 0x01;
91}
92
93static void ixp4xx_irq_clear(struct ata_port *ap)
94{
95}
96
97static void ixp4xx_host_stop (struct ata_host *host)
98{
99 struct ixp4xx_pata_data *data = host->dev->platform_data;
100
101 iounmap(data->cs0);
102 iounmap(data->cs1);
103}
104
105static struct scsi_host_template ixp4xx_sht = {
106 .module = THIS_MODULE,
107 .name = DRV_NAME,
108 .ioctl = ata_scsi_ioctl,
109 .queuecommand = ata_scsi_queuecmd,
110 .can_queue = ATA_DEF_QUEUE,
111 .this_id = ATA_SHT_THIS_ID,
112 .sg_tablesize = LIBATA_MAX_PRD,
113 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
114 .emulated = ATA_SHT_EMULATED,
115 .use_clustering = ATA_SHT_USE_CLUSTERING,
116 .proc_name = DRV_NAME,
117 .dma_boundary = ATA_DMA_BOUNDARY,
118 .slave_configure = ata_scsi_slave_config,
119 .slave_destroy = ata_scsi_slave_destroy,
120 .bios_param = ata_std_bios_param,
121};
122
123static struct ata_port_operations ixp4xx_port_ops = {
124 .set_mode = ixp4xx_set_mode,
125 .mode_filter = ata_pci_default_filter,
126
127 .port_disable = ata_port_disable,
128 .tf_load = ata_tf_load,
129 .tf_read = ata_tf_read,
130 .check_status = ata_check_status,
131 .exec_command = ata_exec_command,
132 .dev_select = ata_std_dev_select,
133
134 .qc_prep = ata_qc_prep,
135 .qc_issue = ata_qc_issue_prot,
136 .eng_timeout = ata_eng_timeout,
137 .data_xfer = ixp4xx_mmio_data_xfer,
138
139 .irq_handler = ata_interrupt,
140 .irq_clear = ixp4xx_irq_clear,
141
142 .port_start = ata_port_start,
143 .port_stop = ata_port_stop,
144 .host_stop = ixp4xx_host_stop,
145
146 .phy_reset = ixp4xx_phy_reset,
147};
148
149static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
150 struct ixp4xx_pata_data *data)
151{
152 ioaddr->cmd_addr = (unsigned long) data->cs0;
153 ioaddr->altstatus_addr = (unsigned long) data->cs1 + 0x06;
154 ioaddr->ctl_addr = (unsigned long) data->cs1 + 0x06;
155
156 ata_std_ports(ioaddr);
157
158#ifndef __ARMEB__
159
160 /* adjust the addresses to handle the address swizzling of the
161 * ixp4xx in little endian mode.
162 */
163
164 ioaddr->data_addr ^= 0x02;
165 ioaddr->cmd_addr ^= 0x03;
166 ioaddr->altstatus_addr ^= 0x03;
167 ioaddr->ctl_addr ^= 0x03;
168 ioaddr->error_addr ^= 0x03;
169 ioaddr->feature_addr ^= 0x03;
170 ioaddr->nsect_addr ^= 0x03;
171 ioaddr->lbal_addr ^= 0x03;
172 ioaddr->lbam_addr ^= 0x03;
173 ioaddr->lbah_addr ^= 0x03;
174 ioaddr->device_addr ^= 0x03;
175 ioaddr->status_addr ^= 0x03;
176 ioaddr->command_addr ^= 0x03;
177#endif
178}
179
180static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
181{
182 int ret;
183 unsigned int irq;
184 struct resource *cs0, *cs1;
185 struct ata_probe_ent ae;
186
187 struct ixp4xx_pata_data *data = pdev->dev.platform_data;
188
189 cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
190 cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
191
192 if (!cs0 || !cs1)
193 return -EINVAL;
194
195 pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
196
197 data->cs0 = ioremap(cs0->start, 0x1000);
198 data->cs1 = ioremap(cs1->start, 0x1000);
199
200 irq = platform_get_irq(pdev, 0);
201 if (irq)
202 set_irq_type(irq, IRQT_HIGH);
203
204 /* Setup expansion bus chip selects */
205 *data->cs0_cfg = data->cs0_bits;
206 *data->cs1_cfg = data->cs1_bits;
207
208 memset(&ae, 0, sizeof(struct ata_probe_ent));
209 INIT_LIST_HEAD(&ae.node);
210
211 ae.dev = &pdev->dev;
212 ae.port_ops = &ixp4xx_port_ops;
213 ae.sht = &ixp4xx_sht;
214 ae.n_ports = 1;
215 ae.pio_mask = 0x1f; /* PIO4 */
216 ae.irq = irq;
217 ae.irq_flags = 0;
218 ae.port_flags = ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
219 | ATA_FLAG_NO_ATAPI | ATA_FLAG_SRST;
220
221 /* run in polling mode if no irq has been assigned */
222 if (!irq)
223 ae.port_flags |= ATA_FLAG_PIO_POLLING;
224
225 ixp4xx_setup_port(&ae.port[0], data);
226
227 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
228
229 ret = ata_device_add(&ae);
230 if (ret == 0)
231 return -ENODEV;
232
233 return 0;
234}
235
236static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
237{
238 struct ata_host *host = platform_get_drvdata(dev);
239
240 ata_host_remove(host);
241 platform_set_drvdata(dev, NULL);
242
243 return 0;
244}
245
246static struct platform_driver ixp4xx_pata_platform_driver = {
247 .driver = {
248 .name = DRV_NAME,
249 .owner = THIS_MODULE,
250 },
251 .probe = ixp4xx_pata_probe,
252 .remove = __devexit_p(ixp4xx_pata_remove),
253};
254
255static int __init ixp4xx_pata_init(void)
256{
257 return platform_driver_register(&ixp4xx_pata_platform_driver);
258}
259
260static void __exit ixp4xx_pata_exit(void)
261{
262 platform_driver_unregister(&ixp4xx_pata_platform_driver);
263}
264
265MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
266MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
267MODULE_LICENSE("GPL");
268MODULE_VERSION(DRV_VERSION);
269
270module_init(ixp4xx_pata_init);
271module_exit(ixp4xx_pata_exit);
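
For reference, here is a minimal standalone sketch of the word-at-a-time copy with odd trailing byte that ixp4xx_mmio_data_xfer() above performs. It writes into an ordinary memory buffer instead of the CF data register and omits the 16-bit/8-bit expansion-bus switching, so it only illustrates the buffer handling; the helper name copy_words_with_tail() is illustrative, not part of the driver.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Copy buflen bytes from src to dst in 16-bit words, then handle a
 * trailing odd byte the way the driver does: pad it into a zeroed
 * 16-bit word so the device only ever sees word-sized accesses. */
static void copy_words_with_tail(uint8_t *dst, const uint8_t *src,
                                 unsigned int buflen)
{
	unsigned int words = buflen >> 1;
	unsigned int i;

	for (i = 0; i < words; i++)
		memcpy(dst + 2 * i, src + 2 * i, 2);    /* stands in for writew() */

	if (buflen & 0x01) {                            /* trailing byte, if any */
		uint16_t align_buf = 0;

		memcpy(&align_buf, src + buflen - 1, 1);
		memcpy(dst + 2 * words, &align_buf, 2); /* one more word access */
	}
}

int main(void)
{
	uint8_t src[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t dst[6] = { 0 };

	copy_words_with_tail(dst, src, sizeof(src));
	for (unsigned int i = 0; i < sizeof(dst); i++)
		printf("%02x ", dst[i]);
	printf("\n");                                   /* 11 22 33 44 55 00 */
	return 0;
}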
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 0210b10d49c..2d661cb4df3 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -19,7 +19,7 @@
19#include <linux/ata.h> 19#include <linux/ata.h>
20 20
21#define DRV_NAME "pata_jmicron" 21#define DRV_NAME "pata_jmicron"
22#define DRV_VERSION "0.1.2" 22#define DRV_VERSION "0.1.4"
23 23
24typedef enum { 24typedef enum {
25 PORT_PATA0 = 0, 25 PORT_PATA0 = 0,
@@ -128,8 +128,6 @@ static struct scsi_host_template jmicron_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 /* Special handling needed if you have sector or LBA48 limits */
132 .max_sectors = ATA_MAX_SECTORS,
133 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
134 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
135 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -213,12 +211,11 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
213 211
214 /* FIXME: We may want a way to override this in future */ 212 /* FIXME: We may want a way to override this in future */
215 pci_write_config_byte(pdev, 0x41, 0xa1); 213 pci_write_config_byte(pdev, 0x41, 0xa1);
216 }
217
218 /* PATA controller is fn 1, AHCI is fn 0 */
219 if (PCI_FUNC(pdev->devfn) != 1)
220 return -ENODEV;
221 214
215 /* PATA controller is fn 1, AHCI is fn 0 */
216 if (PCI_FUNC(pdev->devfn) != 1)
217 return -ENODEV;
218 }
222 if ( id->driver_data == 365 || id->driver_data == 366) { 219 if ( id->driver_data == 365 || id->driver_data == 366) {
223 /* The 365/66 have two PATA channels, redirect the second */ 220 /* The 365/66 have two PATA channels, redirect the second */
224 pci_read_config_dword(pdev, 0x80, &reg); 221 pci_read_config_dword(pdev, 0x80, &reg);
@@ -229,6 +226,27 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
229 return ata_pci_init_one(pdev, port_info, 2); 226 return ata_pci_init_one(pdev, port_info, 2);
230} 227}
231 228
229static int jmicron_reinit_one(struct pci_dev *pdev)
230{
231 u32 reg;
232
233 switch(pdev->device) {
234 case PCI_DEVICE_ID_JMICRON_JMB368:
235 break;
236 case PCI_DEVICE_ID_JMICRON_JMB365:
237 case PCI_DEVICE_ID_JMICRON_JMB366:
238 /* Restore the mapping or the disks swap, and boy does it get ugly */
239 pci_read_config_dword(pdev, 0x80, &reg);
240 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
241 pci_write_config_dword(pdev, 0x80, reg);
242 /* Fall through */
243 default:
244 /* Make sure AHCI is turned back on */
245 pci_write_config_byte(pdev, 0x41, 0xa1);
246 }
247 return ata_pci_device_resume(pdev);
248}
249
232static const struct pci_device_id jmicron_pci_tbl[] = { 250static const struct pci_device_id jmicron_pci_tbl[] = {
233 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361}, 251 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
234 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363}, 252 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
@@ -244,6 +262,8 @@ static struct pci_driver jmicron_pci_driver = {
244 .id_table = jmicron_pci_tbl, 262 .id_table = jmicron_pci_tbl,
245 .probe = jmicron_init_one, 263 .probe = jmicron_init_one,
246 .remove = ata_pci_remove_one, 264 .remove = ata_pci_remove_one,
265 .suspend = ata_pci_device_suspend,
266 .resume = jmicron_reinit_one,
247}; 267};
248 268
249static int __init jmicron_init(void) 269static int __init jmicron_init(void)
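
The jmicron_reinit_one() hook added above restores two pieces of chipset state after resume: bit 24 of PCI config dword 0x80 (redirecting IDE1 to the PATA secondary channel on the JMB365/366) and the AHCI/IDE enable byte 0xa1 at config offset 0x41. A minimal sketch of just the register arithmetic, done on plain values rather than through the PCI config accessors:

#include <stdio.h>
#include <stdint.h>

#define JMB_IDE1_TO_SECONDARY (1u << 24)   /* bit restored on JMB365/366 resume */

/* Return the config-0x80 value with IDE1 redirected to the PATA
 * secondary channel, as the resume hook does before writing it back. */
static uint32_t jmb36x_restore_mapping(uint32_t reg)
{
	return reg | JMB_IDE1_TO_SECONDARY;
}

int main(void)
{
	uint32_t before = 0x00000000;           /* illustrative power-on value */
	uint32_t after  = jmb36x_restore_mapping(before);

	printf("config 0x80: %08x -> %08x\n", before, after);
	printf("config 0x41 is rewritten to 0xa1 to turn AHCI back on\n");
	return 0;
}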
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index b39078b2a47..c7d1738e4e6 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -128,7 +128,6 @@ static struct scsi_host_template legacy_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
new file mode 100644
index 00000000000..1c810ea0025
--- /dev/null
+++ b/drivers/ata/pata_marvell.c
@@ -0,0 +1,224 @@
1/*
2 * Marvell PATA driver.
3 *
4 * For the moment we drive the PATA port in legacy mode. That
5 * isn't making full use of the device functionality but it is
6 * easy to get working.
7 *
8 * (c) 2006 Red Hat <alan@redhat.com>
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/blkdev.h>
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <scsi/scsi_host.h>
19#include <linux/libata.h>
20#include <linux/ata.h>
21
22#define DRV_NAME "pata_marvell"
23#define DRV_VERSION "0.1.1"
24
25/**
26 * marvell_pre_reset - check for 40/80 pin
27 * @ap: Port
28 *
29 * Perform the PATA port setup we need.
30 */
31
32static int marvell_pre_reset(struct ata_port *ap)
33{
34 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
35 u32 devices;
36 void __iomem *barp;
37 int i;
38
39 /* Check if our port is enabled */
40
41 barp = pci_iomap(pdev, 5, 0x10);
42 if (barp == NULL)
43 return -ENOMEM;
44 printk("BAR5:");
45 for(i = 0; i <= 0x0F; i++)
46 printk("%02X:%02X ", i, readb(barp + i));
47 printk("\n");
48
49 devices = readl(barp + 0x0C);
50 pci_iounmap(pdev, barp);
51
52 if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
53 (!(devices & 0x10))) /* PATA enable ? */
54 return -ENOENT;
55
56 /* Cable type */
57 switch(ap->port_no)
58 {
59 case 0:
60 if (inb(ap->ioaddr.bmdma_addr + 1) & 1)
61 ap->cbl = ATA_CBL_PATA40;
62 else
63 ap->cbl = ATA_CBL_PATA80;
64 break;
65
66 case 1: /* Legacy SATA port */
67 ap->cbl = ATA_CBL_SATA;
68 break;
69 }
70 return ata_std_prereset(ap);
71}
72
73/**
74 * marvell_error_handler - Setup an error handler
75 * @ap: Port to handle
76 *
77 * LOCKING:
78 * None (inherited from caller).
79 */
80
81static void marvell_error_handler(struct ata_port *ap)
82{
83 return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset,
84 NULL, ata_std_postreset);
85}
86
87/* No PIO or DMA methods needed for this device */
88
89static struct scsi_host_template marvell_sht = {
90 .module = THIS_MODULE,
91 .name = DRV_NAME,
92 .ioctl = ata_scsi_ioctl,
93 .queuecommand = ata_scsi_queuecmd,
94 .can_queue = ATA_DEF_QUEUE,
95 .this_id = ATA_SHT_THIS_ID,
96 .sg_tablesize = LIBATA_MAX_PRD,
97 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
98 .emulated = ATA_SHT_EMULATED,
99 .use_clustering = ATA_SHT_USE_CLUSTERING,
100 .proc_name = DRV_NAME,
101 .dma_boundary = ATA_DMA_BOUNDARY,
102 .slave_configure = ata_scsi_slave_config,
103 .slave_destroy = ata_scsi_slave_destroy,
104 /* Use standard CHS mapping rules */
105 .bios_param = ata_std_bios_param,
106 .resume = ata_scsi_device_resume,
107 .suspend = ata_scsi_device_suspend,
108};
109
110static const struct ata_port_operations marvell_ops = {
111 .port_disable = ata_port_disable,
112
113 /* Task file is PCI ATA format, use helpers */
114 .tf_load = ata_tf_load,
115 .tf_read = ata_tf_read,
116 .check_status = ata_check_status,
117 .exec_command = ata_exec_command,
118 .dev_select = ata_std_dev_select,
119
120 .freeze = ata_bmdma_freeze,
121 .thaw = ata_bmdma_thaw,
122 .error_handler = marvell_error_handler,
123 .post_internal_cmd = ata_bmdma_post_internal_cmd,
124
125 /* BMDMA handling is PCI ATA format, use helpers */
126 .bmdma_setup = ata_bmdma_setup,
127 .bmdma_start = ata_bmdma_start,
128 .bmdma_stop = ata_bmdma_stop,
129 .bmdma_status = ata_bmdma_status,
130 .qc_prep = ata_qc_prep,
131 .qc_issue = ata_qc_issue_prot,
132 .data_xfer = ata_pio_data_xfer,
133
134 /* Timeout handling */
135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear,
137
138 /* Generic PATA PCI ATA helpers */
139 .port_start = ata_port_start,
140 .port_stop = ata_port_stop,
141 .host_stop = ata_host_stop,
142};
143
144
145/**
146 * marvell_init_one - Register Marvell ATA PCI device with kernel services
147 * @pdev: PCI device to register
148 * @ent: Entry in marvell_pci_tbl matching with @pdev
149 *
150 * Called from kernel PCI layer.
151 *
152 * LOCKING:
153 * Inherited from PCI layer (may sleep).
154 *
155 * RETURNS:
156 * Zero on success, or -ERRNO value.
157 */
158
159static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
160{
161 static struct ata_port_info info = {
162 .sht = &marvell_sht,
163 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
164
165 .pio_mask = 0x1f,
166 .mwdma_mask = 0x07,
167 .udma_mask = 0x3f,
168
169 .port_ops = &marvell_ops,
170 };
171 static struct ata_port_info info_sata = {
172 .sht = &marvell_sht,
173 /* Slave possible as it's magically mapped, not real */
174 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
175
176 .pio_mask = 0x1f,
177 .mwdma_mask = 0x07,
178 .udma_mask = 0x7f,
179
180 .port_ops = &marvell_ops,
181 };
182 struct ata_port_info *port_info[2] = { &info, &info_sata };
183 int n_port = 2;
184
185 if (pdev->device == 0x6101)
186 n_port = 1;
187
188 return ata_pci_init_one(pdev, port_info, n_port);
189}
190
191static const struct pci_device_id marvell_pci_tbl[] = {
192 { PCI_DEVICE(0x11AB, 0x6101), },
193 { PCI_DEVICE(0x11AB, 0x6145), },
194 { } /* terminate list */
195};
196
197static struct pci_driver marvell_pci_driver = {
198 .name = DRV_NAME,
199 .id_table = marvell_pci_tbl,
200 .probe = marvell_init_one,
201 .remove = ata_pci_remove_one,
202 .suspend = ata_pci_device_suspend,
203 .resume = ata_pci_device_resume,
204};
205
206static int __init marvell_init(void)
207{
208 return pci_register_driver(&marvell_pci_driver);
209}
210
211static void __exit marvell_exit(void)
212{
213 pci_unregister_driver(&marvell_pci_driver);
214}
215
216module_init(marvell_init);
217module_exit(marvell_exit);
218
219MODULE_AUTHOR("Alan Cox");
220MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
221MODULE_LICENSE("GPL");
222MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
223MODULE_VERSION(DRV_VERSION);
224
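
marvell_pre_reset() above decides the cable type from bit 0 of a byte in the controller's BMDMA register block for port 0 (bit set means a 40-wire cable, clear means 80-wire) and hard-wires port 1 to SATA. A standalone sketch of that decision table; the enum values and helper name are illustrative, whereas the driver uses the libata ATA_CBL_* constants and reads the real register with inb():

#include <stdio.h>
#include <stdint.h>

enum cable { CBL_PATA40, CBL_PATA80, CBL_SATA };

/* Port 0: bit 0 of the byte read from the BMDMA block set means a
 * 40-wire cable, clear means 80-wire.  Port 1 is the legacy-mapped
 * SATA port. */
static enum cable marvell_cable(int port_no, uint8_t bmdma_byte)
{
	if (port_no == 1)
		return CBL_SATA;
	return (bmdma_byte & 1) ? CBL_PATA40 : CBL_PATA80;
}

int main(void)
{
	static const char *name[] = { "PATA40", "PATA80", "SATA" };

	printf("port 0, byte 0x01 -> %s\n", name[marvell_cable(0, 0x01)]);
	printf("port 0, byte 0x00 -> %s\n", name[marvell_cable(0, 0x00)]);
	printf("port 1, any byte  -> %s\n", name[marvell_cable(1, 0x00)]);
	return 0;
}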
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index e00d406bfdf..4ccca938675 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "pata_mpiix" 37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.2" 38#define DRV_VERSION "0.7.3"
39 39
40enum { 40enum {
41 IDETIM = 0x6C, /* IDE control register */ 41 IDETIM = 0x6C, /* IDE control register */
@@ -159,7 +159,6 @@ static struct scsi_host_template mpiix_sht = {
159 .can_queue = ATA_DEF_QUEUE, 159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID, 160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD, 161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 162 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED, 163 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING, 164 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -168,6 +167,8 @@ static struct scsi_host_template mpiix_sht = {
168 .slave_configure = ata_scsi_slave_config, 167 .slave_configure = ata_scsi_slave_config,
169 .slave_destroy = ata_scsi_slave_destroy, 168 .slave_destroy = ata_scsi_slave_destroy,
170 .bios_param = ata_std_bios_param, 169 .bios_param = ata_std_bios_param,
170 .resume = ata_scsi_device_resume,
171 .suspend = ata_scsi_device_suspend,
171}; 172};
172 173
173static struct ata_port_operations mpiix_port_ops = { 174static struct ata_port_operations mpiix_port_ops = {
@@ -285,7 +286,9 @@ static struct pci_driver mpiix_pci_driver = {
285 .name = DRV_NAME, 286 .name = DRV_NAME,
286 .id_table = mpiix, 287 .id_table = mpiix,
287 .probe = mpiix_init_one, 288 .probe = mpiix_init_one,
288 .remove = mpiix_remove_one 289 .remove = mpiix_remove_one,
290 .suspend = ata_pci_device_suspend,
291 .resume = ata_pci_device_resume,
289}; 292};
290 293
291static int __init mpiix_init(void) 294static int __init mpiix_init(void)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 1963a4d3587..cf7fe037471 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -16,7 +16,7 @@
16#include <linux/ata.h> 16#include <linux/ata.h>
17 17
18#define DRV_NAME "pata_netcell" 18#define DRV_NAME "pata_netcell"
19#define DRV_VERSION "0.1.5" 19#define DRV_VERSION "0.1.6"
20 20
21/** 21/**
22 * netcell_probe_init - check for 40/80 pin 22 * netcell_probe_init - check for 40/80 pin
@@ -54,8 +54,6 @@ static struct scsi_host_template netcell_sht = {
54 .can_queue = ATA_DEF_QUEUE, 54 .can_queue = ATA_DEF_QUEUE,
55 .this_id = ATA_SHT_THIS_ID, 55 .this_id = ATA_SHT_THIS_ID,
56 .sg_tablesize = LIBATA_MAX_PRD, 56 .sg_tablesize = LIBATA_MAX_PRD,
57 /* Special handling needed if you have sector or LBA48 limits */
58 .max_sectors = ATA_MAX_SECTORS,
59 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 57 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
60 .emulated = ATA_SHT_EMULATED, 58 .emulated = ATA_SHT_EMULATED,
61 .use_clustering = ATA_SHT_USE_CLUSTERING, 59 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -65,6 +63,8 @@ static struct scsi_host_template netcell_sht = {
65 .slave_destroy = ata_scsi_slave_destroy, 63 .slave_destroy = ata_scsi_slave_destroy,
66 /* Use standard CHS mapping rules */ 64 /* Use standard CHS mapping rules */
67 .bios_param = ata_std_bios_param, 65 .bios_param = ata_std_bios_param,
66 .resume = ata_scsi_device_resume,
67 .suspend = ata_scsi_device_suspend,
68}; 68};
69 69
70static const struct ata_port_operations netcell_ops = { 70static const struct ata_port_operations netcell_ops = {
@@ -153,6 +153,8 @@ static struct pci_driver netcell_pci_driver = {
153 .id_table = netcell_pci_tbl, 153 .id_table = netcell_pci_tbl,
154 .probe = netcell_init_one, 154 .probe = netcell_init_one,
155 .remove = ata_pci_remove_one, 155 .remove = ata_pci_remove_one,
156 .suspend = ata_pci_device_suspend,
157 .resume = ata_pci_device_resume,
156}; 158};
157 159
158static int __init netcell_init(void) 160static int __init netcell_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 7ec800f00ec..c3032eb9010 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -28,7 +28,7 @@
28#include <linux/libata.h> 28#include <linux/libata.h>
29 29
30#define DRV_NAME "pata_ns87410" 30#define DRV_NAME "pata_ns87410"
31#define DRV_VERSION "0.4.2" 31#define DRV_VERSION "0.4.3"
32 32
33/** 33/**
34 * ns87410_pre_reset - probe begin 34 * ns87410_pre_reset - probe begin
@@ -149,7 +149,6 @@ static struct scsi_host_template ns87410_sht = {
149 .can_queue = ATA_DEF_QUEUE, 149 .can_queue = ATA_DEF_QUEUE,
150 .this_id = ATA_SHT_THIS_ID, 150 .this_id = ATA_SHT_THIS_ID,
151 .sg_tablesize = LIBATA_MAX_PRD, 151 .sg_tablesize = LIBATA_MAX_PRD,
152 .max_sectors = ATA_MAX_SECTORS,
153 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 152 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
154 .emulated = ATA_SHT_EMULATED, 153 .emulated = ATA_SHT_EMULATED,
155 .use_clustering = ATA_SHT_USE_CLUSTERING, 154 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -158,6 +157,8 @@ static struct scsi_host_template ns87410_sht = {
158 .slave_configure = ata_scsi_slave_config, 157 .slave_configure = ata_scsi_slave_config,
159 .slave_destroy = ata_scsi_slave_destroy, 158 .slave_destroy = ata_scsi_slave_destroy,
160 .bios_param = ata_std_bios_param, 159 .bios_param = ata_std_bios_param,
160 .resume = ata_scsi_device_resume,
161 .suspend = ata_scsi_device_suspend,
161}; 162};
162 163
163static struct ata_port_operations ns87410_port_ops = { 164static struct ata_port_operations ns87410_port_ops = {
@@ -210,7 +211,9 @@ static struct pci_driver ns87410_pci_driver = {
210 .name = DRV_NAME, 211 .name = DRV_NAME,
211 .id_table = ns87410, 212 .id_table = ns87410,
212 .probe = ns87410_init_one, 213 .probe = ns87410_init_one,
213 .remove = ata_pci_remove_one 214 .remove = ata_pci_remove_one,
215 .suspend = ata_pci_device_suspend,
216 .resume = ata_pci_device_resume,
214}; 217};
215 218
216static int __init ns87410_init(void) 219static int __init ns87410_init(void)
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 8837256632e..10ac3cc1018 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -224,7 +224,6 @@ static struct scsi_host_template oldpiix_sht = {
224 .can_queue = ATA_DEF_QUEUE, 224 .can_queue = ATA_DEF_QUEUE,
225 .this_id = ATA_SHT_THIS_ID, 225 .this_id = ATA_SHT_THIS_ID,
226 .sg_tablesize = LIBATA_MAX_PRD, 226 .sg_tablesize = LIBATA_MAX_PRD,
227 .max_sectors = ATA_MAX_SECTORS,
228 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 227 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
229 .emulated = ATA_SHT_EMULATED, 228 .emulated = ATA_SHT_EMULATED,
230 .use_clustering = ATA_SHT_USE_CLUSTERING, 229 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -233,6 +232,8 @@ static struct scsi_host_template oldpiix_sht = {
233 .slave_configure = ata_scsi_slave_config, 232 .slave_configure = ata_scsi_slave_config,
234 .slave_destroy = ata_scsi_slave_destroy, 233 .slave_destroy = ata_scsi_slave_destroy,
235 .bios_param = ata_std_bios_param, 234 .bios_param = ata_std_bios_param,
235 .resume = ata_scsi_device_resume,
236 .suspend = ata_scsi_device_suspend,
236}; 237};
237 238
238static const struct ata_port_operations oldpiix_pata_ops = { 239static const struct ata_port_operations oldpiix_pata_ops = {
@@ -314,6 +315,8 @@ static struct pci_driver oldpiix_pci_driver = {
314 .id_table = oldpiix_pci_tbl, 315 .id_table = oldpiix_pci_tbl,
315 .probe = oldpiix_init_one, 316 .probe = oldpiix_init_one,
316 .remove = ata_pci_remove_one, 317 .remove = ata_pci_remove_one,
318 .suspend = ata_pci_device_suspend,
319 .resume = ata_pci_device_resume,
317}; 320};
318 321
319static int __init oldpiix_init(void) 322static int __init oldpiix_init(void)
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index c6319cf50de..c2988b0aa8e 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -34,7 +34,7 @@
34#include <linux/libata.h> 34#include <linux/libata.h>
35 35
36#define DRV_NAME "pata_opti" 36#define DRV_NAME "pata_opti"
37#define DRV_VERSION "0.2.5" 37#define DRV_VERSION "0.2.7"
38 38
39enum { 39enum {
40 READ_REG = 0, /* index of Read cycle timing register */ 40 READ_REG = 0, /* index of Read cycle timing register */
@@ -109,30 +109,6 @@ static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
109 outb(0x83, regio + 2); 109 outb(0x83, regio + 2);
110} 110}
111 111
112#if 0
113/**
114 * opti_read_reg - control register read
115 * @ap: ATA port
116 * @reg: control register number
117 *
118 * The Opti uses magic 'trapdoor' register accesses to do configuration
119 * rather than using PCI space as other controllers do. The double inw
120 * on the error register activates configuration mode. We can then read
121 * the control register
122 */
123
124static u8 opti_read_reg(struct ata_port *ap, int reg)
125{
126 unsigned long regio = ap->ioaddr.cmd_addr;
127 u8 ret;
128 inw(regio + 1);
129 inw(regio + 1);
130 outb(3, regio + 2);
131 ret = inb(regio + reg);
132 outb(0x83, regio + 2);
133}
134#endif
135
136/** 112/**
137 * opti_set_piomode - set initial PIO mode data 113 * opti_set_piomode - set initial PIO mode data
138 * @ap: ATA interface 114 * @ap: ATA interface
@@ -195,7 +171,6 @@ static struct scsi_host_template opti_sht = {
195 .can_queue = ATA_DEF_QUEUE, 171 .can_queue = ATA_DEF_QUEUE,
196 .this_id = ATA_SHT_THIS_ID, 172 .this_id = ATA_SHT_THIS_ID,
197 .sg_tablesize = LIBATA_MAX_PRD, 173 .sg_tablesize = LIBATA_MAX_PRD,
198 .max_sectors = ATA_MAX_SECTORS,
199 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 174 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
200 .emulated = ATA_SHT_EMULATED, 175 .emulated = ATA_SHT_EMULATED,
201 .use_clustering = ATA_SHT_USE_CLUSTERING, 176 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -204,12 +179,13 @@ static struct scsi_host_template opti_sht = {
204 .slave_configure = ata_scsi_slave_config, 179 .slave_configure = ata_scsi_slave_config,
205 .slave_destroy = ata_scsi_slave_destroy, 180 .slave_destroy = ata_scsi_slave_destroy,
206 .bios_param = ata_std_bios_param, 181 .bios_param = ata_std_bios_param,
182 .resume = ata_scsi_device_resume,
183 .suspend = ata_scsi_device_suspend,
207}; 184};
208 185
209static struct ata_port_operations opti_port_ops = { 186static struct ata_port_operations opti_port_ops = {
210 .port_disable = ata_port_disable, 187 .port_disable = ata_port_disable,
211 .set_piomode = opti_set_piomode, 188 .set_piomode = opti_set_piomode,
212/* .set_dmamode = opti_set_dmamode, */
213 .tf_load = ata_tf_load, 189 .tf_load = ata_tf_load,
214 .tf_read = ata_tf_read, 190 .tf_read = ata_tf_read,
215 .check_status = ata_check_status, 191 .check_status = ata_check_status,
@@ -267,7 +243,9 @@ static struct pci_driver opti_pci_driver = {
267 .name = DRV_NAME, 243 .name = DRV_NAME,
268 .id_table = opti, 244 .id_table = opti,
269 .probe = opti_init_one, 245 .probe = opti_init_one,
270 .remove = ata_pci_remove_one 246 .remove = ata_pci_remove_one,
247 .suspend = ata_pci_device_suspend,
248 .resume = ata_pci_device_resume,
271}; 249};
272 250
273static int __init opti_init(void) 251static int __init opti_init(void)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 2f4770cce04..80d111c569d 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_optidma" 35#define DRV_NAME "pata_optidma"
36#define DRV_VERSION "0.2.2" 36#define DRV_VERSION "0.2.3"
37 37
38enum { 38enum {
39 READ_REG = 0, /* index of Read cycle timing register */ 39 READ_REG = 0, /* index of Read cycle timing register */
@@ -352,7 +352,6 @@ static struct scsi_host_template optidma_sht = {
352 .can_queue = ATA_DEF_QUEUE, 352 .can_queue = ATA_DEF_QUEUE,
353 .this_id = ATA_SHT_THIS_ID, 353 .this_id = ATA_SHT_THIS_ID,
354 .sg_tablesize = LIBATA_MAX_PRD, 354 .sg_tablesize = LIBATA_MAX_PRD,
355 .max_sectors = ATA_MAX_SECTORS,
356 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 355 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
357 .emulated = ATA_SHT_EMULATED, 356 .emulated = ATA_SHT_EMULATED,
358 .use_clustering = ATA_SHT_USE_CLUSTERING, 357 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -361,6 +360,8 @@ static struct scsi_host_template optidma_sht = {
361 .slave_configure = ata_scsi_slave_config, 360 .slave_configure = ata_scsi_slave_config,
362 .slave_destroy = ata_scsi_slave_destroy, 361 .slave_destroy = ata_scsi_slave_destroy,
363 .bios_param = ata_std_bios_param, 362 .bios_param = ata_std_bios_param,
363 .resume = ata_scsi_device_resume,
364 .suspend = ata_scsi_device_suspend,
364}; 365};
365 366
366static struct ata_port_operations optidma_port_ops = { 367static struct ata_port_operations optidma_port_ops = {
@@ -522,7 +523,9 @@ static struct pci_driver optidma_pci_driver = {
522 .name = DRV_NAME, 523 .name = DRV_NAME,
523 .id_table = optidma, 524 .id_table = optidma,
524 .probe = optidma_init_one, 525 .probe = optidma_init_one,
525 .remove = ata_pci_remove_one 526 .remove = ata_pci_remove_one,
527 .suspend = ata_pci_device_suspend,
528 .resume = ata_pci_device_resume,
526}; 529};
527 530
528static int __init optidma_init(void) 531static int __init optidma_init(void)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 999922de476..4ca6fa5dcb4 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -62,7 +62,6 @@ static struct scsi_host_template pcmcia_sht = {
62 .can_queue = ATA_DEF_QUEUE, 62 .can_queue = ATA_DEF_QUEUE,
63 .this_id = ATA_SHT_THIS_ID, 63 .this_id = ATA_SHT_THIS_ID,
64 .sg_tablesize = LIBATA_MAX_PRD, 64 .sg_tablesize = LIBATA_MAX_PRD,
65 .max_sectors = ATA_MAX_SECTORS,
66 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 65 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
67 .emulated = ATA_SHT_EMULATED, 66 .emulated = ATA_SHT_EMULATED,
68 .use_clustering = ATA_SHT_USE_CLUSTERING, 67 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index beb6d10a234..76dd1c935db 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -134,7 +134,6 @@ static struct scsi_host_template pdc2027x_sht = {
134 .can_queue = ATA_DEF_QUEUE, 134 .can_queue = ATA_DEF_QUEUE,
135 .this_id = ATA_SHT_THIS_ID, 135 .this_id = ATA_SHT_THIS_ID,
136 .sg_tablesize = LIBATA_MAX_PRD, 136 .sg_tablesize = LIBATA_MAX_PRD,
137 .max_sectors = ATA_MAX_SECTORS,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 137 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED, 138 .emulated = ATA_SHT_EMULATED,
140 .use_clustering = ATA_SHT_USE_CLUSTERING, 139 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -854,7 +853,7 @@ static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
854 */ 853 */
855static int __init pdc2027x_init(void) 854static int __init pdc2027x_init(void)
856{ 855{
857 return pci_module_init(&pdc2027x_pci_driver); 856 return pci_register_driver(&pdc2027x_pci_driver);
858} 857}
859 858
860/** 859/**
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 6baf51b2fda..ad691b9e774 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -21,7 +21,7 @@
21#include <linux/libata.h> 21#include <linux/libata.h>
22 22
23#define DRV_NAME "pata_pdc202xx_old" 23#define DRV_NAME "pata_pdc202xx_old"
24#define DRV_VERSION "0.2.1" 24#define DRV_VERSION "0.2.3"
25 25
26/** 26/**
27 * pdc2024x_pre_reset - probe begin 27 * pdc2024x_pre_reset - probe begin
@@ -63,7 +63,7 @@ static void pdc2026x_error_handler(struct ata_port *ap)
63} 63}
64 64
65/** 65/**
66 * pdc_configure_piomode - set chip PIO timing 66 * pdc202xx_configure_piomode - set chip PIO timing
67 * @ap: ATA interface 67 * @ap: ATA interface
68 * @adev: ATA device 68 * @adev: ATA device
69 * @pio: PIO mode 69 * @pio: PIO mode
@@ -73,7 +73,7 @@ static void pdc2026x_error_handler(struct ata_port *ap)
73 * versa 73 * versa
74 */ 74 */
75 75
76static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) 76static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
77{ 77{
78 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 78 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -98,7 +98,7 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev,
98} 98}
99 99
100/** 100/**
101 * pdc_set_piomode - set initial PIO mode data 101 * pdc202xx_set_piomode - set initial PIO mode data
102 * @ap: ATA interface 102 * @ap: ATA interface
103 * @adev: ATA device 103 * @adev: ATA device
104 * 104 *
@@ -106,13 +106,13 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev,
106 * but we want to set the PIO timing by default. 106 * but we want to set the PIO timing by default.
107 */ 107 */
108 108
109static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev) 109static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
110{ 110{
111 pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); 111 pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
112} 112}
113 113
114/** 114/**
115 * pdc_configure_dmamode - set DMA mode in chip 115 * pdc202xx_configure_dmamode - set DMA mode in chip
116 * @ap: ATA interface 116 * @ap: ATA interface
117 * @adev: ATA device 117 * @adev: ATA device
118 * 118 *
@@ -120,7 +120,7 @@ static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
120 * to occur. 120 * to occur.
121 */ 121 */
122 122
123static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) 123static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
124{ 124{
125 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 125 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -184,7 +184,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
184 184
185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional 185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
186 and move to qc_issue ? */ 186 and move to qc_issue ? */
187 pdc_set_dmamode(ap, qc->dev); 187 pdc202xx_set_dmamode(ap, qc->dev);
188 188
189 /* Cases the state machine will not complete correctly without help */ 189 /* Cases the state machine will not complete correctly without help */
190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) 190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
@@ -254,7 +254,7 @@ static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
254 adev->max_sectors = 256; 254 adev->max_sectors = 256;
255} 255}
256 256
257static struct scsi_host_template pdc_sht = { 257static struct scsi_host_template pdc202xx_sht = {
258 .module = THIS_MODULE, 258 .module = THIS_MODULE,
259 .name = DRV_NAME, 259 .name = DRV_NAME,
260 .ioctl = ata_scsi_ioctl, 260 .ioctl = ata_scsi_ioctl,
@@ -262,7 +262,6 @@ static struct scsi_host_template pdc_sht = {
262 .can_queue = ATA_DEF_QUEUE, 262 .can_queue = ATA_DEF_QUEUE,
263 .this_id = ATA_SHT_THIS_ID, 263 .this_id = ATA_SHT_THIS_ID,
264 .sg_tablesize = LIBATA_MAX_PRD, 264 .sg_tablesize = LIBATA_MAX_PRD,
265 .max_sectors = ATA_MAX_SECTORS,
266 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 265 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
267 .emulated = ATA_SHT_EMULATED, 266 .emulated = ATA_SHT_EMULATED,
268 .use_clustering = ATA_SHT_USE_CLUSTERING, 267 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -271,12 +270,14 @@ static struct scsi_host_template pdc_sht = {
271 .slave_configure = ata_scsi_slave_config, 270 .slave_configure = ata_scsi_slave_config,
272 .slave_destroy = ata_scsi_slave_destroy, 271 .slave_destroy = ata_scsi_slave_destroy,
273 .bios_param = ata_std_bios_param, 272 .bios_param = ata_std_bios_param,
273 .resume = ata_scsi_device_resume,
274 .suspend = ata_scsi_device_suspend,
274}; 275};
275 276
276static struct ata_port_operations pdc2024x_port_ops = { 277static struct ata_port_operations pdc2024x_port_ops = {
277 .port_disable = ata_port_disable, 278 .port_disable = ata_port_disable,
278 .set_piomode = pdc_set_piomode, 279 .set_piomode = pdc202xx_set_piomode,
279 .set_dmamode = pdc_set_dmamode, 280 .set_dmamode = pdc202xx_set_dmamode,
280 .mode_filter = ata_pci_default_filter, 281 .mode_filter = ata_pci_default_filter,
281 .tf_load = ata_tf_load, 282 .tf_load = ata_tf_load,
282 .tf_read = ata_tf_read, 283 .tf_read = ata_tf_read,
@@ -308,8 +309,8 @@ static struct ata_port_operations pdc2024x_port_ops = {
308 309
309static struct ata_port_operations pdc2026x_port_ops = { 310static struct ata_port_operations pdc2026x_port_ops = {
310 .port_disable = ata_port_disable, 311 .port_disable = ata_port_disable,
311 .set_piomode = pdc_set_piomode, 312 .set_piomode = pdc202xx_set_piomode,
312 .set_dmamode = pdc_set_dmamode, 313 .set_dmamode = pdc202xx_set_dmamode,
313 .mode_filter = ata_pci_default_filter, 314 .mode_filter = ata_pci_default_filter,
314 .tf_load = ata_tf_load, 315 .tf_load = ata_tf_load,
315 .tf_read = ata_tf_read, 316 .tf_read = ata_tf_read,
@@ -340,11 +341,11 @@ static struct ata_port_operations pdc2026x_port_ops = {
340 .host_stop = ata_host_stop 341 .host_stop = ata_host_stop
341}; 342};
342 343
343static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id) 344static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
344{ 345{
345 static struct ata_port_info info[3] = { 346 static struct ata_port_info info[3] = {
346 { 347 {
347 .sht = &pdc_sht, 348 .sht = &pdc202xx_sht,
348 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 349 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
349 .pio_mask = 0x1f, 350 .pio_mask = 0x1f,
350 .mwdma_mask = 0x07, 351 .mwdma_mask = 0x07,
@@ -352,7 +353,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
352 .port_ops = &pdc2024x_port_ops 353 .port_ops = &pdc2024x_port_ops
353 }, 354 },
354 { 355 {
355 .sht = &pdc_sht, 356 .sht = &pdc202xx_sht,
356 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 357 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
357 .pio_mask = 0x1f, 358 .pio_mask = 0x1f,
358 .mwdma_mask = 0x07, 359 .mwdma_mask = 0x07,
@@ -360,7 +361,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
360 .port_ops = &pdc2026x_port_ops 361 .port_ops = &pdc2026x_port_ops
361 }, 362 },
362 { 363 {
363 .sht = &pdc_sht, 364 .sht = &pdc202xx_sht,
364 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 365 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
365 .pio_mask = 0x1f, 366 .pio_mask = 0x1f,
366 .mwdma_mask = 0x07, 367 .mwdma_mask = 0x07,
@@ -386,7 +387,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
386 return ata_pci_init_one(dev, port_info, 2); 387 return ata_pci_init_one(dev, port_info, 2);
387} 388}
388 389
389static const struct pci_device_id pdc[] = { 390static const struct pci_device_id pdc202xx[] = {
390 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 }, 391 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
391 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 }, 392 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
392 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 }, 393 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
@@ -396,28 +397,30 @@ static const struct pci_device_id pdc[] = {
396 { }, 397 { },
397}; 398};
398 399
399static struct pci_driver pdc_pci_driver = { 400static struct pci_driver pdc202xx_pci_driver = {
400 .name = DRV_NAME, 401 .name = DRV_NAME,
401 .id_table = pdc, 402 .id_table = pdc202xx,
402 .probe = pdc_init_one, 403 .probe = pdc202xx_init_one,
403 .remove = ata_pci_remove_one 404 .remove = ata_pci_remove_one,
405 .suspend = ata_pci_device_suspend,
406 .resume = ata_pci_device_resume,
404}; 407};
405 408
406static int __init pdc_init(void) 409static int __init pdc202xx_init(void)
407{ 410{
408 return pci_register_driver(&pdc_pci_driver); 411 return pci_register_driver(&pdc202xx_pci_driver);
409} 412}
410 413
411static void __exit pdc_exit(void) 414static void __exit pdc202xx_exit(void)
412{ 415{
413 pci_unregister_driver(&pdc_pci_driver); 416 pci_unregister_driver(&pdc202xx_pci_driver);
414} 417}
415 418
416MODULE_AUTHOR("Alan Cox"); 419MODULE_AUTHOR("Alan Cox");
417MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267"); 420MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
418MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
419MODULE_DEVICE_TABLE(pci, pdc); 422MODULE_DEVICE_TABLE(pci, pdc202xx);
420MODULE_VERSION(DRV_VERSION); 423MODULE_VERSION(DRV_VERSION);
421 424
422module_init(pdc_init); 425module_init(pdc202xx_init);
423module_exit(pdc_exit); 426module_exit(pdc202xx_exit);
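
The renamed pdc202xx_configure_piomode() and pdc202xx_set_dmamode() shown in context above both address the timing registers at PCI config offset 0x60 + 4 * port + 2 * device. A small worked example of that offset calculation, for illustration only; the real functions then write mode-specific timing bytes at the computed offset:

#include <stdio.h>

/* Each channel has 4 bytes of timing config, 2 bytes per device,
 * starting at PCI config offset 0x60. */
static int pdc202xx_timing_offset(int port_no, int devno)
{
	return 0x60 + 4 * port_no + 2 * devno;
}

int main(void)
{
	int port, dev;

	for (port = 0; port < 2; port++)
		for (dev = 0; dev < 2; dev++)
			printf("port %d dev %d -> config offset 0x%02x\n",
			       port, dev, pdc202xx_timing_offset(port, dev));
	return 0;
}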
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
new file mode 100644
index 00000000000..443b1d85c6c
--- /dev/null
+++ b/drivers/ata/pata_platform.c
@@ -0,0 +1,295 @@
1/*
2 * Generic platform device PATA driver
3 *
4 * Copyright (C) 2006 Paul Mundt
5 *
6 * Based on pata_pcmcia:
7 *
8 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <scsi/scsi_host.h>
19#include <linux/ata.h>
20#include <linux/libata.h>
21#include <linux/platform_device.h>
22#include <linux/pata_platform.h>
23
24#define DRV_NAME "pata_platform"
25#define DRV_VERSION "0.1.2"
26
27static int pio_mask = 1;
28
29/*
30 * Provide our own set_mode() as we don't want to change anything that has
31 * already been configured..
32 */
33static void pata_platform_set_mode(struct ata_port *ap)
34{
35 int i;
36
37 for (i = 0; i < ATA_MAX_DEVICES; i++) {
38 struct ata_device *dev = &ap->device[i];
39
40 if (ata_dev_enabled(dev)) {
41 /* We don't really care */
42 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
43 dev->xfer_shift = ATA_SHIFT_PIO;
44 dev->flags |= ATA_DFLAG_PIO;
45 }
46 }
47}
48
49static void pata_platform_host_stop(struct ata_host *host)
50{
51 int i;
52
53 /*
54 * Unmap the bases for MMIO
55 */
56 for (i = 0; i < host->n_ports; i++) {
57 struct ata_port *ap = host->ports[i];
58
59 if (ap->flags & ATA_FLAG_MMIO) {
60 iounmap((void __iomem *)ap->ioaddr.ctl_addr);
61 iounmap((void __iomem *)ap->ioaddr.cmd_addr);
62 }
63 }
64}
65
66static struct scsi_host_template pata_platform_sht = {
67 .module = THIS_MODULE,
68 .name = DRV_NAME,
69 .ioctl = ata_scsi_ioctl,
70 .queuecommand = ata_scsi_queuecmd,
71 .can_queue = ATA_DEF_QUEUE,
72 .this_id = ATA_SHT_THIS_ID,
73 .sg_tablesize = LIBATA_MAX_PRD,
74 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
75 .emulated = ATA_SHT_EMULATED,
76 .use_clustering = ATA_SHT_USE_CLUSTERING,
77 .proc_name = DRV_NAME,
78 .dma_boundary = ATA_DMA_BOUNDARY,
79 .slave_configure = ata_scsi_slave_config,
80 .slave_destroy = ata_scsi_slave_destroy,
81 .bios_param = ata_std_bios_param,
82};
83
84static struct ata_port_operations pata_platform_port_ops = {
85 .set_mode = pata_platform_set_mode,
86
87 .port_disable = ata_port_disable,
88 .tf_load = ata_tf_load,
89 .tf_read = ata_tf_read,
90 .check_status = ata_check_status,
91 .exec_command = ata_exec_command,
92 .dev_select = ata_std_dev_select,
93
94 .freeze = ata_bmdma_freeze,
95 .thaw = ata_bmdma_thaw,
96 .error_handler = ata_bmdma_error_handler,
97 .post_internal_cmd = ata_bmdma_post_internal_cmd,
98
99 .qc_prep = ata_qc_prep,
100 .qc_issue = ata_qc_issue_prot,
101
102 .data_xfer = ata_pio_data_xfer_noirq,
103
104 .irq_handler = ata_interrupt,
105 .irq_clear = ata_bmdma_irq_clear,
106
107 .port_start = ata_port_start,
108 .port_stop = ata_port_stop,
109 .host_stop = pata_platform_host_stop
110};
111
112static void pata_platform_setup_port(struct ata_ioports *ioaddr,
113 struct pata_platform_info *info)
114{
115 unsigned int shift = 0;
116
117 /* Fixup the port shift for platforms that need it */
118 if (info && info->ioport_shift)
119 shift = info->ioport_shift;
120
121 ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
122 ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
123 ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
124 ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift);
125 ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift);
126 ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift);
127 ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift);
128 ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift);
129 ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift);
130 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift);
131}
132
133/**
134 * pata_platform_probe - attach a platform interface
135 * @pdev: platform device
136 *
137 * Register a platform bus IDE interface. Such interfaces are PIO and we
138 * assume they do not support IRQ sharing.
139 *
140 * Platform devices are expected to contain 3 resources per port:
141 *
142 * - I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
143 * - CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
144 * - IRQ (IORESOURCE_IRQ)
145 *
146 * If the base resources are both mem types, the ioremap() is handled
147 * here. For IORESOURCE_IO, it's assumed that there's no remapping
148 * necessary.
149 */
150static int __devinit pata_platform_probe(struct platform_device *pdev)
151{
152 struct resource *io_res, *ctl_res;
153 struct ata_probe_ent ae;
154 unsigned int mmio;
155 int ret;
156
157 /*
158 * Simple resource validation ..
159 */
160 if (unlikely(pdev->num_resources != 3)) {
161 dev_err(&pdev->dev, "invalid number of resources\n");
162 return -EINVAL;
163 }
164
165 /*
166 * Get the I/O base first
167 */
168 io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
169 if (io_res == NULL) {
170 io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
171 if (unlikely(io_res == NULL))
172 return -EINVAL;
173 }
174
175 /*
176 * Then the CTL base
177 */
178 ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
179 if (ctl_res == NULL) {
180 ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
181 if (unlikely(ctl_res == NULL))
182 return -EINVAL;
183 }
184
185 /*
186 * Check for MMIO
187 */
188 mmio = (( io_res->flags == IORESOURCE_MEM) &&
189 (ctl_res->flags == IORESOURCE_MEM));
190
191 /*
192 * Now that that's out of the way, wire up the port..
193 */
194 memset(&ae, 0, sizeof(struct ata_probe_ent));
195 INIT_LIST_HEAD(&ae.node);
196 ae.dev = &pdev->dev;
197 ae.port_ops = &pata_platform_port_ops;
198 ae.sht = &pata_platform_sht;
199 ae.n_ports = 1;
200 ae.pio_mask = pio_mask;
201 ae.irq = platform_get_irq(pdev, 0);
202 ae.irq_flags = 0;
203 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
204
205 /*
206 * Handle the MMIO case
207 */
208 if (mmio) {
209 ae.port_flags |= ATA_FLAG_MMIO;
210
211 ae.port[0].cmd_addr = (unsigned long)ioremap(io_res->start,
212 io_res->end - io_res->start + 1);
213 if (unlikely(!ae.port[0].cmd_addr)) {
214 dev_err(&pdev->dev, "failed to remap IO base\n");
215 return -ENXIO;
216 }
217
218 ae.port[0].ctl_addr = (unsigned long)ioremap(ctl_res->start,
219 ctl_res->end - ctl_res->start + 1);
220 if (unlikely(!ae.port[0].ctl_addr)) {
221 dev_err(&pdev->dev, "failed to remap CTL base\n");
222 ret = -ENXIO;
223 goto bad_remap;
224 }
225 } else {
226 ae.port[0].cmd_addr = io_res->start;
227 ae.port[0].ctl_addr = ctl_res->start;
228 }
229
230 ae.port[0].altstatus_addr = ae.port[0].ctl_addr;
231
232 pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data);
233
234 if (unlikely(ata_device_add(&ae) == 0)) {
235 ret = -ENODEV;
236 goto add_failed;
237 }
238
239 return 0;
240
241add_failed:
242 if (ae.port[0].ctl_addr && mmio)
243 iounmap((void __iomem *)ae.port[0].ctl_addr);
244bad_remap:
245 if (ae.port[0].cmd_addr && mmio)
246 iounmap((void __iomem *)ae.port[0].cmd_addr);
247
248 return ret;
249}
250
251/**
252 * pata_platform_remove - unplug a platform interface
253 * @pdev: platform device
254 *
255 * A platform bus ATA device has been unplugged. Perform the needed
256 * cleanup. Also called on module unload for any active devices.
257 */
258static int __devexit pata_platform_remove(struct platform_device *pdev)
259{
260 struct device *dev = &pdev->dev;
261 struct ata_host *host = dev_get_drvdata(dev);
262
263 ata_host_remove(host);
264 dev_set_drvdata(dev, NULL);
265
266 return 0;
267}
268
269static struct platform_driver pata_platform_driver = {
270 .probe = pata_platform_probe,
271 .remove = __devexit_p(pata_platform_remove),
272 .driver = {
273 .name = DRV_NAME,
274 .owner = THIS_MODULE,
275 },
276};
277
278static int __init pata_platform_init(void)
279{
280 return platform_driver_register(&pata_platform_driver);
281}
282
283static void __exit pata_platform_exit(void)
284{
285 platform_driver_unregister(&pata_platform_driver);
286}
287module_init(pata_platform_init);
288module_exit(pata_platform_exit);
289
290module_param(pio_mask, int, 0);
291
292MODULE_AUTHOR("Paul Mundt");
293MODULE_DESCRIPTION("low-level driver for platform device ATA");
294MODULE_LICENSE("GPL");
295MODULE_VERSION(DRV_VERSION);
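
pata_platform_setup_port() above derives every taskfile register address from cmd_addr plus the register index shifted left by the platform's ioport_shift. A standalone sketch of that layout, using the conventional taskfile register indices (0 = data through 7 = status/command) as assumed values rather than the ATA_REG_* constants from <linux/ata.h>:

#include <stdio.h>

/* Conventional taskfile register indices (assumed values). */
enum { REG_DATA, REG_ERR, REG_NSECT, REG_LBAL,
       REG_LBAM, REG_LBAH, REG_DEVICE, REG_STATUS };

/* With ioport_shift = 1 the registers end up two bytes apart, which is
 * what the shifted addition in the driver achieves. */
static unsigned long reg_addr(unsigned long cmd_base, int reg, int shift)
{
	return cmd_base + ((unsigned long)reg << shift);
}

int main(void)
{
	unsigned long base = 0x14000000;   /* illustrative MMIO base */
	int shift = 1;                     /* e.g. a 16-bit-only local bus */
	static const char *name[] = { "data", "err", "nsect", "lbal",
	                              "lbam", "lbah", "device", "status" };

	for (int r = REG_DATA; r <= REG_STATUS; r++)
		printf("%-7s at 0x%08lx\n", name[r], reg_addr(base, r, shift));
	return 0;
}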
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 314938dea1f..36f621abc39 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -157,7 +157,6 @@ static struct scsi_host_template qdi_sht = {
157 .can_queue = ATA_DEF_QUEUE, 157 .can_queue = ATA_DEF_QUEUE,
158 .this_id = ATA_SHT_THIS_ID, 158 .this_id = ATA_SHT_THIS_ID,
159 .sg_tablesize = LIBATA_MAX_PRD, 159 .sg_tablesize = LIBATA_MAX_PRD,
160 .max_sectors = ATA_MAX_SECTORS,
161 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 160 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
162 .emulated = ATA_SHT_EMULATED, 161 .emulated = ATA_SHT_EMULATED,
163 .use_clustering = ATA_SHT_USE_CLUSTERING, 162 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 048c2bb21ef..065541d034a 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -220,7 +220,6 @@ static struct scsi_host_template radisys_sht = {
220 .can_queue = ATA_DEF_QUEUE, 220 .can_queue = ATA_DEF_QUEUE,
221 .this_id = ATA_SHT_THIS_ID, 221 .this_id = ATA_SHT_THIS_ID,
222 .sg_tablesize = LIBATA_MAX_PRD, 222 .sg_tablesize = LIBATA_MAX_PRD,
223 .max_sectors = ATA_MAX_SECTORS,
224 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 223 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
225 .emulated = ATA_SHT_EMULATED, 224 .emulated = ATA_SHT_EMULATED,
226 .use_clustering = ATA_SHT_USE_CLUSTERING, 225 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -229,6 +228,8 @@ static struct scsi_host_template radisys_sht = {
229 .slave_configure = ata_scsi_slave_config, 228 .slave_configure = ata_scsi_slave_config,
230 .slave_destroy = ata_scsi_slave_destroy, 229 .slave_destroy = ata_scsi_slave_destroy,
231 .bios_param = ata_std_bios_param, 230 .bios_param = ata_std_bios_param,
231 .resume = ata_scsi_device_resume,
232 .suspend = ata_scsi_device_suspend,
232}; 233};
233 234
234static const struct ata_port_operations radisys_pata_ops = { 235static const struct ata_port_operations radisys_pata_ops = {
@@ -311,6 +312,8 @@ static struct pci_driver radisys_pci_driver = {
311 .id_table = radisys_pci_tbl, 312 .id_table = radisys_pci_tbl,
312 .probe = radisys_init_one, 313 .probe = radisys_init_one,
313 .remove = ata_pci_remove_one, 314 .remove = ata_pci_remove_one,
315 .suspend = ata_pci_device_suspend,
316 .resume = ata_pci_device_resume,
314}; 317};
315 318
316static int __init radisys_init(void) 319static int __init radisys_init(void)
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index e4e5ea423fe..3677c642c9f 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -21,7 +21,7 @@
21#include <linux/libata.h> 21#include <linux/libata.h>
22 22
23#define DRV_NAME "pata_rz1000" 23#define DRV_NAME "pata_rz1000"
24#define DRV_VERSION "0.2.2" 24#define DRV_VERSION "0.2.3"
25 25
26 26
27/** 27/**
@@ -83,7 +83,6 @@ static struct scsi_host_template rz1000_sht = {
83 .can_queue = ATA_DEF_QUEUE, 83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID, 84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD, 85 .sg_tablesize = LIBATA_MAX_PRD,
86 .max_sectors = ATA_MAX_SECTORS,
87 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
88 .emulated = ATA_SHT_EMULATED, 87 .emulated = ATA_SHT_EMULATED,
89 .use_clustering = ATA_SHT_USE_CLUSTERING, 88 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -92,6 +91,8 @@ static struct scsi_host_template rz1000_sht = {
92 .slave_configure = ata_scsi_slave_config, 91 .slave_configure = ata_scsi_slave_config,
93 .slave_destroy = ata_scsi_slave_destroy, 92 .slave_destroy = ata_scsi_slave_destroy,
94 .bios_param = ata_std_bios_param, 93 .bios_param = ata_std_bios_param,
94 .resume = ata_scsi_device_resume,
95 .suspend = ata_scsi_device_suspend,
95}; 96};
96 97
97static struct ata_port_operations rz1000_port_ops = { 98static struct ata_port_operations rz1000_port_ops = {
@@ -129,6 +130,19 @@ static struct ata_port_operations rz1000_port_ops = {
129 .host_stop = ata_host_stop 130 .host_stop = ata_host_stop
130}; 131};
131 132
133static int rz1000_fifo_disable(struct pci_dev *pdev)
134{
135 u16 reg;
136 /* Be exceptionally paranoid as we must be sure to apply the fix */
137 if (pci_read_config_word(pdev, 0x40, &reg) != 0)
138 return -1;
139 reg &= 0xDFFF;
140 if (pci_write_config_word(pdev, 0x40, reg) != 0)
141 return -1;
142 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
143 return 0;
144}
145
132/** 146/**
133 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services 147 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
134 * @pdev: PCI device to register 148 * @pdev: PCI device to register
@@ -143,7 +157,6 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
143{ 157{
144 static int printed_version; 158 static int printed_version;
145 struct ata_port_info *port_info[2]; 159 struct ata_port_info *port_info[2];
146 u16 reg;
147 static struct ata_port_info info = { 160 static struct ata_port_info info = {
148 .sht = &rz1000_sht, 161 .sht = &rz1000_sht,
149 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 162 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
@@ -154,23 +167,25 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
154 if (!printed_version++) 167 if (!printed_version++)
155 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 168 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
156 169
157 /* Be exceptionally paranoid as we must be sure to apply the fix */ 170 if (rz1000_fifo_disable(pdev) == 0) {
158 if (pci_read_config_word(pdev, 0x40, &reg) != 0) 171 port_info[0] = &info;
159 goto fail; 172 port_info[1] = &info;
160 reg &= 0xDFFF; 173 return ata_pci_init_one(pdev, port_info, 2);
161 if (pci_write_config_word(pdev, 0x40, reg) != 0) 174 }
162 goto fail;
163 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
164
165 port_info[0] = &info;
166 port_info[1] = &info;
167 return ata_pci_init_one(pdev, port_info, 2);
168fail:
169 printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); 175 printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n");
170 /* Not safe to use so skip */ 176 /* Not safe to use so skip */
171 return -ENODEV; 177 return -ENODEV;
172} 178}
173 179
180static int rz1000_reinit_one(struct pci_dev *pdev)
181{
182 /* If this fails on resume (which is a "can't happen" case), we
183 must stop as any progress risks data loss */
184 if (rz1000_fifo_disable(pdev))
185 panic("rz1000 fifo");
186 return ata_pci_device_resume(pdev);
187}
188
174static const struct pci_device_id pata_rz1000[] = { 189static const struct pci_device_id pata_rz1000[] = {
175 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, 190 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
176 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), }, 191 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
@@ -182,7 +197,9 @@ static struct pci_driver rz1000_pci_driver = {
182 .name = DRV_NAME, 197 .name = DRV_NAME,
183 .id_table = pata_rz1000, 198 .id_table = pata_rz1000,
184 .probe = rz1000_init_one, 199 .probe = rz1000_init_one,
185 .remove = ata_pci_remove_one 200 .remove = ata_pci_remove_one,
201 .suspend = ata_pci_device_suspend,
202 .resume = rz1000_reinit_one,
186}; 203};
187 204
188static int __init rz1000_init(void) 205static int __init rz1000_init(void)
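
rz1000_fifo_disable() above clears one bit of config word 0x40 to turn off the chipset's read-ahead FIFO (the well-known RZ1000 data-corruption workaround), and the new resume hook reapplies it. A sketch of just the bit manipulation, done on a plain value rather than through pci_read_config_word()/pci_write_config_word():

#include <stdio.h>
#include <stdint.h>

#define RZ1000_READAHEAD_DISABLE_MASK 0xDFFFu  /* clears bit 13 of config word 0x40 */

static uint16_t rz1000_disable_readahead(uint16_t reg)
{
	return reg & RZ1000_READAHEAD_DISABLE_MASK;
}

int main(void)
{
	uint16_t before = 0x2000;   /* illustrative value with read-ahead enabled */
	uint16_t after  = rz1000_disable_readahead(before);

	printf("config 0x40: %04x -> %04x\n", before, after);
	return 0;
}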
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 0c75dae7476..a3b35bc5039 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -40,7 +40,7 @@
40#include <linux/libata.h> 40#include <linux/libata.h>
41 41
42#define DRV_NAME "sc1200" 42#define DRV_NAME "sc1200"
43#define DRV_VERSION "0.2.3" 43#define DRV_VERSION "0.2.4"
44 44
45#define SC1200_REV_A 0x00 45#define SC1200_REV_A 0x00
46#define SC1200_REV_B1 0x01 46#define SC1200_REV_B1 0x01
@@ -186,7 +186,6 @@ static struct scsi_host_template sc1200_sht = {
186 .can_queue = ATA_DEF_QUEUE, 186 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 187 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 188 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 189 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 190 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 191 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -195,6 +194,8 @@ static struct scsi_host_template sc1200_sht = {
195 .slave_configure = ata_scsi_slave_config, 194 .slave_configure = ata_scsi_slave_config,
196 .slave_destroy = ata_scsi_slave_destroy, 195 .slave_destroy = ata_scsi_slave_destroy,
197 .bios_param = ata_std_bios_param, 196 .bios_param = ata_std_bios_param,
197 .resume = ata_scsi_device_resume,
198 .suspend = ata_scsi_device_suspend,
198}; 199};
199 200
200static struct ata_port_operations sc1200_port_ops = { 201static struct ata_port_operations sc1200_port_ops = {
@@ -264,7 +265,9 @@ static struct pci_driver sc1200_pci_driver = {
264 .name = DRV_NAME, 265 .name = DRV_NAME,
265 .id_table = sc1200, 266 .id_table = sc1200,
266 .probe = sc1200_init_one, 267 .probe = sc1200_init_one,
267 .remove = ata_pci_remove_one 268 .remove = ata_pci_remove_one,
269 .suspend = ata_pci_device_suspend,
270 .resume = ata_pci_device_resume,
268}; 271};
269 272
270static int __init sc1200_init(void) 273static int __init sc1200_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index be7f60efcb6..f02b6a3b0f1 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_serverworks" 43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.3.7" 44#define DRV_VERSION "0.3.9"
45 45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ 46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ 47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -318,7 +318,6 @@ static struct scsi_host_template serverworks_sht = {
318 .can_queue = ATA_DEF_QUEUE, 318 .can_queue = ATA_DEF_QUEUE,
319 .this_id = ATA_SHT_THIS_ID, 319 .this_id = ATA_SHT_THIS_ID,
320 .sg_tablesize = LIBATA_MAX_PRD, 320 .sg_tablesize = LIBATA_MAX_PRD,
321 .max_sectors = ATA_MAX_SECTORS,
322 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 321 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
323 .emulated = ATA_SHT_EMULATED, 322 .emulated = ATA_SHT_EMULATED,
324 .use_clustering = ATA_SHT_USE_CLUSTERING, 323 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -327,6 +326,8 @@ static struct scsi_host_template serverworks_sht = {
327 .slave_configure = ata_scsi_slave_config, 326 .slave_configure = ata_scsi_slave_config,
328 .slave_destroy = ata_scsi_slave_destroy, 327 .slave_destroy = ata_scsi_slave_destroy,
329 .bios_param = ata_std_bios_param, 328 .bios_param = ata_std_bios_param,
329 .resume = ata_scsi_device_resume,
330 .suspend = ata_scsi_device_suspend,
330}; 331};
331 332
332static struct ata_port_operations serverworks_osb4_port_ops = { 333static struct ata_port_operations serverworks_osb4_port_ops = {
@@ -554,6 +555,30 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
554 return ata_pci_init_one(pdev, port_info, ports); 555 return ata_pci_init_one(pdev, port_info, ports);
555} 556}
556 557
558static int serverworks_reinit_one(struct pci_dev *pdev)
559{
560 /* Force master latency timer to 64 PCI clocks */
561 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
562
563 switch (pdev->device)
564 {
565 case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
566 serverworks_fixup_osb4(pdev);
567 break;
568 case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
569 ata_pci_clear_simplex(pdev);
570 /* fall through */
571 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
572 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
573 serverworks_fixup_csb(pdev);
574 break;
575 case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
576 serverworks_fixup_ht1000(pdev);
577 break;
578 }
579 return ata_pci_device_resume(pdev);
580}
581
557static const struct pci_device_id serverworks[] = { 582static const struct pci_device_id serverworks[] = {
558 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, 583 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
559 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2}, 584 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
@@ -568,7 +593,9 @@ static struct pci_driver serverworks_pci_driver = {
568 .name = DRV_NAME, 593 .name = DRV_NAME,
569 .id_table = serverworks, 594 .id_table = serverworks,
570 .probe = serverworks_init_one, 595 .probe = serverworks_init_one,
571 .remove = ata_pci_remove_one 596 .remove = ata_pci_remove_one,
597 .suspend = ata_pci_device_suspend,
598 .resume = serverworks_reinit_one,
572}; 599};
573 600
574static int __init serverworks_init(void) 601static int __init serverworks_init(void)
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 11942fd03b5..32cf0bfa892 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_sil680" 35#define DRV_NAME "pata_sil680"
36#define DRV_VERSION "0.3.2" 36#define DRV_VERSION "0.4.1"
37 37
38/** 38/**
39 * sil680_selreg - return register base 39 * sil680_selreg - return register base
@@ -218,7 +218,6 @@ static struct scsi_host_template sil680_sht = {
218 .can_queue = ATA_DEF_QUEUE, 218 .can_queue = ATA_DEF_QUEUE,
219 .this_id = ATA_SHT_THIS_ID, 219 .this_id = ATA_SHT_THIS_ID,
220 .sg_tablesize = LIBATA_MAX_PRD, 220 .sg_tablesize = LIBATA_MAX_PRD,
221 .max_sectors = ATA_MAX_SECTORS,
222 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 221 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
223 .emulated = ATA_SHT_EMULATED, 222 .emulated = ATA_SHT_EMULATED,
224 .use_clustering = ATA_SHT_USE_CLUSTERING, 223 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -263,32 +262,20 @@ static struct ata_port_operations sil680_port_ops = {
263 .host_stop = ata_host_stop 262 .host_stop = ata_host_stop
264}; 263};
265 264
266static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 265/**
266 * sil680_init_chip - chip setup
267 * @pdev: PCI device
268 *
269 * Perform all the chip setup which must be done both when the device
270 * is powered up at boot and when we resume from RAM.
271 * Returns the final clock settings.
272 */
273
274static u8 sil680_init_chip(struct pci_dev *pdev)
267{ 275{
268 static struct ata_port_info info = {
269 .sht = &sil680_sht,
270 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
271 .pio_mask = 0x1f,
272 .mwdma_mask = 0x07,
273 .udma_mask = 0x7f,
274 .port_ops = &sil680_port_ops
275 };
276 static struct ata_port_info info_slow = {
277 .sht = &sil680_sht,
278 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
279 .pio_mask = 0x1f,
280 .mwdma_mask = 0x07,
281 .udma_mask = 0x3f,
282 .port_ops = &sil680_port_ops
283 };
284 static struct ata_port_info *port_info[2] = {&info, &info};
285 static int printed_version;
286 u32 class_rev = 0; 276 u32 class_rev = 0;
287 u8 tmpbyte = 0; 277 u8 tmpbyte = 0;
288 278
289 if (!printed_version++)
290 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
291
292 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); 279 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
293 class_rev &= 0xff; 280 class_rev &= 0xff;
294 /* FIXME: double check */ 281 /* FIXME: double check */
@@ -323,8 +310,6 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
323 pci_read_config_byte(pdev, 0x8A, &tmpbyte); 310 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
324 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n", 311 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
325 tmpbyte & 1, tmpbyte & 0x30); 312 tmpbyte & 1, tmpbyte & 0x30);
326 if ((tmpbyte & 0x30) == 0)
327 port_info[0] = port_info[1] = &info_slow;
328 313
329 pci_write_config_byte(pdev, 0xA1, 0x72); 314 pci_write_config_byte(pdev, 0xA1, 0x72);
330 pci_write_config_word(pdev, 0xA2, 0x328A); 315 pci_write_config_word(pdev, 0xA2, 0x328A);
@@ -343,11 +328,51 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
343 case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break; 328 case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
344 /* This last case is _NOT_ ok */ 329 /* This last case is _NOT_ ok */
345 case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n"); 330 case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
346 return -EIO; 331 }
332 return tmpbyte & 0x30;
333}
334
335static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
336{
337 static struct ata_port_info info = {
338 .sht = &sil680_sht,
339 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
340 .pio_mask = 0x1f,
341 .mwdma_mask = 0x07,
342 .udma_mask = 0x7f,
343 .port_ops = &sil680_port_ops
344 };
345 static struct ata_port_info info_slow = {
346 .sht = &sil680_sht,
347 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
348 .pio_mask = 0x1f,
349 .mwdma_mask = 0x07,
350 .udma_mask = 0x3f,
351 .port_ops = &sil680_port_ops
352 };
353 static struct ata_port_info *port_info[2] = {&info, &info};
354 static int printed_version;
355
356 if (!printed_version++)
357 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
358
359 switch(sil680_init_chip(pdev))
360 {
361 case 0:
362 port_info[0] = port_info[1] = &info_slow;
363 break;
364 case 0x30:
365 return -ENODEV;
347 } 366 }
348 return ata_pci_init_one(pdev, port_info, 2); 367 return ata_pci_init_one(pdev, port_info, 2);
349} 368}
350 369
370static int sil680_reinit_one(struct pci_dev *pdev)
371{
372 sil680_init_chip(pdev);
373 return ata_pci_device_resume(pdev);
374}
375
351static const struct pci_device_id sil680[] = { 376static const struct pci_device_id sil680[] = {
352 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), }, 377 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
353 378
@@ -358,7 +383,9 @@ static struct pci_driver sil680_pci_driver = {
358 .name = DRV_NAME, 383 .name = DRV_NAME,
359 .id_table = sil680, 384 .id_table = sil680,
360 .probe = sil680_init_one, 385 .probe = sil680_init_one,
361 .remove = ata_pci_remove_one 386 .remove = ata_pci_remove_one,
387 .suspend = ata_pci_device_suspend,
388 .resume = sil680_reinit_one,
362}; 389};
363 390
364static int __init sil680_init(void) 391static int __init sil680_init(void)
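The refactored probe now keys off the clock field (bits 4-5 of config register 0x8A) returned by sil680_init_chip(): field 0 drops back to the slower UDMA mask, 0x30 aborts the probe, and anything else keeps the full-speed table. A small userspace sketch of that decision; the 0x7f/0x3f values are the udma_mask fields of the two port_info tables above:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the switch in sil680_init_one(): pick a UDMA mask from the
 * clock field returned by sil680_init_chip() (register 0x8A & 0x30). */
static int sil680_pick_udma_mask(uint8_t clock_bits, uint8_t *udma_mask)
{
	switch (clock_bits & 0x30) {
	case 0x00:		/* clock field 0: fall back to the slow table */
		*udma_mask = 0x3f;
		return 0;
	case 0x30:		/* clock disabled: the probe gives up (-ENODEV) */
		return -1;
	default:		/* 133MHz or PCI clock: full-speed table */
		*udma_mask = 0x7f;
		return 0;
	}
}

int main(void)
{
	static const uint8_t fields[] = { 0x00, 0x10, 0x20, 0x30 };
	for (unsigned int i = 0; i < sizeof(fields); i++) {
		uint8_t mask = 0;
		if (sil680_pick_udma_mask(fields[i], &mask))
			printf("0x%02x -> probe fails\n", fields[i]);
		else
			printf("0x%02x -> udma_mask 0x%02x\n", fields[i], mask);
	}
	return 0;
}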
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 91e85f90941..916cedb3d75 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -34,7 +34,7 @@
34#include <linux/ata.h> 34#include <linux/ata.h>
35 35
36#define DRV_NAME "pata_sis" 36#define DRV_NAME "pata_sis"
37#define DRV_VERSION "0.4.4" 37#define DRV_VERSION "0.4.5"
38 38
39struct sis_chipset { 39struct sis_chipset {
40 u16 device; /* PCI host ID */ 40 u16 device; /* PCI host ID */
@@ -538,7 +538,6 @@ static struct scsi_host_template sis_sht = {
538 .can_queue = ATA_DEF_QUEUE, 538 .can_queue = ATA_DEF_QUEUE,
539 .this_id = ATA_SHT_THIS_ID, 539 .this_id = ATA_SHT_THIS_ID,
540 .sg_tablesize = LIBATA_MAX_PRD, 540 .sg_tablesize = LIBATA_MAX_PRD,
541 .max_sectors = ATA_MAX_SECTORS,
542 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 541 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
543 .emulated = ATA_SHT_EMULATED, 542 .emulated = ATA_SHT_EMULATED,
544 .use_clustering = ATA_SHT_USE_CLUSTERING, 543 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -547,6 +546,8 @@ static struct scsi_host_template sis_sht = {
547 .slave_configure = ata_scsi_slave_config, 546 .slave_configure = ata_scsi_slave_config,
548 .slave_destroy = ata_scsi_slave_destroy, 547 .slave_destroy = ata_scsi_slave_destroy,
549 .bios_param = ata_std_bios_param, 548 .bios_param = ata_std_bios_param,
549 .resume = ata_scsi_device_resume,
550 .suspend = ata_scsi_device_suspend,
550}; 551};
551 552
552static const struct ata_port_operations sis_133_ops = { 553static const struct ata_port_operations sis_133_ops = {
@@ -1000,6 +1001,8 @@ static struct pci_driver sis_pci_driver = {
1000 .id_table = sis_pci_tbl, 1001 .id_table = sis_pci_tbl,
1001 .probe = sis_init_one, 1002 .probe = sis_init_one,
1002 .remove = ata_pci_remove_one, 1003 .remove = ata_pci_remove_one,
1004 .suspend = ata_pci_device_suspend,
1005 .resume = ata_pci_device_resume,
1003}; 1006};
1004 1007
1005static int __init sis_init(void) 1008static int __init sis_init(void)
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index dc1cfc6d805..e94f515ef54 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -230,7 +230,6 @@ static struct scsi_host_template sl82c105_sht = {
230 .can_queue = ATA_DEF_QUEUE, 230 .can_queue = ATA_DEF_QUEUE,
231 .this_id = ATA_SHT_THIS_ID, 231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = LIBATA_MAX_PRD, 232 .sg_tablesize = LIBATA_MAX_PRD,
233 .max_sectors = ATA_MAX_SECTORS,
234 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
235 .emulated = ATA_SHT_EMULATED, 234 .emulated = ATA_SHT_EMULATED,
236 .use_clustering = ATA_SHT_USE_CLUSTERING, 235 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index bfda1f7e760..a142971f130 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -43,7 +43,7 @@
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
45#define DRV_NAME "pata_triflex" 45#define DRV_NAME "pata_triflex"
46#define DRV_VERSION "0.2.5" 46#define DRV_VERSION "0.2.7"
47 47
48/** 48/**
49 * triflex_prereset - probe begin 49 * triflex_prereset - probe begin
@@ -185,7 +185,6 @@ static struct scsi_host_template triflex_sht = {
185 .can_queue = ATA_DEF_QUEUE, 185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID, 186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD, 187 .sg_tablesize = LIBATA_MAX_PRD,
188 .max_sectors = ATA_MAX_SECTORS,
189 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
190 .emulated = ATA_SHT_EMULATED, 189 .emulated = ATA_SHT_EMULATED,
191 .use_clustering = ATA_SHT_USE_CLUSTERING, 190 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -194,6 +193,8 @@ static struct scsi_host_template triflex_sht = {
194 .slave_configure = ata_scsi_slave_config, 193 .slave_configure = ata_scsi_slave_config,
195 .slave_destroy = ata_scsi_slave_destroy, 194 .slave_destroy = ata_scsi_slave_destroy,
196 .bios_param = ata_std_bios_param, 195 .bios_param = ata_std_bios_param,
196 .resume = ata_scsi_device_resume,
197 .suspend = ata_scsi_device_suspend,
197}; 198};
198 199
199static struct ata_port_operations triflex_port_ops = { 200static struct ata_port_operations triflex_port_ops = {
@@ -258,7 +259,9 @@ static struct pci_driver triflex_pci_driver = {
258 .name = DRV_NAME, 259 .name = DRV_NAME,
259 .id_table = triflex, 260 .id_table = triflex,
260 .probe = triflex_init_one, 261 .probe = triflex_init_one,
261 .remove = ata_pci_remove_one 262 .remove = ata_pci_remove_one,
263 .suspend = ata_pci_device_suspend,
264 .resume = ata_pci_device_resume,
262}; 265};
263 266
264static int __init triflex_init(void) 267static int __init triflex_init(void)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index c5f1616d224..cc09d47fb92 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -23,6 +23,7 @@
23 * VIA VT8233c - UDMA100 23 * VIA VT8233c - UDMA100
24 * VIA VT8235 - UDMA133 24 * VIA VT8235 - UDMA133
25 * VIA VT8237 - UDMA133 25 * VIA VT8237 - UDMA133
26 * VIA VT8251 - UDMA133
26 * 27 *
27 * Most registers remain compatible across chips. Others start reserved 28 * Most registers remain compatible across chips. Others start reserved
28 * and acquire sensible semantics if set to 1 (eg cable detect). A few 29 * and acquire sensible semantics if set to 1 (eg cable detect). A few
@@ -60,7 +61,7 @@
60#include <linux/libata.h> 61#include <linux/libata.h>
61 62
62#define DRV_NAME "pata_via" 63#define DRV_NAME "pata_via"
63#define DRV_VERSION "0.1.14" 64#define DRV_VERSION "0.2.0"
64 65
65/* 66/*
66 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx 67 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -94,6 +95,7 @@ static const struct via_isa_bridge {
94 u8 rev_max; 95 u8 rev_max;
95 u16 flags; 96 u16 flags;
96} via_isa_bridges[] = { 97} via_isa_bridges[] = {
98 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
97 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 99 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
98 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES}, 100 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
99 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 101 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -288,7 +290,6 @@ static struct scsi_host_template via_sht = {
288 .can_queue = ATA_DEF_QUEUE, 290 .can_queue = ATA_DEF_QUEUE,
289 .this_id = ATA_SHT_THIS_ID, 291 .this_id = ATA_SHT_THIS_ID,
290 .sg_tablesize = LIBATA_MAX_PRD, 292 .sg_tablesize = LIBATA_MAX_PRD,
291 .max_sectors = ATA_MAX_SECTORS,
292 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 293 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
293 .emulated = ATA_SHT_EMULATED, 294 .emulated = ATA_SHT_EMULATED,
294 .use_clustering = ATA_SHT_USE_CLUSTERING, 295 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -297,6 +298,8 @@ static struct scsi_host_template via_sht = {
297 .slave_configure = ata_scsi_slave_config, 298 .slave_configure = ata_scsi_slave_config,
298 .slave_destroy = ata_scsi_slave_destroy, 299 .slave_destroy = ata_scsi_slave_destroy,
299 .bios_param = ata_std_bios_param, 300 .bios_param = ata_std_bios_param,
301 .resume = ata_scsi_device_resume,
302 .suspend = ata_scsi_device_suspend,
300}; 303};
301 304
302static struct ata_port_operations via_port_ops = { 305static struct ata_port_operations via_port_ops = {
@@ -370,8 +373,42 @@ static struct ata_port_operations via_port_ops_noirq = {
370}; 373};
371 374
372/** 375/**
376 * via_config_fifo - set up the FIFO
377 * @pdev: PCI device
378 * @flags: configuration flags
379 *
380 * Set the FIFO properties for this device if necessary. Used both on
381 * set up and on the resume path.
382 */
383
384static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
385{
386 u8 enable;
387
388 /* 0x40 low bits indicate enabled channels */
389 pci_read_config_byte(pdev, 0x40 , &enable);
390 enable &= 3;
391
392 if (flags & VIA_SET_FIFO) {
393 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
394 u8 fifo;
395
396 pci_read_config_byte(pdev, 0x43, &fifo);
397
398 /* Clear PREQ# until DDACK# for errata */
399 if (flags & VIA_BAD_PREQ)
400 fifo &= 0x7F;
401 else
402 fifo &= 0x9f;
403 /* Turn on FIFO for enabled channels */
404 fifo |= fifo_setting[enable];
405 pci_write_config_byte(pdev, 0x43, fifo);
406 }
407}
408
409/**
373 * via_init_one - discovery callback 410 * via_init_one - discovery callback
374 * @pdev: PCI device ID 411 * @pdev: PCI device
375 * @id: PCI table info 412 * @id: PCI table info
376 * 413 *
377 * A VIA IDE interface has been discovered. Figure out what revision 414 * A VIA IDE interface has been discovered. Figure out what revision
@@ -383,7 +420,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
383 /* Early VIA without UDMA support */ 420 /* Early VIA without UDMA support */
384 static struct ata_port_info via_mwdma_info = { 421 static struct ata_port_info via_mwdma_info = {
385 .sht = &via_sht, 422 .sht = &via_sht,
386 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 423 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
387 .pio_mask = 0x1f, 424 .pio_mask = 0x1f,
388 .mwdma_mask = 0x07, 425 .mwdma_mask = 0x07,
389 .port_ops = &via_port_ops 426 .port_ops = &via_port_ops
@@ -391,7 +428,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
391 /* Ditto with IRQ masking required */ 428 /* Ditto with IRQ masking required */
392 static struct ata_port_info via_mwdma_info_borked = { 429 static struct ata_port_info via_mwdma_info_borked = {
393 .sht = &via_sht, 430 .sht = &via_sht,
394 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 431 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
395 .pio_mask = 0x1f, 432 .pio_mask = 0x1f,
396 .mwdma_mask = 0x07, 433 .mwdma_mask = 0x07,
397 .port_ops = &via_port_ops_noirq, 434 .port_ops = &via_port_ops_noirq,
@@ -399,7 +436,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
399 /* VIA UDMA 33 devices (and borked 66) */ 436 /* VIA UDMA 33 devices (and borked 66) */
400 static struct ata_port_info via_udma33_info = { 437 static struct ata_port_info via_udma33_info = {
401 .sht = &via_sht, 438 .sht = &via_sht,
402 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 439 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
403 .pio_mask = 0x1f, 440 .pio_mask = 0x1f,
404 .mwdma_mask = 0x07, 441 .mwdma_mask = 0x07,
405 .udma_mask = 0x7, 442 .udma_mask = 0x7,
@@ -408,7 +445,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
408 /* VIA UDMA 66 devices */ 445 /* VIA UDMA 66 devices */
409 static struct ata_port_info via_udma66_info = { 446 static struct ata_port_info via_udma66_info = {
410 .sht = &via_sht, 447 .sht = &via_sht,
411 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 448 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
412 .pio_mask = 0x1f, 449 .pio_mask = 0x1f,
413 .mwdma_mask = 0x07, 450 .mwdma_mask = 0x07,
414 .udma_mask = 0x1f, 451 .udma_mask = 0x1f,
@@ -417,7 +454,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
417 /* VIA UDMA 100 devices */ 454 /* VIA UDMA 100 devices */
418 static struct ata_port_info via_udma100_info = { 455 static struct ata_port_info via_udma100_info = {
419 .sht = &via_sht, 456 .sht = &via_sht,
420 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 457 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
421 .pio_mask = 0x1f, 458 .pio_mask = 0x1f,
422 .mwdma_mask = 0x07, 459 .mwdma_mask = 0x07,
423 .udma_mask = 0x3f, 460 .udma_mask = 0x3f,
@@ -426,7 +463,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
426 /* UDMA133 with bad AST (All current 133) */ 463 /* UDMA133 with bad AST (All current 133) */
427 static struct ata_port_info via_udma133_info = { 464 static struct ata_port_info via_udma133_info = {
428 .sht = &via_sht, 465 .sht = &via_sht,
429 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 466 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
430 .pio_mask = 0x1f, 467 .pio_mask = 0x1f,
431 .mwdma_mask = 0x07, 468 .mwdma_mask = 0x07,
432 .udma_mask = 0x7f, /* FIXME: should check north bridge */ 469 .udma_mask = 0x7f, /* FIXME: should check north bridge */
@@ -471,21 +508,8 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
471 } 508 }
472 509
473 /* Initialise the FIFO for the enabled channels. */ 510 /* Initialise the FIFO for the enabled channels. */
474 if (config->flags & VIA_SET_FIFO) { 511 via_config_fifo(pdev, config->flags);
475 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; 512
476 u8 fifo;
477
478 pci_read_config_byte(pdev, 0x43, &fifo);
479
480 /* Clear PREQ# until DDACK# for errata */
481 if (config->flags & VIA_BAD_PREQ)
482 fifo &= 0x7F;
483 else
484 fifo &= 0x9f;
485 /* Turn on FIFO for enabled channels */
486 fifo |= fifo_setting[enable];
487 pci_write_config_byte(pdev, 0x43, fifo);
488 }
489 /* Clock set up */ 513 /* Clock set up */
490 switch(config->flags & VIA_UDMA) { 514 switch(config->flags & VIA_UDMA) {
491 case VIA_UDMA_NONE: 515 case VIA_UDMA_NONE:
@@ -529,6 +553,39 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
529 return ata_pci_init_one(pdev, port_info, 2); 553 return ata_pci_init_one(pdev, port_info, 2);
530} 554}
531 555
556/**
557 * via_reinit_one - reinit after resume
558 * @pdev: PCI device
559 *
560 * Called when the VIA PATA device is resumed. We must then
561 * reconfigure the fifo and other setup we may have altered. In
562 * addition the kernel needs to have the resume methods on PCI
563 * quirk supported.
564 */
565
566static int via_reinit_one(struct pci_dev *pdev)
567{
568 u32 timing;
569 struct ata_host *host = dev_get_drvdata(&pdev->dev);
570 const struct via_isa_bridge *config = host->private_data;
571
572 via_config_fifo(pdev, config->flags);
573
574 if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
575 /* The 66 MHz devices require we enable the clock */
576 pci_read_config_dword(pdev, 0x50, &timing);
577 timing |= 0x80008;
578 pci_write_config_dword(pdev, 0x50, timing);
579 }
580 if (config->flags & VIA_BAD_CLK66) {
581 /* Disable the 66MHz clock on problem devices */
582 pci_read_config_dword(pdev, 0x50, &timing);
583 timing &= ~0x80008;
584 pci_write_config_dword(pdev, 0x50, timing);
585 }
586 return ata_pci_device_resume(pdev);
587}
588
532static const struct pci_device_id via[] = { 589static const struct pci_device_id via[] = {
533 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), }, 590 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
534 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), }, 591 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), },
@@ -542,7 +599,9 @@ static struct pci_driver via_pci_driver = {
542 .name = DRV_NAME, 599 .name = DRV_NAME,
543 .id_table = via, 600 .id_table = via,
544 .probe = via_init_one, 601 .probe = via_init_one,
545 .remove = ata_pci_remove_one 602 .remove = ata_pci_remove_one,
603 .suspend = ata_pci_device_suspend,
604 .resume = via_reinit_one,
546}; 605};
547 606
548static int __init via_init(void) 607static int __init via_init(void)
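via_config_fifo() packs two pieces of state into config byte 0x43: it first masks the byte (0x7F on VIA_BAD_PREQ parts to clear PREQ#-until-DDACK#, 0x9F otherwise) and then ORs in the FIFO split from the fifo_setting[] table, indexed by the low two channel-enable bits of register 0x40. A runnable userspace sketch of just that byte arithmetic (the PCI config reads and writes themselves are omitted):

#include <stdint.h>
#include <stdio.h>

/* Compute the new value of VIA config byte 0x43 the way via_config_fifo()
 * does: mask, then OR in the FIFO split for the enabled channels. */
static uint8_t via_fifo_byte(uint8_t old43, uint8_t enable_bits, int bad_preq)
{
	static const uint8_t fifo_setting[4] = { 0x00, 0x60, 0x00, 0x20 };
	uint8_t fifo = old43;

	if (bad_preq)			/* clear PREQ# until DDACK# for errata */
		fifo &= 0x7F;
	else
		fifo &= 0x9F;
	return fifo | fifo_setting[enable_bits & 3];
}

int main(void)
{
	/* e.g. both channels enabled (reg 0x40 & 3 == 3) on a non-errata part */
	printf("0x%02x\n", (unsigned int)via_fifo_byte(0xFF, 0x03, 0));	/* -> 0xBF */
	return 0;
}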
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
new file mode 100644
index 00000000000..3ea345cde52
--- /dev/null
+++ b/drivers/ata/pata_winbond.c
@@ -0,0 +1,306 @@
1/*
2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com>
4 *
5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/blkdev.h>
14#include <linux/delay.h>
15#include <scsi/scsi_host.h>
16#include <linux/libata.h>
17#include <linux/platform_device.h>
18
19#define DRV_NAME "pata_winbond"
20#define DRV_VERSION "0.0.1"
21
22#define NR_HOST 4 /* Two winbond controllers, two channels each */
23
24struct winbond_data {
25 unsigned long config;
26 struct platform_device *platform_dev;
27};
28
29static struct ata_host *winbond_host[NR_HOST];
30static struct winbond_data winbond_data[NR_HOST];
31static int nr_winbond_host;
32
33#ifdef MODULE
34static int probe_winbond = 1;
35#else
36static int probe_winbond;
37#endif
38
39static spinlock_t winbond_lock = SPIN_LOCK_UNLOCKED;
40
41static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
42{
43 unsigned long flags;
44 spin_lock_irqsave(&winbond_lock, flags);
45 outb(reg, port + 0x01);
46 outb(val, port + 0x02);
47 spin_unlock_irqrestore(&winbond_lock, flags);
48}
49
50static u8 winbond_readcfg(unsigned long port, u8 reg)
51{
52 u8 val;
53
54 unsigned long flags;
55 spin_lock_irqsave(&winbond_lock, flags);
56 outb(reg, port + 0x01);
57 val = inb(port + 0x02);
58 spin_unlock_irqrestore(&winbond_lock, flags);
59
60 return val;
61}
62
63static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
64{
65 struct ata_timing t;
66 struct winbond_data *winbond = ap->host->private_data;
67 int active, recovery;
68 u8 reg;
69 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
70
71 reg = winbond_readcfg(winbond->config, 0x81);
72
73 /* Get the timing data in cycles */
74 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
75 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
76 else
77 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
78
79 active = (FIT(t.active, 3, 17) - 1) & 0x0F;
80 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
81 timing = (active << 4) | recovery;
82 winbond_writecfg(winbond->config, timing, reg);
83
84 /* Load the setup timing */
85
86 reg = 0x35;
87 if (adev->class != ATA_DEV_ATA)
88 reg |= 0x08; /* FIFO off */
89 if (!ata_pio_need_iordy(adev))
90 reg |= 0x02; /* IORDY off */
91 reg |= (FIT(t.setup, 0, 3) << 6);
92 winbond_writecfg(winbond->config, timing + 1, reg);
93}
94
95
96static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
97{
98 struct ata_port *ap = adev->ap;
99 int slop = buflen & 3;
100
101 if (ata_id_has_dword_io(adev->id)) {
102 if (write_data)
103 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
106
107 if (unlikely(slop)) {
108 u32 pad;
109 if (write_data) {
110 memcpy(&pad, buf + buflen - slop, slop);
111 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
112 } else {
113				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
114 memcpy(buf + buflen - slop, &pad, slop);
115 }
116 }
117 } else
118 ata_pio_data_xfer(adev, buf, buflen, write_data);
119}
120
121static struct scsi_host_template winbond_sht = {
122 .module = THIS_MODULE,
123 .name = DRV_NAME,
124 .ioctl = ata_scsi_ioctl,
125 .queuecommand = ata_scsi_queuecmd,
126 .can_queue = ATA_DEF_QUEUE,
127 .this_id = ATA_SHT_THIS_ID,
128 .sg_tablesize = LIBATA_MAX_PRD,
129 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
130 .emulated = ATA_SHT_EMULATED,
131 .use_clustering = ATA_SHT_USE_CLUSTERING,
132 .proc_name = DRV_NAME,
133 .dma_boundary = ATA_DMA_BOUNDARY,
134 .slave_configure = ata_scsi_slave_config,
135 .slave_destroy = ata_scsi_slave_destroy,
136 .bios_param = ata_std_bios_param,
137};
138
139static struct ata_port_operations winbond_port_ops = {
140 .port_disable = ata_port_disable,
141 .set_piomode = winbond_set_piomode,
142
143 .tf_load = ata_tf_load,
144 .tf_read = ata_tf_read,
145 .check_status = ata_check_status,
146 .exec_command = ata_exec_command,
147 .dev_select = ata_std_dev_select,
148
149 .freeze = ata_bmdma_freeze,
150 .thaw = ata_bmdma_thaw,
151 .error_handler = ata_bmdma_error_handler,
152 .post_internal_cmd = ata_bmdma_post_internal_cmd,
153
154 .qc_prep = ata_qc_prep,
155 .qc_issue = ata_qc_issue_prot,
156
157 .data_xfer = winbond_data_xfer,
158
159 .irq_handler = ata_interrupt,
160 .irq_clear = ata_bmdma_irq_clear,
161
162 .port_start = ata_port_start,
163 .port_stop = ata_port_stop,
164 .host_stop = ata_host_stop
165};
166
167/**
168 * winbond_init_one - attach a winbond interface
169 * @port: base I/O address of the chip configuration registers
173 *
174 * Register a VLB bus IDE interface. Such interfaces are PIO and we
175 * assume they do not support IRQ sharing.
176 */
177
178static __init int winbond_init_one(unsigned long port)
179{
180 struct ata_probe_ent ae;
181 struct platform_device *pdev;
182 int ret;
183 u8 reg;
184 int i;
185
186 reg = winbond_readcfg(port, 0x81);
187 reg |= 0x80; /* jumpered mode off */
188 winbond_writecfg(port, 0x81, reg);
189 reg = winbond_readcfg(port, 0x83);
190 reg |= 0xF0; /* local control */
191 winbond_writecfg(port, 0x83, reg);
192 reg = winbond_readcfg(port, 0x85);
193 reg |= 0xF0; /* programmable timing */
194 winbond_writecfg(port, 0x85, reg);
195
196 reg = winbond_readcfg(port, 0x81);
197
198 if (!(reg & 0x03)) /* Disabled */
199 return 0;
200
201 for (i = 0; i < 2 ; i ++) {
202
203 if (reg & (1 << i)) {
204 /*
205 * Fill in a probe structure first of all
206 */
207
208 pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
209 if (pdev == NULL)
210 return -ENOMEM;
211
212 memset(&ae, 0, sizeof(struct ata_probe_ent));
213 INIT_LIST_HEAD(&ae.node);
214 ae.dev = &pdev->dev;
215
216 ae.port_ops = &winbond_port_ops;
217 ae.pio_mask = 0x1F;
218
219 ae.sht = &winbond_sht;
220
221 ae.n_ports = 1;
222 ae.irq = 14 + i;
223 ae.irq_flags = 0;
224 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
225 ae.port[0].cmd_addr = 0x1F0 - (0x80 * i);
226 ae.port[0].altstatus_addr = ae.port[0].cmd_addr + 0x0206;
227 ae.port[0].ctl_addr = ae.port[0].altstatus_addr;
228 ata_std_ports(&ae.port[0]);
229 /*
230 * Hook in a private data structure per channel
231 */
232 ae.private_data = &winbond_data[nr_winbond_host];
233 winbond_data[nr_winbond_host].config = port;
234 winbond_data[nr_winbond_host].platform_dev = pdev;
235
236 ret = ata_device_add(&ae);
237 if (ret == 0) {
238 platform_device_unregister(pdev);
239 return -ENODEV;
240 }
241 winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
242 }
243 }
244
245 return 0;
246}
247
248/**
249 * winbond_init - attach winbond interfaces
250 *
251 * Attach winbond IDE interfaces by scanning the ports they may occupy.
252 */
253
254static __init int winbond_init(void)
255{
256 static const unsigned long config[2] = { 0x130, 0x1B0 };
257
258 int ct = 0;
259 int i;
260
261 if (probe_winbond == 0)
262 return -ENODEV;
263
264 /*
265 * Check both base addresses
266 */
267
268 for (i = 0; i < 2; i++) {
269 if (probe_winbond & (1<<i)) {
270 int ret = 0;
271 unsigned long port = config[i];
272
273 if (request_region(port, 2, "pata_winbond")) {
274 ret = winbond_init_one(port);
275 if(ret <= 0)
276 release_region(port, 2);
277 else ct+= ret;
278 }
279 }
280 }
281 if (ct != 0)
282 return 0;
283 return -ENODEV;
284}
285
286static __exit void winbond_exit(void)
287{
288 int i;
289
290 for (i = 0; i < nr_winbond_host; i++) {
291 ata_host_remove(winbond_host[i]);
292 release_region(winbond_data[i].config, 2);
293 platform_device_unregister(winbond_data[i].platform_dev);
294 }
295}
296
297MODULE_AUTHOR("Alan Cox");
298MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
299MODULE_LICENSE("GPL");
300MODULE_VERSION(DRV_VERSION);
301
302module_init(winbond_init);
303module_exit(winbond_exit);
304
305module_param(probe_winbond, int, 0);
306
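winbond_set_piomode() turns the computed ata_timing into a single byte per drive: the active count (clamped to 3..17, minus one) fills the high nibble and the recovery count (clamped to 1..15, plus one) the low nibble; the 20000 and 30303 arguments to ata_timing_compute() are the bus cycle times for the assumed 50MHz fast-VLB and roughly 33MHz cases. A small runnable sketch of the nibble packing, with libata's FIT() clamp re-implemented locally:

#include <stdint.h>
#include <stdio.h>

/* libata's FIT(v, vmin, vmax) is a simple clamp */
static int fit(int v, int vmin, int vmax)
{
	if (v < vmin)
		return vmin;
	if (v > vmax)
		return vmax;
	return v;
}

/* Pack active/recovery cycle counts into one W83759A timing byte the way
 * winbond_set_piomode() does. */
static uint8_t winbond_timing_byte(int active_cycles, int recover_cycles)
{
	uint8_t active   = (fit(active_cycles, 3, 17) - 1) & 0x0F;
	uint8_t recovery = (fit(recover_cycles, 1, 15) + 1) & 0x0F;

	return (active << 4) | recovery;
}

int main(void)
{
	/* e.g. a timing of 6 active / 4 recovery clocks */
	printf("0x%02x\n", (unsigned int)winbond_timing_byte(6, 4));	/* -> 0x55 */
	return 0;
}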
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d65ebfd7c7b..0d316eb3c21 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -29,6 +29,11 @@
29 * NV-specific details such as register offsets, SATA phy location, 29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc. 30 * hotplug info, etc.
31 * 31 *
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
32 */ 37 */
33 38
34#include <linux/kernel.h> 39#include <linux/kernel.h>
@@ -40,10 +45,13 @@
40#include <linux/interrupt.h> 45#include <linux/interrupt.h>
41#include <linux/device.h> 46#include <linux/device.h>
42#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
48#include <scsi/scsi_device.h>
43#include <linux/libata.h> 49#include <linux/libata.h>
44 50
45#define DRV_NAME "sata_nv" 51#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0" 52#define DRV_VERSION "3.2"
53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
47 55
48enum { 56enum {
49 NV_PORTS = 2, 57 NV_PORTS = 2,
@@ -78,8 +86,138 @@ enum {
78 // For PCI config register 20 86 // For PCI config register 20
79 NV_MCP_SATA_CFG_20 = 0x50, 87 NV_MCP_SATA_CFG_20 = 0x50,
80 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04, 88 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
89 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
90 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
91 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
92 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
93
94 NV_ADMA_MAX_CPBS = 32,
95 NV_ADMA_CPB_SZ = 128,
96 NV_ADMA_APRD_SZ = 16,
97 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
98 NV_ADMA_APRD_SZ,
99 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
100 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
102 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
103
104 /* BAR5 offset to ADMA general registers */
105 NV_ADMA_GEN = 0x400,
106 NV_ADMA_GEN_CTL = 0x00,
107 NV_ADMA_NOTIFIER_CLEAR = 0x30,
108
109 /* BAR5 offset to ADMA ports */
110 NV_ADMA_PORT = 0x480,
111
112 /* size of ADMA port register space */
113 NV_ADMA_PORT_SIZE = 0x100,
114
115 /* ADMA port registers */
116 NV_ADMA_CTL = 0x40,
117 NV_ADMA_CPB_COUNT = 0x42,
118 NV_ADMA_NEXT_CPB_IDX = 0x43,
119 NV_ADMA_STAT = 0x44,
120 NV_ADMA_CPB_BASE_LOW = 0x48,
121 NV_ADMA_CPB_BASE_HIGH = 0x4C,
122 NV_ADMA_APPEND = 0x50,
123 NV_ADMA_NOTIFIER = 0x68,
124 NV_ADMA_NOTIFIER_ERROR = 0x6C,
125
126 /* NV_ADMA_CTL register bits */
127 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
128 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
129 NV_ADMA_CTL_GO = (1 << 7),
130 NV_ADMA_CTL_AIEN = (1 << 8),
131 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
132 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
133
134 /* CPB response flag bits */
135 NV_CPB_RESP_DONE = (1 << 0),
136 NV_CPB_RESP_ATA_ERR = (1 << 3),
137 NV_CPB_RESP_CMD_ERR = (1 << 4),
138 NV_CPB_RESP_CPB_ERR = (1 << 7),
139
140 /* CPB control flag bits */
141 NV_CPB_CTL_CPB_VALID = (1 << 0),
142 NV_CPB_CTL_QUEUE = (1 << 1),
143 NV_CPB_CTL_APRD_VALID = (1 << 2),
144 NV_CPB_CTL_IEN = (1 << 3),
145 NV_CPB_CTL_FPDMA = (1 << 4),
146
147 /* APRD flags */
148 NV_APRD_WRITE = (1 << 1),
149 NV_APRD_END = (1 << 2),
150 NV_APRD_CONT = (1 << 3),
151
152 /* NV_ADMA_STAT flags */
153 NV_ADMA_STAT_TIMEOUT = (1 << 0),
154 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
155 NV_ADMA_STAT_HOTPLUG = (1 << 2),
156 NV_ADMA_STAT_CPBERR = (1 << 4),
157 NV_ADMA_STAT_SERROR = (1 << 5),
158 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
159 NV_ADMA_STAT_IDLE = (1 << 8),
160 NV_ADMA_STAT_LEGACY = (1 << 9),
161 NV_ADMA_STAT_STOPPED = (1 << 10),
162 NV_ADMA_STAT_DONE = (1 << 12),
163 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
164 NV_ADMA_STAT_TIMEOUT,
165
166 /* port flags */
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
169
170};
171
172/* ADMA Physical Region Descriptor - one SG segment */
173struct nv_adma_prd {
174 __le64 addr;
175 __le32 len;
176 u8 flags;
177 u8 packet_len;
178 __le16 reserved;
179};
180
181enum nv_adma_regbits {
182 CMDEND = (1 << 15), /* end of command list */
183 WNB = (1 << 14), /* wait-not-BSY */
184 IGN = (1 << 13), /* ignore this entry */
185 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
186 DA2 = (1 << (2 + 8)),
187 DA1 = (1 << (1 + 8)),
188 DA0 = (1 << (0 + 8)),
189};
190
191/* ADMA Command Parameter Block
192 The first 5 SG segments are stored inside the Command Parameter Block itself.
193 If there are more than 5 segments the remainder are stored in a separate
194 memory area indicated by next_aprd. */
195struct nv_adma_cpb {
196 u8 resp_flags; /* 0 */
197 u8 reserved1; /* 1 */
198 u8 ctl_flags; /* 2 */
199 /* len is length of taskfile in 64 bit words */
200 u8 len; /* 3 */
201 u8 tag; /* 4 */
202 u8 next_cpb_idx; /* 5 */
203 __le16 reserved2; /* 6-7 */
204 __le16 tf[12]; /* 8-31 */
205 struct nv_adma_prd aprd[5]; /* 32-111 */
206 __le64 next_aprd; /* 112-119 */
207 __le64 reserved3; /* 120-127 */
208};
209
210
211struct nv_adma_port_priv {
212 struct nv_adma_cpb *cpb;
213 dma_addr_t cpb_dma;
214 struct nv_adma_prd *aprd;
215 dma_addr_t aprd_dma;
216 u8 flags;
81}; 217};
82 218
219#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
220
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 221static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host *host); 222static void nv_ck804_host_stop(struct ata_host *host);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 223static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
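The ADMA constants above encode a fixed per-port DMA layout: each of the 32 command slots owns a 128-byte CPB followed by 56 external APRDs of 16 bytes each (exactly filling 1KiB per slot), with 5 more APRDs held inside the CPB itself for a 61-entry s/g limit, so the whole per-port buffer comes to 32KiB. A userspace mirror of that arithmetic, with <stdint.h> types standing in for the kernel's __le* types, that checks the sizes add up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace mirrors of the driver's layout constants and structures. */
enum {
	NV_ADMA_MAX_CPBS	= 32,
	NV_ADMA_CPB_SZ		= 128,
	NV_ADMA_APRD_SZ		= 16,
	NV_ADMA_SGTBL_LEN	= (1024 - NV_ADMA_CPB_SZ) / NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN	= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ	= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
					(NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
};

struct nv_adma_prd {		/* one s/g segment */
	uint64_t addr;
	uint32_t len;
	uint8_t  flags;
	uint8_t  packet_len;
	uint16_t reserved;
};

struct nv_adma_cpb {		/* one command parameter block */
	uint8_t  resp_flags, reserved1, ctl_flags, len, tag, next_cpb_idx;
	uint16_t reserved2;
	uint16_t tf[12];
	struct nv_adma_prd aprd[5];
	uint64_t next_aprd;
	uint64_t reserved3;
};

int main(void)
{
	assert(sizeof(struct nv_adma_prd) == NV_ADMA_APRD_SZ);
	assert(sizeof(struct nv_adma_cpb) == NV_ADMA_CPB_SZ);
	printf("external aprds per cpb: %d (total s/g %d)\n",
	       NV_ADMA_SGTBL_LEN, NV_ADMA_SGTBL_TOTAL_LEN);	/* 56, 61 */
	printf("per-port buffer: %d bytes\n", NV_ADMA_PORT_PRIV_DMA_SZ);	/* 32768 */
	return 0;
}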
@@ -93,13 +231,28 @@ static void nv_nf2_thaw(struct ata_port *ap);
93static void nv_ck804_freeze(struct ata_port *ap); 231static void nv_ck804_freeze(struct ata_port *ap);
94static void nv_ck804_thaw(struct ata_port *ap); 232static void nv_ck804_thaw(struct ata_port *ap);
95static void nv_error_handler(struct ata_port *ap); 233static void nv_error_handler(struct ata_port *ap);
234static int nv_adma_slave_config(struct scsi_device *sdev);
235static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
236static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239static void nv_adma_irq_clear(struct ata_port *ap);
240static int nv_adma_port_start(struct ata_port *ap);
241static void nv_adma_port_stop(struct ata_port *ap);
242static void nv_adma_error_handler(struct ata_port *ap);
243static void nv_adma_host_stop(struct ata_host *host);
244static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247static u8 nv_adma_bmdma_status(struct ata_port *ap);
96 248
97enum nv_host_type 249enum nv_host_type
98{ 250{
99 GENERIC, 251 GENERIC,
100 NFORCE2, 252 NFORCE2,
101 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */ 253 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
102 CK804 254 CK804,
255 ADMA
103}; 256};
104 257
105static const struct pci_device_id nv_pci_tbl[] = { 258static const struct pci_device_id nv_pci_tbl[] = {
@@ -160,6 +313,24 @@ static struct scsi_host_template nv_sht = {
160 .bios_param = ata_std_bios_param, 313 .bios_param = ata_std_bios_param,
161}; 314};
162 315
316static struct scsi_host_template nv_adma_sht = {
317 .module = THIS_MODULE,
318 .name = DRV_NAME,
319 .ioctl = ata_scsi_ioctl,
320 .queuecommand = ata_scsi_queuecmd,
321 .can_queue = NV_ADMA_MAX_CPBS,
322 .this_id = ATA_SHT_THIS_ID,
323 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
324 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
325 .emulated = ATA_SHT_EMULATED,
326 .use_clustering = ATA_SHT_USE_CLUSTERING,
327 .proc_name = DRV_NAME,
328 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
329 .slave_configure = nv_adma_slave_config,
330 .slave_destroy = ata_scsi_slave_destroy,
331 .bios_param = ata_std_bios_param,
332};
333
163static const struct ata_port_operations nv_generic_ops = { 334static const struct ata_port_operations nv_generic_ops = {
164 .port_disable = ata_port_disable, 335 .port_disable = ata_port_disable,
165 .tf_load = ata_tf_load, 336 .tf_load = ata_tf_load,
@@ -241,11 +412,40 @@ static const struct ata_port_operations nv_ck804_ops = {
241 .host_stop = nv_ck804_host_stop, 412 .host_stop = nv_ck804_host_stop,
242}; 413};
243 414
415static const struct ata_port_operations nv_adma_ops = {
416 .port_disable = ata_port_disable,
417 .tf_load = ata_tf_load,
418 .tf_read = ata_tf_read,
419 .check_atapi_dma = nv_adma_check_atapi_dma,
420 .exec_command = ata_exec_command,
421 .check_status = ata_check_status,
422 .dev_select = ata_std_dev_select,
423 .bmdma_setup = nv_adma_bmdma_setup,
424 .bmdma_start = nv_adma_bmdma_start,
425 .bmdma_stop = nv_adma_bmdma_stop,
426 .bmdma_status = nv_adma_bmdma_status,
427 .qc_prep = nv_adma_qc_prep,
428 .qc_issue = nv_adma_qc_issue,
429 .freeze = nv_ck804_freeze,
430 .thaw = nv_ck804_thaw,
431 .error_handler = nv_adma_error_handler,
432 .post_internal_cmd = nv_adma_bmdma_stop,
433 .data_xfer = ata_mmio_data_xfer,
434 .irq_handler = nv_adma_interrupt,
435 .irq_clear = nv_adma_irq_clear,
436 .scr_read = nv_scr_read,
437 .scr_write = nv_scr_write,
438 .port_start = nv_adma_port_start,
439 .port_stop = nv_adma_port_stop,
440 .host_stop = nv_adma_host_stop,
441};
442
244static struct ata_port_info nv_port_info[] = { 443static struct ata_port_info nv_port_info[] = {
245 /* generic */ 444 /* generic */
246 { 445 {
247 .sht = &nv_sht, 446 .sht = &nv_sht,
248 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 447 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
448 ATA_FLAG_HRST_TO_RESUME,
249 .pio_mask = NV_PIO_MASK, 449 .pio_mask = NV_PIO_MASK,
250 .mwdma_mask = NV_MWDMA_MASK, 450 .mwdma_mask = NV_MWDMA_MASK,
251 .udma_mask = NV_UDMA_MASK, 451 .udma_mask = NV_UDMA_MASK,
@@ -254,7 +454,8 @@ static struct ata_port_info nv_port_info[] = {
254 /* nforce2/3 */ 454 /* nforce2/3 */
255 { 455 {
256 .sht = &nv_sht, 456 .sht = &nv_sht,
257 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 457 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
458 ATA_FLAG_HRST_TO_RESUME,
258 .pio_mask = NV_PIO_MASK, 459 .pio_mask = NV_PIO_MASK,
259 .mwdma_mask = NV_MWDMA_MASK, 460 .mwdma_mask = NV_MWDMA_MASK,
260 .udma_mask = NV_UDMA_MASK, 461 .udma_mask = NV_UDMA_MASK,
@@ -263,12 +464,23 @@ static struct ata_port_info nv_port_info[] = {
263 /* ck804 */ 464 /* ck804 */
264 { 465 {
265 .sht = &nv_sht, 466 .sht = &nv_sht,
266 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 467 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
468 ATA_FLAG_HRST_TO_RESUME,
267 .pio_mask = NV_PIO_MASK, 469 .pio_mask = NV_PIO_MASK,
268 .mwdma_mask = NV_MWDMA_MASK, 470 .mwdma_mask = NV_MWDMA_MASK,
269 .udma_mask = NV_UDMA_MASK, 471 .udma_mask = NV_UDMA_MASK,
270 .port_ops = &nv_ck804_ops, 472 .port_ops = &nv_ck804_ops,
271 }, 473 },
474 /* ADMA */
475 {
476 .sht = &nv_adma_sht,
477 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
478 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
479 .pio_mask = NV_PIO_MASK,
480 .mwdma_mask = NV_MWDMA_MASK,
481 .udma_mask = NV_UDMA_MASK,
482 .port_ops = &nv_adma_ops,
483 },
272}; 484};
273 485
274MODULE_AUTHOR("NVIDIA"); 486MODULE_AUTHOR("NVIDIA");
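Each port's ADMA registers live in a fixed window of BAR5: the control block for port N starts at NV_ADMA_PORT + N * NV_ADMA_PORT_SIZE, the shared general registers sit at NV_ADMA_GEN, and the per-port notifier-clear word at NV_ADMA_GEN + NV_ADMA_NOTIFIER_CLEAR + 4 * N; NV_ADMA_CHECK_INTR() likewise picks bit 19 + 12 * N out of the general control word. A runnable sketch that just prints those offsets and mask bits for both ports, mirroring the constants above and the per-port helpers added below:

#include <stdio.h>

enum {					/* BAR5 offsets from the driver */
	NV_ADMA_GEN		= 0x400,
	NV_ADMA_NOTIFIER_CLEAR	= 0x30,
	NV_ADMA_PORT		= 0x480,
	NV_ADMA_PORT_SIZE	= 0x100,
};

/* Per-port ADMA control block offset, as in __nv_adma_ctl_block(). */
static unsigned int adma_ctl_block(unsigned int port_no)
{
	return NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
}

/* Per-port notifier clear register, as in nv_adma_notifier_clear_block(). */
static unsigned int adma_notifier_clear(unsigned int port_no)
{
	return NV_ADMA_GEN + NV_ADMA_NOTIFIER_CLEAR + 4 * port_no;
}

/* Interrupt pending bit in the general control word, NV_ADMA_CHECK_INTR(). */
static unsigned int adma_intr_mask(unsigned int port_no)
{
	return 1u << (19 + 12 * port_no);
}

int main(void)
{
	for (unsigned int port = 0; port < 2; port++)
		printf("port %u: ctl 0x%03x, notifier clear 0x%03x, intr mask 0x%08x\n",
		       port, adma_ctl_block(port), adma_notifier_clear(port),
		       adma_intr_mask(port));
	return 0;
}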
@@ -277,37 +489,220 @@ MODULE_LICENSE("GPL");
277MODULE_DEVICE_TABLE(pci, nv_pci_tbl); 489MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
278MODULE_VERSION(DRV_VERSION); 490MODULE_VERSION(DRV_VERSION);
279 491
280static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) 492static int adma_enabled = 1;
493
494static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
495 unsigned int port_no)
281{ 496{
282 struct ata_host *host = dev_instance; 497 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
283 unsigned int i; 498 return mmio;
284 unsigned int handled = 0; 499}
285 unsigned long flags;
286 500
287 spin_lock_irqsave(&host->lock, flags); 501static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
502{
503 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
504}
288 505
289 for (i = 0; i < host->n_ports; i++) { 506static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
290 struct ata_port *ap; 507{
508 return (ap->host->mmio_base + NV_ADMA_GEN);
509}
291 510
292 ap = host->ports[i]; 511static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
293 if (ap && 512{
294 !(ap->flags & ATA_FLAG_DISABLED)) { 513 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
295 struct ata_queued_cmd *qc; 514}
296 515
297 qc = ata_qc_from_tag(ap, ap->active_tag); 516static void nv_adma_register_mode(struct ata_port *ap)
298 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 517{
299 handled += ata_host_intr(ap, qc); 518 void __iomem *mmio = nv_adma_ctl_block(ap);
300 else 519 struct nv_adma_port_priv *pp = ap->private_data;
301 // No request pending? Clear interrupt status 520 u16 tmp;
302 // anyway, in case there's one pending. 521
303 ap->ops->check_status(ap); 522 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
304 } 523 return;
524
525 tmp = readw(mmio + NV_ADMA_CTL);
526 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
527
528 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
529}
530
531static void nv_adma_mode(struct ata_port *ap)
532{
533 void __iomem *mmio = nv_adma_ctl_block(ap);
534 struct nv_adma_port_priv *pp = ap->private_data;
535 u16 tmp;
305 536
537 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
538 return;
539
540 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
541
542 tmp = readw(mmio + NV_ADMA_CTL);
543 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
544
545 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
546}
547
548static int nv_adma_slave_config(struct scsi_device *sdev)
549{
550 struct ata_port *ap = ata_shost_to_port(sdev->host);
551 struct nv_adma_port_priv *pp = ap->private_data;
552 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
553 u64 bounce_limit;
554 unsigned long segment_boundary;
555 unsigned short sg_tablesize;
556 int rc;
557 int adma_enable;
558 u32 current_reg, new_reg, config_mask;
559
560 rc = ata_scsi_slave_config(sdev);
561
562 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
563 /* Not a proper libata device, ignore */
564 return rc;
565
566 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
567 /*
568 * NVIDIA reports that ADMA mode does not support ATAPI commands.
569 * Therefore ATAPI commands are sent through the legacy interface.
570 * However, the legacy interface only supports 32-bit DMA.
571 * Restrict DMA parameters as required by the legacy interface
572 * when an ATAPI device is connected.
573 */
574 bounce_limit = ATA_DMA_MASK;
575 segment_boundary = ATA_DMA_BOUNDARY;
576 /* Subtract 1 since an extra entry may be needed for padding, see
577 libata-scsi.c */
578 sg_tablesize = LIBATA_MAX_PRD - 1;
579
580 /* Since the legacy DMA engine is in use, we need to disable ADMA
581 on the port. */
582 adma_enable = 0;
583 nv_adma_register_mode(ap);
584 }
585 else {
586 bounce_limit = *ap->dev->dma_mask;
587 segment_boundary = NV_ADMA_DMA_BOUNDARY;
588 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
589 adma_enable = 1;
590 }
591
592 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
593
594 if(ap->port_no == 1)
595 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
596 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
597 else
598 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
599 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
600
601 if(adma_enable) {
602 new_reg = current_reg | config_mask;
603 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
604 }
605 else {
606 new_reg = current_reg & ~config_mask;
607 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
306 } 608 }
609
610 if(current_reg != new_reg)
611 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
612
613 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
614 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
615 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
616 ata_port_printk(ap, KERN_INFO,
617 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
618 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
619 return rc;
620}
307 621
308 spin_unlock_irqrestore(&host->lock, flags); 622static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
623{
624 struct nv_adma_port_priv *pp = qc->ap->private_data;
625 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
626}
309 627
310 return IRQ_RETVAL(handled); 628static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
629{
630 unsigned int idx = 0;
631
632 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
633
634 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
635 cpb[idx++] = cpu_to_le16(IGN);
636 cpb[idx++] = cpu_to_le16(IGN);
637 cpb[idx++] = cpu_to_le16(IGN);
638 cpb[idx++] = cpu_to_le16(IGN);
639 cpb[idx++] = cpu_to_le16(IGN);
640 }
641 else {
642 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
643 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
645 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
646 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
647 }
648 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
649 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
650 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
651 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
652 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
653
654 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
655
656 return idx;
657}
658
659static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
660{
661 struct nv_adma_port_priv *pp = ap->private_data;
662 int complete = 0, have_err = 0;
663 u8 flags = pp->cpb[cpb_num].resp_flags;
664
665 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
666
667 if (flags & NV_CPB_RESP_DONE) {
668 VPRINTK("CPB flags done, flags=0x%x\n", flags);
669 complete = 1;
670 }
671 if (flags & NV_CPB_RESP_ATA_ERR) {
672 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
673 have_err = 1;
674 complete = 1;
675 }
676 if (flags & NV_CPB_RESP_CMD_ERR) {
677 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
678 have_err = 1;
679 complete = 1;
680 }
681 if (flags & NV_CPB_RESP_CPB_ERR) {
682 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
683 have_err = 1;
684 complete = 1;
685 }
686 if(complete || force_err)
687 {
688 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
689 if(likely(qc)) {
690 u8 ata_status = 0;
691 /* Only use the ATA port status for non-NCQ commands.
692 For NCQ commands the current status may have nothing to do with
693 the command just completed. */
694 if(qc->tf.protocol != ATA_PROT_NCQ)
695 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
696
697 if(have_err || force_err)
698 ata_status |= ATA_ERR;
699
700 qc->err_mask |= ac_err_mask(ata_status);
701 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
702 qc->err_mask);
703 ata_qc_complete(qc);
704 }
705 }
311} 706}
312 707
313static int nv_host_intr(struct ata_port *ap, u8 irq_stat) 708static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
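nv_adma_tf_to_cpb() above serialises a taskfile into up to 12 little-endian 16-bit words: each word carries the shadow-register index in the high byte and the register value in the low byte, the first (device register) word sets WNB, the padding words of a non-LBA48 command are marked IGN, and the final command write carries CMDEND. A runnable sketch of that encoding for a plain non-LBA48 taskfile; the ATA_REG_* numbers used here are the conventional taskfile register offsets:

#include <stdint.h>
#include <stdio.h>

enum {				/* control bits, from enum nv_adma_regbits */
	CMDEND	= 1 << 15,	/* end of command list */
	WNB	= 1 << 14,	/* wait-not-BSY */
	IGN	= 1 << 13,	/* ignore this entry */
};

enum {				/* conventional taskfile register offsets */
	ATA_REG_ERR = 1, ATA_REG_NSECT, ATA_REG_LBAL,
	ATA_REG_LBAM, ATA_REG_LBAH, ATA_REG_DEVICE, ATA_REG_CMD,
};

/* Encode a non-LBA48 taskfile the way nv_adma_tf_to_cpb() does; the five
 * high-order-byte slots are written as IGN padding. */
static unsigned int tf_to_cpb(uint16_t cpb[12], uint8_t device, uint8_t feature,
			      uint8_t nsect, uint8_t lbal, uint8_t lbam,
			      uint8_t lbah, uint8_t command)
{
	unsigned int idx = 0;

	cpb[idx++] = (ATA_REG_DEVICE << 8) | device | WNB;
	for (int i = 0; i < 5; i++)
		cpb[idx++] = IGN;
	cpb[idx++] = (ATA_REG_ERR   << 8) | feature;
	cpb[idx++] = (ATA_REG_NSECT << 8) | nsect;
	cpb[idx++] = (ATA_REG_LBAL  << 8) | lbal;
	cpb[idx++] = (ATA_REG_LBAM  << 8) | lbam;
	cpb[idx++] = (ATA_REG_LBAH  << 8) | lbah;
	cpb[idx++] = (ATA_REG_CMD   << 8) | command | CMDEND;
	return idx;		/* number of valid words, at most 12 */
}

int main(void)
{
	uint16_t cpb[12];
	unsigned int n = tf_to_cpb(cpb, 0xE0, 0, 1, 0, 0, 0, 0xEC /* IDENTIFY */);

	for (unsigned int i = 0; i < n; i++)
		printf("%2u: 0x%04x\n", i, (unsigned int)cpb[i]);
	return 0;
}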
@@ -341,6 +736,486 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
341 return 1; 736 return 1;
342} 737}
343 738
739static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
740{
741 struct ata_host *host = dev_instance;
742 int i, handled = 0;
743 u32 notifier_clears[2];
744
745 spin_lock(&host->lock);
746
747 for (i = 0; i < host->n_ports; i++) {
748 struct ata_port *ap = host->ports[i];
749 notifier_clears[i] = 0;
750
751 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
752 struct nv_adma_port_priv *pp = ap->private_data;
753 void __iomem *mmio = nv_adma_ctl_block(ap);
754 u16 status;
755 u32 gen_ctl;
756 int have_global_err = 0;
757 u32 notifier, notifier_error;
758
759 /* if in ATA register mode, use standard ata interrupt handler */
760 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
761 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
762 >> (NV_INT_PORT_SHIFT * i);
763 handled += nv_host_intr(ap, irq_stat);
764 continue;
765 }
766
767 notifier = readl(mmio + NV_ADMA_NOTIFIER);
768 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
769 notifier_clears[i] = notifier | notifier_error;
770
771 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
772
773 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
774 !notifier_error)
775 /* Nothing to do */
776 continue;
777
778 status = readw(mmio + NV_ADMA_STAT);
779
780 /* Clear status. Ensure the controller sees the clearing before we start
781 looking at any of the CPB statuses, so that any CPB completions after
782 this point in the handler will raise another interrupt. */
783 writew(status, mmio + NV_ADMA_STAT);
784 readw(mmio + NV_ADMA_STAT); /* flush posted write */
785 rmb();
786
787 /* freeze if hotplugged */
788 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
789 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
790 ata_port_freeze(ap);
791 handled++;
792 continue;
793 }
794
795 if (status & NV_ADMA_STAT_TIMEOUT) {
796 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
797 have_global_err = 1;
798 }
799 if (status & NV_ADMA_STAT_CPBERR) {
800 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
801 have_global_err = 1;
802 }
803 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
804			/* Check CPBs for completed commands */
805
806 if(ata_tag_valid(ap->active_tag))
807 /* Non-NCQ command */
808 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
809 (notifier_error & (1 << ap->active_tag)));
810 else {
811 int pos;
812 u32 active = ap->sactive;
813 while( (pos = ffs(active)) ) {
814 pos--;
815 nv_adma_check_cpb(ap, pos, have_global_err ||
816 (notifier_error & (1 << pos)) );
817 active &= ~(1 << pos );
818 }
819 }
820 }
821
822 handled++; /* irq handled if we got here */
823 }
824 }
825
826 if(notifier_clears[0] || notifier_clears[1]) {
827 /* Note: Both notifier clear registers must be written
828 if either is set, even if one is zero, according to NVIDIA. */
829 writel(notifier_clears[0],
830 nv_adma_notifier_clear_block(host->ports[0]));
831 writel(notifier_clears[1],
832 nv_adma_notifier_clear_block(host->ports[1]));
833 }
834
835 spin_unlock(&host->lock);
836
837 return IRQ_RETVAL(handled);
838}
839
840static void nv_adma_irq_clear(struct ata_port *ap)
841{
842 void __iomem *mmio = nv_adma_ctl_block(ap);
843 u16 status = readw(mmio + NV_ADMA_STAT);
844 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
845 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
846 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
847
848 /* clear ADMA status */
849 writew(status, mmio + NV_ADMA_STAT);
850 writel(notifier | notifier_error,
851 nv_adma_notifier_clear_block(ap));
852
853	/* clear legacy status */
854 outb(inb(dma_stat_addr), dma_stat_addr);
855}
856
857static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
858{
859 struct ata_port *ap = qc->ap;
860 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
861 struct nv_adma_port_priv *pp = ap->private_data;
862 u8 dmactl;
863
864 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
865 WARN_ON(1);
866 return;
867 }
868
869 /* load PRD table addr. */
870 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
871
872 /* specify data direction, triple-check start bit is clear */
873 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
874 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
875 if (!rw)
876 dmactl |= ATA_DMA_WR;
877
878 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
879
880 /* issue r/w command */
881 ata_exec_command(ap, &qc->tf);
882}
883
884static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
885{
886 struct ata_port *ap = qc->ap;
887 struct nv_adma_port_priv *pp = ap->private_data;
888 u8 dmactl;
889
890 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
891 WARN_ON(1);
892 return;
893 }
894
895 /* start host DMA transaction */
896 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
897 outb(dmactl | ATA_DMA_START,
898 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
899}
900
901static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
902{
903 struct ata_port *ap = qc->ap;
904 struct nv_adma_port_priv *pp = ap->private_data;
905
906 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
907 return;
908
909 /* clear start/stop bit */
910 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
911 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
912
913 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
914 ata_altstatus(ap); /* dummy read */
915}
916
917static u8 nv_adma_bmdma_status(struct ata_port *ap)
918{
919 struct nv_adma_port_priv *pp = ap->private_data;
920
921 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
922
923 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
924}
925
926static int nv_adma_port_start(struct ata_port *ap)
927{
928 struct device *dev = ap->host->dev;
929 struct nv_adma_port_priv *pp;
930 int rc;
931 void *mem;
932 dma_addr_t mem_dma;
933 void __iomem *mmio = nv_adma_ctl_block(ap);
934 u16 tmp;
935
936 VPRINTK("ENTER\n");
937
938 rc = ata_port_start(ap);
939 if (rc)
940 return rc;
941
942 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
943 if (!pp) {
944 rc = -ENOMEM;
945 goto err_out;
946 }
947
948 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
949 &mem_dma, GFP_KERNEL);
950
951 if (!mem) {
952 rc = -ENOMEM;
953 goto err_out_kfree;
954 }
955 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
956
957 /*
958 * First item in chunk of DMA memory:
959 * 128-byte command parameter block (CPB)
960 * one for each command tag
961 */
962 pp->cpb = mem;
963 pp->cpb_dma = mem_dma;
964
965 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
966 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
967
968 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
969 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
970
971 /*
972 * Second item: block of ADMA_SGTBL_LEN s/g entries
973 */
974 pp->aprd = mem;
975 pp->aprd_dma = mem_dma;
976
977 ap->private_data = pp;
978
979 /* clear any outstanding interrupt conditions */
980 writew(0xffff, mmio + NV_ADMA_STAT);
981
982 /* initialize port variables */
983 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
984
985 /* clear CPB fetch count */
986 writew(0, mmio + NV_ADMA_CPB_COUNT);
987
988 /* clear GO for register mode */
989 tmp = readw(mmio + NV_ADMA_CTL);
990 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
991
992 tmp = readw(mmio + NV_ADMA_CTL);
993 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
994 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
995 udelay(1);
996 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
997 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
998
999 return 0;
1000
1001err_out_kfree:
1002 kfree(pp);
1003err_out:
1004 ata_port_stop(ap);
1005 return rc;
1006}
1007
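One line in nv_adma_port_start() that is easy to misread is the "(mem_dma >> 16) >> 16" used for NV_ADMA_CPB_BASE_HIGH: the shift is split so the expression stays well defined when dma_addr_t is only 32 bits wide, where a single ">> 32" would be undefined. A hedged restatement as a helper; the function name is hypothetical:

/* Split a DMA address into the low/high 32-bit halves programmed into
 * NV_ADMA_CPB_BASE_LOW/HIGH.  The double 16-bit shift avoids an
 * undefined >=32-bit shift when dma_addr_t is itself only 32 bits. */
static void nv_adma_cpb_base_halves(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = addr & 0xFFFFFFFF;
	*hi = (addr >> 16) >> 16;
}
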
1008static void nv_adma_port_stop(struct ata_port *ap)
1009{
1010 struct device *dev = ap->host->dev;
1011 struct nv_adma_port_priv *pp = ap->private_data;
1012 void __iomem *mmio = nv_adma_ctl_block(ap);
1013
1014 VPRINTK("ENTER\n");
1015
1016 writew(0, mmio + NV_ADMA_CTL);
1017
1018 ap->private_data = NULL;
1019 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1020 kfree(pp);
1021 ata_port_stop(ap);
1022}
1023
1024
1025static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1026{
1027 void __iomem *mmio = probe_ent->mmio_base;
1028 struct ata_ioports *ioport = &probe_ent->port[port];
1029
1030 VPRINTK("ENTER\n");
1031
1032 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1033
1034 ioport->cmd_addr = (unsigned long) mmio;
1035 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
1036 ioport->error_addr =
1037 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
1038 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
1039 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
1040 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
1041 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
1042 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1043 ioport->status_addr =
1044 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
1045 ioport->altstatus_addr =
1046 ioport->ctl_addr = (unsigned long) mmio + 0x20;
1047}
1048
1049static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1050{
1051 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1052 unsigned int i;
1053 u32 tmp32;
1054
1055 VPRINTK("ENTER\n");
1056
1057 /* enable ADMA on the ports */
1058 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1059 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1060 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1061 NV_MCP_SATA_CFG_20_PORT1_EN |
1062 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1063
1064 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1065
1066 for (i = 0; i < probe_ent->n_ports; i++)
1067 nv_adma_setup_port(probe_ent, i);
1068
1069 for (i = 0; i < probe_ent->n_ports; i++) {
1070 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1071 u16 tmp;
1072
1073		/* enable ADMA interrupts, preserving the other control bits */
1074 tmp = readw(mmio + NV_ADMA_CTL);
1075 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1076 }
1077
1078 return 0;
1079}
1080
1081static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1082 struct scatterlist *sg,
1083 int idx,
1084 struct nv_adma_prd *aprd)
1085{
1086 u8 flags;
1087
1088 memset(aprd, 0, sizeof(struct nv_adma_prd));
1089
1090 flags = 0;
1091 if (qc->tf.flags & ATA_TFLAG_WRITE)
1092 flags |= NV_APRD_WRITE;
1093 if (idx == qc->n_elem - 1)
1094 flags |= NV_APRD_END;
1095 else if (idx != 4)
1096 flags |= NV_APRD_CONT;
1097
1098 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1099 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1100 aprd->flags = flags;
1101}
1102
1103static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1104{
1105 struct nv_adma_port_priv *pp = qc->ap->private_data;
1106 unsigned int idx;
1107 struct nv_adma_prd *aprd;
1108 struct scatterlist *sg;
1109
1110 VPRINTK("ENTER\n");
1111
1112 idx = 0;
1113
1114 ata_for_each_sg(sg, qc) {
1115 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1116 nv_adma_fill_aprd(qc, sg, idx, aprd);
1117 idx++;
1118 }
1119 if (idx > 5)
1120 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1121}
1122
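The split in nv_adma_fill_sg() above puts the first five APRDs inline in the CPB and spills the remainder into the per-tag table allocated in nv_adma_port_start(). A minimal sketch of that selection on its own, under the same layout; the helper name is hypothetical:

/* Pick the APRD slot for scatter/gather entry 'idx' of command 'tag':
 * entries 0..4 live inline in the CPB, later entries go to the per-tag
 * spill table whose bus address ends up in cpb->next_aprd. */
static struct nv_adma_prd *nv_adma_aprd_slot(struct nv_adma_port_priv *pp,
					     struct nv_adma_cpb *cpb,
					     unsigned int tag, unsigned int idx)
{
	if (idx < 5)
		return &cpb->aprd[idx];
	return &pp->aprd[NV_ADMA_SGTBL_LEN * tag + (idx - 5)];
}
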
1123static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1124{
1125 struct nv_adma_port_priv *pp = qc->ap->private_data;
1126 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1127 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1128 NV_CPB_CTL_APRD_VALID |
1129 NV_CPB_CTL_IEN;
1130
1131 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1132
1133 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1134 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1135 nv_adma_register_mode(qc->ap);
1136 ata_qc_prep(qc);
1137 return;
1138 }
1139
1140 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1141
1142 cpb->len = 3;
1143 cpb->tag = qc->tag;
1144 cpb->next_cpb_idx = 0;
1145
1146 /* turn on NCQ flags for NCQ commands */
1147 if (qc->tf.protocol == ATA_PROT_NCQ)
1148 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1149
1150 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1151
1152 nv_adma_fill_sg(qc, cpb);
1153
1154 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1155 finished filling in all of the contents */
1156 wmb();
1157 cpb->ctl_flags = ctl_flags;
1158}
1159
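The tail of nv_adma_qc_prep() uses the usual publish pattern for descriptors shared with hardware: build the whole CPB, issue a write barrier, then store ctl_flags (which carries NV_CPB_CTL_CPB_VALID) last. A sketch of just that publish step, with a hypothetical helper name:

/* Publish a fully built CPB.  The barrier guarantees the CPB body is
 * visible in memory before the control flags that mark it valid, so the
 * controller can never fetch a half-written descriptor. */
static void nv_adma_cpb_publish(struct nv_adma_cpb *cpb, u8 ctl_flags)
{
	wmb();
	cpb->ctl_flags = ctl_flags;	/* must include NV_CPB_CTL_CPB_VALID */
}
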
1160static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1161{
1162 struct nv_adma_port_priv *pp = qc->ap->private_data;
1163 void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1164
1165 VPRINTK("ENTER\n");
1166
1167 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1168 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1169 /* use ATA register mode */
1170 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1171 nv_adma_register_mode(qc->ap);
1172 return ata_qc_issue_prot(qc);
1173 } else
1174 nv_adma_mode(qc->ap);
1175
1176 /* write append register, command tag in lower 8 bits
1177 and (number of cpbs to append -1) in top 8 bits */
1178 wmb();
1179 writew(qc->tag, mmio + NV_ADMA_APPEND);
1180
1181 DPRINTK("Issued tag %u\n",qc->tag);
1182
1183 return 0;
1184}
1185
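nv_adma_qc_issue() only ever appends one CPB, so it writes the bare tag. Going by the comment above, the APPEND register packs the first tag in bits 7:0 and (number of CPBs - 1) in bits 15:8; a hedged sketch of a multi-CPB append, which this patch itself never needs (helper name hypothetical):

/* Append 'count' consecutive CPBs starting at 'first_tag', following the
 * register layout described in nv_adma_qc_issue() above. */
static void nv_adma_append_cpbs(void __iomem *mmio, u8 first_tag, u8 count)
{
	writew(((u16)(count - 1) << 8) | first_tag, mmio + NV_ADMA_APPEND);
}
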
1186static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1187{
1188 struct ata_host *host = dev_instance;
1189 unsigned int i;
1190 unsigned int handled = 0;
1191 unsigned long flags;
1192
1193 spin_lock_irqsave(&host->lock, flags);
1194
1195 for (i = 0; i < host->n_ports; i++) {
1196 struct ata_port *ap;
1197
1198 ap = host->ports[i];
1199 if (ap &&
1200 !(ap->flags & ATA_FLAG_DISABLED)) {
1201 struct ata_queued_cmd *qc;
1202
1203 qc = ata_qc_from_tag(ap, ap->active_tag);
1204 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1205 handled += ata_host_intr(ap, qc);
1206 else
1207				/* No command in flight?  Clear the interrupt
1208				   status anyway, in case a stray interrupt is pending. */
1209 ap->ops->check_status(ap);
1210 }
1211
1212 }
1213
1214 spin_unlock_irqrestore(&host->lock, flags);
1215
1216 return IRQ_RETVAL(handled);
1217}
1218
344static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) 1219static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
345{ 1220{
346 int i, handled = 0; 1221 int i, handled = 0;
@@ -466,6 +1341,56 @@ static void nv_error_handler(struct ata_port *ap)
466 nv_hardreset, ata_std_postreset); 1341 nv_hardreset, ata_std_postreset);
467} 1342}
468 1343
1344static void nv_adma_error_handler(struct ata_port *ap)
1345{
1346 struct nv_adma_port_priv *pp = ap->private_data;
1347 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1348 void __iomem *mmio = nv_adma_ctl_block(ap);
1349 int i;
1350 u16 tmp;
1351
1352 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1353 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1354 u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1355 u32 status = readw(mmio + NV_ADMA_STAT);
1356
1357 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1358 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1359 notifier, notifier_error, gen_ctl, status);
1360
1361 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1362 struct nv_adma_cpb *cpb = &pp->cpb[i];
1363 if( cpb->ctl_flags || cpb->resp_flags )
1364 ata_port_printk(ap, KERN_ERR,
1365 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1366 i, cpb->ctl_flags, cpb->resp_flags);
1367 }
1368
1369 /* Push us back into port register mode for error handling. */
1370 nv_adma_register_mode(ap);
1371
1372 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1373
1374 /* Mark all of the CPBs as invalid to prevent them from being executed */
1375 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1376 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1377
1378 /* clear CPB fetch count */
1379 writew(0, mmio + NV_ADMA_CPB_COUNT);
1380
1381 /* Reset channel */
1382 tmp = readw(mmio + NV_ADMA_CTL);
1383 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1384 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1385 udelay(1);
1386 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1387 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1388 }
1389
1390 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1391 nv_hardreset, ata_std_postreset);
1392}
1393
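The channel-reset sequence at the end of nv_adma_error_handler() is the same set/flush/delay/clear/flush dance used in nv_adma_port_start() earlier in the patch. If it is ever factored out, the flushes and the settle delay are the parts to keep; a sketch under that assumption (helper name hypothetical):

/* Pulse NV_ADMA_CTL_CHANNEL_RESET, flushing each write and giving the
 * controller a microsecond to latch the reset before releasing it,
 * mirroring the sequence in port_start and the error handler above. */
static void nv_adma_channel_reset(void __iomem *mmio)
{
	u16 tmp = readw(mmio + NV_ADMA_CTL);

	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
}
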
469static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1394static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
470{ 1395{
471 static int printed_version = 0; 1396 static int printed_version = 0;
@@ -475,6 +1400,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
475 int rc; 1400 int rc;
476 u32 bar; 1401 u32 bar;
477 unsigned long base; 1402 unsigned long base;
1403 unsigned long type = ent->driver_data;
1404 int mask_set = 0;
478 1405
479 // Make sure this is a SATA controller by counting the number of bars 1406 // Make sure this is a SATA controller by counting the number of bars
480 // (NVIDIA SATA controllers will always have six bars). Otherwise, 1407 // (NVIDIA SATA controllers will always have six bars). Otherwise,
@@ -483,7 +1410,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
483 if (pci_resource_start(pdev, bar) == 0) 1410 if (pci_resource_start(pdev, bar) == 0)
484 return -ENODEV; 1411 return -ENODEV;
485 1412
486 if (!printed_version++) 1413 if ( !printed_version++)
487 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1414 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
488 1415
489 rc = pci_enable_device(pdev); 1416 rc = pci_enable_device(pdev);
@@ -496,16 +1423,26 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
496 goto err_out_disable; 1423 goto err_out_disable;
497 } 1424 }
498 1425
499 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1426 if(type >= CK804 && adma_enabled) {
500 if (rc) 1427 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
501 goto err_out_regions; 1428 type = ADMA;
502 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1429 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
503 if (rc) 1430 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
504 goto err_out_regions; 1431 mask_set = 1;
1432 }
1433
1434 if(!mask_set) {
1435 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1436 if (rc)
1437 goto err_out_regions;
1438 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1439 if (rc)
1440 goto err_out_regions;
1441 }
505 1442
506 rc = -ENOMEM; 1443 rc = -ENOMEM;
507 1444
508 ppi[0] = ppi[1] = &nv_port_info[ent->driver_data]; 1445 ppi[0] = ppi[1] = &nv_port_info[type];
509 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 1446 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
510 if (!probe_ent) 1447 if (!probe_ent)
511 goto err_out_regions; 1448 goto err_out_regions;
@@ -522,7 +1459,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
522 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; 1459 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
523 1460
524 /* enable SATA space for CK804 */ 1461 /* enable SATA space for CK804 */
525 if (ent->driver_data == CK804) { 1462 if (type >= CK804) {
526 u8 regval; 1463 u8 regval;
527 1464
528 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); 1465 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
@@ -532,6 +1469,12 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
532 1469
533 pci_set_master(pdev); 1470 pci_set_master(pdev);
534 1471
1472 if (type == ADMA) {
1473 rc = nv_adma_host_init(probe_ent);
1474 if (rc)
1475 goto err_out_iounmap;
1476 }
1477
535 rc = ata_device_add(probe_ent); 1478 rc = ata_device_add(probe_ent);
536 if (rc != NV_PORTS) 1479 if (rc != NV_PORTS)
537 goto err_out_iounmap; 1480 goto err_out_iounmap;
@@ -566,6 +1509,33 @@ static void nv_ck804_host_stop(struct ata_host *host)
566 ata_pci_host_stop(host); 1509 ata_pci_host_stop(host);
567} 1510}
568 1511
1512static void nv_adma_host_stop(struct ata_host *host)
1513{
1514 struct pci_dev *pdev = to_pci_dev(host->dev);
1515 int i;
1516 u32 tmp32;
1517
1518 for (i = 0; i < host->n_ports; i++) {
1519 void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1520 u16 tmp;
1521
1522 /* disable interrupt */
1523 tmp = readw(mmio + NV_ADMA_CTL);
1524 writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1525 }
1526
1527 /* disable ADMA on the ports */
1528 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1529 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1530 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1531 NV_MCP_SATA_CFG_20_PORT1_EN |
1532 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1533
1534 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1535
1536 nv_ck804_host_stop(host);
1537}
1538
569static int __init nv_init(void) 1539static int __init nv_init(void)
570{ 1540{
571 return pci_register_driver(&nv_pci_driver); 1541 return pci_register_driver(&nv_pci_driver);
@@ -578,3 +1548,5 @@ static void __exit nv_exit(void)
578 1548
579module_init(nv_init); 1549module_init(nv_init);
580module_exit(nv_exit); 1550module_exit(nv_exit);
1551module_param_named(adma, adma_enabled, bool, 0444);
1552MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
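Usage note: the new "adma" parameter defaults to on for the chip types that support it (type >= CK804 in nv_init_one() above). Loading with "modprobe sata_nv adma=0" (or "sata_nv.adma=0" on the kernel command line when the driver is built in) should fall back to the pre-existing non-ADMA paths if the new mode misbehaves. The 0444 permissions make the value readable via sysfs but not changeable at runtime.
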
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 72eda5160fa..a2778cf016b 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -46,20 +46,19 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04" 49#define DRV_VERSION "1.05"
50 50
51 51
52enum { 52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ 54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */ 55 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 56 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 57 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 58 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */ 59 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 60 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
63 62
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10), 64 (1<<8) | (1<<9) | (1<<10),
@@ -78,6 +77,9 @@ enum {
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 77 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 78 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING, 79 ATA_FLAG_PIO_POLLING,
80
81 /* hp->flags bits */
82 PDC_FLAG_GEN_II = (1 << 0),
81}; 83};
82 84
83 85
@@ -87,6 +89,7 @@ struct pdc_port_priv {
87}; 89};
88 90
89struct pdc_host_priv { 91struct pdc_host_priv {
92 unsigned long flags;
90 int hotplug_offset; 93 int hotplug_offset;
91}; 94};
92 95
@@ -235,20 +238,20 @@ static const struct ata_port_info pdc_port_info[] = {
235 238
236static const struct pci_device_id pdc_ata_pci_tbl[] = { 239static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VDEVICE(PROMISE, 0x3371), board_2037x }, 240 { PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
238 { PCI_VDEVICE(PROMISE, 0x3570), board_2037x },
239 { PCI_VDEVICE(PROMISE, 0x3571), board_2037x },
240 { PCI_VDEVICE(PROMISE, 0x3373), board_2037x }, 241 { PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
241 { PCI_VDEVICE(PROMISE, 0x3375), board_2037x }, 242 { PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
242 { PCI_VDEVICE(PROMISE, 0x3376), board_2037x }, 243 { PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
244 { PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
245 { PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
243 { PCI_VDEVICE(PROMISE, 0x3574), board_2057x }, 246 { PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
247 { PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
244 { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x }, 248 { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
245 { PCI_VDEVICE(PROMISE, 0x3d73), board_2037x },
246 249
247 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 }, 250 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
248 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 }, 251 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
249 { PCI_VDEVICE(PROMISE, 0x3515), board_20319 }, 252 { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
250 { PCI_VDEVICE(PROMISE, 0x3519), board_20319 }, 253 { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
251 { PCI_VDEVICE(PROMISE, 0x3d17), board_20319 }, 254 { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
252 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 }, 255 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
253 256
254 { PCI_VDEVICE(PROMISE, 0x6629), board_20619 }, 257 { PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
@@ -277,6 +280,7 @@ static struct pci_driver pdc_ata_pci_driver = {
277static int pdc_port_start(struct ata_port *ap) 280static int pdc_port_start(struct ata_port *ap)
278{ 281{
279 struct device *dev = ap->host->dev; 282 struct device *dev = ap->host->dev;
283 struct pdc_host_priv *hp = ap->host->private_data;
280 struct pdc_port_priv *pp; 284 struct pdc_port_priv *pp;
281 int rc; 285 int rc;
282 286
@@ -298,6 +302,16 @@ static int pdc_port_start(struct ata_port *ap)
298 302
299 ap->private_data = pp; 303 ap->private_data = pp;
300 304
305 /* fix up PHYMODE4 align timing */
306 if ((hp->flags & PDC_FLAG_GEN_II) && sata_scr_valid(ap)) {
307 void __iomem *mmio = (void __iomem *) ap->ioaddr.scr_addr;
308 unsigned int tmp;
309
310 tmp = readl(mmio + 0x014);
311 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
312 writel(tmp, mmio + 0x014);
313 }
314
301 return 0; 315 return 0;
302 316
303err_out_kfree: 317err_out_kfree:
@@ -640,9 +654,11 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
640 * "TODO: figure out why we do this" 654 * "TODO: figure out why we do this"
641 */ 655 */
642 656
643 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */ 657 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
644 tmp = readl(mmio + PDC_FLASH_CTL); 658 tmp = readl(mmio + PDC_FLASH_CTL);
645 tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) */ 659 tmp |= 0x02000; /* bit 13 (enable bmr burst) */
660 if (!(hp->flags & PDC_FLAG_GEN_II))
661 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
646 writel(tmp, mmio + PDC_FLASH_CTL); 662 writel(tmp, mmio + PDC_FLASH_CTL);
647 663
648 /* clear plug/unplug flags for all ports */ 664 /* clear plug/unplug flags for all ports */
@@ -653,6 +669,10 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
653 tmp = readl(mmio + hotplug_offset); 669 tmp = readl(mmio + hotplug_offset);
654 writel(tmp | 0xff0000, mmio + hotplug_offset); 670 writel(tmp | 0xff0000, mmio + hotplug_offset);
655 671
672 /* don't initialise TBG or SLEW on 2nd generation chips */
673 if (hp->flags & PDC_FLAG_GEN_II)
674 return;
675
656 /* reduce TBG clock to 133 Mhz. */ 676 /* reduce TBG clock to 133 Mhz. */
657 tmp = readl(mmio + PDC_TBG_MODE); 677 tmp = readl(mmio + PDC_TBG_MODE);
658 tmp &= ~0x30000; /* clear bit 17, 16*/ 678 tmp &= ~0x30000; /* clear bit 17, 16*/
@@ -746,6 +766,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
746 /* notice 4-port boards */ 766 /* notice 4-port boards */
747 switch (board_idx) { 767 switch (board_idx) {
748 case board_40518: 768 case board_40518:
769 hp->flags |= PDC_FLAG_GEN_II;
749 /* Override hotplug offset for SATAII150 */ 770 /* Override hotplug offset for SATAII150 */
750 hp->hotplug_offset = PDC2_SATA_PLUG_CSR; 771 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
751 /* Fall through */ 772 /* Fall through */
@@ -759,15 +780,14 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
759 probe_ent->port[3].scr_addr = base + 0x700; 780 probe_ent->port[3].scr_addr = base + 0x700;
760 break; 781 break;
761 case board_2057x: 782 case board_2057x:
783 case board_20771:
784 hp->flags |= PDC_FLAG_GEN_II;
762 /* Override hotplug offset for SATAII150 */ 785 /* Override hotplug offset for SATAII150 */
763 hp->hotplug_offset = PDC2_SATA_PLUG_CSR; 786 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
764 /* Fall through */ 787 /* Fall through */
765 case board_2037x: 788 case board_2037x:
766 probe_ent->n_ports = 2; 789 probe_ent->n_ports = 2;
767 break; 790 break;
768 case board_20771:
769 probe_ent->n_ports = 2;
770 break;
771 case board_20619: 791 case board_20619:
772 probe_ent->n_ports = 4; 792 probe_ent->n_ports = 4;
773 793
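The PHYMODE4 fix-up added to pdc_port_start() above is a plain read-modify-write that forces bits 1:0 of the register at SCR base + 0x014 to 0b01 on second-generation chips. Restated as a standalone helper for clarity (the name is hypothetical; the offset and bit values are taken from the hunk above):

/* Apply the Gen-II PHYMODE4 align-timing fix-up: set bits 1:0 of the
 * register at scr_base + 0x014 to 01, leaving the rest untouched. */
static void pdc_fixup_phymode4(void __iomem *scr_base)
{
	u32 tmp = readl(scr_base + 0x014);

	writel((tmp & ~3) | 1, scr_base + 0x014);
}
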
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index ca8d9931247..7808d0369d9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -356,6 +356,7 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
356 356
357static void sil_host_intr(struct ata_port *ap, u32 bmdma2) 357static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
358{ 358{
359 struct ata_eh_info *ehi = &ap->eh_info;
359 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 360 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
360 u8 status; 361 u8 status;
361 362
@@ -428,6 +429,10 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
428 /* kick HSM in the ass */ 429 /* kick HSM in the ass */
429 ata_hsm_move(ap, qc, status, 0); 430 ata_hsm_move(ap, qc, status, 0);
430 431
432 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
433 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
434 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
435
431 return; 436 return;
432 437
433 err_hsm: 438 err_hsm:
@@ -534,6 +539,7 @@ static void sil_thaw(struct ata_port *ap)
534 */ 539 */
535static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 540static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
536{ 541{
542 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
537 unsigned int n, quirks = 0; 543 unsigned int n, quirks = 0;
538 unsigned char model_num[41]; 544 unsigned char model_num[41];
539 545
@@ -549,16 +555,18 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
549 if (slow_down || 555 if (slow_down ||
550 ((ap->flags & SIL_FLAG_MOD15WRITE) && 556 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
551 (quirks & SIL_QUIRK_MOD15WRITE))) { 557 (quirks & SIL_QUIRK_MOD15WRITE))) {
552 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix " 558 if (print_info)
553 "(mod15write workaround)\n"); 559 ata_dev_printk(dev, KERN_INFO, "applying Seagate "
560 "errata fix (mod15write workaround)\n");
554 dev->max_sectors = 15; 561 dev->max_sectors = 15;
555 return; 562 return;
556 } 563 }
557 564
558 /* limit to udma5 */ 565 /* limit to udma5 */
559 if (quirks & SIL_QUIRK_UDMA5MAX) { 566 if (quirks & SIL_QUIRK_UDMA5MAX) {
560 ata_dev_printk(dev, KERN_INFO, 567 if (print_info)
561 "applying Maxtor errata fix %s\n", model_num); 568 ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
569 "errata fix %s\n", model_num);
562 dev->udma_mask &= ATA_UDMA5; 570 dev->udma_mask &= ATA_UDMA5;
563 return; 571 return;
564 } 572 }
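The print_info test added to sil_dev_config() keeps the Seagate and Maxtor quirk notices to the initial probe-time EH pass instead of repeating them on every revalidation. The same gate can be reused for any once-per-device message; a minimal sketch, assuming only the ATA_EHI_PRINTINFO flag used above (helper name hypothetical):

/* Non-zero only during the probe-time EH pass, so one-off informational
 * messages are not repeated on later revalidations of the same device. */
static int sil_print_info(struct ata_port *ap)
{
	return ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
}
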
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 169e200a6a7..5aa288d2fb8 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -100,10 +100,14 @@ enum {
100 */ 100 */
101 PORT_REGS_SIZE = 0x2000, 101 PORT_REGS_SIZE = 0x2000,
102 102
103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */ 103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */ 104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
105 105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ 106 PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
107 PORT_PMP_STATUS = 0x0000, /* port device status offset */
108 PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
109 PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */
110
107 /* 32 bit regs */ 111 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */ 112 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */ 113 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
@@ -126,6 +130,7 @@ enum {
126 PORT_PHY_CFG = 0x1050, 130 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800, 131 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */ 132 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
133 PORT_CONTEXT = 0x1e04,
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */ 134 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */ 135 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00, 136 PORT_SCONTROL = 0x1f00,
@@ -139,9 +144,9 @@ enum {
139 PORT_CS_INIT = (1 << 2), /* port initialize */ 144 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */ 145 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */ 146 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
142 PORT_CS_RESUME = (1 << 6), /* port resume */ 147 PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */ 148 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */ 149 PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */ 150 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146 151
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */ 152 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
@@ -562,7 +567,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
562 567
563 /* do SRST */ 568 /* do SRST */
564 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 569 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
565 prb->fis[1] = 0; /* no PM yet */ 570 prb->fis[1] = 0; /* no PMP yet */
566 571
567 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 572 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
568 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 573 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
@@ -1050,7 +1055,8 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1050 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1055 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1051 1056
1052 /* Clear port multiplier enable and resume bits */ 1057 /* Clear port multiplier enable and resume bits */
1053 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1058 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
1059 port + PORT_CTRL_CLR);
1054 } 1060 }
1055 1061
1056 /* Turn on interrupts */ 1062 /* Turn on interrupts */
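The renamed PORT_PMP constants in the sil24 hunk describe a small per-device register file: 16 slots of PORT_PMP_SIZE bytes starting at PORT_PMP, each with a status word at PORT_PMP_STATUS and a QActive word at PORT_PMP_QACTIVE. A hedged sketch of how a device's status would be addressed under that layout (accessor name hypothetical, not part of the patch):

/* Read the status word kept for port-multiplier device 'pmp' behind this
 * port, using the PORT_PMP register file layout defined above. */
static u32 sil24_pmp_status(void __iomem *port, unsigned int pmp)
{
	return readl(port + PORT_PMP + pmp * PORT_PMP_SIZE + PORT_PMP_STATUS);
}
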
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 9d1235ba06b..9c25a1e9173 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -173,7 +173,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2); 174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175 175
176 return val|val2; 176 return (val|val2) & 0xfffffffb; /* avoid problems with powered-down ports */
177} 177}
178 178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
@@ -212,7 +212,7 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); 213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214 214
215 return val | val2; 215 return (val | val2) & 0xfffffffb;
216} 216}
217 217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
@@ -239,7 +239,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
239 static int printed_version; 239 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL; 240 struct ata_probe_ent *probe_ent = NULL;
241 int rc; 241 int rc;
242 u32 genctl; 242 u32 genctl, val;
243 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi }; 243 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi };
244 int pci_dev_busy = 0; 244 int pci_dev_busy = 0;
245 u8 pmr; 245 u8 pmr;
@@ -285,17 +285,24 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
285 if (ent->device != 0x182) { 285 if (ent->device != 0x182) {
286 if ((pmr & SIS_PMR_COMBINED) == 0) { 286 if ((pmr & SIS_PMR_COMBINED) == 0) {
287 dev_printk(KERN_INFO, &pdev->dev, 287 dev_printk(KERN_INFO, &pdev->dev,
288 "Detected SiS 180/181 chipset in SATA mode\n"); 288 "Detected SiS 180/181/964 chipset in SATA mode\n");
289 port2_start = 64; 289 port2_start = 64;
290 } 290 }
291 else { 291 else {
292 dev_printk(KERN_INFO, &pdev->dev, 292 dev_printk(KERN_INFO, &pdev->dev,
293 "Detected SiS 180/181 chipset in combined mode\n"); 293 "Detected SiS 180/181 chipset in combined mode\n");
294 port2_start=0; 294 port2_start=0;
295 pi.flags |= ATA_FLAG_SLAVE_POSS;
295 } 296 }
296 } 297 }
297 else { 298 else {
298 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n"); 299 pci_read_config_dword ( pdev, 0x6C, &val);
300 if (val & (1L << 31)) {
301 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
302 pi.flags |= ATA_FLAG_SLAVE_POSS;
303 }
304 else
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
299 port2_start = 0x20; 306 port2_start = 0x20;
300 } 307 }
301 308