aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2005-10-30 21:37:12 -0500
committerPaul Mackerras <paulus@samba.org>2005-10-30 21:37:12 -0500
commit23fd07750a789a66fe88cf173d52a18f1a387da4 (patch)
tree06fdd6df35fdb835abdaa9b754d62f6b84b97250 /drivers/scsi
parentbd787d438a59266af3c9f6351644c85ef1dd21fe (diff)
parented28f96ac1960f30f818374d65be71d2fdf811b0 (diff)
Merge ../linux-2.6 by hand
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/Kconfig22
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/ahci.c78
-rw-r--r--drivers/scsi/arm/scsi.h6
-rw-r--r--drivers/scsi/ata_piix.c19
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/dec_esp.c2
-rw-r--r--drivers/scsi/eata.c2
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ide-scsi.c38
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/lasi700.c6
-rw-r--r--drivers/scsi/libata-core.c970
-rw-r--r--drivers/scsi/libata-scsi.c1506
-rw-r--r--drivers/scsi/libata.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c10
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/osst.c8
-rw-r--r--drivers/scsi/pdc_adma.c741
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/sata_mv.c1138
-rw-r--r--drivers/scsi/sata_nv.c11
-rw-r--r--drivers/scsi/sata_promise.c49
-rw-r--r--drivers/scsi/sata_qstor.c35
-rw-r--r--drivers/scsi/sata_sil.c13
-rw-r--r--drivers/scsi/sata_sil24.c871
-rw-r--r--drivers/scsi/sata_sis.c17
-rw-r--r--drivers/scsi/sata_svw.c29
-rw-r--r--drivers/scsi/sata_sx4.c42
-rw-r--r--drivers/scsi/sata_uli.c9
-rw-r--r--drivers/scsi/sata_via.c42
-rw-r--r--drivers/scsi/sata_vsc.c37
-rw-r--r--drivers/scsi/scsi.c8
-rw-r--r--drivers/scsi/scsi_ioctl.c3
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c3
-rw-r--r--drivers/scsi/sg.c29
-rw-r--r--drivers/scsi/st.c18
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/scsi/zalon.c4
45 files changed, 4678 insertions, 1139 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3ee9b8b33be0..9c9f162bd6ed 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -489,11 +489,11 @@ config SCSI_SATA_NV
489 489
490 If unsure, say N. 490 If unsure, say N.
491 491
492config SCSI_SATA_PROMISE 492config SCSI_PDC_ADMA
493 tristate "Promise SATA TX2/TX4 support" 493 tristate "Pacific Digital ADMA support"
494 depends on SCSI_SATA && PCI 494 depends on SCSI_SATA && PCI
495 help 495 help
496 This option enables support for Promise Serial ATA TX2/TX4. 496 This option enables support for Pacific Digital ADMA controllers
497 497
498 If unsure, say N. 498 If unsure, say N.
499 499
@@ -505,6 +505,14 @@ config SCSI_SATA_QSTOR
505 505
506 If unsure, say N. 506 If unsure, say N.
507 507
508config SCSI_SATA_PROMISE
509 tristate "Promise SATA TX2/TX4 support"
510 depends on SCSI_SATA && PCI
511 help
512 This option enables support for Promise Serial ATA TX2/TX4.
513
514 If unsure, say N.
515
508config SCSI_SATA_SX4 516config SCSI_SATA_SX4
509 tristate "Promise SATA SX4 support" 517 tristate "Promise SATA SX4 support"
510 depends on SCSI_SATA && PCI && EXPERIMENTAL 518 depends on SCSI_SATA && PCI && EXPERIMENTAL
@@ -521,6 +529,14 @@ config SCSI_SATA_SIL
521 529
522 If unsure, say N. 530 If unsure, say N.
523 531
532config SCSI_SATA_SIL24
533 tristate "Silicon Image 3124/3132 SATA support"
534 depends on SCSI_SATA && PCI && EXPERIMENTAL
535 help
536 This option enables support for Silicon Image 3124/3132 Serial ATA.
537
538 If unsure, say N.
539
524config SCSI_SATA_SIS 540config SCSI_SATA_SIS
525 tristate "SiS 964/180 SATA support" 541 tristate "SiS 964/180 SATA support"
526 depends on SCSI_SATA && PCI && EXPERIMENTAL 542 depends on SCSI_SATA && PCI && EXPERIMENTAL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 48529d180ca8..2d4439826c08 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
130obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o 130obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o
131obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o 131obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o
132obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o 132obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o
133obj-$(CONFIG_SCSI_SATA_SIL24) += libata.o sata_sil24.o
133obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o 134obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o
134obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o 135obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o
135obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o 136obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o
@@ -137,6 +138,7 @@ obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
137obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o 138obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
138obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o 139obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
139obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o 140obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
141obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o
140 142
141obj-$(CONFIG_ARM) += arm/ 143obj-$(CONFIG_ARM) += arm/
142 144
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index c2c8fa828e24..e2a5657d5fdb 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -192,7 +193,6 @@ static void ahci_port_stop(struct ata_port *ap);
192static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 193static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
193static void ahci_qc_prep(struct ata_queued_cmd *qc); 194static void ahci_qc_prep(struct ata_queued_cmd *qc);
194static u8 ahci_check_status(struct ata_port *ap); 195static u8 ahci_check_status(struct ata_port *ap);
195static u8 ahci_check_err(struct ata_port *ap);
196static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 196static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
197static void ahci_remove_one (struct pci_dev *pdev); 197static void ahci_remove_one (struct pci_dev *pdev);
198 198
@@ -216,12 +216,11 @@ static Scsi_Host_Template ahci_sht = {
216 .ordered_flush = 1, 216 .ordered_flush = 1,
217}; 217};
218 218
219static struct ata_port_operations ahci_ops = { 219static const struct ata_port_operations ahci_ops = {
220 .port_disable = ata_port_disable, 220 .port_disable = ata_port_disable,
221 221
222 .check_status = ahci_check_status, 222 .check_status = ahci_check_status,
223 .check_altstatus = ahci_check_status, 223 .check_altstatus = ahci_check_status,
224 .check_err = ahci_check_err,
225 .dev_select = ata_noop_dev_select, 224 .dev_select = ata_noop_dev_select,
226 225
227 .tf_read = ahci_tf_read, 226 .tf_read = ahci_tf_read,
@@ -407,7 +406,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
407 return 0xffffffffU; 406 return 0xffffffffU;
408 } 407 }
409 408
410 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 409 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
411} 410}
412 411
413 412
@@ -425,7 +424,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
425 return; 424 return;
426 } 425 }
427 426
428 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 427 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
429} 428}
430 429
431static void ahci_phy_reset(struct ata_port *ap) 430static void ahci_phy_reset(struct ata_port *ap)
@@ -453,18 +452,11 @@ static void ahci_phy_reset(struct ata_port *ap)
453 452
454static u8 ahci_check_status(struct ata_port *ap) 453static u8 ahci_check_status(struct ata_port *ap)
455{ 454{
456 void *mmio = (void *) ap->ioaddr.cmd_addr; 455 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
457 456
458 return readl(mmio + PORT_TFDATA) & 0xFF; 457 return readl(mmio + PORT_TFDATA) & 0xFF;
459} 458}
460 459
461static u8 ahci_check_err(struct ata_port *ap)
462{
463 void *mmio = (void *) ap->ioaddr.cmd_addr;
464
465 return (readl(mmio + PORT_TFDATA) >> 8) & 0xFF;
466}
467
468static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 460static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
469{ 461{
470 struct ahci_port_priv *pp = ap->private_data; 462 struct ahci_port_priv *pp = ap->private_data;
@@ -609,7 +601,7 @@ static void ahci_eng_timeout(struct ata_port *ap)
609 * not being called from the SCSI EH. 601 * not being called from the SCSI EH.
610 */ 602 */
611 qc->scsidone = scsi_finish_command; 603 qc->scsidone = scsi_finish_command;
612 ata_qc_complete(qc, ATA_ERR); 604 ata_qc_complete(qc, AC_ERR_OTHER);
613 } 605 }
614 606
615 spin_unlock_irqrestore(&host_set->lock, flags); 607 spin_unlock_irqrestore(&host_set->lock, flags);
@@ -638,7 +630,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
638 if (status & PORT_IRQ_FATAL) { 630 if (status & PORT_IRQ_FATAL) {
639 ahci_intr_error(ap, status); 631 ahci_intr_error(ap, status);
640 if (qc) 632 if (qc)
641 ata_qc_complete(qc, ATA_ERR); 633 ata_qc_complete(qc, AC_ERR_OTHER);
642 } 634 }
643 635
644 return 1; 636 return 1;
@@ -672,17 +664,35 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
672 664
673 for (i = 0; i < host_set->n_ports; i++) { 665 for (i = 0; i < host_set->n_ports; i++) {
674 struct ata_port *ap; 666 struct ata_port *ap;
675 u32 tmp;
676 667
677 VPRINTK("port %u\n", i); 668 if (!(irq_stat & (1 << i)))
669 continue;
670
678 ap = host_set->ports[i]; 671 ap = host_set->ports[i];
679 tmp = irq_stat & (1 << i); 672 if (ap) {
680 if (tmp && ap) {
681 struct ata_queued_cmd *qc; 673 struct ata_queued_cmd *qc;
682 qc = ata_qc_from_tag(ap, ap->active_tag); 674 qc = ata_qc_from_tag(ap, ap->active_tag);
683 if (ahci_host_intr(ap, qc)) 675 if (!ahci_host_intr(ap, qc))
684 irq_ack |= (1 << i); 676 if (ata_ratelimit()) {
677 struct pci_dev *pdev =
678 to_pci_dev(ap->host_set->dev);
679 dev_printk(KERN_WARNING, &pdev->dev,
680 "unhandled interrupt on port %u\n",
681 i);
682 }
683
684 VPRINTK("port %u\n", i);
685 } else {
686 VPRINTK("port %u (no irq)\n", i);
687 if (ata_ratelimit()) {
688 struct pci_dev *pdev =
689 to_pci_dev(ap->host_set->dev);
690 dev_printk(KERN_WARNING, &pdev->dev,
691 "interrupt on disabled port %u\n", i);
692 }
685 } 693 }
694
695 irq_ack |= (1 << i);
686 } 696 }
687 697
688 if (irq_ack) { 698 if (irq_ack) {
@@ -750,8 +760,8 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
750 760
751 tmp = readl(mmio + HOST_CTL); 761 tmp = readl(mmio + HOST_CTL);
752 if (tmp & HOST_RESET) { 762 if (tmp & HOST_RESET) {
753 printk(KERN_ERR DRV_NAME "(%s): controller reset failed (0x%x)\n", 763 dev_printk(KERN_ERR, &pdev->dev,
754 pci_name(pdev), tmp); 764 "controller reset failed (0x%x)\n", tmp);
755 return -EIO; 765 return -EIO;
756 } 766 }
757 767
@@ -779,22 +789,22 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
779 if (rc) { 789 if (rc) {
780 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 790 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
781 if (rc) { 791 if (rc) {
782 printk(KERN_ERR DRV_NAME "(%s): 64-bit DMA enable failed\n", 792 dev_printk(KERN_ERR, &pdev->dev,
783 pci_name(pdev)); 793 "64-bit DMA enable failed\n");
784 return rc; 794 return rc;
785 } 795 }
786 } 796 }
787 } else { 797 } else {
788 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 798 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
789 if (rc) { 799 if (rc) {
790 printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n", 800 dev_printk(KERN_ERR, &pdev->dev,
791 pci_name(pdev)); 801 "32-bit DMA enable failed\n");
792 return rc; 802 return rc;
793 } 803 }
794 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 804 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
795 if (rc) { 805 if (rc) {
796 printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n", 806 dev_printk(KERN_ERR, &pdev->dev,
797 pci_name(pdev)); 807 "32-bit consistent DMA enable failed\n");
798 return rc; 808 return rc;
799 } 809 }
800 } 810 }
@@ -897,10 +907,10 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
897 else 907 else
898 scc_s = "unknown"; 908 scc_s = "unknown";
899 909
900 printk(KERN_INFO DRV_NAME "(%s) AHCI %02x%02x.%02x%02x " 910 dev_printk(KERN_INFO, &pdev->dev,
911 "AHCI %02x%02x.%02x%02x "
901 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 912 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
902 , 913 ,
903 pci_name(pdev),
904 914
905 (vers >> 24) & 0xff, 915 (vers >> 24) & 0xff,
906 (vers >> 16) & 0xff, 916 (vers >> 16) & 0xff,
@@ -913,11 +923,11 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
913 impl, 923 impl,
914 scc_s); 924 scc_s);
915 925
916 printk(KERN_INFO DRV_NAME "(%s) flags: " 926 dev_printk(KERN_INFO, &pdev->dev,
927 "flags: "
917 "%s%s%s%s%s%s" 928 "%s%s%s%s%s%s"
918 "%s%s%s%s%s%s%s\n" 929 "%s%s%s%s%s%s%s\n"
919 , 930 ,
920 pci_name(pdev),
921 931
922 cap & (1 << 31) ? "64bit " : "", 932 cap & (1 << 31) ? "64bit " : "",
923 cap & (1 << 30) ? "ncq " : "", 933 cap & (1 << 30) ? "ncq " : "",
@@ -950,7 +960,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
950 VPRINTK("ENTER\n"); 960 VPRINTK("ENTER\n");
951 961
952 if (!printed_version++) 962 if (!printed_version++)
953 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 963 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
954 964
955 rc = pci_enable_device(pdev); 965 rc = pci_enable_device(pdev);
956 if (rc) 966 if (rc)
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 48e1c4d9738b..19937640e2e7 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -10,6 +10,8 @@
10 * Commonly used scsi driver functions. 10 * Commonly used scsi driver functions.
11 */ 11 */
12 12
13#include <linux/scatterlist.h>
14
13#define BELT_AND_BRACES 15#define BELT_AND_BRACES
14 16
15/* 17/*
@@ -22,9 +24,7 @@ static inline int copy_SCp_to_sg(struct scatterlist *sg, Scsi_Pointer *SCp, int
22 24
23 BUG_ON(bufs + 1 > max); 25 BUG_ON(bufs + 1 > max);
24 26
25 sg->page = virt_to_page(SCp->ptr); 27 sg_set_buf(sg, SCp->ptr, SCp->this_residual);
26 sg->offset = offset_in_page(SCp->ptr);
27 sg->length = SCp->this_residual;
28 28
29 if (bufs) 29 if (bufs)
30 memcpy(sg + 1, SCp->buffer + 1, 30 memcpy(sg + 1, SCp->buffer + 1,
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index d71cef767cec..7f8aa1b552ce 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/blkdev.h> 46#include <linux/blkdev.h>
47#include <linux/delay.h> 47#include <linux/delay.h>
48#include <linux/device.h>
48#include "scsi.h" 49#include "scsi.h"
49#include <scsi/scsi_host.h> 50#include <scsi/scsi_host.h>
50#include <linux/libata.h> 51#include <linux/libata.h>
@@ -147,7 +148,7 @@ static Scsi_Host_Template piix_sht = {
147 .ordered_flush = 1, 148 .ordered_flush = 1,
148}; 149};
149 150
150static struct ata_port_operations piix_pata_ops = { 151static const struct ata_port_operations piix_pata_ops = {
151 .port_disable = ata_port_disable, 152 .port_disable = ata_port_disable,
152 .set_piomode = piix_set_piomode, 153 .set_piomode = piix_set_piomode,
153 .set_dmamode = piix_set_dmamode, 154 .set_dmamode = piix_set_dmamode,
@@ -177,7 +178,7 @@ static struct ata_port_operations piix_pata_ops = {
177 .host_stop = ata_host_stop, 178 .host_stop = ata_host_stop,
178}; 179};
179 180
180static struct ata_port_operations piix_sata_ops = { 181static const struct ata_port_operations piix_sata_ops = {
181 .port_disable = ata_port_disable, 182 .port_disable = ata_port_disable,
182 183
183 .tf_load = ata_tf_load, 184 .tf_load = ata_tf_load,
@@ -621,18 +622,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
621{ 622{
622 static int printed_version; 623 static int printed_version;
623 struct ata_port_info *port_info[2]; 624 struct ata_port_info *port_info[2];
624 unsigned int combined = 0, n_ports = 1; 625 unsigned int combined = 0;
625 unsigned int pata_chan = 0, sata_chan = 0; 626 unsigned int pata_chan = 0, sata_chan = 0;
626 627
627 if (!printed_version++) 628 if (!printed_version++)
628 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 629 dev_printk(KERN_DEBUG, &pdev->dev,
630 "version " DRV_VERSION "\n");
629 631
630 /* no hotplugging support (FIXME) */ 632 /* no hotplugging support (FIXME) */
631 if (!in_module_init) 633 if (!in_module_init)
632 return -ENODEV; 634 return -ENODEV;
633 635
634 port_info[0] = &piix_port_info[ent->driver_data]; 636 port_info[0] = &piix_port_info[ent->driver_data];
635 port_info[1] = NULL; 637 port_info[1] = &piix_port_info[ent->driver_data];
636 638
637 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 639 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
638 u8 tmp; 640 u8 tmp;
@@ -670,12 +672,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
670 port_info[sata_chan] = &piix_port_info[ent->driver_data]; 672 port_info[sata_chan] = &piix_port_info[ent->driver_data];
671 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS; 673 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
672 port_info[pata_chan] = &piix_port_info[ich5_pata]; 674 port_info[pata_chan] = &piix_port_info[ich5_pata];
673 n_ports++;
674 675
675 printk(KERN_WARNING DRV_NAME ": combined mode detected\n"); 676 dev_printk(KERN_WARNING, &pdev->dev,
677 "combined mode detected (p=%u, s=%u)\n",
678 pata_chan, sata_chan);
676 } 679 }
677 680
678 return ata_pci_init_one(pdev, port_info, n_ports); 681 return ata_pci_init_one(pdev, port_info, 2);
679} 682}
680 683
681static int __init piix_init(void) 684static int __init piix_init(void)
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index da6e51c7fe69..540147cb51ce 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -936,7 +936,7 @@ static int ch_probe(struct device *dev)
936 if (init) 936 if (init)
937 ch_init_elem(ch); 937 ch_init_elem(ch);
938 938
939 class_device_create(ch_sysfs_class, 939 class_device_create(ch_sysfs_class, NULL,
940 MKDEV(SCSI_CHANGER_MAJOR,ch->minor), 940 MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
941 dev, "s%s", ch->name); 941 dev, "s%s", ch->name);
942 942
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
index 315f95a0d6c0..4f39890b44ac 100644
--- a/drivers/scsi/dec_esp.c
+++ b/drivers/scsi/dec_esp.c
@@ -228,7 +228,7 @@ static int dec_esp_detect(Scsi_Host_Template * tpnt)
228 mem_start = get_tc_base_addr(slot); 228 mem_start = get_tc_base_addr(slot);
229 229
230 /* Store base addr into esp struct */ 230 /* Store base addr into esp struct */
231 esp->slot = PHYSADDR(mem_start); 231 esp->slot = CPHYSADDR(mem_start);
232 232
233 esp->dregs = 0; 233 esp->dregs = 0;
234 esp->eregs = (struct ESP_regs *) (mem_start + DEC_SCSI_SREG); 234 esp->eregs = (struct ESP_regs *) (mem_start + DEC_SCSI_SREG);
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c10e45b94b62..3d13fdee4fc2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1357,7 +1357,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
1357 1357
1358 for (i = 0; i < shost->can_queue; i++) { 1358 for (i = 0; i < shost->can_queue; i++) {
1359 size_t sz = shost->sg_tablesize *sizeof(struct sg_list); 1359 size_t sz = shost->sg_tablesize *sizeof(struct sg_list);
1360 unsigned int gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC; 1360 gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
1361 ha->cp[i].sglist = kmalloc(sz, gfp_mask); 1361 ha->cp[i].sglist = kmalloc(sz, gfp_mask);
1362 if (!ha->cp[i].sglist) { 1362 if (!ha->cp[i].sglist) {
1363 printk 1363 printk
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 02fe371b0ab8..f24d84538fd5 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -287,7 +287,8 @@ static void scsi_host_dev_release(struct device *dev)
287struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) 287struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
288{ 288{
289 struct Scsi_Host *shost; 289 struct Scsi_Host *shost;
290 int gfp_mask = GFP_KERNEL, rval; 290 gfp_t gfp_mask = GFP_KERNEL;
291 int rval;
291 292
292 if (sht->unchecked_isa_dma && privsize) 293 if (sht->unchecked_isa_dma && privsize)
293 gfp_mask |= __GFP_DMA; 294 gfp_mask |= __GFP_DMA;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 3d62c9bcbff7..00d6a6657ebc 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -180,19 +180,12 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
180 return; 180 return;
181 } 181 }
182 count = min(pc->sg->length - pc->b_count, bcount); 182 count = min(pc->sg->length - pc->b_count, bcount);
183 if (PageHighMem(pc->sg->page)) { 183 buf = kmap_atomic(pc->sg->page, KM_IRQ0);
184 unsigned long flags; 184 drive->hwif->atapi_input_bytes(drive,
185 185 buf + pc->b_count + pc->sg->offset, count);
186 local_irq_save(flags); 186 kunmap_atomic(buf, KM_IRQ0);
187 buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset; 187 bcount -= count;
188 drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count); 188 pc->b_count += count;
189 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
190 local_irq_restore(flags);
191 } else {
192 buf = page_address(pc->sg->page) + pc->sg->offset;
193 drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count);
194 }
195 bcount -= count; pc->b_count += count;
196 if (pc->b_count == pc->sg->length) { 189 if (pc->b_count == pc->sg->length) {
197 pc->sg++; 190 pc->sg++;
198 pc->b_count = 0; 191 pc->b_count = 0;
@@ -212,19 +205,12 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
212 return; 205 return;
213 } 206 }
214 count = min(pc->sg->length - pc->b_count, bcount); 207 count = min(pc->sg->length - pc->b_count, bcount);
215 if (PageHighMem(pc->sg->page)) { 208 buf = kmap_atomic(pc->sg->page, KM_IRQ0);
216 unsigned long flags; 209 drive->hwif->atapi_output_bytes(drive,
217 210 buf + pc->b_count + pc->sg->offset, count);
218 local_irq_save(flags); 211 kunmap_atomic(buf, KM_IRQ0);
219 buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset; 212 bcount -= count;
220 drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count); 213 pc->b_count += count;
221 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
222 local_irq_restore(flags);
223 } else {
224 buf = page_address(pc->sg->page) + pc->sg->offset;
225 drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count);
226 }
227 bcount -= count; pc->b_count += count;
228 if (pc->b_count == pc->sg->length) { 214 if (pc->b_count == pc->sg->length) {
229 pc->sg++; 215 pc->sg++;
230 pc->b_count = 0; 216 pc->b_count = 0;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index babd48363402..e0039dfae8e5 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4944,6 +4944,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4944 int rc; 4944 int rc;
4945 4945
4946 ENTER; 4946 ENTER;
4947 pci_unblock_user_cfg_access(ioa_cfg->pdev);
4947 rc = pci_restore_state(ioa_cfg->pdev); 4948 rc = pci_restore_state(ioa_cfg->pdev);
4948 4949
4949 if (rc != PCIBIOS_SUCCESSFUL) { 4950 if (rc != PCIBIOS_SUCCESSFUL) {
@@ -4998,6 +4999,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4998 int rc; 4999 int rc;
4999 5000
5000 ENTER; 5001 ENTER;
5002 pci_block_user_cfg_access(ioa_cfg->pdev);
5001 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 5003 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5002 5004
5003 if (rc != PCIBIOS_SUCCESSFUL) { 5005 if (rc != PCIBIOS_SUCCESSFUL) {
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 4cbb6187cc44..459a4daebece 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(parisc, lasi700_ids);
98static int __init 98static int __init
99lasi700_probe(struct parisc_device *dev) 99lasi700_probe(struct parisc_device *dev)
100{ 100{
101 unsigned long base = dev->hpa + LASI_SCSI_CORE_OFFSET; 101 unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET;
102 struct NCR_700_Host_Parameters *hostdata; 102 struct NCR_700_Host_Parameters *hostdata;
103 struct Scsi_Host *host; 103 struct Scsi_Host *host;
104 104
@@ -125,8 +125,6 @@ lasi700_probe(struct parisc_device *dev)
125 hostdata->dmode_extra = DMODE_FC2; 125 hostdata->dmode_extra = DMODE_FC2;
126 } 126 }
127 127
128 NCR_700_set_mem_mapped(hostdata);
129
130 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); 128 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);
131 if (!host) 129 if (!host)
132 goto out_kfree; 130 goto out_kfree;
@@ -168,7 +166,7 @@ lasi700_driver_remove(struct parisc_device *dev)
168} 166}
169 167
170static struct parisc_driver lasi700_driver = { 168static struct parisc_driver lasi700_driver = {
171 .name = "Lasi SCSI", 169 .name = "lasi_scsi",
172 .id_table = lasi700_ids, 170 .id_table = lasi700_ids,
173 .probe = lasi700_probe, 171 .probe = lasi700_probe,
174 .remove = __devexit_p(lasi700_driver_remove), 172 .remove = __devexit_p(lasi700_driver_remove),
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index e5b01997117a..8be7dc0b47b8 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -48,6 +48,8 @@
48#include <linux/completion.h> 48#include <linux/completion.h>
49#include <linux/suspend.h> 49#include <linux/suspend.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <linux/jiffies.h>
52#include <linux/scatterlist.h>
51#include <scsi/scsi.h> 53#include <scsi/scsi.h>
52#include "scsi.h" 54#include "scsi.h"
53#include "scsi_priv.h" 55#include "scsi_priv.h"
@@ -62,14 +64,15 @@
62static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_busy_sleep (struct ata_port *ap,
63 unsigned long tmout_pat, 65 unsigned long tmout_pat,
64 unsigned long tmout); 66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
65static void ata_set_mode(struct ata_port *ap); 69static void ata_set_mode(struct ata_port *ap);
66static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
67static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); 71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
68static int fgb(u32 bitmap); 72static int fgb(u32 bitmap);
69static int ata_choose_xfer_mode(struct ata_port *ap, 73static int ata_choose_xfer_mode(const struct ata_port *ap,
70 u8 *xfer_mode_out, 74 u8 *xfer_mode_out,
71 unsigned int *xfer_shift_out); 75 unsigned int *xfer_shift_out);
72static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
73static void __ata_qc_complete(struct ata_queued_cmd *qc); 76static void __ata_qc_complete(struct ata_queued_cmd *qc);
74 77
75static unsigned int ata_unique_id = 1; 78static unsigned int ata_unique_id = 1;
@@ -85,7 +88,7 @@ MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_VERSION); 88MODULE_VERSION(DRV_VERSION);
86 89
87/** 90/**
88 * ata_tf_load - send taskfile registers to host controller 91 * ata_tf_load_pio - send taskfile registers to host controller
89 * @ap: Port to which output is sent 92 * @ap: Port to which output is sent
90 * @tf: ATA taskfile register set 93 * @tf: ATA taskfile register set
91 * 94 *
@@ -95,7 +98,7 @@ MODULE_VERSION(DRV_VERSION);
95 * Inherited from caller. 98 * Inherited from caller.
96 */ 99 */
97 100
98static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) 101static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
99{ 102{
100 struct ata_ioports *ioaddr = &ap->ioaddr; 103 struct ata_ioports *ioaddr = &ap->ioaddr;
101 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 104 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -153,7 +156,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
153 * Inherited from caller. 156 * Inherited from caller.
154 */ 157 */
155 158
156static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) 159static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
157{ 160{
158 struct ata_ioports *ioaddr = &ap->ioaddr; 161 struct ata_ioports *ioaddr = &ap->ioaddr;
159 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 162 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -222,7 +225,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
222 * LOCKING: 225 * LOCKING:
223 * Inherited from caller. 226 * Inherited from caller.
224 */ 227 */
225void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) 228void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
226{ 229{
227 if (ap->flags & ATA_FLAG_MMIO) 230 if (ap->flags & ATA_FLAG_MMIO)
228 ata_tf_load_mmio(ap, tf); 231 ata_tf_load_mmio(ap, tf);
@@ -242,7 +245,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
242 * spin_lock_irqsave(host_set lock) 245 * spin_lock_irqsave(host_set lock)
243 */ 246 */
244 247
245static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) 248static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
246{ 249{
247 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 250 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
248 251
@@ -263,7 +266,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
263 * spin_lock_irqsave(host_set lock) 266 * spin_lock_irqsave(host_set lock)
264 */ 267 */
265 268
266static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) 269static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
267{ 270{
268 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 271 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
269 272
@@ -283,7 +286,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
283 * LOCKING: 286 * LOCKING:
284 * spin_lock_irqsave(host_set lock) 287 * spin_lock_irqsave(host_set lock)
285 */ 288 */
286void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) 289void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
287{ 290{
288 if (ap->flags & ATA_FLAG_MMIO) 291 if (ap->flags & ATA_FLAG_MMIO)
289 ata_exec_command_mmio(ap, tf); 292 ata_exec_command_mmio(ap, tf);
@@ -303,7 +306,7 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
303 * Obtains host_set lock. 306 * Obtains host_set lock.
304 */ 307 */
305 308
306static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) 309static inline void ata_exec(struct ata_port *ap, const struct ata_taskfile *tf)
307{ 310{
308 unsigned long flags; 311 unsigned long flags;
309 312
@@ -326,7 +329,7 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
326 * Obtains host_set lock. 329 * Obtains host_set lock.
327 */ 330 */
328 331
329static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) 332static void ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf)
330{ 333{
331 ap->ops->tf_load(ap, tf); 334 ap->ops->tf_load(ap, tf);
332 335
@@ -346,7 +349,7 @@ static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
346 * spin_lock_irqsave(host_set lock) 349 * spin_lock_irqsave(host_set lock)
347 */ 350 */
348 351
349void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) 352void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf)
350{ 353{
351 ap->ops->tf_load(ap, tf); 354 ap->ops->tf_load(ap, tf);
352 ap->ops->exec_command(ap, tf); 355 ap->ops->exec_command(ap, tf);
@@ -368,6 +371,8 @@ static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
368{ 371{
369 struct ata_ioports *ioaddr = &ap->ioaddr; 372 struct ata_ioports *ioaddr = &ap->ioaddr;
370 373
374 tf->command = ata_check_status(ap);
375 tf->feature = inb(ioaddr->error_addr);
371 tf->nsect = inb(ioaddr->nsect_addr); 376 tf->nsect = inb(ioaddr->nsect_addr);
372 tf->lbal = inb(ioaddr->lbal_addr); 377 tf->lbal = inb(ioaddr->lbal_addr);
373 tf->lbam = inb(ioaddr->lbam_addr); 378 tf->lbam = inb(ioaddr->lbam_addr);
@@ -400,6 +405,8 @@ static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
400{ 405{
401 struct ata_ioports *ioaddr = &ap->ioaddr; 406 struct ata_ioports *ioaddr = &ap->ioaddr;
402 407
408 tf->command = ata_check_status(ap);
409 tf->feature = readb((void __iomem *)ioaddr->error_addr);
403 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); 410 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
404 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); 411 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
405 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); 412 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
@@ -520,30 +527,6 @@ u8 ata_altstatus(struct ata_port *ap)
520 527
521 528
522/** 529/**
523 * ata_chk_err - Read device error reg
524 * @ap: port where the device is
525 *
526 * Reads ATA taskfile error register for
527 * currently-selected device and return its value.
528 *
529 * Note: may NOT be used as the check_err() entry in
530 * ata_port_operations.
531 *
532 * LOCKING:
533 * Inherited from caller.
534 */
535u8 ata_chk_err(struct ata_port *ap)
536{
537 if (ap->ops->check_err)
538 return ap->ops->check_err(ap);
539
540 if (ap->flags & ATA_FLAG_MMIO) {
541 return readb((void __iomem *) ap->ioaddr.error_addr);
542 }
543 return inb(ap->ioaddr.error_addr);
544}
545
546/**
547 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 530 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
548 * @tf: Taskfile to convert 531 * @tf: Taskfile to convert
549 * @fis: Buffer into which data will output 532 * @fis: Buffer into which data will output
@@ -556,7 +539,7 @@ u8 ata_chk_err(struct ata_port *ap)
556 * Inherited from caller. 539 * Inherited from caller.
557 */ 540 */
558 541
559void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) 542void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
560{ 543{
561 fis[0] = 0x27; /* Register - Host to Device FIS */ 544 fis[0] = 0x27; /* Register - Host to Device FIS */
562 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, 545 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
@@ -597,7 +580,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
597 * Inherited from caller. 580 * Inherited from caller.
598 */ 581 */
599 582
600void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) 583void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
601{ 584{
602 tf->command = fis[2]; /* status */ 585 tf->command = fis[2]; /* status */
603 tf->feature = fis[3]; /* error */ 586 tf->feature = fis[3]; /* error */
@@ -615,79 +598,53 @@ void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
615 tf->hob_nsect = fis[13]; 598 tf->hob_nsect = fis[13];
616} 599}
617 600
618/** 601static const u8 ata_rw_cmds[] = {
619 * ata_prot_to_cmd - determine which read/write opcodes to use 602 /* pio multi */
620 * @protocol: ATA_PROT_xxx taskfile protocol 603 ATA_CMD_READ_MULTI,
621 * @lba48: true is lba48 is present 604 ATA_CMD_WRITE_MULTI,
622 * 605 ATA_CMD_READ_MULTI_EXT,
623 * Given necessary input, determine which read/write commands 606 ATA_CMD_WRITE_MULTI_EXT,
624 * to use to transfer data. 607 /* pio */
625 * 608 ATA_CMD_PIO_READ,
626 * LOCKING: 609 ATA_CMD_PIO_WRITE,
627 * None. 610 ATA_CMD_PIO_READ_EXT,
628 */ 611 ATA_CMD_PIO_WRITE_EXT,
629static int ata_prot_to_cmd(int protocol, int lba48) 612 /* dma */
630{ 613 ATA_CMD_READ,
631 int rcmd = 0, wcmd = 0; 614 ATA_CMD_WRITE,
632 615 ATA_CMD_READ_EXT,
633 switch (protocol) { 616 ATA_CMD_WRITE_EXT
634 case ATA_PROT_PIO: 617};
635 if (lba48) {
636 rcmd = ATA_CMD_PIO_READ_EXT;
637 wcmd = ATA_CMD_PIO_WRITE_EXT;
638 } else {
639 rcmd = ATA_CMD_PIO_READ;
640 wcmd = ATA_CMD_PIO_WRITE;
641 }
642 break;
643
644 case ATA_PROT_DMA:
645 if (lba48) {
646 rcmd = ATA_CMD_READ_EXT;
647 wcmd = ATA_CMD_WRITE_EXT;
648 } else {
649 rcmd = ATA_CMD_READ;
650 wcmd = ATA_CMD_WRITE;
651 }
652 break;
653
654 default:
655 return -1;
656 }
657
658 return rcmd | (wcmd << 8);
659}
660 618
661/** 619/**
662 * ata_dev_set_protocol - set taskfile protocol and r/w commands 620 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
663 * @dev: device to examine and configure 621 * @qc: command to examine and configure
664 * 622 *
665 * Examine the device configuration, after we have 623 * Examine the device configuration and tf->flags to calculate
666 * read the identify-device page and configured the 624 * the proper read/write commands and protocol to use.
667 * data transfer mode. Set internal state related to
668 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
669 * and calculate the proper read/write commands to use.
670 * 625 *
671 * LOCKING: 626 * LOCKING:
672 * caller. 627 * caller.
673 */ 628 */
674static void ata_dev_set_protocol(struct ata_device *dev) 629void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
675{ 630{
676 int pio = (dev->flags & ATA_DFLAG_PIO); 631 struct ata_taskfile *tf = &qc->tf;
677 int lba48 = (dev->flags & ATA_DFLAG_LBA48); 632 struct ata_device *dev = qc->dev;
678 int proto, cmd;
679 633
680 if (pio) 634 int index, lba48, write;
681 proto = dev->xfer_protocol = ATA_PROT_PIO; 635
682 else 636 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
683 proto = dev->xfer_protocol = ATA_PROT_DMA; 637 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
684 638
685 cmd = ata_prot_to_cmd(proto, lba48); 639 if (dev->flags & ATA_DFLAG_PIO) {
686 if (cmd < 0) 640 tf->protocol = ATA_PROT_PIO;
687 BUG(); 641 index = dev->multi_count ? 0 : 4;
642 } else {
643 tf->protocol = ATA_PROT_DMA;
644 index = 8;
645 }
688 646
689 dev->read_cmd = cmd & 0xff; 647 tf->command = ata_rw_cmds[index + lba48 + write];
690 dev->write_cmd = (cmd >> 8) & 0xff;
691} 648}
692 649
693static const char * xfer_mode_str[] = { 650static const char * xfer_mode_str[] = {
@@ -869,7 +826,7 @@ static unsigned int ata_devchk(struct ata_port *ap,
869 * the event of failure. 826 * the event of failure.
870 */ 827 */
871 828
872unsigned int ata_dev_classify(struct ata_taskfile *tf) 829unsigned int ata_dev_classify(const struct ata_taskfile *tf)
873{ 830{
874 /* Apple's open source Darwin code hints that some devices only 831 /* Apple's open source Darwin code hints that some devices only
875 * put a proper signature into the LBA mid/high registers, 832 * put a proper signature into the LBA mid/high registers,
@@ -921,8 +878,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
921 878
922 memset(&tf, 0, sizeof(tf)); 879 memset(&tf, 0, sizeof(tf));
923 880
924 err = ata_chk_err(ap);
925 ap->ops->tf_read(ap, &tf); 881 ap->ops->tf_read(ap, &tf);
882 err = tf.feature;
926 883
927 dev->class = ATA_DEV_NONE; 884 dev->class = ATA_DEV_NONE;
928 885
@@ -961,7 +918,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
961 * caller. 918 * caller.
962 */ 919 */
963 920
964void ata_dev_id_string(u16 *id, unsigned char *s, 921void ata_dev_id_string(const u16 *id, unsigned char *s,
965 unsigned int ofs, unsigned int len) 922 unsigned int ofs, unsigned int len)
966{ 923{
967 unsigned int c; 924 unsigned int c;
@@ -1078,7 +1035,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1078 * caller. 1035 * caller.
1079 */ 1036 */
1080 1037
1081static inline void ata_dump_id(struct ata_device *dev) 1038static inline void ata_dump_id(const struct ata_device *dev)
1082{ 1039{
1083 DPRINTK("49==0x%04x " 1040 DPRINTK("49==0x%04x "
1084 "53==0x%04x " 1041 "53==0x%04x "
@@ -1106,6 +1063,31 @@ static inline void ata_dump_id(struct ata_device *dev)
1106 dev->id[93]); 1063 dev->id[93]);
1107} 1064}
1108 1065
1066/*
1067 * Compute the PIO modes available for this device. This is not as
1068 * trivial as it seems if we must consider early devices correctly.
1069 *
1070 * FIXME: pre IDE drive timing (do we care ?).
1071 */
1072
1073static unsigned int ata_pio_modes(const struct ata_device *adev)
1074{
1075 u16 modes;
1076
1077 /* Usual case. Word 53 indicates word 88 is valid */
1078 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
1079 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
1080 modes <<= 3;
1081 modes |= 0x7;
1082 return modes;
1083 }
1084
1085 /* If word 88 isn't valid then Word 51 holds the PIO timing number
1086 for the maximum. Turn it into a mask and return it */
1087 modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
1088 return modes;
1089}
1090
1109/** 1091/**
1110 * ata_dev_identify - obtain IDENTIFY x DEVICE page 1092 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1111 * @ap: port on which device we wish to probe resides 1093 * @ap: port on which device we wish to probe resides
@@ -1131,10 +1113,9 @@ static inline void ata_dump_id(struct ata_device *dev)
1131static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1113static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1132{ 1114{
1133 struct ata_device *dev = &ap->device[device]; 1115 struct ata_device *dev = &ap->device[device];
1134 unsigned int i; 1116 unsigned int major_version;
1135 u16 tmp; 1117 u16 tmp;
1136 unsigned long xfer_modes; 1118 unsigned long xfer_modes;
1137 u8 status;
1138 unsigned int using_edd; 1119 unsigned int using_edd;
1139 DECLARE_COMPLETION(wait); 1120 DECLARE_COMPLETION(wait);
1140 struct ata_queued_cmd *qc; 1121 struct ata_queued_cmd *qc;
@@ -1188,8 +1169,11 @@ retry:
1188 else 1169 else
1189 wait_for_completion(&wait); 1170 wait_for_completion(&wait);
1190 1171
1191 status = ata_chk_status(ap); 1172 spin_lock_irqsave(&ap->host_set->lock, flags);
1192 if (status & ATA_ERR) { 1173 ap->ops->tf_read(ap, &qc->tf);
1174 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1175
1176 if (qc->tf.command & ATA_ERR) {
1193 /* 1177 /*
1194 * arg! EDD works for all test cases, but seems to return 1178 * arg! EDD works for all test cases, but seems to return
1195 * the ATA signature for some ATAPI devices. Until the 1179 * the ATA signature for some ATAPI devices. Until the
@@ -1202,7 +1186,7 @@ retry:
1202 * to have this problem. 1186 * to have this problem.
1203 */ 1187 */
1204 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) { 1188 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1205 u8 err = ata_chk_err(ap); 1189 u8 err = qc->tf.feature;
1206 if (err & ATA_ABORTED) { 1190 if (err & ATA_ABORTED) {
1207 dev->class = ATA_DEV_ATAPI; 1191 dev->class = ATA_DEV_ATAPI;
1208 qc->cursg = 0; 1192 qc->cursg = 0;
@@ -1229,9 +1213,9 @@ retry:
1229 * common ATA, ATAPI feature tests 1213 * common ATA, ATAPI feature tests
1230 */ 1214 */
1231 1215
1232 /* we require LBA and DMA support (bits 8 & 9 of word 49) */ 1216 /* we require DMA support (bits 8 of word 49) */
1233 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) { 1217 if (!ata_id_has_dma(dev->id)) {
1234 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); 1218 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1235 goto err_out_nosup; 1219 goto err_out_nosup;
1236 } 1220 }
1237 1221
@@ -1239,10 +1223,8 @@ retry:
1239 xfer_modes = dev->id[ATA_ID_UDMA_MODES]; 1223 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1240 if (!xfer_modes) 1224 if (!xfer_modes)
1241 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; 1225 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1242 if (!xfer_modes) { 1226 if (!xfer_modes)
1243 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3); 1227 xfer_modes = ata_pio_modes(dev);
1244 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1245 }
1246 1228
1247 ata_dump_id(dev); 1229 ata_dump_id(dev);
1248 1230
@@ -1251,32 +1233,75 @@ retry:
1251 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1233 if (!ata_id_is_ata(dev->id)) /* sanity check */
1252 goto err_out_nosup; 1234 goto err_out_nosup;
1253 1235
1236 /* get major version */
1254 tmp = dev->id[ATA_ID_MAJOR_VER]; 1237 tmp = dev->id[ATA_ID_MAJOR_VER];
1255 for (i = 14; i >= 1; i--) 1238 for (major_version = 14; major_version >= 1; major_version--)
1256 if (tmp & (1 << i)) 1239 if (tmp & (1 << major_version))
1257 break; 1240 break;
1258 1241
1259 /* we require at least ATA-3 */ 1242 /*
1260 if (i < 3) { 1243 * The exact sequence expected by certain pre-ATA4 drives is:
1261 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); 1244 * SRST RESET
1262 goto err_out_nosup; 1245 * IDENTIFY
1246 * INITIALIZE DEVICE PARAMETERS
1247 * anything else..
1248 * Some drives were very specific about that exact sequence.
1249 */
1250 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1251 ata_dev_init_params(ap, dev);
1252
1253 /* current CHS translation info (id[53-58]) might be
1254 * changed. reread the identify device info.
1255 */
1256 ata_dev_reread_id(ap, dev);
1263 } 1257 }
1264 1258
1265 if (ata_id_has_lba48(dev->id)) { 1259 if (ata_id_has_lba(dev->id)) {
1266 dev->flags |= ATA_DFLAG_LBA48; 1260 dev->flags |= ATA_DFLAG_LBA;
1267 dev->n_sectors = ata_id_u64(dev->id, 100); 1261
1268 } else { 1262 if (ata_id_has_lba48(dev->id)) {
1269 dev->n_sectors = ata_id_u32(dev->id, 60); 1263 dev->flags |= ATA_DFLAG_LBA48;
1264 dev->n_sectors = ata_id_u64(dev->id, 100);
1265 } else {
1266 dev->n_sectors = ata_id_u32(dev->id, 60);
1267 }
1268
1269 /* print device info to dmesg */
1270 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1271 ap->id, device,
1272 major_version,
1273 ata_mode_string(xfer_modes),
1274 (unsigned long long)dev->n_sectors,
1275 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1276 } else {
1277 /* CHS */
1278
1279 /* Default translation */
1280 dev->cylinders = dev->id[1];
1281 dev->heads = dev->id[3];
1282 dev->sectors = dev->id[6];
1283 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1284
1285 if (ata_id_current_chs_valid(dev->id)) {
1286 /* Current CHS translation is valid. */
1287 dev->cylinders = dev->id[54];
1288 dev->heads = dev->id[55];
1289 dev->sectors = dev->id[56];
1290
1291 dev->n_sectors = ata_id_u32(dev->id, 57);
1292 }
1293
1294 /* print device info to dmesg */
1295 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1296 ap->id, device,
1297 major_version,
1298 ata_mode_string(xfer_modes),
1299 (unsigned long long)dev->n_sectors,
1300 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1301
1270 } 1302 }
1271 1303
1272 ap->host->max_cmd_len = 16; 1304 ap->host->max_cmd_len = 16;
1273
1274 /* print device info to dmesg */
1275 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1276 ap->id, device,
1277 ata_mode_string(xfer_modes),
1278 (unsigned long long)dev->n_sectors,
1279 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1280 } 1305 }
1281 1306
1282 /* ATAPI-specific feature tests */ 1307 /* ATAPI-specific feature tests */
@@ -1310,7 +1335,7 @@ err_out:
1310} 1335}
1311 1336
1312 1337
1313static inline u8 ata_dev_knobble(struct ata_port *ap) 1338static inline u8 ata_dev_knobble(const struct ata_port *ap)
1314{ 1339{
1315 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1340 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1316} 1341}
@@ -1496,7 +1521,153 @@ void ata_port_disable(struct ata_port *ap)
1496 ap->flags |= ATA_FLAG_PORT_DISABLED; 1521 ap->flags |= ATA_FLAG_PORT_DISABLED;
1497} 1522}
1498 1523
1499static struct { 1524/*
1525 * This mode timing computation functionality is ported over from
1526 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1527 */
1528/*
1529 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1530 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1531 * for PIO 5, which is a nonstandard extension and UDMA6, which
1532 * is currently supported only by Maxtor drives.
1533 */
1534
1535static const struct ata_timing ata_timing[] = {
1536
1537 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1538 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1539 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1540 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1541
1542 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1543 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1544 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1545
1546/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1547
1548 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1549 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1550 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1551
1552 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1553 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1554 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1555
1556/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1557 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1558 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1559
1560 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1561 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1562 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1563
1564/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1565
1566 { 0xFF }
1567};
1568
1569#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1570#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1571
1572static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1573{
1574 q->setup = EZ(t->setup * 1000, T);
1575 q->act8b = EZ(t->act8b * 1000, T);
1576 q->rec8b = EZ(t->rec8b * 1000, T);
1577 q->cyc8b = EZ(t->cyc8b * 1000, T);
1578 q->active = EZ(t->active * 1000, T);
1579 q->recover = EZ(t->recover * 1000, T);
1580 q->cycle = EZ(t->cycle * 1000, T);
1581 q->udma = EZ(t->udma * 1000, UT);
1582}
1583
1584void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1585 struct ata_timing *m, unsigned int what)
1586{
1587 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1588 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1589 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1590 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1591 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1592 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1593 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1594 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1595}
1596
1597static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1598{
1599 const struct ata_timing *t;
1600
1601 for (t = ata_timing; t->mode != speed; t++)
1602 if (t->mode == 0xFF)
1603 return NULL;
1604 return t;
1605}
1606
1607int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1608 struct ata_timing *t, int T, int UT)
1609{
1610 const struct ata_timing *s;
1611 struct ata_timing p;
1612
1613 /*
1614 * Find the mode.
1615 */
1616
1617 if (!(s = ata_timing_find_mode(speed)))
1618 return -EINVAL;
1619
1620 /*
1621 * If the drive is an EIDE drive, it can tell us it needs extended
1622 * PIO/MW_DMA cycle timing.
1623 */
1624
1625 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1626 memset(&p, 0, sizeof(p));
1627 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1628 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1629 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1630 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1631 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1632 }
1633 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1634 }
1635
1636 /*
1637 * Convert the timing to bus clock counts.
1638 */
1639
1640 ata_timing_quantize(s, t, T, UT);
1641
1642 /*
1643 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
1644 * and some other commands. We have to ensure that the DMA cycle timing is
1645 * slower/equal than the fastest PIO timing.
1646 */
1647
1648 if (speed > XFER_PIO_4) {
1649 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1650 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1651 }
1652
1653 /*
1654 * Lenghten active & recovery time so that cycle time is correct.
1655 */
1656
1657 if (t->act8b + t->rec8b < t->cyc8b) {
1658 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1659 t->rec8b = t->cyc8b - t->act8b;
1660 }
1661
1662 if (t->active + t->recover < t->cycle) {
1663 t->active += (t->cycle - (t->active + t->recover)) / 2;
1664 t->recover = t->cycle - t->active;
1665 }
1666
1667 return 0;
1668}
1669
1670static const struct {
1500 unsigned int shift; 1671 unsigned int shift;
1501 u8 base; 1672 u8 base;
1502} xfer_mode_classes[] = { 1673} xfer_mode_classes[] = {
@@ -1603,7 +1774,7 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1603 */ 1774 */
1604static void ata_set_mode(struct ata_port *ap) 1775static void ata_set_mode(struct ata_port *ap)
1605{ 1776{
1606 unsigned int i, xfer_shift; 1777 unsigned int xfer_shift;
1607 u8 xfer_mode; 1778 u8 xfer_mode;
1608 int rc; 1779 int rc;
1609 1780
@@ -1632,11 +1803,6 @@ static void ata_set_mode(struct ata_port *ap)
1632 if (ap->ops->post_set_mode) 1803 if (ap->ops->post_set_mode)
1633 ap->ops->post_set_mode(ap); 1804 ap->ops->post_set_mode(ap);
1634 1805
1635 for (i = 0; i < 2; i++) {
1636 struct ata_device *dev = &ap->device[i];
1637 ata_dev_set_protocol(dev);
1638 }
1639
1640 return; 1806 return;
1641 1807
1642err_out: 1808err_out:
@@ -1910,7 +2076,8 @@ err_out:
1910 DPRINTK("EXIT\n"); 2076 DPRINTK("EXIT\n");
1911} 2077}
1912 2078
1913static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev) 2079static void ata_pr_blacklisted(const struct ata_port *ap,
2080 const struct ata_device *dev)
1914{ 2081{
1915 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2082 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1916 ap->id, dev->devno); 2083 ap->id, dev->devno);
@@ -1948,7 +2115,7 @@ static const char * ata_dma_blacklist [] = {
1948 "_NEC DV5800A", 2115 "_NEC DV5800A",
1949}; 2116};
1950 2117
1951static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) 2118static int ata_dma_blacklisted(const struct ata_device *dev)
1952{ 2119{
1953 unsigned char model_num[40]; 2120 unsigned char model_num[40];
1954 char *s; 2121 char *s;
@@ -1973,9 +2140,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1973 return 0; 2140 return 0;
1974} 2141}
1975 2142
1976static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) 2143static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
1977{ 2144{
1978 struct ata_device *master, *slave; 2145 const struct ata_device *master, *slave;
1979 unsigned int mask; 2146 unsigned int mask;
1980 2147
1981 master = &ap->device[0]; 2148 master = &ap->device[0];
@@ -1987,14 +2154,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1987 mask = ap->udma_mask; 2154 mask = ap->udma_mask;
1988 if (ata_dev_present(master)) { 2155 if (ata_dev_present(master)) {
1989 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); 2156 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1990 if (ata_dma_blacklisted(ap, master)) { 2157 if (ata_dma_blacklisted(master)) {
1991 mask = 0; 2158 mask = 0;
1992 ata_pr_blacklisted(ap, master); 2159 ata_pr_blacklisted(ap, master);
1993 } 2160 }
1994 } 2161 }
1995 if (ata_dev_present(slave)) { 2162 if (ata_dev_present(slave)) {
1996 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); 2163 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1997 if (ata_dma_blacklisted(ap, slave)) { 2164 if (ata_dma_blacklisted(slave)) {
1998 mask = 0; 2165 mask = 0;
1999 ata_pr_blacklisted(ap, slave); 2166 ata_pr_blacklisted(ap, slave);
2000 } 2167 }
@@ -2004,14 +2171,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
2004 mask = ap->mwdma_mask; 2171 mask = ap->mwdma_mask;
2005 if (ata_dev_present(master)) { 2172 if (ata_dev_present(master)) {
2006 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); 2173 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2007 if (ata_dma_blacklisted(ap, master)) { 2174 if (ata_dma_blacklisted(master)) {
2008 mask = 0; 2175 mask = 0;
2009 ata_pr_blacklisted(ap, master); 2176 ata_pr_blacklisted(ap, master);
2010 } 2177 }
2011 } 2178 }
2012 if (ata_dev_present(slave)) { 2179 if (ata_dev_present(slave)) {
2013 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); 2180 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2014 if (ata_dma_blacklisted(ap, slave)) { 2181 if (ata_dma_blacklisted(slave)) {
2015 mask = 0; 2182 mask = 0;
2016 ata_pr_blacklisted(ap, slave); 2183 ata_pr_blacklisted(ap, slave);
2017 } 2184 }
@@ -2075,7 +2242,7 @@ static int fgb(u32 bitmap)
2075 * Zero on success, negative on error. 2242 * Zero on success, negative on error.
2076 */ 2243 */
2077 2244
2078static int ata_choose_xfer_mode(struct ata_port *ap, 2245static int ata_choose_xfer_mode(const struct ata_port *ap,
2079 u8 *xfer_mode_out, 2246 u8 *xfer_mode_out,
2080 unsigned int *xfer_shift_out) 2247 unsigned int *xfer_shift_out)
2081{ 2248{
@@ -2144,6 +2311,110 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2144} 2311}
2145 2312
2146/** 2313/**
2314 * ata_dev_reread_id - Reread the device identify device info
2315 * @ap: port where the device is
2316 * @dev: device to reread the identify device info
2317 *
2318 * LOCKING:
2319 */
2320
2321static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2322{
2323 DECLARE_COMPLETION(wait);
2324 struct ata_queued_cmd *qc;
2325 unsigned long flags;
2326 int rc;
2327
2328 qc = ata_qc_new_init(ap, dev);
2329 BUG_ON(qc == NULL);
2330
2331 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
2332 qc->dma_dir = DMA_FROM_DEVICE;
2333
2334 if (dev->class == ATA_DEV_ATA) {
2335 qc->tf.command = ATA_CMD_ID_ATA;
2336 DPRINTK("do ATA identify\n");
2337 } else {
2338 qc->tf.command = ATA_CMD_ID_ATAPI;
2339 DPRINTK("do ATAPI identify\n");
2340 }
2341
2342 qc->tf.flags |= ATA_TFLAG_DEVICE;
2343 qc->tf.protocol = ATA_PROT_PIO;
2344 qc->nsect = 1;
2345
2346 qc->waiting = &wait;
2347 qc->complete_fn = ata_qc_complete_noop;
2348
2349 spin_lock_irqsave(&ap->host_set->lock, flags);
2350 rc = ata_qc_issue(qc);
2351 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2352
2353 if (rc)
2354 goto err_out;
2355
2356 wait_for_completion(&wait);
2357
2358 swap_buf_le16(dev->id, ATA_ID_WORDS);
2359
2360 ata_dump_id(dev);
2361
2362 DPRINTK("EXIT\n");
2363
2364 return;
2365err_out:
2366 ata_port_disable(ap);
2367}
2368
2369/**
2370 * ata_dev_init_params - Issue INIT DEV PARAMS command
2371 * @ap: Port associated with device @dev
2372 * @dev: Device to which command will be sent
2373 *
2374 * LOCKING:
2375 */
2376
2377static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2378{
2379 DECLARE_COMPLETION(wait);
2380 struct ata_queued_cmd *qc;
2381 int rc;
2382 unsigned long flags;
2383 u16 sectors = dev->id[6];
2384 u16 heads = dev->id[3];
2385
2386 /* Number of sectors per track 1-255. Number of heads 1-16 */
2387 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2388 return;
2389
2390 /* set up init dev params taskfile */
2391 DPRINTK("init dev params \n");
2392
2393 qc = ata_qc_new_init(ap, dev);
2394 BUG_ON(qc == NULL);
2395
2396 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2397 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2398 qc->tf.protocol = ATA_PROT_NODATA;
2399 qc->tf.nsect = sectors;
2400 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2401
2402 qc->waiting = &wait;
2403 qc->complete_fn = ata_qc_complete_noop;
2404
2405 spin_lock_irqsave(&ap->host_set->lock, flags);
2406 rc = ata_qc_issue(qc);
2407 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2408
2409 if (rc)
2410 ata_port_disable(ap);
2411 else
2412 wait_for_completion(&wait);
2413
2414 DPRINTK("EXIT\n");
2415}
2416
2417/**
2147 * ata_sg_clean - Unmap DMA memory associated with command 2418 * ata_sg_clean - Unmap DMA memory associated with command
2148 * @qc: Command containing DMA memory to be released 2419 * @qc: Command containing DMA memory to be released
2149 * 2420 *
@@ -2284,19 +2555,12 @@ void ata_qc_prep(struct ata_queued_cmd *qc)
2284 2555
2285void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 2556void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2286{ 2557{
2287 struct scatterlist *sg;
2288
2289 qc->flags |= ATA_QCFLAG_SINGLE; 2558 qc->flags |= ATA_QCFLAG_SINGLE;
2290 2559
2291 memset(&qc->sgent, 0, sizeof(qc->sgent));
2292 qc->sg = &qc->sgent; 2560 qc->sg = &qc->sgent;
2293 qc->n_elem = 1; 2561 qc->n_elem = 1;
2294 qc->buf_virt = buf; 2562 qc->buf_virt = buf;
2295 2563 sg_init_one(qc->sg, buf, buflen);
2296 sg = qc->sg;
2297 sg->page = virt_to_page(buf);
2298 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2299 sg->length = buflen;
2300} 2564}
2301 2565
2302/** 2566/**
@@ -2399,7 +2663,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2399 * None. (grabs host lock) 2663 * None. (grabs host lock)
2400 */ 2664 */
2401 2665
2402void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 2666void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
2403{ 2667{
2404 struct ata_port *ap = qc->ap; 2668 struct ata_port *ap = qc->ap;
2405 unsigned long flags; 2669 unsigned long flags;
@@ -2407,38 +2671,38 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2407 spin_lock_irqsave(&ap->host_set->lock, flags); 2671 spin_lock_irqsave(&ap->host_set->lock, flags);
2408 ap->flags &= ~ATA_FLAG_NOINTR; 2672 ap->flags &= ~ATA_FLAG_NOINTR;
2409 ata_irq_on(ap); 2673 ata_irq_on(ap);
2410 ata_qc_complete(qc, drv_stat); 2674 ata_qc_complete(qc, err_mask);
2411 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2675 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2412} 2676}
2413 2677
2414/** 2678/**
2415 * ata_pio_poll - 2679 * ata_pio_poll -
2416 * @ap: 2680 * @ap: the target ata_port
2417 * 2681 *
2418 * LOCKING: 2682 * LOCKING:
2419 * None. (executing in kernel thread context) 2683 * None. (executing in kernel thread context)
2420 * 2684 *
2421 * RETURNS: 2685 * RETURNS:
2422 * 2686 * timeout value to use
2423 */ 2687 */
2424 2688
2425static unsigned long ata_pio_poll(struct ata_port *ap) 2689static unsigned long ata_pio_poll(struct ata_port *ap)
2426{ 2690{
2427 u8 status; 2691 u8 status;
2428 unsigned int poll_state = PIO_ST_UNKNOWN; 2692 unsigned int poll_state = HSM_ST_UNKNOWN;
2429 unsigned int reg_state = PIO_ST_UNKNOWN; 2693 unsigned int reg_state = HSM_ST_UNKNOWN;
2430 const unsigned int tmout_state = PIO_ST_TMOUT; 2694 const unsigned int tmout_state = HSM_ST_TMOUT;
2431 2695
2432 switch (ap->pio_task_state) { 2696 switch (ap->hsm_task_state) {
2433 case PIO_ST: 2697 case HSM_ST:
2434 case PIO_ST_POLL: 2698 case HSM_ST_POLL:
2435 poll_state = PIO_ST_POLL; 2699 poll_state = HSM_ST_POLL;
2436 reg_state = PIO_ST; 2700 reg_state = HSM_ST;
2437 break; 2701 break;
2438 case PIO_ST_LAST: 2702 case HSM_ST_LAST:
2439 case PIO_ST_LAST_POLL: 2703 case HSM_ST_LAST_POLL:
2440 poll_state = PIO_ST_LAST_POLL; 2704 poll_state = HSM_ST_LAST_POLL;
2441 reg_state = PIO_ST_LAST; 2705 reg_state = HSM_ST_LAST;
2442 break; 2706 break;
2443 default: 2707 default:
2444 BUG(); 2708 BUG();
@@ -2448,20 +2712,20 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2448 status = ata_chk_status(ap); 2712 status = ata_chk_status(ap);
2449 if (status & ATA_BUSY) { 2713 if (status & ATA_BUSY) {
2450 if (time_after(jiffies, ap->pio_task_timeout)) { 2714 if (time_after(jiffies, ap->pio_task_timeout)) {
2451 ap->pio_task_state = tmout_state; 2715 ap->hsm_task_state = tmout_state;
2452 return 0; 2716 return 0;
2453 } 2717 }
2454 ap->pio_task_state = poll_state; 2718 ap->hsm_task_state = poll_state;
2455 return ATA_SHORT_PAUSE; 2719 return ATA_SHORT_PAUSE;
2456 } 2720 }
2457 2721
2458 ap->pio_task_state = reg_state; 2722 ap->hsm_task_state = reg_state;
2459 return 0; 2723 return 0;
2460} 2724}
2461 2725
2462/** 2726/**
2463 * ata_pio_complete - 2727 * ata_pio_complete - check if drive is busy or idle
2464 * @ap: 2728 * @ap: the target ata_port
2465 * 2729 *
2466 * LOCKING: 2730 * LOCKING:
2467 * None. (executing in kernel thread context) 2731 * None. (executing in kernel thread context)
@@ -2480,14 +2744,14 @@ static int ata_pio_complete (struct ata_port *ap)
2480 * we enter, BSY will be cleared in a chk-status or two. If not, 2744 * we enter, BSY will be cleared in a chk-status or two. If not,
2481 * the drive is probably seeking or something. Snooze for a couple 2745 * the drive is probably seeking or something. Snooze for a couple
2482 * msecs, then chk-status again. If still busy, fall back to 2746 * msecs, then chk-status again. If still busy, fall back to
2483 * PIO_ST_POLL state. 2747 * HSM_ST_POLL state.
2484 */ 2748 */
2485 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2749 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2750 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 msleep(2); 2751 msleep(2);
2488 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2752 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2753 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2490 ap->pio_task_state = PIO_ST_LAST_POLL; 2754 ap->hsm_task_state = HSM_ST_LAST_POLL;
2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2755 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2492 return 0; 2756 return 0;
2493 } 2757 }
@@ -2495,16 +2759,16 @@ static int ata_pio_complete (struct ata_port *ap)
2495 2759
2496 drv_stat = ata_wait_idle(ap); 2760 drv_stat = ata_wait_idle(ap);
2497 if (!ata_ok(drv_stat)) { 2761 if (!ata_ok(drv_stat)) {
2498 ap->pio_task_state = PIO_ST_ERR; 2762 ap->hsm_task_state = HSM_ST_ERR;
2499 return 0; 2763 return 0;
2500 } 2764 }
2501 2765
2502 qc = ata_qc_from_tag(ap, ap->active_tag); 2766 qc = ata_qc_from_tag(ap, ap->active_tag);
2503 assert(qc != NULL); 2767 assert(qc != NULL);
2504 2768
2505 ap->pio_task_state = PIO_ST_IDLE; 2769 ap->hsm_task_state = HSM_ST_IDLE;
2506 2770
2507 ata_poll_qc_complete(qc, drv_stat); 2771 ata_poll_qc_complete(qc, 0);
2508 2772
2509 /* another command may start at this point */ 2773 /* another command may start at this point */
2510 2774
@@ -2513,7 +2777,7 @@ static int ata_pio_complete (struct ata_port *ap)
2513 2777
2514 2778
2515/** 2779/**
2516 * swap_buf_le16 - 2780 * swap_buf_le16 - swap halves of 16-words in place
2517 * @buf: Buffer to swap 2781 * @buf: Buffer to swap
2518 * @buf_words: Number of 16-bit words in buffer. 2782 * @buf_words: Number of 16-bit words in buffer.
2519 * 2783 *
@@ -2522,6 +2786,7 @@ static int ata_pio_complete (struct ata_port *ap)
2522 * vice-versa. 2786 * vice-versa.
2523 * 2787 *
2524 * LOCKING: 2788 * LOCKING:
2789 * Inherited from caller.
2525 */ 2790 */
2526void swap_buf_le16(u16 *buf, unsigned int buf_words) 2791void swap_buf_le16(u16 *buf, unsigned int buf_words)
2527{ 2792{
@@ -2544,7 +2809,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
2544 * 2809 *
2545 * LOCKING: 2810 * LOCKING:
2546 * Inherited from caller. 2811 * Inherited from caller.
2547 *
2548 */ 2812 */
2549 2813
2550static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 2814static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2590,7 +2854,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2590 * 2854 *
2591 * LOCKING: 2855 * LOCKING:
2592 * Inherited from caller. 2856 * Inherited from caller.
2593 *
2594 */ 2857 */
2595 2858
2596static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 2859static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2630,7 +2893,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2630 * 2893 *
2631 * LOCKING: 2894 * LOCKING:
2632 * Inherited from caller. 2895 * Inherited from caller.
2633 *
2634 */ 2896 */
2635 2897
2636static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 2898static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2662,7 +2924,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2662 unsigned char *buf; 2924 unsigned char *buf;
2663 2925
2664 if (qc->cursect == (qc->nsect - 1)) 2926 if (qc->cursect == (qc->nsect - 1))
2665 ap->pio_task_state = PIO_ST_LAST; 2927 ap->hsm_task_state = HSM_ST_LAST;
2666 2928
2667 page = sg[qc->cursg].page; 2929 page = sg[qc->cursg].page;
2668 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2930 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2712,7 +2974,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2712 unsigned int offset, count; 2974 unsigned int offset, count;
2713 2975
2714 if (qc->curbytes + bytes >= qc->nbytes) 2976 if (qc->curbytes + bytes >= qc->nbytes)
2715 ap->pio_task_state = PIO_ST_LAST; 2977 ap->hsm_task_state = HSM_ST_LAST;
2716 2978
2717next_sg: 2979next_sg:
2718 if (unlikely(qc->cursg >= qc->n_elem)) { 2980 if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2734,7 +2996,7 @@ next_sg:
2734 for (i = 0; i < words; i++) 2996 for (i = 0; i < words; i++)
2735 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 2997 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2736 2998
2737 ap->pio_task_state = PIO_ST_LAST; 2999 ap->hsm_task_state = HSM_ST_LAST;
2738 return; 3000 return;
2739 } 3001 }
2740 3002
@@ -2783,7 +3045,6 @@ next_sg:
2783 * 3045 *
2784 * LOCKING: 3046 * LOCKING:
2785 * Inherited from caller. 3047 * Inherited from caller.
2786 *
2787 */ 3048 */
2788 3049
2789static void atapi_pio_bytes(struct ata_queued_cmd *qc) 3050static void atapi_pio_bytes(struct ata_queued_cmd *qc)
@@ -2815,12 +3076,12 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2815err_out: 3076err_out:
2816 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3077 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2817 ap->id, dev->devno); 3078 ap->id, dev->devno);
2818 ap->pio_task_state = PIO_ST_ERR; 3079 ap->hsm_task_state = HSM_ST_ERR;
2819} 3080}
2820 3081
2821/** 3082/**
2822 * ata_pio_sector - 3083 * ata_pio_block - start PIO on a block
2823 * @ap: 3084 * @ap: the target ata_port
2824 * 3085 *
2825 * LOCKING: 3086 * LOCKING:
2826 * None. (executing in kernel thread context) 3087 * None. (executing in kernel thread context)
@@ -2832,19 +3093,19 @@ static void ata_pio_block(struct ata_port *ap)
2832 u8 status; 3093 u8 status;
2833 3094
2834 /* 3095 /*
2835 * This is purely hueristic. This is a fast path. 3096 * This is purely heuristic. This is a fast path.
2836 * Sometimes when we enter, BSY will be cleared in 3097 * Sometimes when we enter, BSY will be cleared in
2837 * a chk-status or two. If not, the drive is probably seeking 3098 * a chk-status or two. If not, the drive is probably seeking
2838 * or something. Snooze for a couple msecs, then 3099 * or something. Snooze for a couple msecs, then
2839 * chk-status again. If still busy, fall back to 3100 * chk-status again. If still busy, fall back to
2840 * PIO_ST_POLL state. 3101 * HSM_ST_POLL state.
2841 */ 3102 */
2842 status = ata_busy_wait(ap, ATA_BUSY, 5); 3103 status = ata_busy_wait(ap, ATA_BUSY, 5);
2843 if (status & ATA_BUSY) { 3104 if (status & ATA_BUSY) {
2844 msleep(2); 3105 msleep(2);
2845 status = ata_busy_wait(ap, ATA_BUSY, 10); 3106 status = ata_busy_wait(ap, ATA_BUSY, 10);
2846 if (status & ATA_BUSY) { 3107 if (status & ATA_BUSY) {
2847 ap->pio_task_state = PIO_ST_POLL; 3108 ap->hsm_task_state = HSM_ST_POLL;
2848 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 3109 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2849 return; 3110 return;
2850 } 3111 }
@@ -2856,7 +3117,7 @@ static void ata_pio_block(struct ata_port *ap)
2856 if (is_atapi_taskfile(&qc->tf)) { 3117 if (is_atapi_taskfile(&qc->tf)) {
2857 /* no more data to transfer or unsupported ATAPI command */ 3118 /* no more data to transfer or unsupported ATAPI command */
2858 if ((status & ATA_DRQ) == 0) { 3119 if ((status & ATA_DRQ) == 0) {
2859 ap->pio_task_state = PIO_ST_LAST; 3120 ap->hsm_task_state = HSM_ST_LAST;
2860 return; 3121 return;
2861 } 3122 }
2862 3123
@@ -2864,7 +3125,7 @@ static void ata_pio_block(struct ata_port *ap)
2864 } else { 3125 } else {
2865 /* handle BSY=0, DRQ=0 as error */ 3126 /* handle BSY=0, DRQ=0 as error */
2866 if ((status & ATA_DRQ) == 0) { 3127 if ((status & ATA_DRQ) == 0) {
2867 ap->pio_task_state = PIO_ST_ERR; 3128 ap->hsm_task_state = HSM_ST_ERR;
2868 return; 3129 return;
2869 } 3130 }
2870 3131
@@ -2875,18 +3136,15 @@ static void ata_pio_block(struct ata_port *ap)
2875static void ata_pio_error(struct ata_port *ap) 3136static void ata_pio_error(struct ata_port *ap)
2876{ 3137{
2877 struct ata_queued_cmd *qc; 3138 struct ata_queued_cmd *qc;
2878 u8 drv_stat; 3139
3140 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
2879 3141
2880 qc = ata_qc_from_tag(ap, ap->active_tag); 3142 qc = ata_qc_from_tag(ap, ap->active_tag);
2881 assert(qc != NULL); 3143 assert(qc != NULL);
2882 3144
2883 drv_stat = ata_chk_status(ap); 3145 ap->hsm_task_state = HSM_ST_IDLE;
2884 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2885 ap->id, drv_stat);
2886
2887 ap->pio_task_state = PIO_ST_IDLE;
2888 3146
2889 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 3147 ata_poll_qc_complete(qc, AC_ERR_ATA_BUS);
2890} 3148}
2891 3149
2892static void ata_pio_task(void *_data) 3150static void ata_pio_task(void *_data)
@@ -2899,25 +3157,25 @@ fsm_start:
2899 timeout = 0; 3157 timeout = 0;
2900 qc_completed = 0; 3158 qc_completed = 0;
2901 3159
2902 switch (ap->pio_task_state) { 3160 switch (ap->hsm_task_state) {
2903 case PIO_ST_IDLE: 3161 case HSM_ST_IDLE:
2904 return; 3162 return;
2905 3163
2906 case PIO_ST: 3164 case HSM_ST:
2907 ata_pio_block(ap); 3165 ata_pio_block(ap);
2908 break; 3166 break;
2909 3167
2910 case PIO_ST_LAST: 3168 case HSM_ST_LAST:
2911 qc_completed = ata_pio_complete(ap); 3169 qc_completed = ata_pio_complete(ap);
2912 break; 3170 break;
2913 3171
2914 case PIO_ST_POLL: 3172 case HSM_ST_POLL:
2915 case PIO_ST_LAST_POLL: 3173 case HSM_ST_LAST_POLL:
2916 timeout = ata_pio_poll(ap); 3174 timeout = ata_pio_poll(ap);
2917 break; 3175 break;
2918 3176
2919 case PIO_ST_TMOUT: 3177 case HSM_ST_TMOUT:
2920 case PIO_ST_ERR: 3178 case HSM_ST_ERR:
2921 ata_pio_error(ap); 3179 ata_pio_error(ap);
2922 return; 3180 return;
2923 } 3181 }
@@ -2928,52 +3186,6 @@ fsm_start:
2928 goto fsm_start; 3186 goto fsm_start;
2929} 3187}
2930 3188
2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2932 struct scsi_cmnd *cmd)
2933{
2934 DECLARE_COMPLETION(wait);
2935 struct ata_queued_cmd *qc;
2936 unsigned long flags;
2937 int rc;
2938
2939 DPRINTK("ATAPI request sense\n");
2940
2941 qc = ata_qc_new_init(ap, dev);
2942 BUG_ON(qc == NULL);
2943
2944 /* FIXME: is this needed? */
2945 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2946
2947 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2948 qc->dma_dir = DMA_FROM_DEVICE;
2949
2950 memset(&qc->cdb, 0, ap->cdb_len);
2951 qc->cdb[0] = REQUEST_SENSE;
2952 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2953
2954 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2955 qc->tf.command = ATA_CMD_PACKET;
2956
2957 qc->tf.protocol = ATA_PROT_ATAPI;
2958 qc->tf.lbam = (8 * 1024) & 0xff;
2959 qc->tf.lbah = (8 * 1024) >> 8;
2960 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2961
2962 qc->waiting = &wait;
2963 qc->complete_fn = ata_qc_complete_noop;
2964
2965 spin_lock_irqsave(&ap->host_set->lock, flags);
2966 rc = ata_qc_issue(qc);
2967 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2968
2969 if (rc)
2970 ata_port_disable(ap);
2971 else
2972 wait_for_completion(&wait);
2973
2974 DPRINTK("EXIT\n");
2975}
2976
2977/** 3189/**
2978 * ata_qc_timeout - Handle timeout of queued command 3190 * ata_qc_timeout - Handle timeout of queued command
2979 * @qc: Command that timed out 3191 * @qc: Command that timed out
@@ -3055,7 +3267,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3055 ap->id, qc->tf.command, drv_stat, host_stat); 3267 ap->id, qc->tf.command, drv_stat, host_stat);
3056 3268
3057 /* complete taskfile transaction */ 3269 /* complete taskfile transaction */
3058 ata_qc_complete(qc, drv_stat); 3270 ata_qc_complete(qc, ac_err_mask(drv_stat));
3059 break; 3271 break;
3060 } 3272 }
3061 3273
@@ -3091,14 +3303,14 @@ void ata_eng_timeout(struct ata_port *ap)
3091 DPRINTK("ENTER\n"); 3303 DPRINTK("ENTER\n");
3092 3304
3093 qc = ata_qc_from_tag(ap, ap->active_tag); 3305 qc = ata_qc_from_tag(ap, ap->active_tag);
3094 if (!qc) { 3306 if (qc)
3307 ata_qc_timeout(qc);
3308 else {
3095 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3309 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3096 ap->id); 3310 ap->id);
3097 goto out; 3311 goto out;
3098 } 3312 }
3099 3313
3100 ata_qc_timeout(qc);
3101
3102out: 3314out:
3103 DPRINTK("EXIT\n"); 3315 DPRINTK("EXIT\n");
3104} 3316}
@@ -3155,15 +3367,12 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3155 qc->nbytes = qc->curbytes = 0; 3367 qc->nbytes = qc->curbytes = 0;
3156 3368
3157 ata_tf_init(ap, &qc->tf, dev->devno); 3369 ata_tf_init(ap, &qc->tf, dev->devno);
3158
3159 if (dev->flags & ATA_DFLAG_LBA48)
3160 qc->tf.flags |= ATA_TFLAG_LBA48;
3161 } 3370 }
3162 3371
3163 return qc; 3372 return qc;
3164} 3373}
3165 3374
3166static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3375int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask)
3167{ 3376{
3168 return 0; 3377 return 0;
3169} 3378}
@@ -3201,7 +3410,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3201 * 3410 *
3202 * LOCKING: 3411 * LOCKING:
3203 * spin_lock_irqsave(host_set lock) 3412 * spin_lock_irqsave(host_set lock)
3204 *
3205 */ 3413 */
3206void ata_qc_free(struct ata_queued_cmd *qc) 3414void ata_qc_free(struct ata_queued_cmd *qc)
3207{ 3415{
@@ -3221,10 +3429,9 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3221 * 3429 *
3222 * LOCKING: 3430 * LOCKING:
3223 * spin_lock_irqsave(host_set lock) 3431 * spin_lock_irqsave(host_set lock)
3224 *
3225 */ 3432 */
3226 3433
3227void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 3434void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
3228{ 3435{
3229 int rc; 3436 int rc;
3230 3437
@@ -3241,7 +3448,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3241 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3448 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3242 3449
3243 /* call completion callback */ 3450 /* call completion callback */
3244 rc = qc->complete_fn(qc, drv_stat); 3451 rc = qc->complete_fn(qc, err_mask);
3245 3452
3246 /* if callback indicates not to complete command (non-zero), 3453 /* if callback indicates not to complete command (non-zero),
3247 * return immediately 3454 * return immediately
@@ -3360,7 +3567,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3360 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3567 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3361 ata_qc_set_polling(qc); 3568 ata_qc_set_polling(qc);
3362 ata_tf_to_host_nolock(ap, &qc->tf); 3569 ata_tf_to_host_nolock(ap, &qc->tf);
3363 ap->pio_task_state = PIO_ST; 3570 ap->hsm_task_state = HSM_ST;
3364 queue_work(ata_wq, &ap->pio_task); 3571 queue_work(ata_wq, &ap->pio_task);
3365 break; 3572 break;
3366 3573
@@ -3586,7 +3793,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3586 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3793 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3587 host_stat = readb(mmio + ATA_DMA_STATUS); 3794 host_stat = readb(mmio + ATA_DMA_STATUS);
3588 } else 3795 } else
3589 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3796 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3590 return host_stat; 3797 return host_stat;
3591} 3798}
3592 3799
@@ -3679,7 +3886,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
3679 ap->ops->irq_clear(ap); 3886 ap->ops->irq_clear(ap);
3680 3887
3681 /* complete taskfile transaction */ 3888 /* complete taskfile transaction */
3682 ata_qc_complete(qc, status); 3889 ata_qc_complete(qc, ac_err_mask(status));
3683 break; 3890 break;
3684 3891
3685 default: 3892 default:
@@ -3715,7 +3922,6 @@ idle_irq:
3715 * 3922 *
3716 * RETURNS: 3923 * RETURNS:
3717 * IRQ_NONE or IRQ_HANDLED. 3924 * IRQ_NONE or IRQ_HANDLED.
3718 *
3719 */ 3925 */
3720 3926
3721irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 3927irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
@@ -3775,7 +3981,7 @@ static void atapi_packet_task(void *_data)
3775 /* sleep-wait for BSY to clear */ 3981 /* sleep-wait for BSY to clear */
3776 DPRINTK("busy wait\n"); 3982 DPRINTK("busy wait\n");
3777 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) 3983 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3778 goto err_out; 3984 goto err_out_status;
3779 3985
3780 /* make sure DRQ is set */ 3986 /* make sure DRQ is set */
3781 status = ata_chk_status(ap); 3987 status = ata_chk_status(ap);
@@ -3806,14 +4012,16 @@ static void atapi_packet_task(void *_data)
3806 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4012 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3807 4013
3808 /* PIO commands are handled by polling */ 4014 /* PIO commands are handled by polling */
3809 ap->pio_task_state = PIO_ST; 4015 ap->hsm_task_state = HSM_ST;
3810 queue_work(ata_wq, &ap->pio_task); 4016 queue_work(ata_wq, &ap->pio_task);
3811 } 4017 }
3812 4018
3813 return; 4019 return;
3814 4020
4021err_out_status:
4022 status = ata_chk_status(ap);
3815err_out: 4023err_out:
3816 ata_poll_qc_complete(qc, ATA_ERR); 4024 ata_poll_qc_complete(qc, __ac_err_mask(status));
3817} 4025}
3818 4026
3819 4027
@@ -3827,6 +4035,7 @@ err_out:
3827 * May be used as the port_start() entry in ata_port_operations. 4035 * May be used as the port_start() entry in ata_port_operations.
3828 * 4036 *
3829 * LOCKING: 4037 * LOCKING:
4038 * Inherited from caller.
3830 */ 4039 */
3831 4040
3832int ata_port_start (struct ata_port *ap) 4041int ata_port_start (struct ata_port *ap)
@@ -3852,6 +4061,7 @@ int ata_port_start (struct ata_port *ap)
3852 * May be used as the port_stop() entry in ata_port_operations. 4061 * May be used as the port_stop() entry in ata_port_operations.
3853 * 4062 *
3854 * LOCKING: 4063 * LOCKING:
4064 * Inherited from caller.
3855 */ 4065 */
3856 4066
3857void ata_port_stop (struct ata_port *ap) 4067void ata_port_stop (struct ata_port *ap)
@@ -3874,6 +4084,7 @@ void ata_host_stop (struct ata_host_set *host_set)
3874 * @do_unregister: 1 if we fully unregister, 0 to just stop the port 4084 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3875 * 4085 *
3876 * LOCKING: 4086 * LOCKING:
4087 * Inherited from caller.
3877 */ 4088 */
3878 4089
3879static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) 4090static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
@@ -3901,12 +4112,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3901 * 4112 *
3902 * LOCKING: 4113 * LOCKING:
3903 * Inherited from caller. 4114 * Inherited from caller.
3904 *
3905 */ 4115 */
3906 4116
3907static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 4117static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3908 struct ata_host_set *host_set, 4118 struct ata_host_set *host_set,
3909 struct ata_probe_ent *ent, unsigned int port_no) 4119 const struct ata_probe_ent *ent, unsigned int port_no)
3910{ 4120{
3911 unsigned int i; 4121 unsigned int i;
3912 4122
@@ -3962,10 +4172,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3962 * 4172 *
3963 * RETURNS: 4173 * RETURNS:
3964 * New ata_port on success, for NULL on error. 4174 * New ata_port on success, for NULL on error.
3965 *
3966 */ 4175 */
3967 4176
3968static struct ata_port * ata_host_add(struct ata_probe_ent *ent, 4177static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
3969 struct ata_host_set *host_set, 4178 struct ata_host_set *host_set,
3970 unsigned int port_no) 4179 unsigned int port_no)
3971{ 4180{
@@ -4010,10 +4219,9 @@ err_out:
4010 * 4219 *
4011 * RETURNS: 4220 * RETURNS:
4012 * Number of ports registered. Zero on error (no ports registered). 4221 * Number of ports registered. Zero on error (no ports registered).
4013 *
4014 */ 4222 */
4015 4223
4016int ata_device_add(struct ata_probe_ent *ent) 4224int ata_device_add(const struct ata_probe_ent *ent)
4017{ 4225{
4018 unsigned int count = 0, i; 4226 unsigned int count = 0, i;
4019 struct device *dev = ent->dev; 4227 struct device *dev = ent->dev;
@@ -4021,11 +4229,10 @@ int ata_device_add(struct ata_probe_ent *ent)
4021 4229
4022 DPRINTK("ENTER\n"); 4230 DPRINTK("ENTER\n");
4023 /* alloc a container for our list of ATA ports (buses) */ 4231 /* alloc a container for our list of ATA ports (buses) */
4024 host_set = kmalloc(sizeof(struct ata_host_set) + 4232 host_set = kzalloc(sizeof(struct ata_host_set) +
4025 (ent->n_ports * sizeof(void *)), GFP_KERNEL); 4233 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4026 if (!host_set) 4234 if (!host_set)
4027 return 0; 4235 return 0;
4028 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
4029 spin_lock_init(&host_set->lock); 4236 spin_lock_init(&host_set->lock);
4030 4237
4031 host_set->dev = dev; 4238 host_set->dev = dev;
@@ -4065,10 +4272,8 @@ int ata_device_add(struct ata_probe_ent *ent)
4065 count++; 4272 count++;
4066 } 4273 }
4067 4274
4068 if (!count) { 4275 if (!count)
4069 kfree(host_set); 4276 goto err_free_ret;
4070 return 0;
4071 }
4072 4277
4073 /* obtain irq, that is shared between channels */ 4278 /* obtain irq, that is shared between channels */
4074 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 4279 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
@@ -4113,7 +4318,7 @@ int ata_device_add(struct ata_probe_ent *ent)
4113 for (i = 0; i < count; i++) { 4318 for (i = 0; i < count; i++) {
4114 struct ata_port *ap = host_set->ports[i]; 4319 struct ata_port *ap = host_set->ports[i];
4115 4320
4116 scsi_scan_host(ap->host); 4321 ata_scsi_scan_host(ap);
4117 } 4322 }
4118 4323
4119 dev_set_drvdata(dev, host_set); 4324 dev_set_drvdata(dev, host_set);
@@ -4126,6 +4331,7 @@ err_out:
4126 ata_host_remove(host_set->ports[i], 1); 4331 ata_host_remove(host_set->ports[i], 1);
4127 scsi_host_put(host_set->ports[i]->host); 4332 scsi_host_put(host_set->ports[i]->host);
4128 } 4333 }
4334err_free_ret:
4129 kfree(host_set); 4335 kfree(host_set);
4130 VPRINTK("EXIT, returning 0\n"); 4336 VPRINTK("EXIT, returning 0\n");
4131 return 0; 4337 return 0;
@@ -4142,7 +4348,6 @@ err_out:
4142 * Inherited from calling layer (may sleep). 4348 * Inherited from calling layer (may sleep).
4143 */ 4349 */
4144 4350
4145
4146void ata_host_set_remove(struct ata_host_set *host_set) 4351void ata_host_set_remove(struct ata_host_set *host_set)
4147{ 4352{
4148 struct ata_port *ap; 4353 struct ata_port *ap;
@@ -4232,19 +4437,17 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4232} 4437}
4233 4438
4234static struct ata_probe_ent * 4439static struct ata_probe_ent *
4235ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) 4440ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4236{ 4441{
4237 struct ata_probe_ent *probe_ent; 4442 struct ata_probe_ent *probe_ent;
4238 4443
4239 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 4444 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4240 if (!probe_ent) { 4445 if (!probe_ent) {
4241 printk(KERN_ERR DRV_NAME "(%s): out of memory\n", 4446 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4242 kobject_name(&(dev->kobj))); 4447 kobject_name(&(dev->kobj)));
4243 return NULL; 4448 return NULL;
4244 } 4449 }
4245 4450
4246 memset(probe_ent, 0, sizeof(*probe_ent));
4247
4248 INIT_LIST_HEAD(&probe_ent->node); 4451 INIT_LIST_HEAD(&probe_ent->node);
4249 probe_ent->dev = dev; 4452 probe_ent->dev = dev;
4250 4453
@@ -4273,85 +4476,86 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4273 * ata_pci_init_native_mode - Initialize native-mode driver 4476 * ata_pci_init_native_mode - Initialize native-mode driver
4274 * @pdev: pci device to be initialized 4477 * @pdev: pci device to be initialized
4275 * @port: array[2] of pointers to port info structures. 4478 * @port: array[2] of pointers to port info structures.
4479 * @ports: bitmap of ports present
4276 * 4480 *
4277 * Utility function which allocates and initializes an 4481 * Utility function which allocates and initializes an
4278 * ata_probe_ent structure for a standard dual-port 4482 * ata_probe_ent structure for a standard dual-port
4279 * PIO-based IDE controller. The returned ata_probe_ent 4483 * PIO-based IDE controller. The returned ata_probe_ent
4280 * structure can be passed to ata_device_add(). The returned 4484 * structure can be passed to ata_device_add(). The returned
4281 * ata_probe_ent structure should then be freed with kfree(). 4485 * ata_probe_ent structure should then be freed with kfree().
4486 *
4487 * The caller need only pass the address of the primary port, the
4488 * secondary will be deduced automatically. If the device has non
4489 * standard secondary port mappings this function can be called twice,
4490 * once for each interface.
4282 */ 4491 */
4283 4492
4284struct ata_probe_ent * 4493struct ata_probe_ent *
4285ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4494ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4286{ 4495{
4287 struct ata_probe_ent *probe_ent = 4496 struct ata_probe_ent *probe_ent =
4288 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4497 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4498 int p = 0;
4499
4289 if (!probe_ent) 4500 if (!probe_ent)
4290 return NULL; 4501 return NULL;
4291 4502
4292 probe_ent->n_ports = 2;
4293 probe_ent->irq = pdev->irq; 4503 probe_ent->irq = pdev->irq;
4294 probe_ent->irq_flags = SA_SHIRQ; 4504 probe_ent->irq_flags = SA_SHIRQ;
4295 4505
4296 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); 4506 if (ports & ATA_PORT_PRIMARY) {
4297 probe_ent->port[0].altstatus_addr = 4507 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4298 probe_ent->port[0].ctl_addr = 4508 probe_ent->port[p].altstatus_addr =
4299 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4509 probe_ent->port[p].ctl_addr =
4300 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4510 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4301 4511 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4302 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); 4512 ata_std_ports(&probe_ent->port[p]);
4303 probe_ent->port[1].altstatus_addr = 4513 p++;
4304 probe_ent->port[1].ctl_addr = 4514 }
4305 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4306 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4307 4515
4308 ata_std_ports(&probe_ent->port[0]); 4516 if (ports & ATA_PORT_SECONDARY) {
4309 ata_std_ports(&probe_ent->port[1]); 4517 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4518 probe_ent->port[p].altstatus_addr =
4519 probe_ent->port[p].ctl_addr =
4520 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4521 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4522 ata_std_ports(&probe_ent->port[p]);
4523 p++;
4524 }
4310 4525
4526 probe_ent->n_ports = p;
4311 return probe_ent; 4527 return probe_ent;
4312} 4528}
4313 4529
4314static struct ata_probe_ent * 4530static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4315ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4316 struct ata_probe_ent **ppe2)
4317{ 4531{
4318 struct ata_probe_ent *probe_ent, *probe_ent2; 4532 struct ata_probe_ent *probe_ent;
4319 4533
4320 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4534 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4321 if (!probe_ent) 4535 if (!probe_ent)
4322 return NULL; 4536 return NULL;
4323 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4324 if (!probe_ent2) {
4325 kfree(probe_ent);
4326 return NULL;
4327 }
4328
4329 probe_ent->n_ports = 1;
4330 probe_ent->irq = 14;
4331 4537
4332 probe_ent->hard_port_no = 0;
4333 probe_ent->legacy_mode = 1; 4538 probe_ent->legacy_mode = 1;
4334 4539 probe_ent->n_ports = 1;
4335 probe_ent2->n_ports = 1; 4540 probe_ent->hard_port_no = port_num;
4336 probe_ent2->irq = 15; 4541
4337 4542 switch(port_num)
4338 probe_ent2->hard_port_no = 1; 4543 {
4339 probe_ent2->legacy_mode = 1; 4544 case 0:
4340 4545 probe_ent->irq = 14;
4341 probe_ent->port[0].cmd_addr = 0x1f0; 4546 probe_ent->port[0].cmd_addr = 0x1f0;
4342 probe_ent->port[0].altstatus_addr = 4547 probe_ent->port[0].altstatus_addr =
4343 probe_ent->port[0].ctl_addr = 0x3f6; 4548 probe_ent->port[0].ctl_addr = 0x3f6;
4344 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4549 break;
4345 4550 case 1:
4346 probe_ent2->port[0].cmd_addr = 0x170; 4551 probe_ent->irq = 15;
4347 probe_ent2->port[0].altstatus_addr = 4552 probe_ent->port[0].cmd_addr = 0x170;
4348 probe_ent2->port[0].ctl_addr = 0x376; 4553 probe_ent->port[0].altstatus_addr =
4349 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; 4554 probe_ent->port[0].ctl_addr = 0x376;
4350 4555 break;
4556 }
4557 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4351 ata_std_ports(&probe_ent->port[0]); 4558 ata_std_ports(&probe_ent->port[0]);
4352 ata_std_ports(&probe_ent2->port[0]);
4353
4354 *ppe2 = probe_ent2;
4355 return probe_ent; 4559 return probe_ent;
4356} 4560}
4357 4561
@@ -4374,13 +4578,12 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4374 * 4578 *
4375 * RETURNS: 4579 * RETURNS:
4376 * Zero on success, negative on errno-based value on error. 4580 * Zero on success, negative on errno-based value on error.
4377 *
4378 */ 4581 */
4379 4582
4380int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 4583int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4381 unsigned int n_ports) 4584 unsigned int n_ports)
4382{ 4585{
4383 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; 4586 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4384 struct ata_port_info *port[2]; 4587 struct ata_port_info *port[2];
4385 u8 tmp8, mask; 4588 u8 tmp8, mask;
4386 unsigned int legacy_mode = 0; 4589 unsigned int legacy_mode = 0;
@@ -4397,7 +4600,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4397 4600
4398 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 4601 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4399 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 4602 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4400 /* TODO: support transitioning to native mode? */ 4603 /* TODO: What if one channel is in native mode ... */
4401 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 4604 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4402 mask = (1 << 2) | (1 << 0); 4605 mask = (1 << 2) | (1 << 0);
4403 if ((tmp8 & mask) != mask) 4606 if ((tmp8 & mask) != mask)
@@ -4405,11 +4608,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4405 } 4608 }
4406 4609
4407 /* FIXME... */ 4610 /* FIXME... */
4408 if ((!legacy_mode) && (n_ports > 1)) { 4611 if ((!legacy_mode) && (n_ports > 2)) {
4409 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); 4612 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4410 return -EINVAL; 4613 n_ports = 2;
4614 /* For now */
4411 } 4615 }
4412 4616
4617 /* FIXME: Really for ATA it isn't safe because the device may be
4618 multi-purpose and we want to leave it alone if it was already
4619 enabled. Secondly for shared use as Arjan says we want refcounting
4620
4621 Checking dev->is_enabled is insufficient as this is not set at
4622 boot for the primary video which is BIOS enabled
4623 */
4624
4413 rc = pci_enable_device(pdev); 4625 rc = pci_enable_device(pdev);
4414 if (rc) 4626 if (rc)
4415 return rc; 4627 return rc;
@@ -4420,6 +4632,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4420 goto err_out; 4632 goto err_out;
4421 } 4633 }
4422 4634
4635 /* FIXME: Should use platform specific mappers for legacy port ranges */
4423 if (legacy_mode) { 4636 if (legacy_mode) {
4424 if (!request_region(0x1f0, 8, "libata")) { 4637 if (!request_region(0x1f0, 8, "libata")) {
4425 struct resource *conflict, res; 4638 struct resource *conflict, res;
@@ -4464,10 +4677,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4464 goto err_out_regions; 4677 goto err_out_regions;
4465 4678
4466 if (legacy_mode) { 4679 if (legacy_mode) {
4467 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); 4680 if (legacy_mode & (1 << 0))
4468 } else 4681 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4469 probe_ent = ata_pci_init_native_mode(pdev, port); 4682 if (legacy_mode & (1 << 1))
4470 if (!probe_ent) { 4683 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4684 } else {
4685 if (n_ports == 2)
4686 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4687 else
4688 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4689 }
4690 if (!probe_ent && !probe_ent2) {
4471 rc = -ENOMEM; 4691 rc = -ENOMEM;
4472 goto err_out_regions; 4692 goto err_out_regions;
4473 } 4693 }
@@ -4505,7 +4725,7 @@ err_out:
4505 * @pdev: PCI device that was removed 4725 * @pdev: PCI device that was removed
4506 * 4726 *
4507 * PCI layer indicates to libata via this hook that 4727 * PCI layer indicates to libata via this hook that
4508 * hot-unplug or module unload event has occured. 4728 * hot-unplug or module unload event has occurred.
4509 * Handle this by unregistering all objects associated 4729 * Handle this by unregistering all objects associated
4510 * with this PCI device. Free those objects. Then finally 4730 * with this PCI device. Free those objects. Then finally
4511 * release PCI resources and disable device. 4731 * release PCI resources and disable device.
@@ -4526,7 +4746,7 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4526} 4746}
4527 4747
4528/* move to PCI subsystem */ 4748/* move to PCI subsystem */
4529int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) 4749int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4530{ 4750{
4531 unsigned long tmp = 0; 4751 unsigned long tmp = 0;
4532 4752
@@ -4579,6 +4799,27 @@ static void __exit ata_exit(void)
4579module_init(ata_init); 4799module_init(ata_init);
4580module_exit(ata_exit); 4800module_exit(ata_exit);
4581 4801
4802static unsigned long ratelimit_time;
4803static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4804
4805int ata_ratelimit(void)
4806{
4807 int rc;
4808 unsigned long flags;
4809
4810 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4811
4812 if (time_after(jiffies, ratelimit_time)) {
4813 rc = 1;
4814 ratelimit_time = jiffies + (HZ/5);
4815 } else
4816 rc = 0;
4817
4818 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4819
4820 return rc;
4821}
4822
4582/* 4823/*
4583 * libata is essentially a library of internal helper functions for 4824 * libata is essentially a library of internal helper functions for
4584 * low-level ATA host controller drivers. As such, the API/ABI is 4825 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4603,7 +4844,6 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4603EXPORT_SYMBOL_GPL(ata_tf_from_fis); 4844EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4604EXPORT_SYMBOL_GPL(ata_check_status); 4845EXPORT_SYMBOL_GPL(ata_check_status);
4605EXPORT_SYMBOL_GPL(ata_altstatus); 4846EXPORT_SYMBOL_GPL(ata_altstatus);
4606EXPORT_SYMBOL_GPL(ata_chk_err);
4607EXPORT_SYMBOL_GPL(ata_exec_command); 4847EXPORT_SYMBOL_GPL(ata_exec_command);
4608EXPORT_SYMBOL_GPL(ata_port_start); 4848EXPORT_SYMBOL_GPL(ata_port_start);
4609EXPORT_SYMBOL_GPL(ata_port_stop); 4849EXPORT_SYMBOL_GPL(ata_port_stop);
@@ -4620,6 +4860,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
4620EXPORT_SYMBOL_GPL(__sata_phy_reset); 4860EXPORT_SYMBOL_GPL(__sata_phy_reset);
4621EXPORT_SYMBOL_GPL(ata_bus_reset); 4861EXPORT_SYMBOL_GPL(ata_bus_reset);
4622EXPORT_SYMBOL_GPL(ata_port_disable); 4862EXPORT_SYMBOL_GPL(ata_port_disable);
4863EXPORT_SYMBOL_GPL(ata_ratelimit);
4623EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4864EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4624EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4865EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4625EXPORT_SYMBOL_GPL(ata_scsi_error); 4866EXPORT_SYMBOL_GPL(ata_scsi_error);
@@ -4631,6 +4872,9 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
4631EXPORT_SYMBOL_GPL(ata_dev_config); 4872EXPORT_SYMBOL_GPL(ata_dev_config);
4632EXPORT_SYMBOL_GPL(ata_scsi_simulate); 4873EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4633 4874
4875EXPORT_SYMBOL_GPL(ata_timing_compute);
4876EXPORT_SYMBOL_GPL(ata_timing_merge);
4877
4634#ifdef CONFIG_PCI 4878#ifdef CONFIG_PCI
4635EXPORT_SYMBOL_GPL(pci_test_config_bits); 4879EXPORT_SYMBOL_GPL(pci_test_config_bits);
4636EXPORT_SYMBOL_GPL(ata_pci_host_stop); 4880EXPORT_SYMBOL_GPL(ata_pci_host_stop);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 104fd9a63e73..1e3792f86fcf 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -40,14 +40,64 @@
40#include "scsi.h" 40#include "scsi.h"
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <linux/libata.h> 42#include <linux/libata.h>
43#include <linux/hdreg.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44 45
45#include "libata.h" 46#include "libata.h"
46 47
47typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, u8 *scsicmd); 48#define SECTOR_SIZE 512
48static struct ata_device *
49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
50 49
50typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
51static struct ata_device *
52ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
53
54#define RW_RECOVERY_MPAGE 0x1
55#define RW_RECOVERY_MPAGE_LEN 12
56#define CACHE_MPAGE 0x8
57#define CACHE_MPAGE_LEN 20
58#define CONTROL_MPAGE 0xa
59#define CONTROL_MPAGE_LEN 12
60#define ALL_MPAGES 0x3f
61#define ALL_SUB_MPAGES 0xff
62
63
64static const u8 def_rw_recovery_mpage[] = {
65 RW_RECOVERY_MPAGE,
66 RW_RECOVERY_MPAGE_LEN - 2,
67 (1 << 7) | /* AWRE, sat-r06 say it shall be 0 */
68 (1 << 6), /* ARRE (auto read reallocation) */
69 0, /* read retry count */
70 0, 0, 0, 0,
71 0, /* write retry count */
72 0, 0, 0
73};
74
75static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
76 CACHE_MPAGE,
77 CACHE_MPAGE_LEN - 2,
78 0, /* contains WCE, needs to be 0 for logic */
79 0, 0, 0, 0, 0, 0, 0, 0, 0,
80 0, /* contains DRA, needs to be 0 for logic */
81 0, 0, 0, 0, 0, 0, 0
82};
83
84static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
85 CONTROL_MPAGE,
86 CONTROL_MPAGE_LEN - 2,
87 2, /* DSENSE=0, GLTSD=1 */
88 0, /* [QAM+QERR may be 1, see 05-359r1] */
89 0, 0, 0, 0, 0xff, 0xff,
90 0, 30 /* extended self test time, see 05-359r1 */
91};
92
93
94static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
95 void (*done)(struct scsi_cmnd *))
96{
97 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
98 /* "Invalid field in cbd" */
99 done(cmd);
100}
51 101
52/** 102/**
53 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 103 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
@@ -78,6 +128,150 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
78 return 0; 128 return 0;
79} 129}
80 130
131/**
132 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
133 * @dev: Device to whom we are issuing command
134 * @arg: User provided data for issuing command
135 *
136 * LOCKING:
137 * Defined by the SCSI layer. We don't really care.
138 *
139 * RETURNS:
140 * Zero on success, negative errno on error.
141 */
142
143int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
144{
145 int rc = 0;
146 u8 scsi_cmd[MAX_COMMAND_SIZE];
147 u8 args[4], *argbuf = NULL;
148 int argsize = 0;
149 struct scsi_request *sreq;
150
151 if (NULL == (void *)arg)
152 return -EINVAL;
153
154 if (copy_from_user(args, arg, sizeof(args)))
155 return -EFAULT;
156
157 sreq = scsi_allocate_request(scsidev, GFP_KERNEL);
158 if (!sreq)
159 return -EINTR;
160
161 memset(scsi_cmd, 0, sizeof(scsi_cmd));
162
163 if (args[3]) {
164 argsize = SECTOR_SIZE * args[3];
165 argbuf = kmalloc(argsize, GFP_KERNEL);
166 if (argbuf == NULL) {
167 rc = -ENOMEM;
168 goto error;
169 }
170
171 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
172 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
173 block count in sector count field */
174 sreq->sr_data_direction = DMA_FROM_DEVICE;
175 } else {
176 scsi_cmd[1] = (3 << 1); /* Non-data */
177 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
178 sreq->sr_data_direction = DMA_NONE;
179 }
180
181 scsi_cmd[0] = ATA_16;
182
183 scsi_cmd[4] = args[2];
184 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
185 scsi_cmd[6] = args[3];
186 scsi_cmd[8] = args[1];
187 scsi_cmd[10] = 0x4f;
188 scsi_cmd[12] = 0xc2;
189 } else {
190 scsi_cmd[6] = args[1];
191 }
192 scsi_cmd[14] = args[0];
193
194 /* Good values for timeout and retries? Values below
195 from scsi_ioctl_send_command() for default case... */
196 scsi_wait_req(sreq, scsi_cmd, argbuf, argsize, (10*HZ), 5);
197
198 if (sreq->sr_result) {
199 rc = -EIO;
200 goto error;
201 }
202
203 /* Need code to retrieve data from check condition? */
204
205 if ((argbuf)
206 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize))
207 rc = -EFAULT;
208error:
209 scsi_release_request(sreq);
210
211 if (argbuf)
212 kfree(argbuf);
213
214 return rc;
215}
216
217/**
218 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
219 * @dev: Device to whom we are issuing command
220 * @arg: User provided data for issuing command
221 *
222 * LOCKING:
223 * Defined by the SCSI layer. We don't really care.
224 *
225 * RETURNS:
226 * Zero on success, negative errno on error.
227 */
228int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
229{
230 int rc = 0;
231 u8 scsi_cmd[MAX_COMMAND_SIZE];
232 u8 args[7];
233 struct scsi_request *sreq;
234
235 if (NULL == (void *)arg)
236 return -EINVAL;
237
238 if (copy_from_user(args, arg, sizeof(args)))
239 return -EFAULT;
240
241 memset(scsi_cmd, 0, sizeof(scsi_cmd));
242 scsi_cmd[0] = ATA_16;
243 scsi_cmd[1] = (3 << 1); /* Non-data */
244 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
245 scsi_cmd[4] = args[1];
246 scsi_cmd[6] = args[2];
247 scsi_cmd[8] = args[3];
248 scsi_cmd[10] = args[4];
249 scsi_cmd[12] = args[5];
250 scsi_cmd[14] = args[0];
251
252 sreq = scsi_allocate_request(scsidev, GFP_KERNEL);
253 if (!sreq) {
254 rc = -EINTR;
255 goto error;
256 }
257
258 sreq->sr_data_direction = DMA_NONE;
259 /* Good values for timeout and retries? Values below
260 from scsi_ioctl_send_command() for default case... */
261 scsi_wait_req(sreq, scsi_cmd, NULL, 0, (10*HZ), 5);
262
263 if (sreq->sr_result) {
264 rc = -EIO;
265 goto error;
266 }
267
268 /* Need code to retrieve data from check condition? */
269
270error:
271 scsi_release_request(sreq);
272 return rc;
273}
274
81int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) 275int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
82{ 276{
83 struct ata_port *ap; 277 struct ata_port *ap;
@@ -107,6 +301,16 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
107 return -EINVAL; 301 return -EINVAL;
108 return 0; 302 return 0;
109 303
304 case HDIO_DRIVE_CMD:
305 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
306 return -EACCES;
307 return ata_cmd_ioctl(scsidev, arg);
308
309 case HDIO_DRIVE_TASK:
310 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
311 return -EACCES;
312 return ata_task_ioctl(scsidev, arg);
313
110 default: 314 default:
111 rc = -ENOTTY; 315 rc = -ENOTTY;
112 break; 316 break;
@@ -165,24 +369,70 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
165} 369}
166 370
167/** 371/**
372 * ata_dump_status - user friendly display of error info
373 * @id: id of the port in question
374 * @tf: ptr to filled out taskfile
375 *
376 * Decode and dump the ATA error/status registers for the user so
377 * that they have some idea what really happened at the non
378 * make-believe layer.
379 *
380 * LOCKING:
381 * inherited from caller
382 */
383void ata_dump_status(unsigned id, struct ata_taskfile *tf)
384{
385 u8 stat = tf->command, err = tf->feature;
386
387 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
388 if (stat & ATA_BUSY) {
389 printk("Busy }\n"); /* Data is not valid in this case */
390 } else {
391 if (stat & 0x40) printk("DriveReady ");
392 if (stat & 0x20) printk("DeviceFault ");
393 if (stat & 0x10) printk("SeekComplete ");
394 if (stat & 0x08) printk("DataRequest ");
395 if (stat & 0x04) printk("CorrectedError ");
396 if (stat & 0x02) printk("Index ");
397 if (stat & 0x01) printk("Error ");
398 printk("}\n");
399
400 if (err) {
401 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
402 if (err & 0x04) printk("DriveStatusError ");
403 if (err & 0x80) {
404 if (err & 0x04) printk("BadCRC ");
405 else printk("Sector ");
406 }
407 if (err & 0x40) printk("UncorrectableError ");
408 if (err & 0x10) printk("SectorIdNotFound ");
409 if (err & 0x02) printk("TrackZeroNotFound ");
410 if (err & 0x01) printk("AddrMarkNotFound ");
411 printk("}\n");
412 }
413 }
414}
415
416/**
168 * ata_to_sense_error - convert ATA error to SCSI error 417 * ata_to_sense_error - convert ATA error to SCSI error
169 * @qc: Command that we are erroring out
170 * @drv_stat: value contained in ATA status register 418 * @drv_stat: value contained in ATA status register
419 * @drv_err: value contained in ATA error register
420 * @sk: the sense key we'll fill out
421 * @asc: the additional sense code we'll fill out
422 * @ascq: the additional sense code qualifier we'll fill out
171 * 423 *
172 * Converts an ATA error into a SCSI error. While we are at it 424 * Converts an ATA error into a SCSI error. Fill out pointers to
173 * we decode and dump the ATA error for the user so that they 425 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
174 * have some idea what really happened at the non make-believe 426 * format sense blocks.
175 * layer.
176 * 427 *
177 * LOCKING: 428 * LOCKING:
178 * spin_lock_irqsave(host_set lock) 429 * spin_lock_irqsave(host_set lock)
179 */ 430 */
180 431void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
181void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) 432 u8 *ascq)
182{ 433{
183 struct scsi_cmnd *cmd = qc->scsicmd; 434 int i;
184 u8 err = 0; 435
185 unsigned char *sb = cmd->sense_buffer;
186 /* Based on the 3ware driver translation table */ 436 /* Based on the 3ware driver translation table */
187 static unsigned char sense_table[][4] = { 437 static unsigned char sense_table[][4] = {
188 /* BBD|ECC|ID|MAR */ 438 /* BBD|ECC|ID|MAR */
@@ -223,105 +473,192 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
223 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered 473 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
224 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 474 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
225 }; 475 };
226 int i = 0;
227
228 cmd->result = SAM_STAT_CHECK_CONDITION;
229 476
230 /* 477 /*
231 * Is this an error we can process/parse 478 * Is this an error we can process/parse
232 */ 479 */
480 if (drv_stat & ATA_BUSY) {
481 drv_err = 0; /* Ignore the err bits, they're invalid */
482 }
483
484 if (drv_err) {
485 /* Look for drv_err */
486 for (i = 0; sense_table[i][0] != 0xFF; i++) {
487 /* Look for best matches first */
488 if ((sense_table[i][0] & drv_err) ==
489 sense_table[i][0]) {
490 *sk = sense_table[i][1];
491 *asc = sense_table[i][2];
492 *ascq = sense_table[i][3];
493 goto translate_done;
494 }
495 }
496 /* No immediate match */
497 printk(KERN_WARNING "ata%u: no sense translation for "
498 "error 0x%02x\n", id, drv_err);
499 }
500
501 /* Fall back to interpreting status bits */
502 for (i = 0; stat_table[i][0] != 0xFF; i++) {
503 if (stat_table[i][0] & drv_stat) {
504 *sk = stat_table[i][1];
505 *asc = stat_table[i][2];
506 *ascq = stat_table[i][3];
507 goto translate_done;
508 }
509 }
510 /* No error? Undecoded? */
511 printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n",
512 id, drv_stat);
513
514 /* For our last chance pick, use medium read error because
515 * it's much more common than an ATA drive telling you a write
516 * has failed.
517 */
518 *sk = MEDIUM_ERROR;
519 *asc = 0x11; /* "unrecovered read error" */
520 *ascq = 0x04; /* "auto-reallocation failed" */
521
522 translate_done:
523 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to "
524 "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err,
525 *sk, *asc, *ascq);
526 return;
527}
528
529/*
530 * ata_gen_ata_desc_sense - Generate check condition sense block.
531 * @qc: Command that completed.
532 *
533 * This function is specific to the ATA descriptor format sense
534 * block specified for the ATA pass through commands. Regardless
535 * of whether the command errored or not, return a sense
536 * block. Copy all controller registers into the sense
537 * block. Clear sense key, ASC & ASCQ if there is no error.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host_set lock)
541 */
542void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
543{
544 struct scsi_cmnd *cmd = qc->scsicmd;
545 struct ata_taskfile *tf = &qc->tf;
546 unsigned char *sb = cmd->sense_buffer;
547 unsigned char *desc = sb + 8;
233 548
234 if(drv_stat & ATA_ERR) 549 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
235 /* Read the err bits */
236 err = ata_chk_err(qc->ap);
237 550
238 /* Display the ATA level error info */ 551 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
239 552
240 printk(KERN_WARNING "ata%u: status=0x%02x { ", qc->ap->id, drv_stat); 553 /*
241 if(drv_stat & 0x80) 554 * Read the controller registers.
242 { 555 */
243 printk("Busy "); 556 assert(NULL != qc->ap->ops->tf_read);
244 err = 0; /* Data is not valid in this case */ 557 qc->ap->ops->tf_read(qc->ap, tf);
558
559 /*
560 * Use ata_to_sense_error() to map status register bits
561 * onto sense key, asc & ascq.
562 */
563 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
564 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
565 &sb[1], &sb[2], &sb[3]);
566 sb[1] &= 0x0f;
245 } 567 }
246 else { 568
247 if(drv_stat & 0x40) printk("DriveReady "); 569 /*
248 if(drv_stat & 0x20) printk("DeviceFault "); 570 * Sense data is current and format is descriptor.
249 if(drv_stat & 0x10) printk("SeekComplete "); 571 */
250 if(drv_stat & 0x08) printk("DataRequest "); 572 sb[0] = 0x72;
251 if(drv_stat & 0x04) printk("CorrectedError "); 573
252 if(drv_stat & 0x02) printk("Index "); 574 desc[0] = 0x09;
253 if(drv_stat & 0x01) printk("Error "); 575
576 /*
577 * Set length of additional sense data.
578 * Since we only populate descriptor 0, the total
579 * length is the same (fixed) length as descriptor 0.
580 */
581 desc[1] = sb[7] = 14;
582
583 /*
584 * Copy registers into sense buffer.
585 */
586 desc[2] = 0x00;
587 desc[3] = tf->feature; /* == error reg */
588 desc[5] = tf->nsect;
589 desc[7] = tf->lbal;
590 desc[9] = tf->lbam;
591 desc[11] = tf->lbah;
592 desc[12] = tf->device;
593 desc[13] = tf->command; /* == status reg */
594
595 /*
596 * Fill in Extend bit, and the high order bytes
597 * if applicable.
598 */
599 if (tf->flags & ATA_TFLAG_LBA48) {
600 desc[2] |= 0x01;
601 desc[4] = tf->hob_nsect;
602 desc[6] = tf->hob_lbal;
603 desc[8] = tf->hob_lbam;
604 desc[10] = tf->hob_lbah;
254 } 605 }
255 printk("}\n"); 606}
256
257 if(err)
258 {
259 printk(KERN_WARNING "ata%u: error=0x%02x { ", qc->ap->id, err);
260 if(err & 0x04) printk("DriveStatusError ");
261 if(err & 0x80)
262 {
263 if(err & 0x04)
264 printk("BadCRC ");
265 else
266 printk("Sector ");
267 }
268 if(err & 0x40) printk("UncorrectableError ");
269 if(err & 0x10) printk("SectorIdNotFound ");
270 if(err & 0x02) printk("TrackZeroNotFound ");
271 if(err & 0x01) printk("AddrMarkNotFound ");
272 printk("}\n");
273 607
274 /* Should we dump sector info here too ?? */ 608/**
609 * ata_gen_fixed_sense - generate a SCSI fixed sense block
610 * @qc: Command that we are erroring out
611 *
612 * Leverage ata_to_sense_error() to give us the codes. Fit our
613 * LBA in here if there's room.
614 *
615 * LOCKING:
616 * inherited from caller
617 */
618void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
619{
620 struct scsi_cmnd *cmd = qc->scsicmd;
621 struct ata_taskfile *tf = &qc->tf;
622 unsigned char *sb = cmd->sense_buffer;
623
624 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
625
626 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
627
628 /*
629 * Read the controller registers.
630 */
631 assert(NULL != qc->ap->ops->tf_read);
632 qc->ap->ops->tf_read(qc->ap, tf);
633
634 /*
635 * Use ata_to_sense_error() to map status register bits
636 * onto sense key, asc & ascq.
637 */
638 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
639 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
640 &sb[2], &sb[12], &sb[13]);
641 sb[2] &= 0x0f;
275 } 642 }
276 643
644 sb[0] = 0x70;
645 sb[7] = 0x0a;
277 646
278 /* Look for err */ 647 if (tf->flags & ATA_TFLAG_LBA48) {
279 while(sense_table[i][0] != 0xFF) 648 /* TODO: find solution for LBA48 descriptors */
280 {
281 /* Look for best matches first */
282 if((sense_table[i][0] & err) == sense_table[i][0])
283 {
284 sb[0] = 0x70;
285 sb[2] = sense_table[i][1];
286 sb[7] = 0x0a;
287 sb[12] = sense_table[i][2];
288 sb[13] = sense_table[i][3];
289 return;
290 }
291 i++;
292 } 649 }
293 /* No immediate match */
294 if(err)
295 printk(KERN_DEBUG "ata%u: no sense translation for 0x%02x\n", qc->ap->id, err);
296 650
297 i = 0; 651 else if (tf->flags & ATA_TFLAG_LBA) {
298 /* Fall back to interpreting status bits */ 652 /* A small (28b) LBA will fit in the 32b info field */
299 while(stat_table[i][0] != 0xFF) 653 sb[0] |= 0x80; /* set valid bit */
300 { 654 sb[3] = tf->device & 0x0f;
301 if(stat_table[i][0] & drv_stat) 655 sb[4] = tf->lbah;
302 { 656 sb[5] = tf->lbam;
303 sb[0] = 0x70; 657 sb[6] = tf->lbal;
304 sb[2] = stat_table[i][1];
305 sb[7] = 0x0a;
306 sb[12] = stat_table[i][2];
307 sb[13] = stat_table[i][3];
308 return;
309 }
310 i++;
311 } 658 }
312 /* No error ?? */
313 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
314 /* additional-sense-code[-qualifier] */
315 659
316 sb[0] = 0x70; 660 else {
317 sb[2] = MEDIUM_ERROR; 661 /* TODO: C/H/S */
318 sb[7] = 0x0A;
319 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
320 sb[12] = 0x11; /* "unrecovered read error" */
321 sb[13] = 0x04;
322 } else {
323 sb[12] = 0x0C; /* "write error - */
324 sb[13] = 0x02; /* auto-reallocation failed" */
325 } 662 }
326} 663}
327 664
@@ -420,7 +757,7 @@ int ata_scsi_error(struct Scsi_Host *host)
420 */ 757 */
421 758
422static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, 759static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
423 u8 *scsicmd) 760 const u8 *scsicmd)
424{ 761{
425 struct ata_taskfile *tf = &qc->tf; 762 struct ata_taskfile *tf = &qc->tf;
426 763
@@ -430,15 +767,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
430 ; /* ignore IMMED bit, violates sat-r05 */ 767 ; /* ignore IMMED bit, violates sat-r05 */
431 } 768 }
432 if (scsicmd[4] & 0x2) 769 if (scsicmd[4] & 0x2)
433 return 1; /* LOEJ bit set not supported */ 770 goto invalid_fld; /* LOEJ bit set not supported */
434 if (((scsicmd[4] >> 4) & 0xf) != 0) 771 if (((scsicmd[4] >> 4) & 0xf) != 0)
435 return 1; /* power conditions not supported */ 772 goto invalid_fld; /* power conditions not supported */
436 if (scsicmd[4] & 0x1) { 773 if (scsicmd[4] & 0x1) {
437 tf->nsect = 1; /* 1 sector, lba=0 */ 774 tf->nsect = 1; /* 1 sector, lba=0 */
438 tf->lbah = 0x0; 775
439 tf->lbam = 0x0; 776 if (qc->dev->flags & ATA_DFLAG_LBA) {
440 tf->lbal = 0x0; 777 qc->tf.flags |= ATA_TFLAG_LBA;
441 tf->device |= ATA_LBA; 778
779 tf->lbah = 0x0;
780 tf->lbam = 0x0;
781 tf->lbal = 0x0;
782 tf->device |= ATA_LBA;
783 } else {
784 /* CHS */
785 tf->lbal = 0x1; /* sect */
786 tf->lbam = 0x0; /* cyl low */
787 tf->lbah = 0x0; /* cyl high */
788 }
789
442 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 790 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
443 } else { 791 } else {
444 tf->nsect = 0; /* time period value (0 implies now) */ 792 tf->nsect = 0; /* time period value (0 implies now) */
@@ -453,6 +801,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
453 */ 801 */
454 802
455 return 0; 803 return 0;
804
805invalid_fld:
806 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
807 /* "Invalid field in cbd" */
808 return 1;
456} 809}
457 810
458 811
@@ -471,14 +824,14 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
471 * Zero on success, non-zero on error. 824 * Zero on success, non-zero on error.
472 */ 825 */
473 826
474static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 827static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
475{ 828{
476 struct ata_taskfile *tf = &qc->tf; 829 struct ata_taskfile *tf = &qc->tf;
477 830
478 tf->flags |= ATA_TFLAG_DEVICE; 831 tf->flags |= ATA_TFLAG_DEVICE;
479 tf->protocol = ATA_PROT_NODATA; 832 tf->protocol = ATA_PROT_NODATA;
480 833
481 if ((tf->flags & ATA_TFLAG_LBA48) && 834 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
482 (ata_id_has_flush_ext(qc->dev->id))) 835 (ata_id_has_flush_ext(qc->dev->id)))
483 tf->command = ATA_CMD_FLUSH_EXT; 836 tf->command = ATA_CMD_FLUSH_EXT;
484 else 837 else
@@ -488,6 +841,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
488} 841}
489 842
490/** 843/**
844 * scsi_6_lba_len - Get LBA and transfer length
845 * @scsicmd: SCSI command to translate
846 *
847 * Calculate LBA and transfer length for 6-byte commands.
848 *
849 * RETURNS:
850 * @plba: the LBA
851 * @plen: the transfer length
852 */
853
854static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
855{
856 u64 lba = 0;
857 u32 len = 0;
858
859 VPRINTK("six-byte command\n");
860
861 lba |= ((u64)scsicmd[2]) << 8;
862 lba |= ((u64)scsicmd[3]);
863
864 len |= ((u32)scsicmd[4]);
865
866 *plba = lba;
867 *plen = len;
868}
869
870/**
871 * scsi_10_lba_len - Get LBA and transfer length
872 * @scsicmd: SCSI command to translate
873 *
874 * Calculate LBA and transfer length for 10-byte commands.
875 *
876 * RETURNS:
877 * @plba: the LBA
878 * @plen: the transfer length
879 */
880
881static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
882{
883 u64 lba = 0;
884 u32 len = 0;
885
886 VPRINTK("ten-byte command\n");
887
888 lba |= ((u64)scsicmd[2]) << 24;
889 lba |= ((u64)scsicmd[3]) << 16;
890 lba |= ((u64)scsicmd[4]) << 8;
891 lba |= ((u64)scsicmd[5]);
892
893 len |= ((u32)scsicmd[7]) << 8;
894 len |= ((u32)scsicmd[8]);
895
896 *plba = lba;
897 *plen = len;
898}
899
900/**
901 * scsi_16_lba_len - Get LBA and transfer length
902 * @scsicmd: SCSI command to translate
903 *
904 * Calculate LBA and transfer length for 16-byte commands.
905 *
906 * RETURNS:
907 * @plba: the LBA
908 * @plen: the transfer length
909 */
910
911static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
912{
913 u64 lba = 0;
914 u32 len = 0;
915
916 VPRINTK("sixteen-byte command\n");
917
918 lba |= ((u64)scsicmd[2]) << 56;
919 lba |= ((u64)scsicmd[3]) << 48;
920 lba |= ((u64)scsicmd[4]) << 40;
921 lba |= ((u64)scsicmd[5]) << 32;
922 lba |= ((u64)scsicmd[6]) << 24;
923 lba |= ((u64)scsicmd[7]) << 16;
924 lba |= ((u64)scsicmd[8]) << 8;
925 lba |= ((u64)scsicmd[9]);
926
927 len |= ((u32)scsicmd[10]) << 24;
928 len |= ((u32)scsicmd[11]) << 16;
929 len |= ((u32)scsicmd[12]) << 8;
930 len |= ((u32)scsicmd[13]);
931
932 *plba = lba;
933 *plen = len;
934}
935
936/**
491 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 937 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
492 * @qc: Storage for translated ATA taskfile 938 * @qc: Storage for translated ATA taskfile
493 * @scsicmd: SCSI command to translate 939 * @scsicmd: SCSI command to translate
@@ -501,82 +947,110 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
501 * Zero on success, non-zero on error. 947 * Zero on success, non-zero on error.
502 */ 948 */
503 949
504static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 950static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
505{ 951{
506 struct ata_taskfile *tf = &qc->tf; 952 struct ata_taskfile *tf = &qc->tf;
507 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 953 struct ata_device *dev = qc->dev;
508 u64 dev_sectors = qc->dev->n_sectors; 954 u64 dev_sectors = qc->dev->n_sectors;
509 u64 sect = 0; 955 u64 block;
510 u32 n_sect = 0; 956 u32 n_block;
511 957
512 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 958 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
513 tf->protocol = ATA_PROT_NODATA; 959 tf->protocol = ATA_PROT_NODATA;
514 tf->device |= ATA_LBA;
515 960
516 if (scsicmd[0] == VERIFY) { 961 if (scsicmd[0] == VERIFY)
517 sect |= ((u64)scsicmd[2]) << 24; 962 scsi_10_lba_len(scsicmd, &block, &n_block);
518 sect |= ((u64)scsicmd[3]) << 16; 963 else if (scsicmd[0] == VERIFY_16)
519 sect |= ((u64)scsicmd[4]) << 8; 964 scsi_16_lba_len(scsicmd, &block, &n_block);
520 sect |= ((u64)scsicmd[5]); 965 else
966 goto invalid_fld;
521 967
522 n_sect |= ((u32)scsicmd[7]) << 8; 968 if (!n_block)
523 n_sect |= ((u32)scsicmd[8]); 969 goto nothing_to_do;
524 } 970 if (block >= dev_sectors)
971 goto out_of_range;
972 if ((block + n_block) > dev_sectors)
973 goto out_of_range;
525 974
526 else if (scsicmd[0] == VERIFY_16) { 975 if (dev->flags & ATA_DFLAG_LBA) {
527 sect |= ((u64)scsicmd[2]) << 56; 976 tf->flags |= ATA_TFLAG_LBA;
528 sect |= ((u64)scsicmd[3]) << 48;
529 sect |= ((u64)scsicmd[4]) << 40;
530 sect |= ((u64)scsicmd[5]) << 32;
531 sect |= ((u64)scsicmd[6]) << 24;
532 sect |= ((u64)scsicmd[7]) << 16;
533 sect |= ((u64)scsicmd[8]) << 8;
534 sect |= ((u64)scsicmd[9]);
535
536 n_sect |= ((u32)scsicmd[10]) << 24;
537 n_sect |= ((u32)scsicmd[11]) << 16;
538 n_sect |= ((u32)scsicmd[12]) << 8;
539 n_sect |= ((u32)scsicmd[13]);
540 }
541 977
542 else 978 if (dev->flags & ATA_DFLAG_LBA48) {
543 return 1; 979 if (n_block > (64 * 1024))
980 goto invalid_fld;
544 981
545 if (!n_sect) 982 /* use LBA48 */
546 return 1; 983 tf->flags |= ATA_TFLAG_LBA48;
547 if (sect >= dev_sectors) 984 tf->command = ATA_CMD_VERIFY_EXT;
548 return 1;
549 if ((sect + n_sect) > dev_sectors)
550 return 1;
551 if (lba48) {
552 if (n_sect > (64 * 1024))
553 return 1;
554 } else {
555 if (n_sect > 256)
556 return 1;
557 }
558 985
559 if (lba48) { 986 tf->hob_nsect = (n_block >> 8) & 0xff;
560 tf->command = ATA_CMD_VERIFY_EXT; 987
988 tf->hob_lbah = (block >> 40) & 0xff;
989 tf->hob_lbam = (block >> 32) & 0xff;
990 tf->hob_lbal = (block >> 24) & 0xff;
991 } else {
992 if (n_block > 256)
993 goto invalid_fld;
994
995 /* use LBA28 */
996 tf->command = ATA_CMD_VERIFY;
997
998 tf->device |= (block >> 24) & 0xf;
999 }
561 1000
562 tf->hob_nsect = (n_sect >> 8) & 0xff; 1001 tf->nsect = n_block & 0xff;
563 1002
564 tf->hob_lbah = (sect >> 40) & 0xff; 1003 tf->lbah = (block >> 16) & 0xff;
565 tf->hob_lbam = (sect >> 32) & 0xff; 1004 tf->lbam = (block >> 8) & 0xff;
566 tf->hob_lbal = (sect >> 24) & 0xff; 1005 tf->lbal = block & 0xff;
1006
1007 tf->device |= ATA_LBA;
567 } else { 1008 } else {
1009 /* CHS */
1010 u32 sect, head, cyl, track;
1011
1012 if (n_block > 256)
1013 goto invalid_fld;
1014
1015 /* Convert LBA to CHS */
1016 track = (u32)block / dev->sectors;
1017 cyl = track / dev->heads;
1018 head = track % dev->heads;
1019 sect = (u32)block % dev->sectors + 1;
1020
1021 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1022 (u32)block, track, cyl, head, sect);
1023
1024 /* Check whether the converted CHS can fit.
1025 Cylinder: 0-65535
1026 Head: 0-15
1027 Sector: 1-255*/
1028 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1029 goto out_of_range;
1030
568 tf->command = ATA_CMD_VERIFY; 1031 tf->command = ATA_CMD_VERIFY;
569 1032 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
570 tf->device |= (sect >> 24) & 0xf; 1033 tf->lbal = sect;
1034 tf->lbam = cyl;
1035 tf->lbah = cyl >> 8;
1036 tf->device |= head;
571 } 1037 }
572 1038
573 tf->nsect = n_sect & 0xff; 1039 return 0;
574 1040
575 tf->lbah = (sect >> 16) & 0xff; 1041invalid_fld:
576 tf->lbam = (sect >> 8) & 0xff; 1042 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
577 tf->lbal = sect & 0xff; 1043 /* "Invalid field in cbd" */
1044 return 1;
578 1045
579 return 0; 1046out_of_range:
1047 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1048 /* "Logical Block Address out of range" */
1049 return 1;
1050
1051nothing_to_do:
1052 qc->scsicmd->result = SAM_STAT_GOOD;
1053 return 1;
580} 1054}
581 1055
582/** 1056/**
@@ -599,117 +1073,175 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
599 * Zero on success, non-zero on error. 1073 * Zero on success, non-zero on error.
600 */ 1074 */
601 1075
602static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 1076static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
603{ 1077{
604 struct ata_taskfile *tf = &qc->tf; 1078 struct ata_taskfile *tf = &qc->tf;
605 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 1079 struct ata_device *dev = qc->dev;
1080 u64 block;
1081 u32 n_block;
606 1082
607 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1083 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
608 tf->protocol = qc->dev->xfer_protocol;
609 tf->device |= ATA_LBA;
610 1084
611 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || 1085 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
612 scsicmd[0] == READ_16) { 1086 scsicmd[0] == WRITE_16)
613 tf->command = qc->dev->read_cmd;
614 } else {
615 tf->command = qc->dev->write_cmd;
616 tf->flags |= ATA_TFLAG_WRITE; 1087 tf->flags |= ATA_TFLAG_WRITE;
617 }
618 1088
619 if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { 1089 /* Calculate the SCSI LBA and transfer length. */
620 if (lba48) { 1090 switch (scsicmd[0]) {
621 tf->hob_nsect = scsicmd[7]; 1091 case READ_10:
622 tf->hob_lbal = scsicmd[2]; 1092 case WRITE_10:
1093 scsi_10_lba_len(scsicmd, &block, &n_block);
1094 break;
1095 case READ_6:
1096 case WRITE_6:
1097 scsi_6_lba_len(scsicmd, &block, &n_block);
623 1098
624 qc->nsect = ((unsigned int)scsicmd[7] << 8) | 1099 /* for 6-byte r/w commands, transfer length 0
625 scsicmd[8]; 1100 * means 256 blocks of data, not 0 block.
626 } else { 1101 */
627 /* if we don't support LBA48 addressing, the request 1102 if (!n_block)
628 * -may- be too large. */ 1103 n_block = 256;
629 if ((scsicmd[2] & 0xf0) || scsicmd[7]) 1104 break;
630 return 1; 1105 case READ_16:
1106 case WRITE_16:
1107 scsi_16_lba_len(scsicmd, &block, &n_block);
1108 break;
1109 default:
1110 DPRINTK("no-byte command\n");
1111 goto invalid_fld;
1112 }
631 1113
632 /* stores LBA27:24 in lower 4 bits of device reg */ 1114 /* Check and compose ATA command */
633 tf->device |= scsicmd[2]; 1115 if (!n_block)
1116 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1117 * length 0 means transfer 0 block of data.
1118 * However, for ATA R/W commands, sector count 0 means
1119 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1120 */
1121 goto nothing_to_do;
634 1122
635 qc->nsect = scsicmd[8]; 1123 if (dev->flags & ATA_DFLAG_LBA) {
636 } 1124 tf->flags |= ATA_TFLAG_LBA;
637 1125
638 tf->nsect = scsicmd[8]; 1126 if (dev->flags & ATA_DFLAG_LBA48) {
639 tf->lbal = scsicmd[5]; 1127 /* The request -may- be too large for LBA48. */
640 tf->lbam = scsicmd[4]; 1128 if ((block >> 48) || (n_block > 65536))
641 tf->lbah = scsicmd[3]; 1129 goto out_of_range;
642 1130
643 VPRINTK("ten-byte command\n"); 1131 /* use LBA48 */
644 if (qc->nsect == 0) /* we don't support length==0 cmds */ 1132 tf->flags |= ATA_TFLAG_LBA48;
645 return 1;
646 return 0;
647 }
648 1133
649 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 1134 tf->hob_nsect = (n_block >> 8) & 0xff;
650 qc->nsect = tf->nsect = scsicmd[4];
651 if (!qc->nsect) {
652 qc->nsect = 256;
653 if (lba48)
654 tf->hob_nsect = 1;
655 }
656 1135
657 tf->lbal = scsicmd[3]; 1136 tf->hob_lbah = (block >> 40) & 0xff;
658 tf->lbam = scsicmd[2]; 1137 tf->hob_lbam = (block >> 32) & 0xff;
659 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 1138 tf->hob_lbal = (block >> 24) & 0xff;
1139 } else {
1140 /* use LBA28 */
660 1141
661 VPRINTK("six-byte command\n"); 1142 /* The request -may- be too large for LBA28. */
662 return 0; 1143 if ((block >> 28) || (n_block > 256))
663 } 1144 goto out_of_range;
664 1145
665 if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { 1146 tf->device |= (block >> 24) & 0xf;
666 /* rule out impossible LBAs and sector counts */ 1147 }
667 if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
668 return 1;
669 1148
670 if (lba48) { 1149 ata_rwcmd_protocol(qc);
671 tf->hob_nsect = scsicmd[12];
672 tf->hob_lbal = scsicmd[6];
673 tf->hob_lbam = scsicmd[5];
674 tf->hob_lbah = scsicmd[4];
675 1150
676 qc->nsect = ((unsigned int)scsicmd[12] << 8) | 1151 qc->nsect = n_block;
677 scsicmd[13]; 1152 tf->nsect = n_block & 0xff;
678 } else {
679 /* once again, filter out impossible non-zero values */
680 if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
681 (scsicmd[6] & 0xf0))
682 return 1;
683 1153
684 /* stores LBA27:24 in lower 4 bits of device reg */ 1154 tf->lbah = (block >> 16) & 0xff;
685 tf->device |= scsicmd[6]; 1155 tf->lbam = (block >> 8) & 0xff;
1156 tf->lbal = block & 0xff;
686 1157
687 qc->nsect = scsicmd[13]; 1158 tf->device |= ATA_LBA;
688 } 1159 } else {
1160 /* CHS */
1161 u32 sect, head, cyl, track;
1162
1163 /* The request -may- be too large for CHS addressing. */
1164 if ((block >> 28) || (n_block > 256))
1165 goto out_of_range;
1166
1167 ata_rwcmd_protocol(qc);
1168
1169 /* Convert LBA to CHS */
1170 track = (u32)block / dev->sectors;
1171 cyl = track / dev->heads;
1172 head = track % dev->heads;
1173 sect = (u32)block % dev->sectors + 1;
1174
1175 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1176 (u32)block, track, cyl, head, sect);
1177
1178 /* Check whether the converted CHS can fit.
1179 Cylinder: 0-65535
1180 Head: 0-15
1181 Sector: 1-255*/
1182 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1183 goto out_of_range;
1184
1185 qc->nsect = n_block;
1186 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1187 tf->lbal = sect;
1188 tf->lbam = cyl;
1189 tf->lbah = cyl >> 8;
1190 tf->device |= head;
1191 }
689 1192
690 tf->nsect = scsicmd[13]; 1193 return 0;
691 tf->lbal = scsicmd[9];
692 tf->lbam = scsicmd[8];
693 tf->lbah = scsicmd[7];
694 1194
695 VPRINTK("sixteen-byte command\n"); 1195invalid_fld:
696 if (qc->nsect == 0) /* we don't support length==0 cmds */ 1196 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
697 return 1; 1197 /* "Invalid field in cbd" */
698 return 0; 1198 return 1;
699 }
700 1199
701 DPRINTK("no-byte command\n"); 1200out_of_range:
1201 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1202 /* "Logical Block Address out of range" */
1203 return 1;
1204
1205nothing_to_do:
1206 qc->scsicmd->result = SAM_STAT_GOOD;
702 return 1; 1207 return 1;
703} 1208}
704 1209
705static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1210static int ata_scsi_qc_complete(struct ata_queued_cmd *qc,
1211 unsigned int err_mask)
706{ 1212{
707 struct scsi_cmnd *cmd = qc->scsicmd; 1213 struct scsi_cmnd *cmd = qc->scsicmd;
1214 u8 *cdb = cmd->cmnd;
1215 int need_sense = (err_mask != 0);
1216
1217 /* For ATA pass thru (SAT) commands, generate a sense block if
1218 * user mandated it or if there's an error. Note that if we
1219 * generate because the user forced us to, a check condition
1220 * is generated and the ATA register values are returned
1221 * whether the command completed successfully or not. If there
1222 * was no error, SK, ASC and ASCQ will all be zero.
1223 */
1224 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1225 ((cdb[2] & 0x20) || need_sense)) {
1226 ata_gen_ata_desc_sense(qc);
1227 } else {
1228 if (!need_sense) {
1229 cmd->result = SAM_STAT_GOOD;
1230 } else {
1231 /* TODO: decide which descriptor format to use
1232 * for 48b LBA devices and call that here
1233 * instead of the fixed desc, which is only
1234 * good for smaller LBA (and maybe CHS?)
1235 * devices.
1236 */
1237 ata_gen_fixed_sense(qc);
1238 }
1239 }
708 1240
709 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) 1241 if (need_sense) {
710 ata_to_sense_error(qc, drv_stat); 1242 /* The ata_gen_..._sense routines fill in tf */
711 else 1243 ata_dump_status(qc->ap->id, &qc->tf);
712 cmd->result = SAM_STAT_GOOD; 1244 }
713 1245
714 qc->scsidone(cmd); 1246 qc->scsidone(cmd);
715 1247
@@ -731,6 +1263,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
731 * This function sets up an ata_queued_cmd structure for the 1263 * This function sets up an ata_queued_cmd structure for the
732 * SCSI command, and sends that ata_queued_cmd to the hardware. 1264 * SCSI command, and sends that ata_queued_cmd to the hardware.
733 * 1265 *
1266 * The xlat_func argument (actor) returns 0 if ready to execute
1267 * ATA command, else 1 to finish translation. If 1 is returned
1268 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1269 * to be set reflecting an error condition or clean (early)
1270 * termination.
1271 *
734 * LOCKING: 1272 * LOCKING:
735 * spin_lock_irqsave(host_set lock) 1273 * spin_lock_irqsave(host_set lock)
736 */ 1274 */
@@ -747,7 +1285,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
747 1285
748 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1286 qc = ata_scsi_qc_new(ap, dev, cmd, done);
749 if (!qc) 1287 if (!qc)
750 return; 1288 goto err_mem;
751 1289
752 /* data is present; dma-map it */ 1290 /* data is present; dma-map it */
753 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1291 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -755,7 +1293,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
755 if (unlikely(cmd->request_bufflen < 1)) { 1293 if (unlikely(cmd->request_bufflen < 1)) {
756 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1294 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
757 ap->id, dev->devno); 1295 ap->id, dev->devno);
758 goto err_out; 1296 goto err_did;
759 } 1297 }
760 1298
761 if (cmd->use_sg) 1299 if (cmd->use_sg)
@@ -770,19 +1308,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
770 qc->complete_fn = ata_scsi_qc_complete; 1308 qc->complete_fn = ata_scsi_qc_complete;
771 1309
772 if (xlat_func(qc, scsicmd)) 1310 if (xlat_func(qc, scsicmd))
773 goto err_out; 1311 goto early_finish;
774 1312
775 /* select device, send command to hardware */ 1313 /* select device, send command to hardware */
776 if (ata_qc_issue(qc)) 1314 if (ata_qc_issue(qc))
777 goto err_out; 1315 goto err_did;
778 1316
779 VPRINTK("EXIT\n"); 1317 VPRINTK("EXIT\n");
780 return; 1318 return;
781 1319
782err_out: 1320early_finish:
1321 ata_qc_free(qc);
1322 done(cmd);
1323 DPRINTK("EXIT - early finish (good or error)\n");
1324 return;
1325
1326err_did:
783 ata_qc_free(qc); 1327 ata_qc_free(qc);
784 ata_bad_cdb(cmd, done); 1328err_mem:
785 DPRINTK("EXIT - badcmd\n"); 1329 cmd->result = (DID_ERROR << 16);
1330 done(cmd);
1331 DPRINTK("EXIT - internal\n");
1332 return;
786} 1333}
787 1334
788/** 1335/**
@@ -849,7 +1396,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
849 * Mapping the response buffer, calling the command's handler, 1396 * Mapping the response buffer, calling the command's handler,
850 * and handling the handler's return value. This return value 1397 * and handling the handler's return value. This return value
851 * indicates whether the handler wishes the SCSI command to be 1398 * indicates whether the handler wishes the SCSI command to be
852 * completed successfully, or not. 1399 * completed successfully (0), or not (in which case cmd->result
1400 * and sense buffer are assumed to be set).
853 * 1401 *
854 * LOCKING: 1402 * LOCKING:
855 * spin_lock_irqsave(host_set lock) 1403 * spin_lock_irqsave(host_set lock)
@@ -868,12 +1416,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
868 rc = actor(args, rbuf, buflen); 1416 rc = actor(args, rbuf, buflen);
869 ata_scsi_rbuf_put(cmd, rbuf); 1417 ata_scsi_rbuf_put(cmd, rbuf);
870 1418
871 if (rc) 1419 if (rc == 0)
872 ata_bad_cdb(cmd, args->done);
873 else {
874 cmd->result = SAM_STAT_GOOD; 1420 cmd->result = SAM_STAT_GOOD;
875 args->done(cmd); 1421 args->done(cmd);
876 }
877} 1422}
878 1423
879/** 1424/**
@@ -1087,13 +1632,9 @@ static void ata_msense_push(u8 **ptr_io, const u8 *last,
1087static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io, 1632static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1088 const u8 *last) 1633 const u8 *last)
1089{ 1634{
1090 u8 page[] = { 1635 u8 page[CACHE_MPAGE_LEN];
1091 0x8, /* page code */
1092 0x12, /* page length */
1093 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 10 zeroes */
1094 0, 0, 0, 0, 0, 0, 0, 0 /* 8 zeroes */
1095 };
1096 1636
1637 memcpy(page, def_cache_mpage, sizeof(page));
1097 if (ata_id_wcache_enabled(id)) 1638 if (ata_id_wcache_enabled(id))
1098 page[2] |= (1 << 2); /* write cache enable */ 1639 page[2] |= (1 << 2); /* write cache enable */
1099 if (!ata_id_rahead_enabled(id)) 1640 if (!ata_id_rahead_enabled(id))
@@ -1117,15 +1658,9 @@ static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1117 1658
1118static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) 1659static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1119{ 1660{
1120 const u8 page[] = {0xa, 0xa, 6, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30}; 1661 ata_msense_push(ptr_io, last, def_control_mpage,
1121 1662 sizeof(def_control_mpage));
1122 /* byte 2: set the descriptor format sense data bit (bit 2) 1663 return sizeof(def_control_mpage);
1123 * since we need to support returning this format for SAT
1124 * commands and any SCSI commands against a 48b LBA device.
1125 */
1126
1127 ata_msense_push(ptr_io, last, page, sizeof(page));
1128 return sizeof(page);
1129} 1664}
1130 1665
1131/** 1666/**
@@ -1142,15 +1677,10 @@ static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1142 1677
1143static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) 1678static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1144{ 1679{
1145 const u8 page[] = {
1146 0x1, /* page code */
1147 0xa, /* page length */
1148 (1 << 7) | (1 << 6), /* note auto r/w reallocation */
1149 0, 0, 0, 0, 0, 0, 0, 0, 0 /* 9 zeroes */
1150 };
1151 1680
1152 ata_msense_push(ptr_io, last, page, sizeof(page)); 1681 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1153 return sizeof(page); 1682 sizeof(def_rw_recovery_mpage));
1683 return sizeof(def_rw_recovery_mpage);
1154} 1684}
1155 1685
1156/** 1686/**
@@ -1159,7 +1689,9 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1159 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1689 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1160 * @buflen: Response buffer length. 1690 * @buflen: Response buffer length.
1161 * 1691 *
1162 * Simulate MODE SENSE commands. 1692 * Simulate MODE SENSE commands. Assume this is invoked for direct
1693 * access devices (e.g. disks) only. There should be no block
1694 * descriptor for other device types.
1163 * 1695 *
1164 * LOCKING: 1696 * LOCKING:
1165 * spin_lock_irqsave(host_set lock) 1697 * spin_lock_irqsave(host_set lock)
@@ -1169,61 +1701,115 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1169 unsigned int buflen) 1701 unsigned int buflen)
1170{ 1702{
1171 u8 *scsicmd = args->cmd->cmnd, *p, *last; 1703 u8 *scsicmd = args->cmd->cmnd, *p, *last;
1172 unsigned int page_control, six_byte, output_len; 1704 const u8 sat_blk_desc[] = {
1705 0, 0, 0, 0, /* number of blocks: sat unspecified */
1706 0,
1707 0, 0x2, 0x0 /* block length: 512 bytes */
1708 };
1709 u8 pg, spg;
1710 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
1173 1711
1174 VPRINTK("ENTER\n"); 1712 VPRINTK("ENTER\n");
1175 1713
1176 six_byte = (scsicmd[0] == MODE_SENSE); 1714 six_byte = (scsicmd[0] == MODE_SENSE);
1177 1715 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
1178 /* we only support saved and current values (which we treat 1716 /*
1179 * in the same manner) 1717 * LLBA bit in msense(10) ignored (compliant)
1180 */ 1718 */
1719
1181 page_control = scsicmd[2] >> 6; 1720 page_control = scsicmd[2] >> 6;
1182 if ((page_control != 0) && (page_control != 3)) 1721 switch (page_control) {
1183 return 1; 1722 case 0: /* current */
1723 break; /* supported */
1724 case 3: /* saved */
1725 goto saving_not_supp;
1726 case 1: /* changeable */
1727 case 2: /* defaults */
1728 default:
1729 goto invalid_fld;
1730 }
1184 1731
1185 if (six_byte) 1732 if (six_byte) {
1186 output_len = 4; 1733 output_len = 4 + (ebd ? 8 : 0);
1187 else 1734 alloc_len = scsicmd[4];
1188 output_len = 8; 1735 } else {
1736 output_len = 8 + (ebd ? 8 : 0);
1737 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
1738 }
1739 minlen = (alloc_len < buflen) ? alloc_len : buflen;
1189 1740
1190 p = rbuf + output_len; 1741 p = rbuf + output_len;
1191 last = rbuf + buflen - 1; 1742 last = rbuf + minlen - 1;
1192 1743
1193 switch(scsicmd[2] & 0x3f) { 1744 pg = scsicmd[2] & 0x3f;
1194 case 0x01: /* r/w error recovery */ 1745 spg = scsicmd[3];
1746 /*
1747 * No mode subpages supported (yet) but asking for _all_
1748 * subpages may be valid
1749 */
1750 if (spg && (spg != ALL_SUB_MPAGES))
1751 goto invalid_fld;
1752
1753 switch(pg) {
1754 case RW_RECOVERY_MPAGE:
1195 output_len += ata_msense_rw_recovery(&p, last); 1755 output_len += ata_msense_rw_recovery(&p, last);
1196 break; 1756 break;
1197 1757
1198 case 0x08: /* caching */ 1758 case CACHE_MPAGE:
1199 output_len += ata_msense_caching(args->id, &p, last); 1759 output_len += ata_msense_caching(args->id, &p, last);
1200 break; 1760 break;
1201 1761
1202 case 0x0a: { /* control mode */ 1762 case CONTROL_MPAGE: {
1203 output_len += ata_msense_ctl_mode(&p, last); 1763 output_len += ata_msense_ctl_mode(&p, last);
1204 break; 1764 break;
1205 } 1765 }
1206 1766
1207 case 0x3f: /* all pages */ 1767 case ALL_MPAGES:
1208 output_len += ata_msense_rw_recovery(&p, last); 1768 output_len += ata_msense_rw_recovery(&p, last);
1209 output_len += ata_msense_caching(args->id, &p, last); 1769 output_len += ata_msense_caching(args->id, &p, last);
1210 output_len += ata_msense_ctl_mode(&p, last); 1770 output_len += ata_msense_ctl_mode(&p, last);
1211 break; 1771 break;
1212 1772
1213 default: /* invalid page code */ 1773 default: /* invalid page code */
1214 return 1; 1774 goto invalid_fld;
1215 } 1775 }
1216 1776
1777 if (minlen < 1)
1778 return 0;
1217 if (six_byte) { 1779 if (six_byte) {
1218 output_len--; 1780 output_len--;
1219 rbuf[0] = output_len; 1781 rbuf[0] = output_len;
1782 if (ebd) {
1783 if (minlen > 3)
1784 rbuf[3] = sizeof(sat_blk_desc);
1785 if (minlen > 11)
1786 memcpy(rbuf + 4, sat_blk_desc,
1787 sizeof(sat_blk_desc));
1788 }
1220 } else { 1789 } else {
1221 output_len -= 2; 1790 output_len -= 2;
1222 rbuf[0] = output_len >> 8; 1791 rbuf[0] = output_len >> 8;
1223 rbuf[1] = output_len; 1792 if (minlen > 1)
1793 rbuf[1] = output_len;
1794 if (ebd) {
1795 if (minlen > 7)
1796 rbuf[7] = sizeof(sat_blk_desc);
1797 if (minlen > 15)
1798 memcpy(rbuf + 8, sat_blk_desc,
1799 sizeof(sat_blk_desc));
1800 }
1224 } 1801 }
1225
1226 return 0; 1802 return 0;
1803
1804invalid_fld:
1805 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
1806 /* "Invalid field in cbd" */
1807 return 1;
1808
1809saving_not_supp:
1810 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
1811 /* "Saving parameters not supported" */
1812 return 1;
1227} 1813}
1228 1814
1229/** 1815/**
@@ -1246,10 +1832,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
1246 1832
1247 VPRINTK("ENTER\n"); 1833 VPRINTK("ENTER\n");
1248 1834
1249 if (ata_id_has_lba48(args->id)) 1835 if (ata_id_has_lba(args->id)) {
1250 n_sectors = ata_id_u64(args->id, 100); 1836 if (ata_id_has_lba48(args->id))
1251 else 1837 n_sectors = ata_id_u64(args->id, 100);
1252 n_sectors = ata_id_u32(args->id, 60); 1838 else
1839 n_sectors = ata_id_u32(args->id, 60);
1840 } else {
1841 /* CHS default translation */
1842 n_sectors = args->id[1] * args->id[3] * args->id[6];
1843
1844 if (ata_id_current_chs_valid(args->id))
1845 /* CHS current translation */
1846 n_sectors = ata_id_u32(args->id, 57);
1847 }
1848
1253 n_sectors--; /* ATA TotalUserSectors - 1 */ 1849 n_sectors--; /* ATA TotalUserSectors - 1 */
1254 1850
1255 if (args->cmd->cmnd[0] == READ_CAPACITY) { 1851 if (args->cmd->cmnd[0] == READ_CAPACITY) {
@@ -1313,6 +1909,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1313} 1909}
1314 1910
1315/** 1911/**
1912 * ata_scsi_set_sense - Set SCSI sense data and status
1913 * @cmd: SCSI request to be handled
1914 * @sk: SCSI-defined sense key
1915 * @asc: SCSI-defined additional sense code
1916 * @ascq: SCSI-defined additional sense code qualifier
1917 *
1918 * Helper function that builds a valid fixed format, current
1919 * response code and the given sense key (sk), additional sense
1920 * code (asc) and additional sense code qualifier (ascq) with
1921 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
1922 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
1923 *
1924 * LOCKING:
1925 * Not required
1926 */
1927
1928void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
1929{
1930 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1931
1932 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
1933 cmd->sense_buffer[2] = sk;
1934 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
1935 cmd->sense_buffer[12] = asc;
1936 cmd->sense_buffer[13] = ascq;
1937}
1938
1939/**
1316 * ata_scsi_badcmd - End a SCSI request with an error 1940 * ata_scsi_badcmd - End a SCSI request with an error
1317 * @cmd: SCSI request to be handled 1941 * @cmd: SCSI request to be handled
1318 * @done: SCSI command completion function 1942 * @done: SCSI command completion function
@@ -1330,30 +1954,89 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1330void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 1954void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
1331{ 1955{
1332 DPRINTK("ENTER\n"); 1956 DPRINTK("ENTER\n");
1333 cmd->result = SAM_STAT_CHECK_CONDITION; 1957 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
1334
1335 cmd->sense_buffer[0] = 0x70;
1336 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
1337 cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
1338 cmd->sense_buffer[12] = asc;
1339 cmd->sense_buffer[13] = ascq;
1340 1958
1341 done(cmd); 1959 done(cmd);
1342} 1960}
1343 1961
1344static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1962void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1963 struct scsi_cmnd *cmd)
1964{
1965 DECLARE_COMPLETION(wait);
1966 struct ata_queued_cmd *qc;
1967 unsigned long flags;
1968 int rc;
1969
1970 DPRINTK("ATAPI request sense\n");
1971
1972 qc = ata_qc_new_init(ap, dev);
1973 BUG_ON(qc == NULL);
1974
1975 /* FIXME: is this needed? */
1976 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
1977
1978 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
1979 qc->dma_dir = DMA_FROM_DEVICE;
1980
1981 memset(&qc->cdb, 0, ap->cdb_len);
1982 qc->cdb[0] = REQUEST_SENSE;
1983 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
1984
1985 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1986 qc->tf.command = ATA_CMD_PACKET;
1987
1988 qc->tf.protocol = ATA_PROT_ATAPI;
1989 qc->tf.lbam = (8 * 1024) & 0xff;
1990 qc->tf.lbah = (8 * 1024) >> 8;
1991 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
1992
1993 qc->waiting = &wait;
1994 qc->complete_fn = ata_qc_complete_noop;
1995
1996 spin_lock_irqsave(&ap->host_set->lock, flags);
1997 rc = ata_qc_issue(qc);
1998 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1999
2000 if (rc)
2001 ata_port_disable(ap);
2002 else
2003 wait_for_completion(&wait);
2004
2005 DPRINTK("EXIT\n");
2006}
2007
2008static int atapi_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
1345{ 2009{
1346 struct scsi_cmnd *cmd = qc->scsicmd; 2010 struct scsi_cmnd *cmd = qc->scsicmd;
1347 2011
1348 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { 2012 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2013
2014 if (unlikely(err_mask & AC_ERR_DEV)) {
1349 DPRINTK("request check condition\n"); 2015 DPRINTK("request check condition\n");
1350 2016
2017 /* FIXME: command completion with check condition
2018 * but no sense causes the error handler to run,
2019 * which then issues REQUEST SENSE, fills in the sense
2020 * buffer, and completes the command (for the second
2021 * time). We need to issue REQUEST SENSE some other
2022 * way, to avoid completing the command twice.
2023 */
1351 cmd->result = SAM_STAT_CHECK_CONDITION; 2024 cmd->result = SAM_STAT_CHECK_CONDITION;
1352 2025
1353 qc->scsidone(cmd); 2026 qc->scsidone(cmd);
1354 2027
1355 return 1; 2028 return 1;
1356 } else { 2029 }
2030
2031 else if (unlikely(err_mask))
2032 /* FIXME: not quite right; we don't want the
2033 * translation of taskfile registers into
2034 * a sense descriptors, since that's only
2035 * correct for ATA, not ATAPI
2036 */
2037 ata_gen_ata_desc_sense(qc);
2038
2039 else {
1357 u8 *scsicmd = cmd->cmnd; 2040 u8 *scsicmd = cmd->cmnd;
1358 2041
1359 if (scsicmd[0] == INQUIRY) { 2042 if (scsicmd[0] == INQUIRY) {
@@ -1361,15 +2044,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1361 unsigned int buflen; 2044 unsigned int buflen;
1362 2045
1363 buflen = ata_scsi_rbuf_get(cmd, &buf); 2046 buflen = ata_scsi_rbuf_get(cmd, &buf);
1364 buf[2] = 0x5; 2047
1365 buf[3] = (buf[3] & 0xf0) | 2; 2048 /* ATAPI devices typically report zero for their SCSI version,
2049 * and sometimes deviate from the spec WRT response data
2050 * format. If SCSI version is reported as zero like normal,
2051 * then we make the following fixups: 1) Fake MMC-5 version,
2052 * to indicate to the Linux scsi midlayer this is a modern
2053 * device. 2) Ensure response data format / ATAPI information
2054 * are always correct.
2055 */
2056 /* FIXME: do we ever override EVPD pages and the like, with
2057 * this code?
2058 */
2059 if (buf[2] == 0) {
2060 buf[2] = 0x5;
2061 buf[3] = 0x32;
2062 }
2063
1366 ata_scsi_rbuf_put(cmd, buf); 2064 ata_scsi_rbuf_put(cmd, buf);
1367 } 2065 }
2066
1368 cmd->result = SAM_STAT_GOOD; 2067 cmd->result = SAM_STAT_GOOD;
1369 } 2068 }
1370 2069
1371 qc->scsidone(cmd); 2070 qc->scsidone(cmd);
1372
1373 return 0; 2071 return 0;
1374} 2072}
1375/** 2073/**
@@ -1384,7 +2082,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1384 * Zero on success, non-zero on failure. 2082 * Zero on success, non-zero on failure.
1385 */ 2083 */
1386 2084
1387static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 2085static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1388{ 2086{
1389 struct scsi_cmnd *cmd = qc->scsicmd; 2087 struct scsi_cmnd *cmd = qc->scsicmd;
1390 struct ata_device *dev = qc->dev; 2088 struct ata_device *dev = qc->dev;
@@ -1453,7 +2151,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
1453 */ 2151 */
1454 2152
1455static struct ata_device * 2153static struct ata_device *
1456ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev) 2154ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
1457{ 2155{
1458 struct ata_device *dev; 2156 struct ata_device *dev;
1459 2157
@@ -1478,6 +2176,143 @@ ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev)
1478 return dev; 2176 return dev;
1479} 2177}
1480 2178
2179/*
2180 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2181 * @byte1: Byte 1 from pass-thru CDB.
2182 *
2183 * RETURNS:
2184 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2185 */
2186static u8
2187ata_scsi_map_proto(u8 byte1)
2188{
2189 switch((byte1 & 0x1e) >> 1) {
2190 case 3: /* Non-data */
2191 return ATA_PROT_NODATA;
2192
2193 case 6: /* DMA */
2194 return ATA_PROT_DMA;
2195
2196 case 4: /* PIO Data-in */
2197 case 5: /* PIO Data-out */
2198 if (byte1 & 0xe0) {
2199 return ATA_PROT_PIO_MULT;
2200 }
2201 return ATA_PROT_PIO;
2202
2203 case 10: /* Device Reset */
2204 case 0: /* Hard Reset */
2205 case 1: /* SRST */
2206 case 2: /* Bus Idle */
2207 case 7: /* Packet */
2208 case 8: /* DMA Queued */
2209 case 9: /* Device Diagnostic */
2210 case 11: /* UDMA Data-in */
2211 case 12: /* UDMA Data-Out */
2212 case 13: /* FPDMA */
2213 default: /* Reserved */
2214 break;
2215 }
2216
2217 return ATA_PROT_UNKNOWN;
2218}
2219
2220/**
2221 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2222 * @qc: command structure to be initialized
2223 * @cmd: SCSI command to convert
2224 *
2225 * Handles either 12 or 16-byte versions of the CDB.
2226 *
2227 * RETURNS:
2228 * Zero on success, non-zero on failure.
2229 */
2230static unsigned int
2231ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2232{
2233 struct ata_taskfile *tf = &(qc->tf);
2234 struct scsi_cmnd *cmd = qc->scsicmd;
2235
2236 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2237 return 1;
2238
2239 /*
2240 * 12 and 16 byte CDBs use different offsets to
2241 * provide the various register values.
2242 */
2243 if (scsicmd[0] == ATA_16) {
2244 /*
2245 * 16-byte CDB - may contain extended commands.
2246 *
2247 * If that is the case, copy the upper byte register values.
2248 */
2249 if (scsicmd[1] & 0x01) {
2250 tf->hob_feature = scsicmd[3];
2251 tf->hob_nsect = scsicmd[5];
2252 tf->hob_lbal = scsicmd[7];
2253 tf->hob_lbam = scsicmd[9];
2254 tf->hob_lbah = scsicmd[11];
2255 tf->flags |= ATA_TFLAG_LBA48;
2256 } else
2257 tf->flags &= ~ATA_TFLAG_LBA48;
2258
2259 /*
2260 * Always copy low byte, device and command registers.
2261 */
2262 tf->feature = scsicmd[4];
2263 tf->nsect = scsicmd[6];
2264 tf->lbal = scsicmd[8];
2265 tf->lbam = scsicmd[10];
2266 tf->lbah = scsicmd[12];
2267 tf->device = scsicmd[13];
2268 tf->command = scsicmd[14];
2269 } else {
2270 /*
2271 * 12-byte CDB - incapable of extended commands.
2272 */
2273 tf->flags &= ~ATA_TFLAG_LBA48;
2274
2275 tf->feature = scsicmd[3];
2276 tf->nsect = scsicmd[4];
2277 tf->lbal = scsicmd[5];
2278 tf->lbam = scsicmd[6];
2279 tf->lbah = scsicmd[7];
2280 tf->device = scsicmd[8];
2281 tf->command = scsicmd[9];
2282 }
2283
2284 /*
2285 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2286 * SET_FEATURES - XFER MODE must be preceded/succeeded
2287 * by an update to hardware-specific registers for each
2288 * controller (i.e. the reason for ->set_piomode(),
2289 * ->set_dmamode(), and ->post_set_mode() hooks).
2290 */
2291 if ((tf->command == ATA_CMD_SET_FEATURES)
2292 && (tf->feature == SETFEATURES_XFER))
2293 return 1;
2294
2295 /*
2296 * Set flags so that all registers will be written,
2297 * and pass on write indication (used for PIO/DMA
2298 * setup.)
2299 */
2300 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
2301
2302 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2303 tf->flags |= ATA_TFLAG_WRITE;
2304
2305 /*
2306 * Set transfer length.
2307 *
2308 * TODO: find out if we need to do more here to
2309 * cover scatter/gather case.
2310 */
2311 qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
2312
2313 return 0;
2314}
2315
1481/** 2316/**
1482 * ata_get_xlat_func - check if SCSI to ATA translation is possible 2317 * ata_get_xlat_func - check if SCSI to ATA translation is possible
1483 * @dev: ATA device 2318 * @dev: ATA device
@@ -1510,6 +2345,11 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
1510 case VERIFY: 2345 case VERIFY:
1511 case VERIFY_16: 2346 case VERIFY_16:
1512 return ata_scsi_verify_xlat; 2347 return ata_scsi_verify_xlat;
2348
2349 case ATA_12:
2350 case ATA_16:
2351 return ata_scsi_pass_thru;
2352
1513 case START_STOP: 2353 case START_STOP:
1514 return ata_scsi_start_stop_xlat; 2354 return ata_scsi_start_stop_xlat;
1515 } 2355 }
@@ -1610,7 +2450,7 @@ void ata_scsi_simulate(u16 *id,
1610 void (*done)(struct scsi_cmnd *)) 2450 void (*done)(struct scsi_cmnd *))
1611{ 2451{
1612 struct ata_scsi_args args; 2452 struct ata_scsi_args args;
1613 u8 *scsicmd = cmd->cmnd; 2453 const u8 *scsicmd = cmd->cmnd;
1614 2454
1615 args.id = id; 2455 args.id = id;
1616 args.cmd = cmd; 2456 args.cmd = cmd;
@@ -1630,7 +2470,7 @@ void ata_scsi_simulate(u16 *id,
1630 2470
1631 case INQUIRY: 2471 case INQUIRY:
1632 if (scsicmd[1] & 2) /* is CmdDt set? */ 2472 if (scsicmd[1] & 2) /* is CmdDt set? */
1633 ata_bad_cdb(cmd, done); 2473 ata_scsi_invalid_field(cmd, done);
1634 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 2474 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
1635 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2475 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
1636 else if (scsicmd[2] == 0x00) 2476 else if (scsicmd[2] == 0x00)
@@ -1640,7 +2480,7 @@ void ata_scsi_simulate(u16 *id,
1640 else if (scsicmd[2] == 0x83) 2480 else if (scsicmd[2] == 0x83)
1641 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2481 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
1642 else 2482 else
1643 ata_bad_cdb(cmd, done); 2483 ata_scsi_invalid_field(cmd, done);
1644 break; 2484 break;
1645 2485
1646 case MODE_SENSE: 2486 case MODE_SENSE:
@@ -1650,7 +2490,7 @@ void ata_scsi_simulate(u16 *id,
1650 2490
1651 case MODE_SELECT: /* unconditionally return */ 2491 case MODE_SELECT: /* unconditionally return */
1652 case MODE_SELECT_10: /* bad-field-in-cdb */ 2492 case MODE_SELECT_10: /* bad-field-in-cdb */
1653 ata_bad_cdb(cmd, done); 2493 ata_scsi_invalid_field(cmd, done);
1654 break; 2494 break;
1655 2495
1656 case READ_CAPACITY: 2496 case READ_CAPACITY:
@@ -1661,20 +2501,38 @@ void ata_scsi_simulate(u16 *id,
1661 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2501 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
1662 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2502 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
1663 else 2503 else
1664 ata_bad_cdb(cmd, done); 2504 ata_scsi_invalid_field(cmd, done);
1665 break; 2505 break;
1666 2506
1667 case REPORT_LUNS: 2507 case REPORT_LUNS:
1668 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2508 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
1669 break; 2509 break;
1670 2510
1671 /* mandantory commands we haven't implemented yet */ 2511 /* mandatory commands we haven't implemented yet */
1672 case REQUEST_SENSE: 2512 case REQUEST_SENSE:
1673 2513
1674 /* all other commands */ 2514 /* all other commands */
1675 default: 2515 default:
1676 ata_bad_scsiop(cmd, done); 2516 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
2517 /* "Invalid command operation code" */
2518 done(cmd);
1677 break; 2519 break;
1678 } 2520 }
1679} 2521}
1680 2522
2523void ata_scsi_scan_host(struct ata_port *ap)
2524{
2525 struct ata_device *dev;
2526 unsigned int i;
2527
2528 if (ap->flags & ATA_FLAG_PORT_DISABLED)
2529 return;
2530
2531 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2532 dev = &ap->device[i];
2533
2534 if (ata_dev_present(dev))
2535 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
2536 }
2537}
2538
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index d608b3a0f6fe..10ecd9e15e4f 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,19 +39,25 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask);
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 44 struct ata_device *dev);
45extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc);
44extern void ata_qc_free(struct ata_queued_cmd *qc); 46extern void ata_qc_free(struct ata_queued_cmd *qc);
45extern int ata_qc_issue(struct ata_queued_cmd *qc); 47extern int ata_qc_issue(struct ata_queued_cmd *qc);
46extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 48extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
47extern void ata_dev_select(struct ata_port *ap, unsigned int device, 49extern void ata_dev_select(struct ata_port *ap, unsigned int device,
48 unsigned int wait, unsigned int can_sleep); 50 unsigned int wait, unsigned int can_sleep);
49extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); 51extern void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf);
50extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 52extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
53extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
54extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
51 55
52 56
53/* libata-scsi.c */ 57/* libata-scsi.c */
54extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); 58extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
59 struct scsi_cmnd *cmd);
60extern void ata_scsi_scan_host(struct ata_port *ap);
55extern int ata_scsi_error(struct Scsi_Host *host); 61extern int ata_scsi_error(struct Scsi_Host *host);
56extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 62extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
57 unsigned int buflen); 63 unsigned int buflen);
@@ -76,18 +82,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
76extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, 82extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
77 void (*done)(struct scsi_cmnd *), 83 void (*done)(struct scsi_cmnd *),
78 u8 asc, u8 ascq); 84 u8 asc, u8 ascq);
85extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
86 u8 sk, u8 asc, u8 ascq);
79extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 87extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
80 unsigned int (*actor) (struct ata_scsi_args *args, 88 unsigned int (*actor) (struct ata_scsi_args *args,
81 u8 *rbuf, unsigned int buflen)); 89 u8 *rbuf, unsigned int buflen));
82 90
83static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
84{
85 ata_scsi_badcmd(cmd, done, 0x20, 0x00);
86}
87
88static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
89{
90 ata_scsi_badcmd(cmd, done, 0x24, 0x00);
91}
92
93#endif /* __LIBATA_H__ */ 91#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0aba13ceaacf..352df47bcaca 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -39,7 +39,7 @@
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41static void * 41static void *
42lpfc_pool_kmalloc(unsigned int gfp_flags, void *data) 42lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
43{ 43{
44 return kmalloc((unsigned long)data, gfp_flags); 44 return kmalloc((unsigned long)data, gfp_flags);
45} 45}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d47be8e0ea3a..c9e743ba09ec 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -76,7 +76,7 @@ static void megaraid_exit(void);
76 76
77static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *); 77static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
78static void megaraid_detach_one(struct pci_dev *); 78static void megaraid_detach_one(struct pci_dev *);
79static void megaraid_mbox_shutdown(struct device *); 79static void megaraid_mbox_shutdown(struct pci_dev *);
80 80
81static int megaraid_io_attach(adapter_t *); 81static int megaraid_io_attach(adapter_t *);
82static void megaraid_io_detach(adapter_t *); 82static void megaraid_io_detach(adapter_t *);
@@ -369,9 +369,7 @@ static struct pci_driver megaraid_pci_driver_g = {
369 .id_table = pci_id_table_g, 369 .id_table = pci_id_table_g,
370 .probe = megaraid_probe_one, 370 .probe = megaraid_probe_one,
371 .remove = __devexit_p(megaraid_detach_one), 371 .remove = __devexit_p(megaraid_detach_one),
372 .driver = { 372 .shutdown = megaraid_mbox_shutdown,
373 .shutdown = megaraid_mbox_shutdown,
374 }
375}; 373};
376 374
377 375
@@ -673,9 +671,9 @@ megaraid_detach_one(struct pci_dev *pdev)
673 * Shutdown notification, perform flush cache 671 * Shutdown notification, perform flush cache
674 */ 672 */
675static void 673static void
676megaraid_mbox_shutdown(struct device *device) 674megaraid_mbox_shutdown(struct pci_dev *pdev)
677{ 675{
678 adapter_t *adapter = pci_get_drvdata(to_pci_dev(device)); 676 adapter_t *adapter = pci_get_drvdata(pdev);
679 static int counter; 677 static int counter;
680 678
681 if (!adapter) { 679 if (!adapter) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index b235556b7b65..bdccf73cf9fe 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -730,7 +730,7 @@ static void start_phase(struct mesh_state *ms)
730 * issue a SEQ_MSGOUT to get the mesh to drop ACK. 730 * issue a SEQ_MSGOUT to get the mesh to drop ACK.
731 */ 731 */
732 if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { 732 if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
733 dlog(ms, "bus0 was %.2x explictly asserting ATN", mr->bus_status0); 733 dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
734 out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ 734 out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
735 mesh_flush_io(mr); 735 mesh_flush_io(mr);
736 udelay(1); 736 udelay(1);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 3f2f2464fa63..172839fce0eb 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5146,7 +5146,8 @@ static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned
5146/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */ 5146/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
5147static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg ) 5147static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
5148{ 5148{
5149 int i, priority; 5149 int i;
5150 gfp_t priority;
5150 struct osst_buffer *tb; 5151 struct osst_buffer *tb;
5151 5152
5152 if (from_initialization) 5153 if (from_initialization)
@@ -5178,7 +5179,8 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
5178/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */ 5179/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
5179static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) 5180static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
5180{ 5181{
5181 int segs, nbr, max_segs, b_size, priority, order, got; 5182 int segs, nbr, max_segs, b_size, order, got;
5183 gfp_t priority;
5182 5184
5183 if (STbuffer->buffer_size >= OS_FRAME_SIZE) 5185 if (STbuffer->buffer_size >= OS_FRAME_SIZE)
5184 return 1; 5186 return 1;
@@ -5627,7 +5629,7 @@ static void osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape *
5627 5629
5628 if (!osst_sysfs_valid) return; 5630 if (!osst_sysfs_valid) return;
5629 5631
5630 osst_class_member = class_device_create(osst_sysfs_class, dev, device, "%s", name); 5632 osst_class_member = class_device_create(osst_sysfs_class, NULL, dev, device, "%s", name);
5631 if (IS_ERR(osst_class_member)) { 5633 if (IS_ERR(osst_class_member)) {
5632 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name); 5634 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
5633 return; 5635 return;
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
new file mode 100644
index 000000000000..665017eda8a6
--- /dev/null
+++ b/drivers/scsi/pdc_adma.c
@@ -0,0 +1,741 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include "scsi.h"
45#include <scsi/scsi_host.h>
46#include <asm/io.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "pdc_adma"
50#define DRV_VERSION "0.03"
51
52/* macro to calculate base address for ATA regs */
53#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
54
55/* macro to calculate base address for ADMA regs */
56#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
57
58enum {
59 ADMA_PORTS = 2,
60 ADMA_CPB_BYTES = 40,
61 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
62 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
63
64 ADMA_DMA_BOUNDARY = 0xffffffff,
65
66 /* global register offsets */
67 ADMA_MODE_LOCK = 0x00c7,
68
69 /* per-channel register offsets */
70 ADMA_CONTROL = 0x0000, /* ADMA control */
71 ADMA_STATUS = 0x0002, /* ADMA status */
72 ADMA_CPB_COUNT = 0x0004, /* CPB count */
73 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
74 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
75 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
76 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
77 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
78
79 /* ADMA_CONTROL register bits */
80 aNIEN = (1 << 8), /* irq mask: 1==masked */
81 aGO = (1 << 7), /* packet trigger ("Go!") */
82 aRSTADM = (1 << 5), /* ADMA logic reset */
83 aPIOMD4 = 0x0003, /* PIO mode 4 */
84
85 /* ADMA_STATUS register bits */
86 aPSD = (1 << 6),
87 aUIRQ = (1 << 4),
88 aPERR = (1 << 0),
89
90 /* CPB bits */
91 cDONE = (1 << 0),
92 cVLD = (1 << 0),
93 cDAT = (1 << 2),
94 cIEN = (1 << 3),
95
96 /* PRD bits */
97 pORD = (1 << 4),
98 pDIRO = (1 << 5),
99 pEND = (1 << 7),
100
101 /* ATA register flags */
102 rIGN = (1 << 5),
103 rEND = (1 << 7),
104
105 /* ATA register addresses */
106 ADMA_REGS_CONTROL = 0x0e,
107 ADMA_REGS_SECTOR_COUNT = 0x12,
108 ADMA_REGS_LBA_LOW = 0x13,
109 ADMA_REGS_LBA_MID = 0x14,
110 ADMA_REGS_LBA_HIGH = 0x15,
111 ADMA_REGS_DEVICE = 0x16,
112 ADMA_REGS_COMMAND = 0x17,
113
114 /* PCI device IDs */
115 board_1841_idx = 0, /* ADMA 2-port controller */
116};
117
118typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
119
120struct adma_port_priv {
121 u8 *pkt;
122 dma_addr_t pkt_dma;
123 adma_state_t state;
124};
125
126static int adma_ata_init_one (struct pci_dev *pdev,
127 const struct pci_device_id *ent);
128static irqreturn_t adma_intr (int irq, void *dev_instance,
129 struct pt_regs *regs);
130static int adma_port_start(struct ata_port *ap);
131static void adma_host_stop(struct ata_host_set *host_set);
132static void adma_port_stop(struct ata_port *ap);
133static void adma_phy_reset(struct ata_port *ap);
134static void adma_qc_prep(struct ata_queued_cmd *qc);
135static int adma_qc_issue(struct ata_queued_cmd *qc);
136static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
137static void adma_bmdma_stop(struct ata_queued_cmd *qc);
138static u8 adma_bmdma_status(struct ata_port *ap);
139static void adma_irq_clear(struct ata_port *ap);
140static void adma_eng_timeout(struct ata_port *ap);
141
142static Scsi_Host_Template adma_ata_sht = {
143 .module = THIS_MODULE,
144 .name = DRV_NAME,
145 .ioctl = ata_scsi_ioctl,
146 .queuecommand = ata_scsi_queuecmd,
147 .eh_strategy_handler = ata_scsi_error,
148 .can_queue = ATA_DEF_QUEUE,
149 .this_id = ATA_SHT_THIS_ID,
150 .sg_tablesize = LIBATA_MAX_PRD,
151 .max_sectors = ATA_MAX_SECTORS,
152 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
153 .emulated = ATA_SHT_EMULATED,
154 .use_clustering = ENABLE_CLUSTERING,
155 .proc_name = DRV_NAME,
156 .dma_boundary = ADMA_DMA_BOUNDARY,
157 .slave_configure = ata_scsi_slave_config,
158 .bios_param = ata_std_bios_param,
159};
160
161static const struct ata_port_operations adma_ata_ops = {
162 .port_disable = ata_port_disable,
163 .tf_load = ata_tf_load,
164 .tf_read = ata_tf_read,
165 .check_status = ata_check_status,
166 .check_atapi_dma = adma_check_atapi_dma,
167 .exec_command = ata_exec_command,
168 .dev_select = ata_std_dev_select,
169 .phy_reset = adma_phy_reset,
170 .qc_prep = adma_qc_prep,
171 .qc_issue = adma_qc_issue,
172 .eng_timeout = adma_eng_timeout,
173 .irq_handler = adma_intr,
174 .irq_clear = adma_irq_clear,
175 .port_start = adma_port_start,
176 .port_stop = adma_port_stop,
177 .host_stop = adma_host_stop,
178 .bmdma_stop = adma_bmdma_stop,
179 .bmdma_status = adma_bmdma_status,
180};
181
182static struct ata_port_info adma_port_info[] = {
183 /* board_1841_idx */
184 {
185 .sht = &adma_ata_sht,
186 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
187 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
188 .pio_mask = 0x10, /* pio4 */
189 .udma_mask = 0x1f, /* udma0-4 */
190 .port_ops = &adma_ata_ops,
191 },
192};
193
194static struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
196 board_1841_idx },
197
198 { } /* terminate list */
199};
200
201static struct pci_driver adma_ata_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = adma_ata_pci_tbl,
204 .probe = adma_ata_init_one,
205 .remove = ata_pci_remove_one,
206};
207
208static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
209{
210 return 1; /* ATAPI DMA not yet supported */
211}
212
213static void adma_bmdma_stop(struct ata_queued_cmd *qc)
214{
215 /* nothing */
216}
217
218static u8 adma_bmdma_status(struct ata_port *ap)
219{
220 return 0;
221}
222
223static void adma_irq_clear(struct ata_port *ap)
224{
225 /* nothing */
226}
227
228static void adma_reset_engine(void __iomem *chan)
229{
230 /* reset ADMA to idle state */
231 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
232 udelay(2);
233 writew(aPIOMD4, chan + ADMA_CONTROL);
234 udelay(2);
235}
236
237static void adma_reinit_engine(struct ata_port *ap)
238{
239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host_set->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242
243 /* mask/clear ATA interrupts */
244 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
245 ata_check_status(ap);
246
247 /* reset the ADMA engine */
248 adma_reset_engine(chan);
249
250 /* set in-FIFO threshold to 0x100 */
251 writew(0x100, chan + ADMA_FIFO_IN);
252
253 /* set CPB pointer */
254 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
255
256 /* set out-FIFO threshold to 0x100 */
257 writew(0x100, chan + ADMA_FIFO_OUT);
258
259 /* set CPB count */
260 writew(1, chan + ADMA_CPB_COUNT);
261
262 /* read/discard ADMA status */
263 readb(chan + ADMA_STATUS);
264}
265
266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{
268 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
269
270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */
272}
273
274static void adma_phy_reset(struct ata_port *ap)
275{
276 struct adma_port_priv *pp = ap->private_data;
277
278 pp->state = adma_state_idle;
279 adma_reinit_engine(ap);
280 ata_port_probe(ap);
281 ata_bus_reset(ap);
282}
283
284static void adma_eng_timeout(struct ata_port *ap)
285{
286 struct adma_port_priv *pp = ap->private_data;
287
288 if (pp->state != adma_state_idle) /* healthy paranoia */
289 pp->state = adma_state_mmio;
290 adma_reinit_engine(ap);
291 ata_eng_timeout(ap);
292}
293
294static int adma_fill_sg(struct ata_queued_cmd *qc)
295{
296 struct scatterlist *sg = qc->sg;
297 struct ata_port *ap = qc->ap;
298 struct adma_port_priv *pp = ap->private_data;
299 u8 *buf = pp->pkt;
300 int nelem, i = (2 + buf[3]) * 8;
301 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
302
303 for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
304 u32 addr;
305 u32 len;
306
307 addr = (u32)sg_dma_address(sg);
308 *(__le32 *)(buf + i) = cpu_to_le32(addr);
309 i += 4;
310
311 len = sg_dma_len(sg) >> 3;
312 *(__le32 *)(buf + i) = cpu_to_le32(len);
313 i += 4;
314
315 if ((nelem + 1) == qc->n_elem)
316 pFLAGS |= pEND;
317 buf[i++] = pFLAGS;
318 buf[i++] = qc->dev->dma_mode & 0xf;
319 buf[i++] = 0; /* pPKLW */
320 buf[i++] = 0; /* reserved */
321
322 *(__le32 *)(buf + i)
323 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
324 i += 4;
325
326 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem,
327 (unsigned long)addr, len);
328 }
329 return i;
330}
331
332static void adma_qc_prep(struct ata_queued_cmd *qc)
333{
334 struct adma_port_priv *pp = qc->ap->private_data;
335 u8 *buf = pp->pkt;
336 u32 pkt_dma = (u32)pp->pkt_dma;
337 int i = 0;
338
339 VPRINTK("ENTER\n");
340
341 adma_enter_reg_mode(qc->ap);
342 if (qc->tf.protocol != ATA_PROT_DMA) {
343 ata_qc_prep(qc);
344 return;
345 }
346
347 buf[i++] = 0; /* Response flags */
348 buf[i++] = 0; /* reserved */
349 buf[i++] = cVLD | cDAT | cIEN;
350 i++; /* cLEN, gets filled in below */
351
352 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
353 i += 4; /* cNCPB */
354 i += 4; /* cPRD, gets filled in below */
355
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359 buf[i++] = 0; /* reserved */
360
361 /* ATA registers; must be a multiple of 4 */
362 buf[i++] = qc->tf.device;
363 buf[i++] = ADMA_REGS_DEVICE;
364 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
365 buf[i++] = qc->tf.hob_nsect;
366 buf[i++] = ADMA_REGS_SECTOR_COUNT;
367 buf[i++] = qc->tf.hob_lbal;
368 buf[i++] = ADMA_REGS_LBA_LOW;
369 buf[i++] = qc->tf.hob_lbam;
370 buf[i++] = ADMA_REGS_LBA_MID;
371 buf[i++] = qc->tf.hob_lbah;
372 buf[i++] = ADMA_REGS_LBA_HIGH;
373 }
374 buf[i++] = qc->tf.nsect;
375 buf[i++] = ADMA_REGS_SECTOR_COUNT;
376 buf[i++] = qc->tf.lbal;
377 buf[i++] = ADMA_REGS_LBA_LOW;
378 buf[i++] = qc->tf.lbam;
379 buf[i++] = ADMA_REGS_LBA_MID;
380 buf[i++] = qc->tf.lbah;
381 buf[i++] = ADMA_REGS_LBA_HIGH;
382 buf[i++] = 0;
383 buf[i++] = ADMA_REGS_CONTROL;
384 buf[i++] = rIGN;
385 buf[i++] = 0;
386 buf[i++] = qc->tf.command;
387 buf[i++] = ADMA_REGS_COMMAND | rEND;
388
389 buf[3] = (i >> 3) - 2; /* cLEN */
390 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
391
392 i = adma_fill_sg(qc);
393 wmb(); /* flush PRDs and pkt to memory */
394#if 0
395 /* dump out CPB + PRDs for debug */
396 {
397 int j, len = 0;
398 static char obuf[2048];
399 for (j = 0; j < i; ++j) {
400 len += sprintf(obuf+len, "%02x ", buf[j]);
401 if ((j & 7) == 7) {
402 printk("%s\n", obuf);
403 len = 0;
404 }
405 }
406 if (len)
407 printk("%s\n", obuf);
408 }
409#endif
410}
411
412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{
414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
416
417 VPRINTK("ENTER, ap %p\n", ap);
418
419 /* fire up the ADMA engine */
420 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
421}
422
423static int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
445static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
446{
447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host_set->mmio_base;
449
450 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
451 struct ata_port *ap = host_set->ports[port_no];
452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
455 u8 status = readb(chan + ADMA_STATUS);
456
457 if (status == 0)
458 continue;
459 handled = 1;
460 adma_enter_reg_mode(ap);
461 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
462 continue;
463 pp = ap->private_data;
464 if (!pp || pp->state != adma_state_pkt)
465 continue;
466 qc = ata_qc_from_tag(ap, ap->active_tag);
467 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
468 unsigned int err_mask = 0;
469
470 if ((status & (aPERR | aPSD | aUIRQ)))
471 err_mask = AC_ERR_OTHER;
472 else if (pp->pkt[0] != cDONE)
473 err_mask = AC_ERR_OTHER;
474
475 ata_qc_complete(qc, err_mask);
476 }
477 }
478 return handled;
479}
480
481static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
482{
483 unsigned int handled = 0, port_no;
484
485 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
486 struct ata_port *ap;
487 ap = host_set->ports[port_no];
488 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
489 struct ata_queued_cmd *qc;
490 struct adma_port_priv *pp = ap->private_data;
491 if (!pp || pp->state != adma_state_mmio)
492 continue;
493 qc = ata_qc_from_tag(ap, ap->active_tag);
494 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
495
496 /* check main status, clearing INTRQ */
497 u8 status = ata_check_status(ap);
498 if ((status & ATA_BUSY))
499 continue;
500 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
501 ap->id, qc->tf.protocol, status);
502
503 /* complete taskfile transaction */
504 pp->state = adma_state_idle;
505 ata_qc_complete(qc, ac_err_mask(status));
506 handled = 1;
507 }
508 }
509 }
510 return handled;
511}
512
513static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
514{
515 struct ata_host_set *host_set = dev_instance;
516 unsigned int handled = 0;
517
518 VPRINTK("ENTER\n");
519
520 spin_lock(&host_set->lock);
521 handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n");
525
526 return IRQ_RETVAL(handled);
527}
528
529static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
530{
531 port->cmd_addr =
532 port->data_addr = base + 0x000;
533 port->error_addr =
534 port->feature_addr = base + 0x004;
535 port->nsect_addr = base + 0x008;
536 port->lbal_addr = base + 0x00c;
537 port->lbam_addr = base + 0x010;
538 port->lbah_addr = base + 0x014;
539 port->device_addr = base + 0x018;
540 port->status_addr =
541 port->command_addr = base + 0x01c;
542 port->altstatus_addr =
543 port->ctl_addr = base + 0x038;
544}
545
546static int adma_port_start(struct ata_port *ap)
547{
548 struct device *dev = ap->host_set->dev;
549 struct adma_port_priv *pp;
550 int rc;
551
552 rc = ata_port_start(ap);
553 if (rc)
554 return rc;
555 adma_enter_reg_mode(ap);
556 rc = -ENOMEM;
557 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
558 if (!pp)
559 goto err_out;
560 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
561 GFP_KERNEL);
562 if (!pp->pkt)
563 goto err_out_kfree;
564 /* paranoia? */
565 if ((pp->pkt_dma & 7) != 0) {
566 printk("bad alignment for pp->pkt_dma: %08x\n",
567 (u32)pp->pkt_dma);
568 dma_free_coherent(dev, ADMA_PKT_BYTES,
569 pp->pkt, pp->pkt_dma);
570 goto err_out_kfree;
571 }
572 memset(pp->pkt, 0, ADMA_PKT_BYTES);
573 ap->private_data = pp;
574 adma_reinit_engine(ap);
575 return 0;
576
577err_out_kfree:
578 kfree(pp);
579err_out:
580 ata_port_stop(ap);
581 return rc;
582}
583
/*
 * adma_port_stop - per-port teardown; reverse of adma_port_start()
 * @ap: port being shut down
 *
 * Resets the ADMA engine first so the hardware cannot touch the packet
 * buffer, then frees the coherent DMA area and the private structure,
 * and finally runs the generic libata port teardown.
 */
static void adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct adma_port_priv *pp = ap->private_data;

	/* quiesce the engine before freeing its DMA memory */
	adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
	if (pp != NULL) {
		/* clear the pointer before freeing to avoid a stale
		 * reference from the interrupt path */
		ap->private_data = NULL;
		if (pp->pkt != NULL)
			dma_free_coherent(dev, ADMA_PKT_BYTES,
						pp->pkt, pp->pkt_dma);
		kfree(pp);
	}
	ata_port_stop(ap);
}
599
600static void adma_host_stop(struct ata_host_set *host_set)
601{
602 unsigned int port_no;
603
604 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
605 adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
606
607 ata_pci_host_stop(host_set);
608}
609
610static void adma_host_init(unsigned int chip_id,
611 struct ata_probe_ent *probe_ent)
612{
613 unsigned int port_no;
614 void __iomem *mmio_base = probe_ent->mmio_base;
615
616 /* enable/lock aGO operation */
617 writeb(7, mmio_base + ADMA_MODE_LOCK);
618
619 /* reset the ADMA logic */
620 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
621 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
622}
623
624static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
625{
626 int rc;
627
628 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
629 if (rc) {
630 dev_printk(KERN_ERR, &pdev->dev,
631 "32-bit DMA enable failed\n");
632 return rc;
633 }
634 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
635 if (rc) {
636 dev_printk(KERN_ERR, &pdev->dev,
637 "32-bit consistent DMA enable failed\n");
638 return rc;
639 }
640 return 0;
641}
642
643static int adma_ata_init_one(struct pci_dev *pdev,
644 const struct pci_device_id *ent)
645{
646 static int printed_version;
647 struct ata_probe_ent *probe_ent = NULL;
648 void __iomem *mmio_base;
649 unsigned int board_idx = (unsigned int) ent->driver_data;
650 int rc, port_no;
651
652 if (!printed_version++)
653 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
654
655 rc = pci_enable_device(pdev);
656 if (rc)
657 return rc;
658
659 rc = pci_request_regions(pdev, DRV_NAME);
660 if (rc)
661 goto err_out;
662
663 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
664 rc = -ENODEV;
665 goto err_out_regions;
666 }
667
668 mmio_base = pci_iomap(pdev, 4, 0);
669 if (mmio_base == NULL) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 rc = adma_set_dma_masks(pdev, mmio_base);
675 if (rc)
676 goto err_out_iounmap;
677
678 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
679 if (probe_ent == NULL) {
680 rc = -ENOMEM;
681 goto err_out_iounmap;
682 }
683
684 probe_ent->dev = pci_dev_to_dev(pdev);
685 INIT_LIST_HEAD(&probe_ent->node);
686
687 probe_ent->sht = adma_port_info[board_idx].sht;
688 probe_ent->host_flags = adma_port_info[board_idx].host_flags;
689 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
690 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
691 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
692 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
693
694 probe_ent->irq = pdev->irq;
695 probe_ent->irq_flags = SA_SHIRQ;
696 probe_ent->mmio_base = mmio_base;
697 probe_ent->n_ports = ADMA_PORTS;
698
699 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
700 adma_ata_setup_port(&probe_ent->port[port_no],
701 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
702 }
703
704 pci_set_master(pdev);
705
706 /* initialize adapter */
707 adma_host_init(board_idx, probe_ent);
708
709 rc = ata_device_add(probe_ent);
710 kfree(probe_ent);
711 if (rc != ADMA_PORTS)
712 goto err_out_iounmap;
713 return 0;
714
715err_out_iounmap:
716 pci_iounmap(pdev, mmio_base);
717err_out_regions:
718 pci_release_regions(pdev);
719err_out:
720 pci_disable_device(pdev);
721 return rc;
722}
723
/* Module entry point: register the PCI driver with the PCI core. */
static int __init adma_ata_init(void)
{
	return pci_module_init(&adma_ata_pci_driver);
}
728
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit adma_ata_exit(void)
{
	pci_unregister_driver(&adma_ata_pci_driver);
}
733
/* Module metadata and entry/exit hookup. */
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(adma_ata_init);
module_exit(adma_ata_exit);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1ed32e7b5472..e451941ad81d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -52,7 +52,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
52extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *); 52extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
53extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *); 53extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *);
54 54
55extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, int); 55extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
56 56
57extern int qla2x00_loop_resync(scsi_qla_host_t *); 57extern int qla2x00_loop_resync(scsi_qla_host_t *);
58 58
@@ -277,7 +277,7 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
277/* 277/*
278 * Global Function Prototypes in qla_rscn.c source file. 278 * Global Function Prototypes in qla_rscn.c source file.
279 */ 279 */
280extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, int); 280extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, gfp_t);
281extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *, 281extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *,
282 int); 282 int);
283extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *); 283extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 23d095d3817b..fbb6feee40cf 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1685,7 +1685,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1685 * Returns a pointer to the allocated fcport, or NULL, if none available. 1685 * Returns a pointer to the allocated fcport, or NULL, if none available.
1686 */ 1686 */
1687fc_port_t * 1687fc_port_t *
1688qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags) 1688qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1689{ 1689{
1690 fc_port_t *fcport; 1690 fc_port_t *fcport;
1691 1691
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 1eba98828636..7534efcc8918 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -1066,7 +1066,7 @@ qla2x00_send_login_iocb_cb(scsi_qla_host_t *ha, struct io_descriptor *iodesc,
1066 * Returns a pointer to the allocated RSCN fcport, or NULL, if none available. 1066 * Returns a pointer to the allocated RSCN fcport, or NULL, if none available.
1067 */ 1067 */
1068fc_port_t * 1068fc_port_t *
1069qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, int flags) 1069qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, gfp_t flags)
1070{ 1070{
1071 fc_port_t *fcport; 1071 fc_port_t *fcport;
1072 1072
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..46dbdee79f77 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -29,13 +29,14 @@
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/device.h>
32#include "scsi.h" 33#include "scsi.h"
33#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
34#include <linux/libata.h> 35#include <linux/libata.h>
35#include <asm/io.h> 36#include <asm/io.h>
36 37
37#define DRV_NAME "sata_mv" 38#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12" 39#define DRV_VERSION "0.25"
39 40
40enum { 41enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 42 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +56,61 @@ enum {
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 56 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 57 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57 58
58 MV_Q_CT = 32, 59 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61 60
62 MV_DMA_BOUNDARY = 0xffffffffU, 61 MV_MAX_Q_DEPTH = 32,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), 62 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
63
64 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
65 * CRPB needs alignment on a 256B boundary. Size == 256B
66 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
67 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
68 */
69 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
70 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
71 MV_MAX_SG_CT = 176,
72 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
73 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
74
75 /* Our DMA boundary is determined by an ePRD being unable to handle
76 * anything larger than 64KB
77 */
78 MV_DMA_BOUNDARY = 0xffffU,
64 79
65 MV_PORTS_PER_HC = 4, 80 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ 81 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2, 82 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ 83 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
69 MV_PORT_MASK = 3, 84 MV_PORT_MASK = 3,
70 85
71 /* Host Flags */ 86 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 87 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 88 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ 89 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
90 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
91 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
92 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
93 MV_FLAG_GLBL_SFT_RST),
75 94
76 chip_504x = 0, 95 chip_504x = 0,
77 chip_508x = 1, 96 chip_508x = 1,
78 chip_604x = 2, 97 chip_604x = 2,
79 chip_608x = 3, 98 chip_608x = 3,
80 99
100 CRQB_FLAG_READ = (1 << 0),
101 CRQB_TAG_SHIFT = 1,
102 CRQB_CMD_ADDR_SHIFT = 8,
103 CRQB_CMD_CS = (0x2 << 11),
104 CRQB_CMD_LAST = (1 << 15),
105
106 CRPB_FLAG_STATUS_SHIFT = 8,
107
108 EPRD_FLAG_END_OF_TBL = (1 << 31),
109
81 /* PCI interface registers */ 110 /* PCI interface registers */
82 111
112 PCI_COMMAND_OFS = 0xc00,
113
83 PCI_MAIN_CMD_STS_OFS = 0xd30, 114 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2), 115 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3), 116 PCI_MASTER_EMPTY = (1 << 3),
@@ -111,20 +142,13 @@ enum {
111 HC_CFG_OFS = 0, 142 HC_CFG_OFS = 0,
112 143
113 HC_IRQ_CAUSE_OFS = 0x14, 144 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */ 145 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ 146 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */ 147 DEV_IRQ = (1 << 8), /* shift by port # */
117 148
118 /* Shadow block registers */ 149 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100, 150 SHD_BLK_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104, 151 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128 152
129 /* SATA registers */ 153 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 154 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
@@ -132,6 +156,11 @@ enum {
132 156
133 /* Port registers */ 157 /* Port registers */
134 EDMA_CFG_OFS = 0, 158 EDMA_CFG_OFS = 0,
159 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
160 EDMA_CFG_NCQ = (1 << 5),
161 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
162 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
163 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
135 164
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 165 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc, 166 EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -161,33 +190,84 @@ enum {
161 EDMA_ERR_LNK_DATA_TX | 190 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO), 191 EDMA_ERR_TRANS_PROTO),
163 192
193 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
194 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
195 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
196
197 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
198 EDMA_REQ_Q_PTR_SHIFT = 5,
199
200 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
201 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
202 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
203 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
204 EDMA_RSP_Q_PTR_SHIFT = 3,
205
164 EDMA_CMD_OFS = 0x28, 206 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0), 207 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1), 208 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2), 209 ATA_RST = (1 << 2),
168 210
169 /* BDMA is 6xxx part only */ 211 /* Host private flags (hp_flags) */
170 BDMA_CMD_OFS = 0x224, 212 MV_HP_FLAG_MSI = (1 << 0),
171 BDMA_START = (1 << 0),
172 213
173 MV_UNDEF = 0, 214 /* Port private flags (pp_flags) */
215 MV_PP_FLAG_EDMA_EN = (1 << 0),
216 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
174}; 217};
175 218
176struct mv_port_priv { 219/* Command ReQuest Block: 32B */
220struct mv_crqb {
221 u32 sg_addr;
222 u32 sg_addr_hi;
223 u16 ctrl_flags;
224 u16 ata_cmd[11];
225};
177 226
227/* Command ResPonse Block: 8B */
228struct mv_crpb {
229 u16 id;
230 u16 flags;
231 u32 tmstmp;
178}; 232};
179 233
180struct mv_host_priv { 234/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
235struct mv_sg {
236 u32 addr;
237 u32 flags_size;
238 u32 addr_hi;
239 u32 reserved;
240};
181 241
242struct mv_port_priv {
243 struct mv_crqb *crqb;
244 dma_addr_t crqb_dma;
245 struct mv_crpb *crpb;
246 dma_addr_t crpb_dma;
247 struct mv_sg *sg_tbl;
248 dma_addr_t sg_tbl_dma;
249
250 unsigned req_producer; /* cp of req_in_ptr */
251 unsigned rsp_consumer; /* cp of rsp_out_ptr */
252 u32 pp_flags;
253};
254
255struct mv_host_priv {
256 u32 hp_flags;
182}; 257};
183 258
184static void mv_irq_clear(struct ata_port *ap); 259static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 260static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 261static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
187static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap);
265static void mv_port_stop(struct ata_port *ap);
266static void mv_qc_prep(struct ata_queued_cmd *qc);
267static int mv_qc_issue(struct ata_queued_cmd *qc);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance, 268static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs); 269 struct pt_regs *regs);
270static void mv_eng_timeout(struct ata_port *ap);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 271static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192 272
193static Scsi_Host_Template mv_sht = { 273static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 .ioctl = ata_scsi_ioctl, 276 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd, 277 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error, 278 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE, 279 .can_queue = MV_USE_Q_DEPTH,
200 .this_id = ATA_SHT_THIS_ID, 280 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF, 281 .sg_tablesize = MV_MAX_SG_CT,
202 .max_sectors = ATA_MAX_SECTORS, 282 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 283 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED, 284 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF, 285 .use_clustering = ATA_SHT_USE_CLUSTERING,
206 .proc_name = DRV_NAME, 286 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY, 287 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config, 288 .slave_configure = ata_scsi_slave_config,
@@ -210,7 +290,7 @@ static Scsi_Host_Template mv_sht = {
210 .ordered_flush = 1, 290 .ordered_flush = 1,
211}; 291};
212 292
213static struct ata_port_operations mv_ops = { 293static const struct ata_port_operations mv_ops = {
214 .port_disable = ata_port_disable, 294 .port_disable = ata_port_disable,
215 295
216 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
@@ -221,10 +301,10 @@ static struct ata_port_operations mv_ops = {
221 301
222 .phy_reset = mv_phy_reset, 302 .phy_reset = mv_phy_reset,
223 303
224 .qc_prep = ata_qc_prep, 304 .qc_prep = mv_qc_prep,
225 .qc_issue = ata_qc_issue_prot, 305 .qc_issue = mv_qc_issue,
226 306
227 .eng_timeout = ata_eng_timeout, 307 .eng_timeout = mv_eng_timeout,
228 308
229 .irq_handler = mv_interrupt, 309 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear, 310 .irq_clear = mv_irq_clear,
@@ -232,46 +312,39 @@ static struct ata_port_operations mv_ops = {
232 .scr_read = mv_scr_read, 312 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write, 313 .scr_write = mv_scr_write,
234 314
235 .port_start = ata_port_start, 315 .port_start = mv_port_start,
236 .port_stop = ata_port_stop, 316 .port_stop = mv_port_stop,
237 .host_stop = ata_host_stop, 317 .host_stop = mv_host_stop,
238}; 318};
239 319
240static struct ata_port_info mv_port_info[] = { 320static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */ 321 { /* chip_504x */
242 .sht = &mv_sht, 322 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 323 .host_flags = MV_COMMON_FLAGS,
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 324 .pio_mask = 0x1f, /* pio0-4 */
245 .pio_mask = 0x1f, /* pio4-0 */ 325 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops, 326 .port_ops = &mv_ops,
248 }, 327 },
249 { /* chip_508x */ 328 { /* chip_508x */
250 .sht = &mv_sht, 329 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 330 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 331 .pio_mask = 0x1f, /* pio0-4 */
253 MV_FLAG_DUAL_HC), 332 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops, 333 .port_ops = &mv_ops,
257 }, 334 },
258 { /* chip_604x */ 335 { /* chip_604x */
259 .sht = &mv_sht, 336 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 337 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 338 .pio_mask = 0x1f, /* pio0-4 */
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), 339 .udma_mask = 0x7f, /* udma0-6 */
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops, 340 .port_ops = &mv_ops,
266 }, 341 },
267 { /* chip_608x */ 342 { /* chip_608x */
268 .sht = &mv_sht, 343 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 344 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 345 MV_FLAG_DUAL_HC),
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | 346 .pio_mask = 0x1f, /* pio0-4 */
272 MV_FLAG_BDMA), 347 .udma_mask = 0x7f, /* udma0-6 */
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops, 348 .port_ops = &mv_ops,
276 }, 349 },
277}; 350};
@@ -306,12 +379,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 (void) readl(addr); /* flush to avoid PCI posted write */ 379 (void) readl(addr); /* flush to avoid PCI posted write */
307} 380}
308 381
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 382static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{ 383{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 384 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +396,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 396 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330} 397}
331 398
332static inline int mv_get_hc_count(unsigned long flags) 399static inline int mv_get_hc_count(unsigned long hp_flags)
333{ 400{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); 401 return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335} 402}
336 403
337static inline int mv_is_edma_active(struct ata_port *ap) 404static void mv_irq_clear(struct ata_port *ap)
405{
406}
407
408/**
409 * mv_start_dma - Enable eDMA engine
410 * @base: port base address
411 * @pp: port private data
412 *
413 * Verify the local cache of the eDMA state is accurate with an
414 * assert.
415 *
416 * LOCKING:
417 * Inherited from caller.
418 */
419static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
420{
421 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
422 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
423 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
424 }
425 assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
426}
427
428/**
429 * mv_stop_dma - Disable eDMA engine
430 * @ap: ATA channel to manipulate
431 *
432 * Verify the local cache of the eDMA state is accurate with an
433 * assert.
434 *
435 * LOCKING:
436 * Inherited from caller.
437 */
438static void mv_stop_dma(struct ata_port *ap)
338{ 439{
339 void __iomem *port_mmio = mv_ap_base(ap); 440 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 441 struct mv_port_priv *pp = ap->private_data;
442 u32 reg;
443 int i;
444
445 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
446 /* Disable EDMA if active. The disable bit auto clears.
447 */
448 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
449 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
450 } else {
451 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
452 }
453
454 /* now properly wait for the eDMA to stop */
455 for (i = 1000; i > 0; i--) {
456 reg = readl(port_mmio + EDMA_CMD_OFS);
457 if (!(EDMA_EN & reg)) {
458 break;
459 }
460 udelay(100);
461 }
462
463 if (EDMA_EN & reg) {
464 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
465 /* FIXME: Consider doing a reset here to recover */
466 }
341} 467}
342 468
343static inline int mv_port_bdma_capable(struct ata_port *ap) 469#ifdef ATA_DEBUG
470static void mv_dump_mem(void __iomem *start, unsigned bytes)
344{ 471{
345 return (ap->flags & MV_FLAG_BDMA); 472 int b, w;
473 for (b = 0; b < bytes; ) {
474 DPRINTK("%p: ", start + b);
475 for (w = 0; b < bytes && w < 4; w++) {
476 printk("%08x ",readl(start + b));
477 b += sizeof(u32);
478 }
479 printk("\n");
480 }
346} 481}
482#endif
347 483
348static void mv_irq_clear(struct ata_port *ap) 484static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
485{
486#ifdef ATA_DEBUG
487 int b, w;
488 u32 dw;
489 for (b = 0; b < bytes; ) {
490 DPRINTK("%02x: ", b);
491 for (w = 0; b < bytes && w < 4; w++) {
492 (void) pci_read_config_dword(pdev,b,&dw);
493 printk("%08x ",dw);
494 b += sizeof(u32);
495 }
496 printk("\n");
497 }
498#endif
499}
500static void mv_dump_all_regs(void __iomem *mmio_base, int port,
501 struct pci_dev *pdev)
349{ 502{
503#ifdef ATA_DEBUG
504 void __iomem *hc_base = mv_hc_base(mmio_base,
505 port >> MV_PORT_HC_SHIFT);
506 void __iomem *port_base;
507 int start_port, num_ports, p, start_hc, num_hcs, hc;
508
509 if (0 > port) {
510 start_hc = start_port = 0;
511 num_ports = 8; /* shld be benign for 4 port devs */
512 num_hcs = 2;
513 } else {
514 start_hc = port >> MV_PORT_HC_SHIFT;
515 start_port = port;
516 num_ports = num_hcs = 1;
517 }
518 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
519 num_ports > 1 ? num_ports - 1 : start_port);
520
521 if (NULL != pdev) {
522 DPRINTK("PCI config space regs:\n");
523 mv_dump_pci_cfg(pdev, 0x68);
524 }
525 DPRINTK("PCI regs:\n");
526 mv_dump_mem(mmio_base+0xc00, 0x3c);
527 mv_dump_mem(mmio_base+0xd00, 0x34);
528 mv_dump_mem(mmio_base+0xf00, 0x4);
529 mv_dump_mem(mmio_base+0x1d00, 0x6c);
530 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
531 hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
532 DPRINTK("HC regs (HC %i):\n", hc);
533 mv_dump_mem(hc_base, 0x1c);
534 }
535 for (p = start_port; p < start_port + num_ports; p++) {
536 port_base = mv_port_base(mmio_base, p);
537 DPRINTK("EDMA regs (port %i):\n",p);
538 mv_dump_mem(port_base, 0x54);
539 DPRINTK("SATA regs (port %i):\n",p);
540 mv_dump_mem(port_base+0x300, 0x60);
541 }
542#endif
350} 543}
351 544
352static unsigned int mv_scr_offset(unsigned int sc_reg_in) 545static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +582,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 } 582 }
390} 583}
391 584
392static int mv_master_reset(void __iomem *mmio_base) 585/**
586 * mv_global_soft_reset - Perform the 6xxx global soft reset
587 * @mmio_base: base address of the HBA
588 *
589 * This routine only applies to 6xxx parts.
590 *
591 * LOCKING:
592 * Inherited from caller.
593 */
594static int mv_global_soft_reset(void __iomem *mmio_base)
393{ 595{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; 596 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0; 597 int i, rc = 0;
396 u32 t; 598 u32 t;
397 599
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status 600 /* Following procedure defined in PCI "main command and status
401 * register" table. 601 * register" table.
402 */ 602 */
403 t = readl(reg); 603 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg); 604 writel(t | STOP_PCI_MASTER, reg);
405 605
406 for (i = 0; i < 100; i++) { 606 for (i = 0; i < 1000; i++) {
407 msleep(10); 607 udelay(1);
408 t = readl(reg); 608 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) { 609 if (PCI_MASTER_EMPTY & t) {
410 break; 610 break;
411 } 611 }
412 } 612 }
413 if (!(PCI_MASTER_EMPTY & t)) { 613 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); 614 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */ 615 rc = 1;
416 goto done; 616 goto done;
417 } 617 }
418 618
@@ -425,39 +625,399 @@ static int mv_master_reset(void __iomem *mmio_base)
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 625 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426 626
427 if (!(GLOB_SFT_RST & t)) { 627 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n"); 628 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */ 629 rc = 1;
430 goto done; 630 goto done;
431 } 631 }
432 632
433 /* clear reset */ 633 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
434 i = 5; 634 i = 5;
435 do { 635 do {
436 writel(t & ~GLOB_SFT_RST, reg); 636 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
437 t = readl(reg); 637 t = readl(reg);
438 udelay(1); 638 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 639 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440 640
441 if (GLOB_SFT_RST & t) { 641 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n"); 642 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */ 643 rc = 1;
444 } 644 }
445 645done:
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc; 646 return rc;
449} 647}
450 648
451static void mv_err_intr(struct ata_port *ap) 649/**
650 * mv_host_stop - Host specific cleanup/stop routine.
651 * @host_set: host data structure
652 *
653 * Disable ints, cleanup host memory, call general purpose
654 * host_stop.
655 *
656 * LOCKING:
657 * Inherited from caller.
658 */
659static void mv_host_stop(struct ata_host_set *host_set)
452{ 660{
453 void __iomem *port_mmio; 661 struct mv_host_priv *hpriv = host_set->private_data;
454 u32 edma_err_cause, serr = 0; 662 struct pci_dev *pdev = to_pci_dev(host_set->dev);
663
664 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
665 pci_disable_msi(pdev);
666 } else {
667 pci_intx(pdev, 0);
668 }
669 kfree(hpriv);
670 ata_host_stop(host_set);
671}
672
673/**
674 * mv_port_start - Port specific init/start routine.
675 * @ap: ATA channel to manipulate
676 *
677 * Allocate and point to DMA memory, init port private memory,
678 * zero indices.
679 *
680 * LOCKING:
681 * Inherited from caller.
682 */
683static int mv_port_start(struct ata_port *ap)
684{
685 struct device *dev = ap->host_set->dev;
686 struct mv_port_priv *pp;
687 void __iomem *port_mmio = mv_ap_base(ap);
688 void *mem;
689 dma_addr_t mem_dma;
690
691 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
692 if (!pp) {
693 return -ENOMEM;
694 }
695 memset(pp, 0, sizeof(*pp));
696
697 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
698 GFP_KERNEL);
699 if (!mem) {
700 kfree(pp);
701 return -ENOMEM;
702 }
703 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
704
705 /* First item in chunk of DMA memory:
706 * 32-slot command request table (CRQB), 32 bytes each in size
707 */
708 pp->crqb = mem;
709 pp->crqb_dma = mem_dma;
710 mem += MV_CRQB_Q_SZ;
711 mem_dma += MV_CRQB_Q_SZ;
712
713 /* Second item:
714 * 32-slot command response table (CRPB), 8 bytes each in size
715 */
716 pp->crpb = mem;
717 pp->crpb_dma = mem_dma;
718 mem += MV_CRPB_Q_SZ;
719 mem_dma += MV_CRPB_Q_SZ;
720
721 /* Third item:
722 * Table of scatter-gather descriptors (ePRD), 16 bytes each
723 */
724 pp->sg_tbl = mem;
725 pp->sg_tbl_dma = mem_dma;
726
727 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
728 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
455 729
456 /* bug here b/c we got an err int on a port we don't know about, 730 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
457 * so there's no way to clear it 731 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
732 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
733
734 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
735 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
736
737 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
738 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
739 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
740
741 pp->req_producer = pp->rsp_consumer = 0;
742
743 /* Don't turn on EDMA here...do it before DMA commands only. Else
744 * we'll be unable to send non-data, PIO, etc due to restricted access
745 * to shadow regs.
458 */ 746 */
459 BUG_ON(NULL == ap); 747 ap->private_data = pp;
460 port_mmio = mv_ap_base(ap); 748 return 0;
749}
750
751/**
752 * mv_port_stop - Port specific cleanup/stop routine.
753 * @ap: ATA channel to manipulate
754 *
755 * Stop DMA, cleanup port memory.
756 *
757 * LOCKING:
758 * This routine uses the host_set lock to protect the DMA stop.
759 */
760static void mv_port_stop(struct ata_port *ap)
761{
762 struct device *dev = ap->host_set->dev;
763 struct mv_port_priv *pp = ap->private_data;
764 unsigned long flags;
765
766 spin_lock_irqsave(&ap->host_set->lock, flags);
767 mv_stop_dma(ap);
768 spin_unlock_irqrestore(&ap->host_set->lock, flags);
769
770 ap->private_data = NULL;
771 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
772 kfree(pp);
773}
774
775/**
776 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
777 * @qc: queued command whose SG list to source from
778 *
779 * Populate the SG list and mark the last entry.
780 *
781 * LOCKING:
782 * Inherited from caller.
783 */
784static void mv_fill_sg(struct ata_queued_cmd *qc)
785{
786 struct mv_port_priv *pp = qc->ap->private_data;
787 unsigned int i;
788
789 for (i = 0; i < qc->n_elem; i++) {
790 u32 sg_len;
791 dma_addr_t addr;
792
793 addr = sg_dma_address(&qc->sg[i]);
794 sg_len = sg_dma_len(&qc->sg[i]);
795
796 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
797 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
798 assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
799 pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
800 }
801 if (0 < qc->n_elem) {
802 pp->sg_tbl[qc->n_elem - 1].flags_size |=
803 cpu_to_le32(EPRD_FLAG_END_OF_TBL);
804 }
805}
806
807static inline unsigned mv_inc_q_index(unsigned *index)
808{
809 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
810 return *index;
811}
812
813static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
814{
815 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
816 (last ? CRQB_CMD_LAST : 0);
817}
818
819/**
820 * mv_qc_prep - Host specific command preparation.
821 * @qc: queued command to prepare
822 *
823 * This routine simply redirects to the general purpose routine
824 * if command is not DMA. Else, it handles prep of the CRQB
825 * (command request block), does some sanity checking, and calls
826 * the SG load routine.
827 *
828 * LOCKING:
829 * Inherited from caller.
830 */
831static void mv_qc_prep(struct ata_queued_cmd *qc)
832{
833 struct ata_port *ap = qc->ap;
834 struct mv_port_priv *pp = ap->private_data;
835 u16 *cw;
836 struct ata_taskfile *tf;
837 u16 flags = 0;
838
839 if (ATA_PROT_DMA != qc->tf.protocol) {
840 return;
841 }
842
843 /* the req producer index should be the same as we remember it */
844 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
845 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
846 pp->req_producer);
847
848 /* Fill in command request block
849 */
850 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
851 flags |= CRQB_FLAG_READ;
852 }
853 assert(MV_MAX_Q_DEPTH > qc->tag);
854 flags |= qc->tag << CRQB_TAG_SHIFT;
855
856 pp->crqb[pp->req_producer].sg_addr =
857 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
858 pp->crqb[pp->req_producer].sg_addr_hi =
859 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
860 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
861
862 cw = &pp->crqb[pp->req_producer].ata_cmd[0];
863 tf = &qc->tf;
864
865 /* Sadly, the CRQB cannot accomodate all registers--there are
866 * only 11 bytes...so we must pick and choose required
867 * registers based on the command. So, we drop feature and
868 * hob_feature for [RW] DMA commands, but they are needed for
869 * NCQ. NCQ will drop hob_nsect.
870 */
871 switch (tf->command) {
872 case ATA_CMD_READ:
873 case ATA_CMD_READ_EXT:
874 case ATA_CMD_WRITE:
875 case ATA_CMD_WRITE_EXT:
876 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
877 break;
878#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
879 case ATA_CMD_FPDMA_READ:
880 case ATA_CMD_FPDMA_WRITE:
881 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
882 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
883 break;
884#endif /* FIXME: remove this line when NCQ added */
885 default:
886 /* The only other commands EDMA supports in non-queued and
887 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
888 * of which are defined/used by Linux. If we get here, this
889 * driver needs work.
890 *
891 * FIXME: modify libata to give qc_prep a return value and
892 * return error here.
893 */
894 BUG_ON(tf->command);
895 break;
896 }
897 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
898 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
899 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
900 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
901 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
902 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
903 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
904 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
905 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
906
907 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
908 return;
909 }
910 mv_fill_sg(qc);
911}
912
913/**
914 * mv_qc_issue - Initiate a command to the host
915 * @qc: queued command to start
916 *
917 * This routine simply redirects to the general purpose routine
918 * if command is not DMA. Else, it sanity checks our local
919 * caches of the request producer/consumer indices then enables
920 * DMA and bumps the request producer index.
921 *
922 * LOCKING:
923 * Inherited from caller.
924 */
925static int mv_qc_issue(struct ata_queued_cmd *qc)
926{
927 void __iomem *port_mmio = mv_ap_base(qc->ap);
928 struct mv_port_priv *pp = qc->ap->private_data;
929 u32 in_ptr;
930
931 if (ATA_PROT_DMA != qc->tf.protocol) {
932 /* We're about to send a non-EDMA capable command to the
933 * port. Turn off EDMA so there won't be problems accessing
934 * shadow block, etc registers.
935 */
936 mv_stop_dma(qc->ap);
937 return ata_qc_issue_prot(qc);
938 }
939
940 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
941
942 /* the req producer index should be the same as we remember it */
943 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
944 pp->req_producer);
945 /* until we do queuing, the queue should be empty at this point */
946 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
947 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
948 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
949
950 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
951
952 mv_start_dma(port_mmio, pp);
953
954 /* and write the request in pointer to kick the EDMA to life */
955 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
956 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
957 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
958
959 return 0;
960}
961
962/**
963 * mv_get_crpb_status - get status from most recently completed cmd
964 * @ap: ATA channel to manipulate
965 *
966 * This routine is for use when the port is in DMA mode, when it
967 * will be using the CRPB (command response block) method of
968 * returning command completion information. We assert indices
969 * are good, grab status, and bump the response consumer index to
970 * prove that we're up to date.
971 *
972 * LOCKING:
973 * Inherited from caller.
974 */
975static u8 mv_get_crpb_status(struct ata_port *ap)
976{
977 void __iomem *port_mmio = mv_ap_base(ap);
978 struct mv_port_priv *pp = ap->private_data;
979 u32 out_ptr;
980
981 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
982
983 /* the response consumer index should be the same as we remember it */
984 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
985 pp->rsp_consumer);
986
987 /* increment our consumer index... */
988 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
989
990 /* and, until we do NCQ, there should only be 1 CRPB waiting */
991 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
992 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
993 pp->rsp_consumer);
994
995 /* write out our inc'd consumer index so EDMA knows we're caught up */
996 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
997 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
998 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
999
1000 /* Return ATA status register for completed CRPB */
1001 return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
1002}
1003
1004/**
1005 * mv_err_intr - Handle error interrupts on the port
1006 * @ap: ATA channel to manipulate
1007 *
1008 * In most cases, just clear the interrupt and move on. However,
1009 * some cases require an eDMA reset, which is done right before
1010 * the COMRESET in mv_phy_reset(). The SERR case requires a
1011 * clear of pending errors in the SATA SERROR register. Finally,
1012 * if the port disabled DMA, update our cached copy to match.
1013 *
1014 * LOCKING:
1015 * Inherited from caller.
1016 */
1017static void mv_err_intr(struct ata_port *ap)
1018{
1019 void __iomem *port_mmio = mv_ap_base(ap);
1020 u32 edma_err_cause, serr = 0;
461 1021
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1022 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463 1023
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap)
465 serr = scr_read(ap, SCR_ERROR); 1025 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr); 1026 scr_write_flush(ap, SCR_ERROR, serr);
467 } 1027 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", 1028 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
469 ap->port_no, edma_err_cause, serr); 1029 struct mv_port_priv *pp = ap->private_data;
1030 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1031 }
1032 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1033 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
470 1034
471 /* Clear EDMA now that SERR cleanup done */ 1035 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1036 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap)
477 } 1041 }
478} 1042}
479 1043
480/* Handle any outstanding interrupts in a single SATAHC 1044/**
1045 * mv_host_intr - Handle all interrupts on the given host controller
1046 * @host_set: host specific structure
1047 * @relevant: port error bits relevant to this host controller
1048 * @hc: which host controller we're to look at
1049 *
1050 * Read then write clear the HC interrupt status then walk each
1051 * port connected to the HC and see if it needs servicing. Port
1052 * success ints are reported in the HC interrupt status reg, the
1053 * port error ints are reported in the higher level main
1054 * interrupt status register and thus are passed in via the
1055 * 'relevant' argument.
1056 *
1057 * LOCKING:
1058 * Inherited from caller.
481 */ 1059 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1060static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc) 1061 unsigned int hc)
@@ -487,8 +1065,9 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 struct ata_port *ap; 1065 struct ata_port *ap;
488 struct ata_queued_cmd *qc; 1066 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause; 1067 u32 hc_irq_cause;
490 int shift, port, port0, hard_port; 1068 int shift, port, port0, hard_port, handled;
491 u8 ata_status; 1069 unsigned int err_mask;
1070 u8 ata_status = 0;
492 1071
493 if (hc == 0) { 1072 if (hc == 0) {
494 port0 = 0; 1073 port0 = 0;
@@ -499,7 +1078,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 /* we'll need the HC success int register in most cases */ 1078 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 1079 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) { 1080 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 1081 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
503 } 1082 }
504 1083
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1084 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,54 +1087,70 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1087 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port]; 1088 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1089 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU; 1090 handled = 0; /* ensure ata_status is set if handled++ */
512 1091
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { 1092 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap); 1093 /* new CRPB on the queue; just one at a time until NCQ
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */ 1094 */
516 /* This is needed to clear the ATA INTRQ. 1095 ata_status = mv_get_crpb_status(ap);
517 * FIXME: don't read the status reg in EDMA mode! 1096 handled++;
1097 } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1098 /* received ATA IRQ; read the status reg to clear INTRQ
518 */ 1099 */
519 ata_status = readb((void __iomem *) 1100 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr); 1101 ap->ioaddr.status_addr);
1102 handled++;
521 } 1103 }
522 1104
523 shift = port * 2; 1105 err_mask = ac_err_mask(ata_status);
1106
1107 shift = port << 1; /* (port * 2) */
524 if (port >= MV_PORTS_PER_HC) { 1108 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1109 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 } 1110 }
527 if ((PORT0_ERR << shift) & relevant) { 1111 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap); 1112 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */ 1113 err_mask |= AC_ERR_OTHER;
530 ata_status = readb((void __iomem *) 1114 handled++;
531 ap->ioaddr.status_addr) | ATA_ERR;
532 } 1115 }
533 1116
534 if (ap) { 1117 if (handled && ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag); 1118 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) { 1119 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, " 1120 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status); 1121 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */ 1122 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status); 1123 ata_qc_complete(qc, err_mask);
542 } 1124 }
543 } 1125 }
544 } 1126 }
545 VPRINTK("EXIT\n"); 1127 VPRINTK("EXIT\n");
546} 1128}
547 1129
1130/**
1131 * mv_interrupt -
1132 * @irq: unused
1133 * @dev_instance: private data; in this case the host structure
1134 * @regs: unused
1135 *
1136 * Read the read only register to determine if any host
1137 * controllers have pending interrupts. If so, call lower level
1138 * routine to handle. Also check for PCI errors which are only
1139 * reported here.
1140 *
1141 * LOCKING:
1142 * This routine holds the host_set lock while processing pending
1143 * interrupts.
1144 */
548static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1145static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs) 1146 struct pt_regs *regs)
550{ 1147{
551 struct ata_host_set *host_set = dev_instance; 1148 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs; 1149 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio; 1150 void __iomem *mmio = host_set->mmio_base;
554 u32 irq_stat; 1151 u32 irq_stat;
555 1152
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1153 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559 1154
560 /* check the cases where we either have nothing pending or have read 1155 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault 1156 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1159,89 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 return IRQ_NONE; 1159 return IRQ_NONE;
565 } 1160 }
566 1161
1162 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
567 spin_lock(&host_set->lock); 1163 spin_lock(&host_set->lock);
568 1164
569 for (hc = 0; hc < n_hcs; hc++) { 1165 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1166 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) { 1167 if (relevant) {
572 mv_host_intr(host_set, relevant, hc); 1168 mv_host_intr(host_set, relevant, hc);
573 handled = 1; 1169 handled++;
574 } 1170 }
575 } 1171 }
576 if (PCI_ERR & irq_stat) { 1172 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need 1173 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
578 * to recover from them properly. 1174 readl(mmio + PCI_IRQ_CAUSE_OFS));
579 */
580 }
581 1175
1176 DPRINTK("All regs @ PCI error\n");
1177 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1178
1179 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1180 handled++;
1181 }
582 spin_unlock(&host_set->lock); 1182 spin_unlock(&host_set->lock);
583 1183
584 return IRQ_RETVAL(handled); 1184 return IRQ_RETVAL(handled);
585} 1185}
586 1186
1187/**
1188 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1189 * @ap: ATA channel to manipulate
1190 *
1191 * Part of this is taken from __sata_phy_reset and modified to
1192 * not sleep since this routine gets called from interrupt level.
1193 *
1194 * LOCKING:
1195 * Inherited from caller. This is coded to safe to call at
1196 * interrupt level, i.e. it does not sleep.
1197 */
587static void mv_phy_reset(struct ata_port *ap) 1198static void mv_phy_reset(struct ata_port *ap)
588{ 1199{
589 void __iomem *port_mmio = mv_ap_base(ap); 1200 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf; 1201 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0]; 1202 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma; 1203 unsigned long timeout;
593 1204
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 1205 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595 1206
596 edma = readl(port_mmio + EDMA_CMD_OFS); 1207 mv_stop_dma(ap);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607 1208
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); 1209 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */ 1210 udelay(25); /* allow reset propagation */
610 1211
611 /* Spec never mentions clearing the bit. Marvell's driver does 1212 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however. 1213 * clear the bit, however.
613 */ 1214 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); 1215 writelfl(0, port_mmio + EDMA_CMD_OFS);
615 1216
616 VPRINTK("Done. Now calling __sata_phy_reset()\n"); 1217 VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1218 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1219 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
617 1220
618 /* proceed to init communications via the scr_control reg */ 1221 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap); 1222 scr_write_flush(ap, SCR_CONTROL, 0x301);
1223 mdelay(1);
1224 scr_write_flush(ap, SCR_CONTROL, 0x300);
1225 timeout = jiffies + (HZ * 1);
1226 do {
1227 mdelay(10);
1228 if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
1229 break;
1230 } while (time_before(jiffies, timeout));
1231
1232 VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1233 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1234 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
620 1235
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1236 if (sata_dev_present(ap)) {
622 VPRINTK("Port disabled pre-sig. Exiting.\n"); 1237 ata_port_probe(ap);
1238 } else {
1239 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1240 ap->id, scr_read(ap, SCR_STATUS));
1241 ata_port_disable(ap);
623 return; 1242 return;
624 } 1243 }
1244 ap->cbl = ATA_CBL_SATA;
625 1245
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1246 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1247 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
@@ -636,37 +1256,118 @@ static void mv_phy_reset(struct ata_port *ap)
636 VPRINTK("EXIT\n"); 1256 VPRINTK("EXIT\n");
637} 1257}
638 1258
639static void mv_port_init(struct ata_ioports *port, unsigned long base) 1259/**
1260 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
1261 * @ap: ATA channel to manipulate
1262 *
1263 * Intent is to clear all pending error conditions, reset the
1264 * chip/bus, fail the command, and move on.
1265 *
1266 * LOCKING:
1267 * This routine holds the host_set lock while failing the command.
1268 */
1269static void mv_eng_timeout(struct ata_port *ap)
1270{
1271 struct ata_queued_cmd *qc;
1272 unsigned long flags;
1273
1274 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1275 DPRINTK("All regs @ start of eng_timeout\n");
1276 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
1277 to_pci_dev(ap->host_set->dev));
1278
1279 qc = ata_qc_from_tag(ap, ap->active_tag);
1280 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
1281 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
1282 &qc->scsicmd->cmnd);
1283
1284 mv_err_intr(ap);
1285 mv_phy_reset(ap);
1286
1287 if (!qc) {
1288 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
1289 ap->id);
1290 } else {
1291 /* hack alert! We cannot use the supplied completion
1292 * function from inside the ->eh_strategy_handler() thread.
1293 * libata is the only user of ->eh_strategy_handler() in
1294 * any kernel, so the default scsi_done() assumes it is
1295 * not being called from the SCSI EH.
1296 */
1297 spin_lock_irqsave(&ap->host_set->lock, flags);
1298 qc->scsidone = scsi_finish_command;
1299 ata_qc_complete(qc, AC_ERR_OTHER);
1300 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1301 }
1302}
1303
1304/**
1305 * mv_port_init - Perform some early initialization on a single port.
1306 * @port: libata data structure storing shadow register addresses
1307 * @port_mmio: base address of the port
1308 *
1309 * Initialize shadow register mmio addresses, clear outstanding
1310 * interrupts on the port, and unmask interrupts for the future
1311 * start of the port.
1312 *
1313 * LOCKING:
1314 * Inherited from caller.
1315 */
1316static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
640{ 1317{
641 /* PIO related setup */ 1318 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
642 port->data_addr = base + SHD_PIO_DATA_OFS; 1319 unsigned serr_ofs;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; 1320
644 port->nsect_addr = base + SHD_SECT_CNT_OFS; 1321 /* PIO related setup
645 port->lbal_addr = base + SHD_LBA_L_OFS; 1322 */
646 port->lbam_addr = base + SHD_LBA_M_OFS; 1323 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
647 port->lbah_addr = base + SHD_LBA_H_OFS; 1324 port->error_addr =
648 port->device_addr = base + SHD_DEV_HD_OFS; 1325 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; 1326 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; 1327 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
651 /* unused */ 1328 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
1329 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
1330 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
1331 port->status_addr =
1332 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
1333 /* special case: control/altstatus doesn't have ATA_REG_ address */
1334 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
1335
1336 /* unused: */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 1337 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653 1338
1339 /* Clear any currently outstanding port interrupt conditions */
1340 serr_ofs = mv_scr_offset(SCR_ERROR);
1341 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
1342 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1343
654 /* unmask all EDMA error interrupts */ 1344 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); 1345 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
656 1346
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 1347 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS), 1348 readl(port_mmio + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), 1349 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); 1350 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
661} 1351}
662 1352
1353/**
1354 * mv_host_init - Perform some early initialization of the host.
1355 * @probe_ent: early data struct representing the host
1356 *
1357 * If possible, do an early global reset of the host. Then do
1358 * our port init and clear/unmask all/relevant host interrupts.
1359 *
1360 * LOCKING:
1361 * Inherited from caller.
1362 */
663static int mv_host_init(struct ata_probe_ent *probe_ent) 1363static int mv_host_init(struct ata_probe_ent *probe_ent)
664{ 1364{
665 int rc = 0, n_hc, port, hc; 1365 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base; 1366 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio; 1367 void __iomem *port_mmio;
668 1368
669 if (mv_master_reset(probe_ent->mmio_base)) { 1369 if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
1370 mv_global_soft_reset(probe_ent->mmio_base)) {
670 rc = 1; 1371 rc = 1;
671 goto done; 1372 goto done;
672 } 1373 }
@@ -676,17 +1377,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 1377
677 for (port = 0; port < probe_ent->n_ports; port++) { 1378 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port); 1379 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); 1380 mv_port_init(&probe_ent->port[port], port_mmio);
680 } 1381 }
681 1382
682 for (hc = 0; hc < n_hc; hc++) { 1383 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, 1384 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), 1385
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); 1386 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
1387 "(before clear)=0x%08x\n", hc,
1388 readl(hc_mmio + HC_CFG_OFS),
1389 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
1390
1391 /* Clear any currently outstanding hc interrupt conditions */
1392 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
686 } 1393 }
687 1394
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); 1395 /* Clear any currently outstanding host interrupt conditions */
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); 1396 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1397
1398 /* and unmask interrupt generation for host regs */
1399 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
1400 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
690 1401
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 1402 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n", 1403 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1405,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS), 1405 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS), 1406 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS)); 1407 readl(mmio + PCI_IRQ_MASK_OFS));
697 1408done:
698 done:
699 return rc; 1409 return rc;
700} 1410}
701 1411
1412/**
1413 * mv_print_info - Dump key info to kernel log for perusal.
1414 * @probe_ent: early data struct representing the host
1415 *
1416 * FIXME: complete this.
1417 *
1418 * LOCKING:
1419 * Inherited from caller.
1420 */
1421static void mv_print_info(struct ata_probe_ent *probe_ent)
1422{
1423 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1424 struct mv_host_priv *hpriv = probe_ent->private_data;
1425 u8 rev_id, scc;
1426 const char *scc_s;
1427
1428 /* Use this to determine the HW stepping of the chip so we know
1429 * what errata to workaround
1430 */
1431 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1432
1433 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
1434 if (scc == 0)
1435 scc_s = "SCSI";
1436 else if (scc == 0x01)
1437 scc_s = "RAID";
1438 else
1439 scc_s = "unknown";
1440
1441 dev_printk(KERN_INFO, &pdev->dev,
1442 "%u slots %u ports %s mode IRQ via %s\n",
1443 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1444 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1445}
1446
1447/**
1448 * mv_init_one - handle a positive probe of a Marvell host
1449 * @pdev: PCI device found
1450 * @ent: PCI device ID entry for the matched host
1451 *
1452 * LOCKING:
1453 * Inherited from caller.
1454 */
702static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1455static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
703{ 1456{
704 static int printed_version = 0; 1457 static int printed_version = 0;
@@ -706,15 +1459,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 struct mv_host_priv *hpriv; 1459 struct mv_host_priv *hpriv;
707 unsigned int board_idx = (unsigned int)ent->driver_data; 1460 unsigned int board_idx = (unsigned int)ent->driver_data;
708 void __iomem *mmio_base; 1461 void __iomem *mmio_base;
709 int pci_dev_busy = 0; 1462 int pci_dev_busy = 0, rc;
710 int rc;
711 1463
712 if (!printed_version++) { 1464 if (!printed_version++)
713 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1465 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
714 }
715
716 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
717 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
718 1466
719 rc = pci_enable_device(pdev); 1467 rc = pci_enable_device(pdev);
720 if (rc) { 1468 if (rc) {
@@ -727,8 +1475,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 goto err_out; 1475 goto err_out;
728 } 1476 }
729 1477
730 pci_intx(pdev, 1);
731
732 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1478 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
733 if (probe_ent == NULL) { 1479 if (probe_ent == NULL) {
734 rc = -ENOMEM; 1480 rc = -ENOMEM;
@@ -739,8 +1485,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 probe_ent->dev = pci_dev_to_dev(pdev); 1485 probe_ent->dev = pci_dev_to_dev(pdev);
740 INIT_LIST_HEAD(&probe_ent->node); 1486 INIT_LIST_HEAD(&probe_ent->node);
741 1487
742 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), 1488 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
743 pci_resource_len(pdev, MV_PRIMARY_BAR));
744 if (mmio_base == NULL) { 1489 if (mmio_base == NULL) {
745 rc = -ENOMEM; 1490 rc = -ENOMEM;
746 goto err_out_free_ent; 1491 goto err_out_free_ent;
@@ -769,37 +1514,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 if (rc) { 1514 if (rc) {
770 goto err_out_hpriv; 1515 goto err_out_hpriv;
771 } 1516 }
772/* mv_print_info(probe_ent); */
773 1517
774 { 1518 /* Enable interrupts */
775 int b, w; 1519 if (pci_enable_msi(pdev) == 0) {
776 u32 dw[4]; /* hold a line of 16b */ 1520 hpriv->hp_flags |= MV_HP_FLAG_MSI;
777 VPRINTK("PCI config space:\n"); 1521 } else {
778 for (b = 0; b < 0x40; ) { 1522 pci_intx(pdev, 1);
779 for (w = 0; w < 4; w++) {
780 (void) pci_read_config_dword(pdev,b,&dw[w]);
781 b += sizeof(*dw);
782 }
783 VPRINTK("%08x %08x %08x %08x\n",
784 dw[0],dw[1],dw[2],dw[3]);
785 }
786 } 1523 }
787 1524
788 /* FIXME: check ata_device_add return value */ 1525 mv_dump_pci_cfg(pdev, 0x68);
789 ata_device_add(probe_ent); 1526 mv_print_info(probe_ent);
790 kfree(probe_ent); 1527
1528 if (ata_device_add(probe_ent) == 0) {
1529 rc = -ENODEV; /* No devices discovered */
1530 goto err_out_dev_add;
1531 }
791 1532
1533 kfree(probe_ent);
792 return 0; 1534 return 0;
793 1535
794 err_out_hpriv: 1536err_out_dev_add:
1537 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
1538 pci_disable_msi(pdev);
1539 } else {
1540 pci_intx(pdev, 0);
1541 }
1542err_out_hpriv:
795 kfree(hpriv); 1543 kfree(hpriv);
796 err_out_iounmap: 1544err_out_iounmap:
797 iounmap(mmio_base); 1545 pci_iounmap(pdev, mmio_base);
798 err_out_free_ent: 1546err_out_free_ent:
799 kfree(probe_ent); 1547 kfree(probe_ent);
800 err_out_regions: 1548err_out_regions:
801 pci_release_regions(pdev); 1549 pci_release_regions(pdev);
802 err_out: 1550err_out:
803 if (!pci_dev_busy) { 1551 if (!pci_dev_busy) {
804 pci_disable_device(pdev); 1552 pci_disable_device(pdev);
805 } 1553 }
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index cb832b03ec5e..d573888eda76 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -61,6 +61,7 @@
61#include <linux/blkdev.h> 61#include <linux/blkdev.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63#include <linux/interrupt.h> 63#include <linux/interrupt.h>
64#include <linux/device.h>
64#include "scsi.h" 65#include "scsi.h"
65#include <scsi/scsi_host.h> 66#include <scsi/scsi_host.h>
66#include <linux/libata.h> 67#include <linux/libata.h>
@@ -238,7 +239,7 @@ static Scsi_Host_Template nv_sht = {
238 .ordered_flush = 1, 239 .ordered_flush = 1,
239}; 240};
240 241
241static struct ata_port_operations nv_ops = { 242static const struct ata_port_operations nv_ops = {
242 .port_disable = ata_port_disable, 243 .port_disable = ata_port_disable,
243 .tf_load = ata_tf_load, 244 .tf_load = ata_tf_load,
244 .tf_read = ata_tf_read, 245 .tf_read = ata_tf_read,
@@ -331,7 +332,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
331 return 0xffffffffU; 332 return 0xffffffffU;
332 333
333 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) 334 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
334 return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4)); 335 return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
335 else 336 else
336 return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); 337 return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
337} 338}
@@ -345,7 +346,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
345 return; 346 return;
346 347
347 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) 348 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
348 writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4)); 349 writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
349 else 350 else
350 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 351 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
351} 352}
@@ -383,7 +384,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
383 return -ENODEV; 384 return -ENODEV;
384 385
385 if (!printed_version++) 386 if (!printed_version++)
386 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 387 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
387 388
388 rc = pci_enable_device(pdev); 389 rc = pci_enable_device(pdev);
389 if (rc) 390 if (rc)
@@ -405,7 +406,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
405 rc = -ENOMEM; 406 rc = -ENOMEM;
406 407
407 ppi = &nv_port_info; 408 ppi = &nv_port_info;
408 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 409 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
409 if (!probe_ent) 410 if (!probe_ent)
410 goto err_out_regions; 411 goto err_out_regions;
411 412
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 538ad727bd2e..b41c977d6fab 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -87,8 +88,8 @@ static void pdc_port_stop(struct ata_port *ap);
87static void pdc_pata_phy_reset(struct ata_port *ap); 88static void pdc_pata_phy_reset(struct ata_port *ap);
88static void pdc_sata_phy_reset(struct ata_port *ap); 89static void pdc_sata_phy_reset(struct ata_port *ap);
89static void pdc_qc_prep(struct ata_queued_cmd *qc); 90static void pdc_qc_prep(struct ata_queued_cmd *qc);
90static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 91static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
91static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 92static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
92static void pdc_irq_clear(struct ata_port *ap); 93static void pdc_irq_clear(struct ata_port *ap);
93static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 94static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
94 95
@@ -113,7 +114,7 @@ static Scsi_Host_Template pdc_ata_sht = {
113 .ordered_flush = 1, 114 .ordered_flush = 1,
114}; 115};
115 116
116static struct ata_port_operations pdc_sata_ops = { 117static const struct ata_port_operations pdc_sata_ops = {
117 .port_disable = ata_port_disable, 118 .port_disable = ata_port_disable,
118 .tf_load = pdc_tf_load_mmio, 119 .tf_load = pdc_tf_load_mmio,
119 .tf_read = ata_tf_read, 120 .tf_read = ata_tf_read,
@@ -136,7 +137,7 @@ static struct ata_port_operations pdc_sata_ops = {
136 .host_stop = ata_pci_host_stop, 137 .host_stop = ata_pci_host_stop,
137}; 138};
138 139
139static struct ata_port_operations pdc_pata_ops = { 140static const struct ata_port_operations pdc_pata_ops = {
140 .port_disable = ata_port_disable, 141 .port_disable = ata_port_disable,
141 .tf_load = pdc_tf_load_mmio, 142 .tf_load = pdc_tf_load_mmio,
142 .tf_read = ata_tf_read, 143 .tf_read = ata_tf_read,
@@ -195,6 +196,8 @@ static struct ata_port_info pdc_port_info[] = {
195static struct pci_device_id pdc_ata_pci_tbl[] = { 196static struct pci_device_id pdc_ata_pci_tbl[] = {
196 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 197 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
197 board_2037x }, 198 board_2037x },
199 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
200 board_2037x },
198 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 201 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
199 board_2037x }, 202 board_2037x },
200 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 203 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
@@ -207,6 +210,8 @@ static struct pci_device_id pdc_ata_pci_tbl[] = {
207 board_2037x }, 210 board_2037x },
208 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 211 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
209 board_2037x }, 212 board_2037x },
213 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
214 board_2037x },
210 215
211 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 216 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
212 board_20319 }, 217 board_20319 },
@@ -324,7 +329,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
324{ 329{
325 if (sc_reg > SCR_CONTROL) 330 if (sc_reg > SCR_CONTROL)
326 return 0xffffffffU; 331 return 0xffffffffU;
327 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 332 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
328} 333}
329 334
330 335
@@ -333,7 +338,7 @@ static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
333{ 338{
334 if (sc_reg > SCR_CONTROL) 339 if (sc_reg > SCR_CONTROL)
335 return; 340 return;
336 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 341 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
337} 342}
338 343
339static void pdc_qc_prep(struct ata_queued_cmd *qc) 344static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -395,7 +400,8 @@ static void pdc_eng_timeout(struct ata_port *ap)
395 case ATA_PROT_DMA: 400 case ATA_PROT_DMA:
396 case ATA_PROT_NODATA: 401 case ATA_PROT_NODATA:
397 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 402 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
398 ata_qc_complete(qc, ata_wait_idle(ap) | ATA_ERR); 403 drv_stat = ata_wait_idle(ap);
404 ata_qc_complete(qc, __ac_err_mask(drv_stat));
399 break; 405 break;
400 406
401 default: 407 default:
@@ -404,7 +410,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
404 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 410 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
405 ap->id, qc->tf.command, drv_stat); 411 ap->id, qc->tf.command, drv_stat);
406 412
407 ata_qc_complete(qc, drv_stat); 413 ata_qc_complete(qc, ac_err_mask(drv_stat));
408 break; 414 break;
409 } 415 }
410 416
@@ -416,33 +422,30 @@ out:
416static inline unsigned int pdc_host_intr( struct ata_port *ap, 422static inline unsigned int pdc_host_intr( struct ata_port *ap,
417 struct ata_queued_cmd *qc) 423 struct ata_queued_cmd *qc)
418{ 424{
419 u8 status; 425 unsigned int handled = 0, err_mask = 0;
420 unsigned int handled = 0, have_err = 0;
421 u32 tmp; 426 u32 tmp;
422 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; 427 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
423 428
424 tmp = readl(mmio); 429 tmp = readl(mmio);
425 if (tmp & PDC_ERR_MASK) { 430 if (tmp & PDC_ERR_MASK) {
426 have_err = 1; 431 err_mask = AC_ERR_DEV;
427 pdc_reset_port(ap); 432 pdc_reset_port(ap);
428 } 433 }
429 434
430 switch (qc->tf.protocol) { 435 switch (qc->tf.protocol) {
431 case ATA_PROT_DMA: 436 case ATA_PROT_DMA:
432 case ATA_PROT_NODATA: 437 case ATA_PROT_NODATA:
433 status = ata_wait_idle(ap); 438 err_mask |= ac_err_mask(ata_wait_idle(ap));
434 if (have_err) 439 ata_qc_complete(qc, err_mask);
435 status |= ATA_ERR;
436 ata_qc_complete(qc, status);
437 handled = 1; 440 handled = 1;
438 break; 441 break;
439 442
440 default: 443 default:
441 ap->stats.idle_irq++; 444 ap->stats.idle_irq++;
442 break; 445 break;
443 } 446 }
444 447
445 return handled; 448 return handled;
446} 449}
447 450
448static void pdc_irq_clear(struct ata_port *ap) 451static void pdc_irq_clear(struct ata_port *ap)
@@ -523,8 +526,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
523 526
524 pp->pkt[2] = seq; 527 pp->pkt[2] = seq;
525 wmb(); /* flush PRD, pkt writes */ 528 wmb(); /* flush PRD, pkt writes */
526 writel(pp->pkt_dma, (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 529 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
527 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 530 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
528} 531}
529 532
530static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 533static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
@@ -546,7 +549,7 @@ static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
546 return ata_qc_issue_prot(qc); 549 return ata_qc_issue_prot(qc);
547} 550}
548 551
549static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) 552static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
550{ 553{
551 WARN_ON (tf->protocol == ATA_PROT_DMA || 554 WARN_ON (tf->protocol == ATA_PROT_DMA ||
552 tf->protocol == ATA_PROT_NODATA); 555 tf->protocol == ATA_PROT_NODATA);
@@ -554,7 +557,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
554} 557}
555 558
556 559
557static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) 560static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
558{ 561{
559 WARN_ON (tf->protocol == ATA_PROT_DMA || 562 WARN_ON (tf->protocol == ATA_PROT_DMA ||
560 tf->protocol == ATA_PROT_NODATA); 563 tf->protocol == ATA_PROT_NODATA);
@@ -631,7 +634,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
631 int rc; 634 int rc;
632 635
633 if (!printed_version++) 636 if (!printed_version++)
634 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 637 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
635 638
636 /* 639 /*
637 * If this driver happens to only be useful on Apple's K2, then 640 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index ffcdeb68641c..9938dae782b6 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/device.h>
38#include "scsi.h" 39#include "scsi.h"
39#include <scsi/scsi_host.h> 40#include <scsi/scsi_host.h>
40#include <asm/io.h> 41#include <asm/io.h>
@@ -51,8 +52,6 @@ enum {
51 QS_PRD_BYTES = QS_MAX_PRD * 16, 52 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES, 53 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
53 54
54 QS_DMA_BOUNDARY = ~0UL,
55
56 /* global register offsets */ 55 /* global register offsets */
57 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */ 56 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
58 QS_HID_HPHY = 0x0004, /* host physical interface info */ 57 QS_HID_HPHY = 0x0004, /* host physical interface info */
@@ -101,6 +100,10 @@ enum {
101 board_2068_idx = 0, /* QStor 4-port SATA/RAID */ 100 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
102}; 101};
103 102
103enum {
104 QS_DMA_BOUNDARY = ~0UL
105};
106
104typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t; 107typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
105 108
106struct qs_port_priv { 109struct qs_port_priv {
@@ -145,7 +148,7 @@ static Scsi_Host_Template qs_ata_sht = {
145 .bios_param = ata_std_bios_param, 148 .bios_param = ata_std_bios_param,
146}; 149};
147 150
148static struct ata_port_operations qs_ata_ops = { 151static const struct ata_port_operations qs_ata_ops = {
149 .port_disable = ata_port_disable, 152 .port_disable = ata_port_disable,
150 .tf_load = ata_tf_load, 153 .tf_load = ata_tf_load,
151 .tf_read = ata_tf_read, 154 .tf_read = ata_tf_read,
@@ -398,11 +401,12 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
398 qc = ata_qc_from_tag(ap, ap->active_tag); 401 qc = ata_qc_from_tag(ap, ap->active_tag);
399 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 402 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
400 switch (sHST) { 403 switch (sHST) {
401 case 0: /* sucessful CPB */ 404 case 0: /* successful CPB */
402 case 3: /* device error */ 405 case 3: /* device error */
403 pp->state = qs_state_idle; 406 pp->state = qs_state_idle;
404 qs_enter_reg_mode(qc->ap); 407 qs_enter_reg_mode(qc->ap);
405 ata_qc_complete(qc, sDST); 408 ata_qc_complete(qc,
409 ac_err_mask(sDST));
406 break; 410 break;
407 default: 411 default:
408 break; 412 break;
@@ -431,7 +435,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
431 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 435 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
432 436
433 /* check main status, clearing INTRQ */ 437 /* check main status, clearing INTRQ */
434 u8 status = ata_chk_status(ap); 438 u8 status = ata_check_status(ap);
435 if ((status & ATA_BUSY)) 439 if ((status & ATA_BUSY))
436 continue; 440 continue;
437 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 441 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
@@ -439,7 +443,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
439 443
440 /* complete taskfile transaction */ 444 /* complete taskfile transaction */
441 pp->state = qs_state_idle; 445 pp->state = qs_state_idle;
442 ata_qc_complete(qc, status); 446 ata_qc_complete(qc, ac_err_mask(status));
443 handled = 1; 447 handled = 1;
444 } 448 }
445 } 449 }
@@ -597,25 +601,22 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
597 if (rc) { 601 if (rc) {
598 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 602 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
599 if (rc) { 603 if (rc) {
600 printk(KERN_ERR DRV_NAME 604 dev_printk(KERN_ERR, &pdev->dev,
601 "(%s): 64-bit DMA enable failed\n", 605 "64-bit DMA enable failed\n");
602 pci_name(pdev));
603 return rc; 606 return rc;
604 } 607 }
605 } 608 }
606 } else { 609 } else {
607 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 610 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
608 if (rc) { 611 if (rc) {
609 printk(KERN_ERR DRV_NAME 612 dev_printk(KERN_ERR, &pdev->dev,
610 "(%s): 32-bit DMA enable failed\n", 613 "32-bit DMA enable failed\n");
611 pci_name(pdev));
612 return rc; 614 return rc;
613 } 615 }
614 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 616 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
615 if (rc) { 617 if (rc) {
616 printk(KERN_ERR DRV_NAME 618 dev_printk(KERN_ERR, &pdev->dev,
617 "(%s): 32-bit consistent DMA enable failed\n", 619 "32-bit consistent DMA enable failed\n");
618 pci_name(pdev));
619 return rc; 620 return rc;
620 } 621 }
621 } 622 }
@@ -632,7 +633,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
632 int rc, port_no; 633 int rc, port_no;
633 634
634 if (!printed_version++) 635 if (!printed_version++)
635 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 636 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
636 637
637 rc = pci_enable_device(pdev); 638 rc = pci_enable_device(pdev);
638 if (rc) 639 if (rc)
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index ba98a175ee3a..435f7e0085ec 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -41,6 +41,7 @@
41#include <linux/blkdev.h> 41#include <linux/blkdev.h>
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -150,7 +151,7 @@ static Scsi_Host_Template sil_sht = {
150 .ordered_flush = 1, 151 .ordered_flush = 1,
151}; 152};
152 153
153static struct ata_port_operations sil_ops = { 154static const struct ata_port_operations sil_ops = {
154 .port_disable = ata_port_disable, 155 .port_disable = ata_port_disable,
155 .dev_config = sil_dev_config, 156 .dev_config = sil_dev_config,
156 .tf_load = ata_tf_load, 157 .tf_load = ata_tf_load,
@@ -289,7 +290,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re
289 290
290static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) 291static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
291{ 292{
292 void *mmio = (void *) sil_scr_addr(ap, sc_reg); 293 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
293 if (mmio) 294 if (mmio)
294 return readl(mmio); 295 return readl(mmio);
295 return 0xffffffffU; 296 return 0xffffffffU;
@@ -297,7 +298,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
297 298
298static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 299static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
299{ 300{
300 void *mmio = (void *) sil_scr_addr(ap, sc_reg); 301 void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
301 if (mmio) 302 if (mmio)
302 writel(val, mmio); 303 writel(val, mmio);
303} 304}
@@ -386,7 +387,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
386 u8 cls; 387 u8 cls;
387 388
388 if (!printed_version++) 389 if (!printed_version++)
389 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 390 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
390 391
391 /* 392 /*
392 * If this driver happens to only be useful on Apple's K2, then 393 * If this driver happens to only be useful on Apple's K2, then
@@ -463,8 +464,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
463 writeb(cls, mmio_base + SIL_FIFO_W3); 464 writeb(cls, mmio_base + SIL_FIFO_W3);
464 } 465 }
465 } else 466 } else
466 printk(KERN_WARNING DRV_NAME "(%s): cache line size not set. Driver may not function\n", 467 dev_printk(KERN_WARNING, &pdev->dev,
467 pci_name(pdev)); 468 "cache line size not set. Driver may not function\n");
468 469
469 if (ent->driver_data == sil_3114) { 470 if (ent->driver_data == sil_3114) {
470 irq_mask = SIL_MASK_4PORT; 471 irq_mask = SIL_MASK_4PORT;
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
new file mode 100644
index 000000000000..c66548025657
--- /dev/null
+++ b/drivers/scsi/sata_sil24.c
@@ -0,0 +1,871 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * NOTE: No NCQ/ATAPI support yet. The preview driver didn't support
9 * NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make
10 * those work. Enabling those shouldn't be difficult. Basic
11 * structure is all there (in libata-dev tree). If you have any
12 * information about this hardware, please contact me or linux-ide.
13 * Info is needed on...
14 *
15 * - How to issue tagged commands and turn on sactive on issue accordingly.
16 * - Where to put an ATAPI command and how to tell the device to send it.
17 * - How to enable/use 64bit.
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2, or (at your option) any
22 * later version.
23 *
24 * This program is distributed in the hope that it will be useful, but
25 * WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27 * General Public License for more details.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/dma-mapping.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include "scsi.h"
41#include <linux/libata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "sata_sil24"
45#define DRV_VERSION "0.22" /* Silicon Image's preview driver was 0.10 */
46
47/*
48 * Port request block (PRB) 32 bytes
49 */
50struct sil24_prb {
51 u16 ctrl;
52 u16 prot;
53 u32 rx_cnt;
54 u8 fis[6 * 4];
55};
56
57/*
58 * Scatter gather entry (SGE) 16 bytes
59 */
60struct sil24_sge {
61 u64 addr;
62 u32 cnt;
63 u32 flags;
64};
65
66/*
67 * Port multiplier
68 */
69struct sil24_port_multiplier {
70 u32 diag;
71 u32 sactive;
72};
73
74enum {
75 /*
76 * Global controller registers (128 bytes @ BAR0)
77 */
78 /* 32 bit regs */
79 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
80 HOST_CTRL = 0x40,
81 HOST_IRQ_STAT = 0x44,
82 HOST_PHY_CFG = 0x48,
83 HOST_BIST_CTRL = 0x50,
84 HOST_BIST_PTRN = 0x54,
85 HOST_BIST_STAT = 0x58,
86 HOST_MEM_BIST_STAT = 0x5c,
87 HOST_FLASH_CMD = 0x70,
88 /* 8 bit regs */
89 HOST_FLASH_DATA = 0x74,
90 HOST_TRANSITION_DETECT = 0x75,
91 HOST_GPIO_CTRL = 0x76,
92 HOST_I2C_ADDR = 0x78, /* 32 bit */
93 HOST_I2C_DATA = 0x7c,
94 HOST_I2C_XFER_CNT = 0x7e,
95 HOST_I2C_CTRL = 0x7f,
96
97 /* HOST_SLOT_STAT bits */
98 HOST_SSTAT_ATTN = (1 << 31),
99
100 /*
101 * Port registers
102 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
103 */
104 PORT_REGS_SIZE = 0x2000,
105 PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
106
107 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
108 /* 32 bit regs */
109 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
110 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
111 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
112 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
113 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
114 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
115 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
116 PORT_CMD_ERR = 0x1024, /* command error number */
117 PORT_FIS_CFG = 0x1028,
118 PORT_FIFO_THRES = 0x102c,
119 /* 16 bit regs */
120 PORT_DECODE_ERR_CNT = 0x1040,
121 PORT_DECODE_ERR_THRESH = 0x1042,
122 PORT_CRC_ERR_CNT = 0x1044,
123 PORT_CRC_ERR_THRESH = 0x1046,
124 PORT_HSHK_ERR_CNT = 0x1048,
125 PORT_HSHK_ERR_THRESH = 0x104a,
126 /* 32 bit regs */
127 PORT_PHY_CFG = 0x1050,
128 PORT_SLOT_STAT = 0x1800,
129 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
130 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
131 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
132 PORT_SCONTROL = 0x1f00,
133 PORT_SSTATUS = 0x1f04,
134 PORT_SERROR = 0x1f08,
135 PORT_SACTIVE = 0x1f0c,
136
137 /* PORT_CTRL_STAT bits */
138 PORT_CS_PORT_RST = (1 << 0), /* port reset */
139 PORT_CS_DEV_RST = (1 << 1), /* device reset */
140 PORT_CS_INIT = (1 << 2), /* port initialize */
141 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
142 PORT_CS_RESUME = (1 << 6), /* port resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
148 /* bits[11:0] are masked */
149 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
150 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
151 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
152 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
153 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
154 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
155 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */
156 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */
157
158 /* bits[27:16] are unmasked (raw) */
159 PORT_IRQ_RAW_SHIFT = 16,
160 PORT_IRQ_MASKED_MASK = 0x7ff,
161 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
162
163 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
164 PORT_IRQ_STEER_SHIFT = 30,
165 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
166
167 /* PORT_CMD_ERR constants */
168 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
169 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
170 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
171 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
172 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
173 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
174 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
175 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
176 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
177 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
178 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
179 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
180 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
181 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
182 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
183 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
184 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
185 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
186 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
187 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */
188 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI prity err during transfer */
189 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
190
191 /*
192 * Other constants
193 */
194 SGE_TRM = (1 << 31), /* Last SGE in chain */
195 PRB_SOFT_RST = (1 << 7), /* Soft reset request (ign BSY?) */
196
197 /* board id */
198 BID_SIL3124 = 0,
199 BID_SIL3132 = 1,
200 BID_SIL3131 = 2,
201
202 IRQ_STAT_4PORTS = 0xf,
203};
204
/*
 * One command slot's worth of host memory: a PRB (Port Request Block)
 * immediately followed by its scatter/gather table.  An array of these
 * is DMA-allocated per port in sil24_port_start() and addressed by
 * command tag in sil24_qc_prep()/sil24_qc_issue().
 */
struct sil24_cmd_block {
	struct sil24_prb prb;
	struct sil24_sge sge[LIBATA_MAX_PRD];
};
209
/*
 * ap->private_data
 *
 * The preview driver always returned 0 for status.  We emulate it
 * here from the previous interrupt: sil24_update_tf() snapshots the
 * received FIS into 'tf', and sil24_check_status()/sil24_tf_read()
 * serve libata from that cache instead of touching hardware.
 */
struct sil24_port_priv {
	struct sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
	struct ata_taskfile tf;			/* Cached taskfile registers */
};
221
/* ap->host_set->private_data — iomapped BARs, released in sil24_host_stop() */
struct sil24_host_priv {
	void __iomem *host_base;	/* global controller control (128 bytes @BAR0) */
	void __iomem *port_base;	/* port registers (4 * 8192 bytes @BAR2) */
};
227
/* Forward declarations for the operation tables and probe hook below. */
static u8 sil24_check_status(struct ata_port *ap);
static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void sil24_phy_reset(struct ata_port *ap);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_irq_clear(struct ata_port *ap);
static void sil24_eng_timeout(struct ata_port *ap);
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
242
/* Supported devices; driver_data selects the sil24_port_info[] entry.
 * Note the 3531 shares the single-port 3131 configuration. */
static struct pci_device_id sil24_pci_tbl[] = {
	{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
	{ 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
	{ 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
	{ 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
	{ } /* terminate list */
};
250
/* PCI driver glue; probe allocates everything, generic libata helper removes. */
static struct pci_driver sil24_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil24_pci_tbl,
	.probe			= sil24_init_one,
	.remove			= ata_pci_remove_one, /* safe? */
};
257
/* SCSI host template — all entry points are the stock libata ones. */
static Scsi_Host_Template sil24_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
	.ordered_flush		= 1, /* NCQ not supported yet */
};
277
/* Low-level port operations.  Status/taskfile reads are served from the
 * cached taskfile in sil24_port_priv (this controller has no legacy
 * taskfile registers), hence the shared sil24_check_status for both
 * status and altstatus. */
static const struct ata_port_operations sil24_ops = {
	.port_disable		= ata_port_disable,

	.check_status		= sil24_check_status,
	.check_altstatus	= sil24_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= sil24_tf_read,

	.phy_reset		= sil24_phy_reset,

	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,

	.eng_timeout		= sil24_eng_timeout,

	.irq_handler		= sil24_interrupt,
	.irq_clear		= sil24_irq_clear,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,

	.port_start		= sil24_port_start,
	.port_stop		= sil24_port_stop,
	.host_stop		= sil24_host_stop,
};
304
/*
 * Use bits 30-31 of host_flags to encode available port numbers.
 * Current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
311
/* Per-board configuration, indexed by BID_* from sil24_pci_tbl.
 * The entries differ only in port count (encoded via SIL24_NPORTS2FLAG). */
static struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.sht		= &sil24_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.sht		= &sil24_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.sht		= &sil24_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
};
347
348static inline void sil24_update_tf(struct ata_port *ap)
349{
350 struct sil24_port_priv *pp = ap->private_data;
351 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
352 struct sil24_prb __iomem *prb = port;
353 u8 fis[6 * 4];
354
355 memcpy_fromio(fis, prb->fis, 6 * 4);
356 ata_tf_from_fis(fis, &pp->tf);
357}
358
359static u8 sil24_check_status(struct ata_port *ap)
360{
361 struct sil24_port_priv *pp = ap->private_data;
362 return pp->tf.command;
363}
364
/* Dword offsets of the SCR registers inside the port's SCONTROL area,
 * indexed by libata's SCR_* constants. */
static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};
371
372static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
373{
374 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
375 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
376 void __iomem *addr;
377 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
378 return readl(scr_addr + sil24_scr_map[sc_reg] * 4);
379 }
380 return 0xffffffffU;
381}
382
383static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
384{
385 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
386 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
387 void __iomem *addr;
388 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
389 writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
390 }
391}
392
393static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
394{
395 struct sil24_port_priv *pp = ap->private_data;
396 *tf = pp->tf;
397}
398
/* PHY reset hook: run the generic SATA phy reset, then force the device
 * class when the port came up. */
static void sil24_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	/*
	 * No ATAPI yet.  Just unconditionally indicate ATA device.
	 * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA
	 * and libata core will ignore the device.
	 */
	if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
		ap->device[0].class = ATA_DEV_ATA;
}
410
411static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
412 struct sil24_cmd_block *cb)
413{
414 struct scatterlist *sg = qc->sg;
415 struct sil24_sge *sge = cb->sge;
416 unsigned i;
417
418 for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
419 sge->addr = cpu_to_le64(sg_dma_address(sg));
420 sge->cnt = cpu_to_le32(sg_dma_len(sg));
421 sge->flags = 0;
422 sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
423 }
424}
425
426static void sil24_qc_prep(struct ata_queued_cmd *qc)
427{
428 struct ata_port *ap = qc->ap;
429 struct sil24_port_priv *pp = ap->private_data;
430 struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
431 struct sil24_prb *prb = &cb->prb;
432
433 switch (qc->tf.protocol) {
434 case ATA_PROT_PIO:
435 case ATA_PROT_DMA:
436 case ATA_PROT_NODATA:
437 break;
438 default:
439 /* ATAPI isn't supported yet */
440 BUG();
441 }
442
443 ata_tf_to_fis(&qc->tf, prb->fis, 0);
444
445 if (qc->flags & ATA_QCFLAG_DMAMAP)
446 sil24_fill_sg(qc, cb);
447}
448
449static int sil24_qc_issue(struct ata_queued_cmd *qc)
450{
451 struct ata_port *ap = qc->ap;
452 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
453 struct sil24_port_priv *pp = ap->private_data;
454 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
455
456 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
457 return 0;
458}
459
/* libata requires an ->irq_clear hook; this driver acknowledges
 * interrupts directly in its handler, so nothing to do here. */
static void sil24_irq_clear(struct ata_port *ap)
{
	/* unused */
}
464
465static int __sil24_reset_controller(void __iomem *port)
466{
467 int cnt;
468 u32 tmp;
469
470 /* Reset controller state. Is this correct? */
471 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
472 readl(port + PORT_CTRL_STAT); /* sync */
473
474 /* Max ~100ms */
475 for (cnt = 0; cnt < 1000; cnt++) {
476 udelay(100);
477 tmp = readl(port + PORT_CTRL_STAT);
478 if (!(tmp & PORT_CS_DEV_RST))
479 break;
480 }
481
482 if (tmp & PORT_CS_DEV_RST)
483 return -1;
484 return 0;
485}
486
/* Noisy wrapper around __sil24_reset_controller() used on error paths. */
static void sil24_reset_controller(struct ata_port *ap)
{
	printk(KERN_NOTICE DRV_NAME
	       " ata%u: resetting controller...\n", ap->id);
	if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr))
                printk(KERN_ERR DRV_NAME
                       " ata%u: failed to reset controller\n", ap->id);
}
495
/* ->eng_timeout hook: fail the active command with AC_ERR_OTHER and
 * reset the port controller to get back to a sane state. */
static void sil24_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (!qc) {
		/* timeout fired with nothing in flight — should not happen */
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
		return;
	}

	/*
	 * hack alert!  We cannot use the supplied completion
	 * function from inside the ->eh_strategy_handler() thread.
	 * libata is the only user of ->eh_strategy_handler() in
	 * any kernel, so the default scsi_done() assumes it is
	 * not being called from the SCSI EH.  Substitute
	 * scsi_finish_command, which is EH-safe, before completing.
	 */
	printk(KERN_ERR "ata%u: command timeout\n", ap->id);
	qc->scsidone = scsi_finish_command;
	ata_qc_complete(qc, AC_ERR_OTHER);

	sil24_reset_controller(ap);
}
520
521static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
522{
523 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
524 struct sil24_port_priv *pp = ap->private_data;
525 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
526 u32 irq_stat, cmd_err, sstatus, serror;
527 unsigned int err_mask;
528
529 irq_stat = readl(port + PORT_IRQ_STAT);
530 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
531
532 if (!(irq_stat & PORT_IRQ_ERROR)) {
533 /* ignore non-completion, non-error irqs for now */
534 printk(KERN_WARNING DRV_NAME
535 "ata%u: non-error exception irq (irq_stat %x)\n",
536 ap->id, irq_stat);
537 return;
538 }
539
540 cmd_err = readl(port + PORT_CMD_ERR);
541 sstatus = readl(port + PORT_SSTATUS);
542 serror = readl(port + PORT_SERROR);
543 if (serror)
544 writel(serror, port + PORT_SERROR);
545
546 printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
547 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
548 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
549
550 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
551 /*
552 * Device is reporting error, tf registers are valid.
553 */
554 sil24_update_tf(ap);
555 err_mask = ac_err_mask(pp->tf.command);
556 } else {
557 /*
558 * Other errors. libata currently doesn't have any
559 * mechanism to report these errors. Just turn on
560 * ATA_ERR.
561 */
562 err_mask = AC_ERR_OTHER;
563 }
564
565 if (qc)
566 ata_qc_complete(qc, err_mask);
567
568 sil24_reset_controller(ap);
569}
570
/* Per-port interrupt service: distinguish clean completion from an
 * exception via the slot status attention bit. */
static inline void sil24_host_intr(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	u32 slot_stat;

	slot_stat = readl(port + PORT_SLOT_STAT);
	if (!(slot_stat & HOST_SSTAT_ATTN)) {
		struct sil24_port_priv *pp = ap->private_data;
		/*
		 * !HOST_SSTAT_ATTN guarantees successful completion,
		 * so reading back tf registers is unnecessary for
		 * most commands.  TODO: read tf registers for
		 * commands which require these values on successful
		 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
		 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
		 */
		sil24_update_tf(ap);

		if (qc)
			ata_qc_complete(qc, ac_err_mask(pp->tf.command));
	} else
		sil24_error_intr(ap, slot_stat);
}
595
596static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
597{
598 struct ata_host_set *host_set = dev_instance;
599 struct sil24_host_priv *hpriv = host_set->private_data;
600 unsigned handled = 0;
601 u32 status;
602 int i;
603
604 status = readl(hpriv->host_base + HOST_IRQ_STAT);
605
606 if (status == 0xffffffff) {
607 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
608 "PCI fault or device removal?\n");
609 goto out;
610 }
611
612 if (!(status & IRQ_STAT_4PORTS))
613 goto out;
614
615 spin_lock(&host_set->lock);
616
617 for (i = 0; i < host_set->n_ports; i++)
618 if (status & (1 << i)) {
619 struct ata_port *ap = host_set->ports[i];
620 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
621 sil24_host_intr(host_set->ports[i]);
622 handled++;
623 } else
624 printk(KERN_ERR DRV_NAME
625 ": interrupt from disabled port %d\n", i);
626 }
627
628 spin_unlock(&host_set->lock);
629 out:
630 return IRQ_RETVAL(handled);
631}
632
/*
 * ->port_start hook: allocate port private data and the DMA-coherent
 * command-block array.  Returns 0 or -ENOMEM; frees partial
 * allocations on failure.
 */
static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct sil24_port_priv *pp;
	struct sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb);
	dma_addr_t cb_dma;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	memset(pp, 0, sizeof(*pp));

	/* seed the cached status so ->check_status shows device ready */
	pp->tf.command = ATA_DRDY;

	cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb) {
		kfree(pp);
		return -ENOMEM;
	}
	memset(cb, 0, cb_size);

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	return 0;
}
662
663static void sil24_port_stop(struct ata_port *ap)
664{
665 struct device *dev = ap->host_set->dev;
666 struct sil24_port_priv *pp = ap->private_data;
667 size_t cb_size = sizeof(*pp->cmd_block);
668
669 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
670 kfree(pp);
671}
672
/* ->host_stop hook: unmap BAR0/BAR2 and free the host private data
 * allocated in sil24_init_one(). */
static void sil24_host_stop(struct ata_host_set *host_set)
{
	struct sil24_host_priv *hpriv = host_set->private_data;

	iounmap(hpriv->host_base);
	iounmap(hpriv->port_base);
	kfree(hpriv);
}
681
682static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
683{
684 static int printed_version = 0;
685 unsigned int board_id = (unsigned int)ent->driver_data;
686 struct ata_port_info *pinfo = &sil24_port_info[board_id];
687 struct ata_probe_ent *probe_ent = NULL;
688 struct sil24_host_priv *hpriv = NULL;
689 void __iomem *host_base = NULL;
690 void __iomem *port_base = NULL;
691 int i, rc;
692
693 if (!printed_version++)
694 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
695
696 rc = pci_enable_device(pdev);
697 if (rc)
698 return rc;
699
700 rc = pci_request_regions(pdev, DRV_NAME);
701 if (rc)
702 goto out_disable;
703
704 rc = -ENOMEM;
705 /* ioremap mmio registers */
706 host_base = ioremap(pci_resource_start(pdev, 0),
707 pci_resource_len(pdev, 0));
708 if (!host_base)
709 goto out_free;
710 port_base = ioremap(pci_resource_start(pdev, 2),
711 pci_resource_len(pdev, 2));
712 if (!port_base)
713 goto out_free;
714
715 /* allocate & init probe_ent and hpriv */
716 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
717 if (!probe_ent)
718 goto out_free;
719
720 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
721 if (!hpriv)
722 goto out_free;
723
724 memset(probe_ent, 0, sizeof(*probe_ent));
725 probe_ent->dev = pci_dev_to_dev(pdev);
726 INIT_LIST_HEAD(&probe_ent->node);
727
728 probe_ent->sht = pinfo->sht;
729 probe_ent->host_flags = pinfo->host_flags;
730 probe_ent->pio_mask = pinfo->pio_mask;
731 probe_ent->udma_mask = pinfo->udma_mask;
732 probe_ent->port_ops = pinfo->port_ops;
733 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
734
735 probe_ent->irq = pdev->irq;
736 probe_ent->irq_flags = SA_SHIRQ;
737 probe_ent->mmio_base = port_base;
738 probe_ent->private_data = hpriv;
739
740 memset(hpriv, 0, sizeof(*hpriv));
741 hpriv->host_base = host_base;
742 hpriv->port_base = port_base;
743
744 /*
745 * Configure the device
746 */
747 /*
748 * FIXME: This device is certainly 64-bit capable. We just
749 * don't know how to use it. After fixing 32bit activation in
750 * this function, enable 64bit masks here.
751 */
752 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
753 if (rc) {
754 dev_printk(KERN_ERR, &pdev->dev,
755 "32-bit DMA enable failed\n");
756 goto out_free;
757 }
758 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
759 if (rc) {
760 dev_printk(KERN_ERR, &pdev->dev,
761 "32-bit consistent DMA enable failed\n");
762 goto out_free;
763 }
764
765 /* GPIO off */
766 writel(0, host_base + HOST_FLASH_CMD);
767
768 /* Mask interrupts during initialization */
769 writel(0, host_base + HOST_CTRL);
770
771 for (i = 0; i < probe_ent->n_ports; i++) {
772 void __iomem *port = port_base + i * PORT_REGS_SIZE;
773 unsigned long portu = (unsigned long)port;
774 u32 tmp;
775 int cnt;
776
777 probe_ent->port[i].cmd_addr = portu + PORT_PRB;
778 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
779
780 ata_std_ports(&probe_ent->port[i]);
781
782 /* Initial PHY setting */
783 writel(0x20c, port + PORT_PHY_CFG);
784
785 /* Clear port RST */
786 tmp = readl(port + PORT_CTRL_STAT);
787 if (tmp & PORT_CS_PORT_RST) {
788 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
789 readl(port + PORT_CTRL_STAT); /* sync */
790 for (cnt = 0; cnt < 10; cnt++) {
791 msleep(10);
792 tmp = readl(port + PORT_CTRL_STAT);
793 if (!(tmp & PORT_CS_PORT_RST))
794 break;
795 }
796 if (tmp & PORT_CS_PORT_RST)
797 dev_printk(KERN_ERR, &pdev->dev,
798 "failed to clear port RST\n");
799 }
800
801 /* Zero error counters. */
802 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
803 writel(0x8000, port + PORT_CRC_ERR_THRESH);
804 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
805 writel(0x0000, port + PORT_DECODE_ERR_CNT);
806 writel(0x0000, port + PORT_CRC_ERR_CNT);
807 writel(0x0000, port + PORT_HSHK_ERR_CNT);
808
809 /* FIXME: 32bit activation? */
810 writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
811 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
812
813 /* Configure interrupts */
814 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
815 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
816 port + PORT_IRQ_ENABLE_SET);
817
818 /* Clear interrupts */
819 writel(0x0fff0fff, port + PORT_IRQ_STAT);
820 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
821
822 /* Clear port multiplier enable and resume bits */
823 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
824
825 /* Reset itself */
826 if (__sil24_reset_controller(port))
827 dev_printk(KERN_ERR, &pdev->dev,
828 "failed to reset controller\n");
829 }
830
831 /* Turn on interrupts */
832 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
833
834 pci_set_master(pdev);
835
836 /* FIXME: check ata_device_add return value */
837 ata_device_add(probe_ent);
838
839 kfree(probe_ent);
840 return 0;
841
842 out_free:
843 if (host_base)
844 iounmap(host_base);
845 if (port_base)
846 iounmap(port_base);
847 kfree(probe_ent);
848 kfree(hpriv);
849 pci_release_regions(pdev);
850 out_disable:
851 pci_disable_device(pdev);
852 return rc;
853}
854
/* Module init: register the PCI driver; probing runs via sil24_init_one(). */
static int __init sil24_init(void)
{
	return pci_module_init(&sil24_pci_driver);
}
859
/* Module exit: unregister the PCI driver, detaching all boards. */
static void __exit sil24_exit(void)
{
	pci_unregister_driver(&sil24_pci_driver);
}
864
865MODULE_AUTHOR("Tejun Heo");
866MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
867MODULE_LICENSE("GPL");
868MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
869
870module_init(sil24_init);
871module_exit(sil24_exit);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index b227e51d12f4..42288be0e561 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -38,6 +38,7 @@
38#include <linux/blkdev.h> 38#include <linux/blkdev.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -102,7 +103,7 @@ static Scsi_Host_Template sis_sht = {
102 .ordered_flush = 1, 103 .ordered_flush = 1,
103}; 104};
104 105
105static struct ata_port_operations sis_ops = { 106static const struct ata_port_operations sis_ops = {
106 .port_disable = ata_port_disable, 107 .port_disable = ata_port_disable,
107 .tf_load = ata_tf_load, 108 .tf_load = ata_tf_load,
108 .tf_read = ata_tf_read, 109 .tf_read = ata_tf_read,
@@ -237,6 +238,7 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
237 238
238static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 239static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
239{ 240{
241 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL; 242 struct ata_probe_ent *probe_ent = NULL;
241 int rc; 243 int rc;
242 u32 genctl; 244 u32 genctl;
@@ -245,6 +247,9 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
245 u8 pmr; 247 u8 pmr;
246 u8 port2_start; 248 u8 port2_start;
247 249
250 if (!printed_version++)
251 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
252
248 rc = pci_enable_device(pdev); 253 rc = pci_enable_device(pdev);
249 if (rc) 254 if (rc)
250 return rc; 255 return rc;
@@ -263,7 +268,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
263 goto err_out_regions; 268 goto err_out_regions;
264 269
265 ppi = &sis_port_info; 270 ppi = &sis_port_info;
266 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 271 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
267 if (!probe_ent) { 272 if (!probe_ent) {
268 rc = -ENOMEM; 273 rc = -ENOMEM;
269 goto err_out_regions; 274 goto err_out_regions;
@@ -288,16 +293,18 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
288 pci_read_config_byte(pdev, SIS_PMR, &pmr); 293 pci_read_config_byte(pdev, SIS_PMR, &pmr);
289 if (ent->device != 0x182) { 294 if (ent->device != 0x182) {
290 if ((pmr & SIS_PMR_COMBINED) == 0) { 295 if ((pmr & SIS_PMR_COMBINED) == 0) {
291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n"); 296 dev_printk(KERN_INFO, &pdev->dev,
297 "Detected SiS 180/181 chipset in SATA mode\n");
292 port2_start = 64; 298 port2_start = 64;
293 } 299 }
294 else { 300 else {
295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n"); 301 dev_printk(KERN_INFO, &pdev->dev,
302 "Detected SiS 180/181 chipset in combined mode\n");
296 port2_start=0; 303 port2_start=0;
297 } 304 }
298 } 305 }
299 else { 306 else {
300 printk(KERN_INFO "sata_sis: Detected SiS 182 chipset\n"); 307 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
301 port2_start = 0x20; 308 port2_start = 0x20;
302 } 309 }
303 310
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d89d968bedac..db615ff794d8 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -44,6 +44,7 @@
44#include <linux/blkdev.h> 44#include <linux/blkdev.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/device.h>
47#include "scsi.h" 48#include "scsi.h"
48#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
49#include <linux/libata.h> 50#include <linux/libata.h>
@@ -84,6 +85,8 @@
84/* Port stride */ 85/* Port stride */
85#define K2_SATA_PORT_OFFSET 0x100 86#define K2_SATA_PORT_OFFSET 0x100
86 87
88static u8 k2_stat_check_status(struct ata_port *ap);
89
87 90
88static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 91static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
89{ 92{
@@ -102,7 +105,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
102} 105}
103 106
104 107
105static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) 108static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
106{ 109{
107 struct ata_ioports *ioaddr = &ap->ioaddr; 110 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 111 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -136,16 +139,24 @@ static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
136static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 139static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
137{ 140{
138 struct ata_ioports *ioaddr = &ap->ioaddr; 141 struct ata_ioports *ioaddr = &ap->ioaddr;
139 u16 nsect, lbal, lbam, lbah; 142 u16 nsect, lbal, lbam, lbah, feature;
140 143
141 nsect = tf->nsect = readw(ioaddr->nsect_addr); 144 tf->command = k2_stat_check_status(ap);
142 lbal = tf->lbal = readw(ioaddr->lbal_addr);
143 lbam = tf->lbam = readw(ioaddr->lbam_addr);
144 lbah = tf->lbah = readw(ioaddr->lbah_addr);
145 tf->device = readw(ioaddr->device_addr); 145 tf->device = readw(ioaddr->device_addr);
146 feature = readw(ioaddr->error_addr);
147 nsect = readw(ioaddr->nsect_addr);
148 lbal = readw(ioaddr->lbal_addr);
149 lbam = readw(ioaddr->lbam_addr);
150 lbah = readw(ioaddr->lbah_addr);
151
152 tf->feature = feature;
153 tf->nsect = nsect;
154 tf->lbal = lbal;
155 tf->lbam = lbam;
156 tf->lbah = lbah;
146 157
147 if (tf->flags & ATA_TFLAG_LBA48) { 158 if (tf->flags & ATA_TFLAG_LBA48) {
148 tf->hob_feature = readw(ioaddr->error_addr) >> 8; 159 tf->hob_feature = feature >> 8;
149 tf->hob_nsect = nsect >> 8; 160 tf->hob_nsect = nsect >> 8;
150 tf->hob_lbal = lbal >> 8; 161 tf->hob_lbal = lbal >> 8;
151 tf->hob_lbam = lbam >> 8; 162 tf->hob_lbam = lbam >> 8;
@@ -297,7 +308,7 @@ static Scsi_Host_Template k2_sata_sht = {
297}; 308};
298 309
299 310
300static struct ata_port_operations k2_sata_ops = { 311static const struct ata_port_operations k2_sata_ops = {
301 .port_disable = ata_port_disable, 312 .port_disable = ata_port_disable,
302 .tf_load = k2_sata_tf_load, 313 .tf_load = k2_sata_tf_load,
303 .tf_read = k2_sata_tf_read, 314 .tf_read = k2_sata_tf_read,
@@ -352,7 +363,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
352 int i; 363 int i;
353 364
354 if (!printed_version++) 365 if (!printed_version++)
355 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 366 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
356 367
357 /* 368 /*
358 * If this driver happens to only be useful on Apple's K2, then 369 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 540a85191172..0ec21e09f5d8 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -137,7 +138,7 @@ struct pdc_port_priv {
137}; 138};
138 139
139struct pdc_host_priv { 140struct pdc_host_priv {
140 void *dimm_mmio; 141 void __iomem *dimm_mmio;
141 142
142 unsigned int doing_hdma; 143 unsigned int doing_hdma;
143 unsigned int hdma_prod; 144 unsigned int hdma_prod;
@@ -157,8 +158,8 @@ static void pdc_20621_phy_reset (struct ata_port *ap);
157static int pdc_port_start(struct ata_port *ap); 158static int pdc_port_start(struct ata_port *ap);
158static void pdc_port_stop(struct ata_port *ap); 159static void pdc_port_stop(struct ata_port *ap);
159static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
160static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
161static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc20621_host_stop(struct ata_host_set *host_set); 163static void pdc20621_host_stop(struct ata_host_set *host_set);
163static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); 164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
164static int pdc20621_detect_dimm(struct ata_probe_ent *pe); 165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
@@ -196,7 +197,7 @@ static Scsi_Host_Template pdc_sata_sht = {
196 .ordered_flush = 1, 197 .ordered_flush = 1,
197}; 198};
198 199
199static struct ata_port_operations pdc_20621_ops = { 200static const struct ata_port_operations pdc_20621_ops = {
200 .port_disable = ata_port_disable, 201 .port_disable = ata_port_disable,
201 .tf_load = pdc_tf_load_mmio, 202 .tf_load = pdc_tf_load_mmio,
202 .tf_read = ata_tf_read, 203 .tf_read = ata_tf_read,
@@ -247,7 +248,7 @@ static void pdc20621_host_stop(struct ata_host_set *host_set)
247{ 248{
248 struct pci_dev *pdev = to_pci_dev(host_set->dev); 249 struct pci_dev *pdev = to_pci_dev(host_set->dev);
249 struct pdc_host_priv *hpriv = host_set->private_data; 250 struct pdc_host_priv *hpriv = host_set->private_data;
250 void *dimm_mmio = hpriv->dimm_mmio; 251 void __iomem *dimm_mmio = hpriv->dimm_mmio;
251 252
252 pci_iounmap(pdev, dimm_mmio); 253 pci_iounmap(pdev, dimm_mmio);
253 kfree(hpriv); 254 kfree(hpriv);
@@ -669,8 +670,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ 670 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670 671
671 writel(port_ofs + PDC_DIMM_ATA_PKT, 672 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 673 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 674 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n", 675 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT, 676 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT, 677 port_ofs + PDC_DIMM_ATA_PKT,
@@ -718,7 +719,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id, 719 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 720 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */ 721 /* get drive status; clear intr; complete txn */
721 ata_qc_complete(qc, ata_wait_idle(ap)); 722 ata_qc_complete(qc, ac_err_mask(ata_wait_idle(ap)));
722 pdc20621_pop_hdma(qc); 723 pdc20621_pop_hdma(qc);
723 } 724 }
724 725
@@ -747,8 +748,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
747 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 748 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
748 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); 749 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
749 writel(port_ofs + PDC_DIMM_ATA_PKT, 750 writel(port_ofs + PDC_DIMM_ATA_PKT,
750 (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 751 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
751 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 752 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 } 753 }
753 754
754 /* step two - execute ATA command */ 755 /* step two - execute ATA command */
@@ -756,7 +757,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
756 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id, 757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
757 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
758 /* get drive status; clear intr; complete txn */ 759 /* get drive status; clear intr; complete txn */
759 ata_qc_complete(qc, ata_wait_idle(ap)); 760 ata_qc_complete(qc, ac_err_mask(ata_wait_idle(ap)));
760 pdc20621_pop_hdma(qc); 761 pdc20621_pop_hdma(qc);
761 } 762 }
762 handled = 1; 763 handled = 1;
@@ -766,7 +767,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
766 767
767 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 768 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
768 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); 769 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
769 ata_qc_complete(qc, status); 770 ata_qc_complete(qc, ac_err_mask(status));
770 handled = 1; 771 handled = 1;
771 772
772 } else { 773 } else {
@@ -881,7 +882,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
881 case ATA_PROT_DMA: 882 case ATA_PROT_DMA:
882 case ATA_PROT_NODATA: 883 case ATA_PROT_NODATA:
883 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 884 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
884 ata_qc_complete(qc, ata_wait_idle(ap) | ATA_ERR); 885 ata_qc_complete(qc, __ac_err_mask(ata_wait_idle(ap)));
885 break; 886 break;
886 887
887 default: 888 default:
@@ -890,7 +891,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
890 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 891 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
891 ap->id, qc->tf.command, drv_stat); 892 ap->id, qc->tf.command, drv_stat);
892 893
893 ata_qc_complete(qc, drv_stat); 894 ata_qc_complete(qc, ac_err_mask(drv_stat));
894 break; 895 break;
895 } 896 }
896 897
@@ -899,7 +900,7 @@ out:
899 DPRINTK("EXIT\n"); 900 DPRINTK("EXIT\n");
900} 901}
901 902
902static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) 903static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
903{ 904{
904 WARN_ON (tf->protocol == ATA_PROT_DMA || 905 WARN_ON (tf->protocol == ATA_PROT_DMA ||
905 tf->protocol == ATA_PROT_NODATA); 906 tf->protocol == ATA_PROT_NODATA);
@@ -907,7 +908,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
907} 908}
908 909
909 910
910static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) 911static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
911{ 912{
912 WARN_ON (tf->protocol == ATA_PROT_DMA || 913 WARN_ON (tf->protocol == ATA_PROT_DMA ||
913 tf->protocol == ATA_PROT_NODATA); 914 tf->protocol == ATA_PROT_NODATA);
@@ -1014,7 +1015,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1014 idx++; 1015 idx++;
1015 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size : 1016 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1016 (long) (window_size - offset); 1017 (long) (window_size - offset);
1017 memcpy_toio((char *) (dimm_mmio + offset / 4), (char *) psource, dist); 1018 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR); 1019 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR); 1020 readl(mmio + PDC_GENERAL_CTLR);
1020 1021
@@ -1023,8 +1024,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1023 for (; (long) size >= (long) window_size ;) { 1024 for (; (long) size >= (long) window_size ;) {
1024 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1025 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1025 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1026 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1026 memcpy_toio((char *) (dimm_mmio), (char *) psource, 1027 memcpy_toio(dimm_mmio, psource, window_size / 4);
1027 window_size / 4);
1028 writel(0x01, mmio + PDC_GENERAL_CTLR); 1028 writel(0x01, mmio + PDC_GENERAL_CTLR);
1029 readl(mmio + PDC_GENERAL_CTLR); 1029 readl(mmio + PDC_GENERAL_CTLR);
1030 psource += window_size; 1030 psource += window_size;
@@ -1035,7 +1035,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1035 if (size) { 1035 if (size) {
1036 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1036 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1037 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1037 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1038 memcpy_toio((char *) (dimm_mmio), (char *) psource, size / 4); 1038 memcpy_toio(dimm_mmio, psource, size / 4);
1039 writel(0x01, mmio + PDC_GENERAL_CTLR); 1039 writel(0x01, mmio + PDC_GENERAL_CTLR);
1040 readl(mmio + PDC_GENERAL_CTLR); 1040 readl(mmio + PDC_GENERAL_CTLR);
1041 } 1041 }
@@ -1386,7 +1386,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1386 int rc; 1386 int rc;
1387 1387
1388 if (!printed_version++) 1388 if (!printed_version++)
1389 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1389 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1390 1390
1391 /* 1391 /*
1392 * If this driver happens to only be useful on Apple's K2, then 1392 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 4c9fb8b71be1..a5e245c098e1 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -32,6 +32,7 @@
32#include <linux/blkdev.h> 32#include <linux/blkdev.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/device.h>
35#include "scsi.h" 36#include "scsi.h"
36#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
37#include <linux/libata.h> 38#include <linux/libata.h>
@@ -90,7 +91,7 @@ static Scsi_Host_Template uli_sht = {
90 .ordered_flush = 1, 91 .ordered_flush = 1,
91}; 92};
92 93
93static struct ata_port_operations uli_ops = { 94static const struct ata_port_operations uli_ops = {
94 .port_disable = ata_port_disable, 95 .port_disable = ata_port_disable,
95 96
96 .tf_load = ata_tf_load, 97 .tf_load = ata_tf_load,
@@ -178,12 +179,16 @@ static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
178 179
179static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 180static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
180{ 181{
182 static int printed_version;
181 struct ata_probe_ent *probe_ent; 183 struct ata_probe_ent *probe_ent;
182 struct ata_port_info *ppi; 184 struct ata_port_info *ppi;
183 int rc; 185 int rc;
184 unsigned int board_idx = (unsigned int) ent->driver_data; 186 unsigned int board_idx = (unsigned int) ent->driver_data;
185 int pci_dev_busy = 0; 187 int pci_dev_busy = 0;
186 188
189 if (!printed_version++)
190 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
191
187 rc = pci_enable_device(pdev); 192 rc = pci_enable_device(pdev);
188 if (rc) 193 if (rc)
189 return rc; 194 return rc;
@@ -202,7 +207,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
202 goto err_out_regions; 207 goto err_out_regions;
203 208
204 ppi = &uli_port_info; 209 ppi = &uli_port_info;
205 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 210 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
206 if (!probe_ent) { 211 if (!probe_ent) {
207 rc = -ENOMEM; 212 rc = -ENOMEM;
208 goto err_out_regions; 213 goto err_out_regions;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 128b996b07b7..b3ecdbe400e9 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -41,6 +41,7 @@
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/blkdev.h> 42#include <linux/blkdev.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -109,7 +110,7 @@ static Scsi_Host_Template svia_sht = {
109 .ordered_flush = 1, 110 .ordered_flush = 1,
110}; 111};
111 112
112static struct ata_port_operations svia_sata_ops = { 113static const struct ata_port_operations svia_sata_ops = {
113 .port_disable = ata_port_disable, 114 .port_disable = ata_port_disable,
114 115
115 .tf_load = ata_tf_load, 116 .tf_load = ata_tf_load,
@@ -212,7 +213,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
212 struct ata_probe_ent *probe_ent; 213 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 214 struct ata_port_info *ppi = &svia_port_info;
214 215
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 216 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 217 if (!probe_ent)
217 return NULL; 218 return NULL;
218 219
@@ -259,15 +260,15 @@ static void svia_configure(struct pci_dev *pdev)
259 u8 tmp8; 260 u8 tmp8;
260 261
261 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); 262 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
262 printk(KERN_INFO DRV_NAME "(%s): routed to hard irq line %d\n", 263 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
263 pci_name(pdev),
264 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); 264 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
265 265
266 /* make sure SATA channels are enabled */ 266 /* make sure SATA channels are enabled */
267 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); 267 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
268 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 268 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
269 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channels (0x%x)\n", 269 dev_printk(KERN_DEBUG, &pdev->dev,
270 pci_name(pdev), (int) tmp8); 270 "enabling SATA channels (0x%x)\n",
271 (int) tmp8);
271 tmp8 |= ALL_PORTS; 272 tmp8 |= ALL_PORTS;
272 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); 273 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
273 } 274 }
@@ -275,8 +276,9 @@ static void svia_configure(struct pci_dev *pdev)
275 /* make sure interrupts for each channel sent to us */ 276 /* make sure interrupts for each channel sent to us */
276 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); 277 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
277 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 278 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
278 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel interrupts (0x%x)\n", 279 dev_printk(KERN_DEBUG, &pdev->dev,
279 pci_name(pdev), (int) tmp8); 280 "enabling SATA channel interrupts (0x%x)\n",
281 (int) tmp8);
280 tmp8 |= ALL_PORTS; 282 tmp8 |= ALL_PORTS;
281 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); 283 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
282 } 284 }
@@ -284,8 +286,9 @@ static void svia_configure(struct pci_dev *pdev)
284 /* make sure native mode is enabled */ 286 /* make sure native mode is enabled */
285 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); 287 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
286 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { 288 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
287 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel native mode (0x%x)\n", 289 dev_printk(KERN_DEBUG, &pdev->dev,
288 pci_name(pdev), (int) tmp8); 290 "enabling SATA channel native mode (0x%x)\n",
291 (int) tmp8);
289 tmp8 |= NATIVE_MODE_ALL; 292 tmp8 |= NATIVE_MODE_ALL;
290 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 293 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
291 } 294 }
@@ -303,7 +306,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
303 u8 tmp8; 306 u8 tmp8;
304 307
305 if (!printed_version++) 308 if (!printed_version++)
306 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 309 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
307 310
308 rc = pci_enable_device(pdev); 311 rc = pci_enable_device(pdev);
309 if (rc) 312 if (rc)
@@ -318,8 +321,9 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
318 if (board_id == vt6420) { 321 if (board_id == vt6420) {
319 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8); 322 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
320 if (tmp8 & SATA_2DEV) { 323 if (tmp8 & SATA_2DEV) {
321 printk(KERN_ERR DRV_NAME "(%s): SATA master/slave not supported (0x%x)\n", 324 dev_printk(KERN_ERR, &pdev->dev,
322 pci_name(pdev), (int) tmp8); 325 "SATA master/slave not supported (0x%x)\n",
326 (int) tmp8);
323 rc = -EIO; 327 rc = -EIO;
324 goto err_out_regions; 328 goto err_out_regions;
325 } 329 }
@@ -332,10 +336,11 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
332 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) 336 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
333 if ((pci_resource_start(pdev, i) == 0) || 337 if ((pci_resource_start(pdev, i) == 0) ||
334 (pci_resource_len(pdev, i) < bar_sizes[i])) { 338 (pci_resource_len(pdev, i) < bar_sizes[i])) {
335 printk(KERN_ERR DRV_NAME "(%s): invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n", 339 dev_printk(KERN_ERR, &pdev->dev,
336 pci_name(pdev), i, 340 "invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n",
337 pci_resource_start(pdev, i), 341 i,
338 pci_resource_len(pdev, i)); 342 pci_resource_start(pdev, i),
343 pci_resource_len(pdev, i));
339 rc = -ENODEV; 344 rc = -ENODEV;
340 goto err_out_regions; 345 goto err_out_regions;
341 } 346 }
@@ -353,8 +358,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
353 probe_ent = vt6421_init_probe_ent(pdev); 358 probe_ent = vt6421_init_probe_ent(pdev);
354 359
355 if (!probe_ent) { 360 if (!probe_ent) {
356 printk(KERN_ERR DRV_NAME "(%s): out of memory\n", 361 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
357 pci_name(pdev));
358 rc = -ENOMEM; 362 rc = -ENOMEM;
359 goto err_out_regions; 363 goto err_out_regions;
360 } 364 }
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index cf94e0158a8d..bb84ba0c7e83 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -42,6 +42,7 @@
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h> 44#include <linux/dma-mapping.h>
45#include <linux/device.h>
45#include "scsi.h" 46#include "scsi.h"
46#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
47#include <linux/libata.h> 48#include <linux/libata.h>
@@ -86,7 +87,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
86{ 87{
87 if (sc_reg > SCR_CONTROL) 88 if (sc_reg > SCR_CONTROL)
88 return 0xffffffffU; 89 return 0xffffffffU;
89 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 90 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
90} 91}
91 92
92 93
@@ -95,16 +96,16 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
95{ 96{
96 if (sc_reg > SCR_CONTROL) 97 if (sc_reg > SCR_CONTROL)
97 return; 98 return;
98 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 99 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
99} 100}
100 101
101 102
102static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) 103static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
103{ 104{
104 unsigned long mask_addr; 105 void __iomem *mask_addr;
105 u8 mask; 106 u8 mask;
106 107
107 mask_addr = (unsigned long) ap->host_set->mmio_base + 108 mask_addr = ap->host_set->mmio_base +
108 VSC_SATA_INT_MASK_OFFSET + ap->port_no; 109 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
109 mask = readb(mask_addr); 110 mask = readb(mask_addr);
110 if (ctl & ATA_NIEN) 111 if (ctl & ATA_NIEN)
@@ -115,7 +116,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
115} 116}
116 117
117 118
118static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) 119static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
119{ 120{
120 struct ata_ioports *ioaddr = &ap->ioaddr; 121 struct ata_ioports *ioaddr = &ap->ioaddr;
121 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 122 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -153,16 +154,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
153static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 154static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
154{ 155{
155 struct ata_ioports *ioaddr = &ap->ioaddr; 156 struct ata_ioports *ioaddr = &ap->ioaddr;
156 u16 nsect, lbal, lbam, lbah; 157 u16 nsect, lbal, lbam, lbah, feature;
157 158
158 nsect = tf->nsect = readw(ioaddr->nsect_addr); 159 tf->command = ata_check_status(ap);
159 lbal = tf->lbal = readw(ioaddr->lbal_addr);
160 lbam = tf->lbam = readw(ioaddr->lbam_addr);
161 lbah = tf->lbah = readw(ioaddr->lbah_addr);
162 tf->device = readw(ioaddr->device_addr); 160 tf->device = readw(ioaddr->device_addr);
161 feature = readw(ioaddr->error_addr);
162 nsect = readw(ioaddr->nsect_addr);
163 lbal = readw(ioaddr->lbal_addr);
164 lbam = readw(ioaddr->lbam_addr);
165 lbah = readw(ioaddr->lbah_addr);
166
167 tf->feature = feature;
168 tf->nsect = nsect;
169 tf->lbal = lbal;
170 tf->lbam = lbam;
171 tf->lbah = lbah;
163 172
164 if (tf->flags & ATA_TFLAG_LBA48) { 173 if (tf->flags & ATA_TFLAG_LBA48) {
165 tf->hob_feature = readb(ioaddr->error_addr); 174 tf->hob_feature = feature >> 8;
166 tf->hob_nsect = nsect >> 8; 175 tf->hob_nsect = nsect >> 8;
167 tf->hob_lbal = lbal >> 8; 176 tf->hob_lbal = lbal >> 8;
168 tf->hob_lbam = lbam >> 8; 177 tf->hob_lbam = lbam >> 8;
@@ -231,7 +240,7 @@ static Scsi_Host_Template vsc_sata_sht = {
231}; 240};
232 241
233 242
234static struct ata_port_operations vsc_sata_ops = { 243static const struct ata_port_operations vsc_sata_ops = {
235 .port_disable = ata_port_disable, 244 .port_disable = ata_port_disable,
236 .tf_load = vsc_sata_tf_load, 245 .tf_load = vsc_sata_tf_load,
237 .tf_read = vsc_sata_tf_read, 246 .tf_read = vsc_sata_tf_read,
@@ -283,11 +292,11 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
283 struct ata_probe_ent *probe_ent = NULL; 292 struct ata_probe_ent *probe_ent = NULL;
284 unsigned long base; 293 unsigned long base;
285 int pci_dev_busy = 0; 294 int pci_dev_busy = 0;
286 void *mmio_base; 295 void __iomem *mmio_base;
287 int rc; 296 int rc;
288 297
289 if (!printed_version++) 298 if (!printed_version++)
290 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 299 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
291 300
292 rc = pci_enable_device(pdev); 301 rc = pci_enable_device(pdev);
293 if (rc) 302 if (rc)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f0ebabf6d47..a5711d545d71 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(scsi_device_types);
130 * Returns: Pointer to request block. 130 * Returns: Pointer to request block.
131 */ 131 */
132struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, 132struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
133 int gfp_mask) 133 gfp_t gfp_mask)
134{ 134{
135 const int offset = ALIGN(sizeof(struct scsi_request), 4); 135 const int offset = ALIGN(sizeof(struct scsi_request), 4);
136 const int size = offset + sizeof(struct request); 136 const int size = offset + sizeof(struct request);
@@ -196,7 +196,7 @@ struct scsi_host_cmd_pool {
196 unsigned int users; 196 unsigned int users;
197 char *name; 197 char *name;
198 unsigned int slab_flags; 198 unsigned int slab_flags;
199 unsigned int gfp_mask; 199 gfp_t gfp_mask;
200}; 200};
201 201
202static struct scsi_host_cmd_pool scsi_cmd_pool = { 202static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -213,7 +213,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
213static DECLARE_MUTEX(host_cmd_pool_mutex); 213static DECLARE_MUTEX(host_cmd_pool_mutex);
214 214
215static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, 215static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
216 int gfp_mask) 216 gfp_t gfp_mask)
217{ 217{
218 struct scsi_cmnd *cmd; 218 struct scsi_cmnd *cmd;
219 219
@@ -245,7 +245,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
245 * 245 *
246 * Returns: The allocated scsi command structure. 246 * Returns: The allocated scsi command structure.
247 */ 247 */
248struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) 248struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
249{ 249{
250 struct scsi_cmnd *cmd; 250 struct scsi_cmnd *cmd;
251 251
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index de7f98cc38fe..6a3f6aae8a97 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -205,7 +205,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
205 unsigned int inlen, outlen, cmdlen; 205 unsigned int inlen, outlen, cmdlen;
206 unsigned int needed, buf_needed; 206 unsigned int needed, buf_needed;
207 int timeout, retries, result; 207 int timeout, retries, result;
208 int data_direction, gfp_mask = GFP_KERNEL; 208 int data_direction;
209 gfp_t gfp_mask = GFP_KERNEL;
209 210
210 if (!sic) 211 if (!sic)
211 return -EINVAL; 212 return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0074f28c37b2..3ff538809786 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -677,7 +677,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
677 return NULL; 677 return NULL;
678} 678}
679 679
680static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask) 680static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
681{ 681{
682 struct scsi_host_sg_pool *sgp; 682 struct scsi_host_sg_pool *sgp;
683 struct scatterlist *sgl; 683 struct scatterlist *sgl;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 771e97ef136e..b856e140e65f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -26,6 +26,7 @@
26 */ 26 */
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/sched.h> /* workqueue stuff, HZ */
29#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8bb8222ea589..d2caa35059d9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -19,6 +19,9 @@
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/string.h>
23#include <linux/slab.h>
24
22#include <scsi/scsi.h> 25#include <scsi/scsi.h>
23#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
24#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ad94367df430..d86d5c26061d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -49,6 +49,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
49#include <linux/seq_file.h> 49#include <linux/seq_file.h>
50#include <linux/blkdev.h> 50#include <linux/blkdev.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/scatterlist.h>
52 53
53#include "scsi.h" 54#include "scsi.h"
54#include <scsi/scsi_dbg.h> 55#include <scsi/scsi_dbg.h>
@@ -104,8 +105,8 @@ static int sg_allow_dio = SG_ALLOW_DIO_DEF;
104 105
105#define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */ 106#define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
106 107
107static int sg_add(struct class_device *); 108static int sg_add(struct class_device *, struct class_interface *);
108static void sg_remove(struct class_device *); 109static void sg_remove(struct class_device *, struct class_interface *);
109 110
110static Scsi_Request *dummy_cmdp; /* only used for sizeof */ 111static Scsi_Request *dummy_cmdp; /* only used for sizeof */
111 112
@@ -1506,7 +1507,7 @@ static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1506} 1507}
1507 1508
1508static int 1509static int
1509sg_add(struct class_device *cl_dev) 1510sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1510{ 1511{
1511 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev); 1512 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1512 struct gendisk *disk; 1513 struct gendisk *disk;
@@ -1550,7 +1551,7 @@ sg_add(struct class_device *cl_dev)
1550 if (sg_sysfs_valid) { 1551 if (sg_sysfs_valid) {
1551 struct class_device * sg_class_member; 1552 struct class_device * sg_class_member;
1552 1553
1553 sg_class_member = class_device_create(sg_sysfs_class, 1554 sg_class_member = class_device_create(sg_sysfs_class, NULL,
1554 MKDEV(SCSI_GENERIC_MAJOR, k), 1555 MKDEV(SCSI_GENERIC_MAJOR, k),
1555 cl_dev->dev, "%s", 1556 cl_dev->dev, "%s",
1556 disk->disk_name); 1557 disk->disk_name);
@@ -1582,7 +1583,7 @@ out:
1582} 1583}
1583 1584
1584static void 1585static void
1585sg_remove(struct class_device *cl_dev) 1586sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
1586{ 1587{
1587 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev); 1588 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1588 Sg_device *sdp = NULL; 1589 Sg_device *sdp = NULL;
@@ -1886,13 +1887,17 @@ st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1886 int i; 1887 int i;
1887 1888
1888 for (i=0; i < nr_pages; i++) { 1889 for (i=0; i < nr_pages; i++) {
1889 if (dirtied && !PageReserved(sgl[i].page)) 1890 struct page *page = sgl[i].page;
1890 SetPageDirty(sgl[i].page); 1891
1891 /* unlock_page(sgl[i].page); */ 1892 /* XXX: just for debug. Remove when PageReserved is removed */
1893 BUG_ON(PageReserved(page));
1894 if (dirtied)
1895 SetPageDirty(page);
1896 /* unlock_page(page); */
1892 /* FIXME: cache flush missing for rw==READ 1897 /* FIXME: cache flush missing for rw==READ
1893 * FIXME: call the correct reference counting function 1898 * FIXME: call the correct reference counting function
1894 */ 1899 */
1895 page_cache_release(sgl[i].page); 1900 page_cache_release(page);
1896 } 1901 }
1897 1902
1898 return 0; 1903 return 0;
@@ -1992,9 +1997,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1992 if (!p) 1997 if (!p)
1993 break; 1998 break;
1994 } 1999 }
1995 sclp->page = virt_to_page(p); 2000 sg_set_buf(sclp, p, ret_sz);
1996 sclp->offset = offset_in_page(p);
1997 sclp->length = ret_sz;
1998 2001
1999 SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n", 2002 SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n",
2000 k, sg_scatg2virt(sclp), ret_sz)); 2003 k, sg_scatg2virt(sclp), ret_sz));
@@ -2644,7 +2647,7 @@ static char *
2644sg_page_malloc(int rqSz, int lowDma, int *retSzp) 2647sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2645{ 2648{
2646 char *resp = NULL; 2649 char *resp = NULL;
2647 int page_mask; 2650 gfp_t page_mask;
2648 int order, a_size; 2651 int order, a_size;
2649 int resSz = rqSz; 2652 int resSz = rqSz;
2650 2653
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d001c046551b..da9766283bd7 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3577,7 +3577,8 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
3577static struct st_buffer * 3577static struct st_buffer *
3578 new_tape_buffer(int from_initialization, int need_dma, int max_sg) 3578 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
3579{ 3579{
3580 int i, priority, got = 0, segs = 0; 3580 int i, got = 0, segs = 0;
3581 gfp_t priority;
3581 struct st_buffer *tb; 3582 struct st_buffer *tb;
3582 3583
3583 if (from_initialization) 3584 if (from_initialization)
@@ -3610,7 +3611,8 @@ static struct st_buffer *
3610/* Try to allocate enough space in the tape buffer */ 3611/* Try to allocate enough space in the tape buffer */
3611static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) 3612static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
3612{ 3613{
3613 int segs, nbr, max_segs, b_size, priority, order, got; 3614 int segs, nbr, max_segs, b_size, order, got;
3615 gfp_t priority;
3614 3616
3615 if (new_size <= STbuffer->buffer_size) 3617 if (new_size <= STbuffer->buffer_size)
3616 return 1; 3618 return 1;
@@ -4375,7 +4377,7 @@ static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
4375 snprintf(name, 10, "%s%s%s", rew ? "n" : "", 4377 snprintf(name, 10, "%s%s%s", rew ? "n" : "",
4376 STp->disk->disk_name, st_formats[i]); 4378 STp->disk->disk_name, st_formats[i]);
4377 st_class_member = 4379 st_class_member =
4378 class_device_create(st_sysfs_class, 4380 class_device_create(st_sysfs_class, NULL,
4379 MKDEV(SCSI_TAPE_MAJOR, 4381 MKDEV(SCSI_TAPE_MAJOR,
4380 TAPE_MINOR(dev_num, mode, rew)), 4382 TAPE_MINOR(dev_num, mode, rew)),
4381 &STp->device->sdev_gendev, "%s", name); 4383 &STp->device->sdev_gendev, "%s", name);
@@ -4524,12 +4526,16 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
4524 int i; 4526 int i;
4525 4527
4526 for (i=0; i < nr_pages; i++) { 4528 for (i=0; i < nr_pages; i++) {
4527 if (dirtied && !PageReserved(sgl[i].page)) 4529 struct page *page = sgl[i].page;
4528 SetPageDirty(sgl[i].page); 4530
4531 /* XXX: just for debug. Remove when PageReserved is removed */
4532 BUG_ON(PageReserved(page));
4533 if (dirtied)
4534 SetPageDirty(page);
4529 /* FIXME: cache flush missing for rw==READ 4535 /* FIXME: cache flush missing for rw==READ
4530 * FIXME: call the correct reference counting function 4536 * FIXME: call the correct reference counting function
4531 */ 4537 */
4532 page_cache_release(sgl[i].page); 4538 page_cache_release(page);
4533 } 4539 }
4534 4540
4535 return 0; 4541 return 0;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index e753ba27dc59..a1a58e1d5ad3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -37,6 +37,9 @@
37 * along with this program; if not, write to the Free Software 37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40
41#include <linux/slab.h>
42
40#include "sym_glue.h" 43#include "sym_glue.h"
41#include "sym_nvram.h" 44#include "sym_nvram.h"
42 45
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 3131a6bf7ab7..3a264a408216 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -37,6 +37,8 @@
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */ 38 */
39 39
40#include <linux/gfp.h>
41
40#ifndef SYM_HIPD_H 42#ifndef SYM_HIPD_H
41#define SYM_HIPD_H 43#define SYM_HIPD_H
42 44
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 5a51051e31f0..b131432c677d 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -88,7 +88,7 @@ zalon_probe(struct parisc_device *dev)
88 struct gsc_irq gsc_irq; 88 struct gsc_irq gsc_irq;
89 u32 zalon_vers; 89 u32 zalon_vers;
90 int error = -ENODEV; 90 int error = -ENODEV;
91 void __iomem *zalon = ioremap(dev->hpa, 4096); 91 void __iomem *zalon = ioremap(dev->hpa.start, 4096);
92 void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; 92 void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
93 static int unit = 0; 93 static int unit = 0;
94 struct Scsi_Host *host; 94 struct Scsi_Host *host;
@@ -127,7 +127,7 @@ zalon_probe(struct parisc_device *dev)
127 device.chip = zalon720_chip; 127 device.chip = zalon720_chip;
128 device.host_id = 7; 128 device.host_id = 7;
129 device.dev = &dev->dev; 129 device.dev = &dev->dev;
130 device.slot.base = dev->hpa + GSC_SCSI_ZALON_OFFSET; 130 device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET;
131 device.slot.base_v = io_port; 131 device.slot.base_v = io_port;
132 device.slot.irq = dev->irq; 132 device.slot.irq = dev->irq;
133 device.differential = 2; 133 device.differential = 2;