Diffstat (limited to 'drivers/ata')
76 files changed, 7510 insertions, 1941 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index d8046a113c37..4672066167e3 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -173,6 +173,15 @@ config SATA_INIC162X | |||
173 | help | 173 | help |
174 | This option enables support for Initio 162x Serial ATA. | 174 | This option enables support for Initio 162x Serial ATA. |
175 | 175 | ||
176 | config PATA_ACPI | ||
177 | tristate "ACPI firmware driver for PATA" | ||
178 | depends on ATA_ACPI | ||
179 | help | ||
180 | This option enables an ACPI method driver which drives | ||
181 | motherboard PATA controller interfaces through the ACPI | ||
182 | firmware in the BIOS. This driver can sometimes handle | ||
183 | otherwise unsupported hardware. | ||
184 | |||
176 | config PATA_ALI | 185 | config PATA_ALI |
177 | tristate "ALi PATA support (Experimental)" | 186 | tristate "ALi PATA support (Experimental)" |
178 | depends on PCI && EXPERIMENTAL | 187 | depends on PCI && EXPERIMENTAL |
@@ -192,16 +201,25 @@ config PATA_AMD | |||
192 | If unsure, say N. | 201 | If unsure, say N. |
193 | 202 | ||
194 | config PATA_ARTOP | 203 | config PATA_ARTOP |
195 | tristate "ARTOP 6210/6260 PATA support (Experimental)" | 204 | tristate "ARTOP 6210/6260 PATA support" |
196 | depends on PCI && EXPERIMENTAL | 205 | depends on PCI |
197 | help | 206 | help |
198 | This option enables support for ARTOP PATA controllers. | 207 | This option enables support for ARTOP PATA controllers. |
199 | 208 | ||
200 | If unsure, say N. | 209 | If unsure, say N. |
201 | 210 | ||
211 | config PATA_AT32 | ||
212 | tristate "Atmel AVR32 PATA support (Experimental)" | ||
213 | depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL | ||
214 | help | ||
215 | This option enables support for the IDE devices on the | ||
216 | Atmel AT32AP platform. | ||
217 | |||
218 | If unsure, say N. | ||
219 | |||
202 | config PATA_ATIIXP | 220 | config PATA_ATIIXP |
203 | tristate "ATI PATA support (Experimental)" | 221 | tristate "ATI PATA support" |
204 | depends on PCI && EXPERIMENTAL | 222 | depends on PCI |
205 | help | 223 | help |
206 | This option enables support for the ATI ATA interfaces | 224 | This option enables support for the ATI ATA interfaces |
207 | found on the many ATI chipsets. | 225 | found on the many ATI chipsets. |
@@ -219,8 +237,8 @@ config PATA_CMD640_PCI | |||
219 | If unsure, say N. | 237 | If unsure, say N. |
220 | 238 | ||
221 | config PATA_CMD64X | 239 | config PATA_CMD64X |
222 | tristate "CMD64x PATA support (Very Experimental)" | 240 | tristate "CMD64x PATA support" |
223 | depends on PCI && EXPERIMENTAL | 241 | depends on PCI |
224 | help | 242 | help |
225 | This option enables support for the CMD64x series chips | 243 | This option enables support for the CMD64x series chips |
226 | except for the CMD640. | 244 | except for the CMD640. |
@@ -282,8 +300,8 @@ config ATA_GENERIC | |||
282 | If unsure, say N. | 300 | If unsure, say N. |
283 | 301 | ||
284 | config PATA_HPT366 | 302 | config PATA_HPT366 |
285 | tristate "HPT 366/368 PATA support (Experimental)" | 303 | tristate "HPT 366/368 PATA support" |
286 | depends on PCI && EXPERIMENTAL | 304 | depends on PCI |
287 | help | 305 | help |
288 | This option enables support for the HPT 366 and 368 | 306 | This option enables support for the HPT 366 and 368 |
289 | PATA controllers via the new ATA layer. | 307 | PATA controllers via the new ATA layer. |
@@ -432,6 +450,15 @@ config PATA_NS87410 | |||
432 | 450 | ||
433 | If unsure, say N. | 451 | If unsure, say N. |
434 | 452 | ||
453 | config PATA_NS87415 | ||
454 | tristate "Nat Semi NS87415 PATA support (Experimental)" | ||
455 | depends on PCI && EXPERIMENTAL | ||
456 | help | ||
457 | This option enables support for the National Semiconductor | ||
458 | NS87415 PCI-IDE controller. | ||
459 | |||
460 | If unsure, say N. | ||
461 | |||
435 | config PATA_OPTI | 462 | config PATA_OPTI |
436 | tristate "OPTI621/6215 PATA support (Very Experimental)" | 463 | tristate "OPTI621/6215 PATA support (Very Experimental)" |
437 | depends on PCI && EXPERIMENTAL | 464 | depends on PCI && EXPERIMENTAL |
@@ -596,4 +623,20 @@ config PATA_SCC | |||
596 | 623 | ||
597 | If unsure, say N. | 624 | If unsure, say N. |
598 | 625 | ||
626 | config PATA_BF54X | ||
627 | tristate "Blackfin 54x ATAPI support" | ||
628 | depends on BF542 || BF548 || BF549 | ||
629 | help | ||
630 | This option enables support for the built-in ATAPI controller on | ||
631 | Blackfin 54x family chips. | ||
632 | |||
633 | If unsure, say N. | ||
634 | |||
635 | config PATA_BF54X_DMA | ||
636 | bool "DMA mode" | ||
637 | depends on PATA_BF54X | ||
638 | default y | ||
639 | help | ||
640 | Enable DMA mode for Blackfin ATAPI controller. | ||
641 | |||
599 | endif # ATA | 642 | endif # ATA |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8149c68ac2c7..2a63645003eb 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PDC_ADMA) += pdc_adma.o | |||
21 | obj-$(CONFIG_PATA_ALI) += pata_ali.o | 21 | obj-$(CONFIG_PATA_ALI) += pata_ali.o |
22 | obj-$(CONFIG_PATA_AMD) += pata_amd.o | 22 | obj-$(CONFIG_PATA_AMD) += pata_amd.o |
23 | obj-$(CONFIG_PATA_ARTOP) += pata_artop.o | 23 | obj-$(CONFIG_PATA_ARTOP) += pata_artop.o |
24 | obj-$(CONFIG_PATA_AT32) += pata_at32.o | ||
24 | obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o | 25 | obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o |
25 | obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o | 26 | obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o |
26 | obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o | 27 | obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o |
@@ -39,6 +40,7 @@ obj-$(CONFIG_PATA_IT8213) += pata_it8213.o | |||
39 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o | 40 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o |
40 | obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o | 41 | obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o |
41 | obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o | 42 | obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o |
43 | obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o | ||
42 | obj-$(CONFIG_PATA_OPTI) += pata_opti.o | 44 | obj-$(CONFIG_PATA_OPTI) += pata_opti.o |
43 | obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o | 45 | obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o |
44 | obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o | 46 | obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o |
@@ -61,12 +63,16 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o | |||
61 | obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o | 63 | obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o |
62 | obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o | 64 | obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o |
63 | obj-$(CONFIG_PATA_SCC) += pata_scc.o | 65 | obj-$(CONFIG_PATA_SCC) += pata_scc.o |
66 | obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o | ||
64 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o | 67 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o |
65 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o | 68 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o |
69 | # Should be last but two libata driver | ||
70 | obj-$(CONFIG_PATA_ACPI) += pata_acpi.o | ||
66 | # Should be last but one libata driver | 71 | # Should be last but one libata driver |
67 | obj-$(CONFIG_ATA_GENERIC) += ata_generic.o | 72 | obj-$(CONFIG_ATA_GENERIC) += ata_generic.o |
68 | # Should be last libata driver | 73 | # Should be last libata driver |
69 | obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o | 74 | obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o |
70 | 75 | ||
71 | libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o | 76 | libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o \ |
77 | libata-pmp.o | ||
72 | libata-$(CONFIG_ATA_ACPI) += libata-acpi.o | 78 | libata-$(CONFIG_ATA_ACPI) += libata-acpi.o |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c16820325d7b..10bc3f64c453 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,7 +46,7 @@ | |||
46 | #include <linux/libata.h> | 46 | #include <linux/libata.h> |
47 | 47 | ||
48 | #define DRV_NAME "ahci" | 48 | #define DRV_NAME "ahci" |
49 | #define DRV_VERSION "2.3" | 49 | #define DRV_VERSION "3.0" |
50 | 50 | ||
51 | 51 | ||
52 | enum { | 52 | enum { |
@@ -77,11 +77,10 @@ enum { | |||
77 | RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ | 77 | RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ |
78 | 78 | ||
79 | board_ahci = 0, | 79 | board_ahci = 0, |
80 | board_ahci_pi = 1, | 80 | board_ahci_vt8251 = 1, |
81 | board_ahci_vt8251 = 2, | 81 | board_ahci_ign_iferr = 2, |
82 | board_ahci_ign_iferr = 3, | 82 | board_ahci_sb600 = 3, |
83 | board_ahci_sb600 = 4, | 83 | board_ahci_mv = 4, |
84 | board_ahci_mv = 5, | ||
85 | 84 | ||
86 | /* global controller registers */ | 85 | /* global controller registers */ |
87 | HOST_CAP = 0x00, /* host capabilities */ | 86 | HOST_CAP = 0x00, /* host capabilities */ |
@@ -97,6 +96,7 @@ enum { | |||
97 | 96 | ||
98 | /* HOST_CAP bits */ | 97 | /* HOST_CAP bits */ |
99 | HOST_CAP_SSC = (1 << 14), /* Slumber capable */ | 98 | HOST_CAP_SSC = (1 << 14), /* Slumber capable */ |
99 | HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ | ||
100 | HOST_CAP_CLO = (1 << 24), /* Command List Override support */ | 100 | HOST_CAP_CLO = (1 << 24), /* Command List Override support */ |
101 | HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ | 101 | HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ |
102 | HOST_CAP_SNTF = (1 << 29), /* SNotification register */ | 102 | HOST_CAP_SNTF = (1 << 29), /* SNotification register */ |
@@ -144,7 +144,8 @@ enum { | |||
144 | PORT_IRQ_IF_ERR | | 144 | PORT_IRQ_IF_ERR | |
145 | PORT_IRQ_CONNECT | | 145 | PORT_IRQ_CONNECT | |
146 | PORT_IRQ_PHYRDY | | 146 | PORT_IRQ_PHYRDY | |
147 | PORT_IRQ_UNK_FIS, | 147 | PORT_IRQ_UNK_FIS | |
148 | PORT_IRQ_BAD_PMP, | ||
148 | PORT_IRQ_ERROR = PORT_IRQ_FREEZE | | 149 | PORT_IRQ_ERROR = PORT_IRQ_FREEZE | |
149 | PORT_IRQ_TF_ERR | | 150 | PORT_IRQ_TF_ERR | |
150 | PORT_IRQ_HBUS_DATA_ERR, | 151 | PORT_IRQ_HBUS_DATA_ERR, |
@@ -154,6 +155,7 @@ enum { | |||
154 | 155 | ||
155 | /* PORT_CMD bits */ | 156 | /* PORT_CMD bits */ |
156 | PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ | 157 | PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ |
158 | PORT_CMD_PMP = (1 << 17), /* PMP attached */ | ||
157 | PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ | 159 | PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ |
158 | PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ | 160 | PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ |
159 | PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ | 161 | PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ |
@@ -167,19 +169,22 @@ enum { | |||
167 | PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ | 169 | PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ |
168 | PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ | 170 | PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ |
169 | 171 | ||
172 | /* hpriv->flags bits */ | ||
173 | AHCI_HFLAG_NO_NCQ = (1 << 0), | ||
174 | AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ | ||
175 | AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ | ||
176 | AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ | ||
177 | AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ | ||
178 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ | ||
179 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ | ||
180 | |||
170 | /* ap->flags bits */ | 181 | /* ap->flags bits */ |
171 | AHCI_FLAG_NO_NCQ = (1 << 24), | 182 | AHCI_FLAG_NO_HOTPLUG = (1 << 24), /* ignore PxSERR.DIAG.N */ |
172 | AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */ | ||
173 | AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */ | ||
174 | AHCI_FLAG_IGN_SERR_INTERNAL = (1 << 27), /* ignore SERR_INTERNAL */ | ||
175 | AHCI_FLAG_32BIT_ONLY = (1 << 28), /* force 32bit */ | ||
176 | AHCI_FLAG_MV_PATA = (1 << 29), /* PATA port */ | ||
177 | AHCI_FLAG_NO_MSI = (1 << 30), /* no PCI MSI */ | ||
178 | 183 | ||
179 | AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 184 | AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
180 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 185 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
181 | ATA_FLAG_SKIP_D2H_BSY | | 186 | ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, |
182 | ATA_FLAG_ACPI_SATA, | 187 | AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY, |
183 | }; | 188 | }; |
184 | 189 | ||
185 | struct ahci_cmd_hdr { | 190 | struct ahci_cmd_hdr { |
@@ -198,6 +203,7 @@ struct ahci_sg { | |||
198 | }; | 203 | }; |
199 | 204 | ||
200 | struct ahci_host_priv { | 205 | struct ahci_host_priv { |
206 | unsigned int flags; /* AHCI_HFLAG_* */ | ||
201 | u32 cap; /* cap to use */ | 207 | u32 cap; /* cap to use */ |
202 | u32 port_map; /* port map to use */ | 208 | u32 port_map; /* port map to use */ |
203 | u32 saved_cap; /* saved initial cap */ | 209 | u32 saved_cap; /* saved initial cap */ |
@@ -205,6 +211,7 @@ struct ahci_host_priv { | |||
205 | }; | 211 | }; |
206 | 212 | ||
207 | struct ahci_port_priv { | 213 | struct ahci_port_priv { |
214 | struct ata_link *active_link; | ||
208 | struct ahci_cmd_hdr *cmd_slot; | 215 | struct ahci_cmd_hdr *cmd_slot; |
209 | dma_addr_t cmd_slot_dma; | 216 | dma_addr_t cmd_slot_dma; |
210 | void *cmd_tbl; | 217 | void *cmd_tbl; |
@@ -215,6 +222,7 @@ struct ahci_port_priv { | |||
215 | unsigned int ncq_saw_d2h:1; | 222 | unsigned int ncq_saw_d2h:1; |
216 | unsigned int ncq_saw_dmas:1; | 223 | unsigned int ncq_saw_dmas:1; |
217 | unsigned int ncq_saw_sdb:1; | 224 | unsigned int ncq_saw_sdb:1; |
225 | u32 intr_mask; /* interrupts to enable */ | ||
218 | }; | 226 | }; |
219 | 227 | ||
220 | static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 228 | static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
@@ -229,6 +237,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc); | |||
229 | static u8 ahci_check_status(struct ata_port *ap); | 237 | static u8 ahci_check_status(struct ata_port *ap); |
230 | static void ahci_freeze(struct ata_port *ap); | 238 | static void ahci_freeze(struct ata_port *ap); |
231 | static void ahci_thaw(struct ata_port *ap); | 239 | static void ahci_thaw(struct ata_port *ap); |
240 | static void ahci_pmp_attach(struct ata_port *ap); | ||
241 | static void ahci_pmp_detach(struct ata_port *ap); | ||
232 | static void ahci_error_handler(struct ata_port *ap); | 242 | static void ahci_error_handler(struct ata_port *ap); |
233 | static void ahci_vt8251_error_handler(struct ata_port *ap); | 243 | static void ahci_vt8251_error_handler(struct ata_port *ap); |
234 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); | 244 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); |
@@ -262,20 +272,17 @@ static struct scsi_host_template ahci_sht = { | |||
262 | }; | 272 | }; |
263 | 273 | ||
264 | static const struct ata_port_operations ahci_ops = { | 274 | static const struct ata_port_operations ahci_ops = { |
265 | .port_disable = ata_port_disable, | ||
266 | |||
267 | .check_status = ahci_check_status, | 275 | .check_status = ahci_check_status, |
268 | .check_altstatus = ahci_check_status, | 276 | .check_altstatus = ahci_check_status, |
269 | .dev_select = ata_noop_dev_select, | 277 | .dev_select = ata_noop_dev_select, |
270 | 278 | ||
271 | .tf_read = ahci_tf_read, | 279 | .tf_read = ahci_tf_read, |
272 | 280 | ||
281 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
273 | .qc_prep = ahci_qc_prep, | 282 | .qc_prep = ahci_qc_prep, |
274 | .qc_issue = ahci_qc_issue, | 283 | .qc_issue = ahci_qc_issue, |
275 | 284 | ||
276 | .irq_clear = ahci_irq_clear, | 285 | .irq_clear = ahci_irq_clear, |
277 | .irq_on = ata_dummy_irq_on, | ||
278 | .irq_ack = ata_dummy_irq_ack, | ||
279 | 286 | ||
280 | .scr_read = ahci_scr_read, | 287 | .scr_read = ahci_scr_read, |
281 | .scr_write = ahci_scr_write, | 288 | .scr_write = ahci_scr_write, |
@@ -286,6 +293,9 @@ static const struct ata_port_operations ahci_ops = { | |||
286 | .error_handler = ahci_error_handler, | 293 | .error_handler = ahci_error_handler, |
287 | .post_internal_cmd = ahci_post_internal_cmd, | 294 | .post_internal_cmd = ahci_post_internal_cmd, |
288 | 295 | ||
296 | .pmp_attach = ahci_pmp_attach, | ||
297 | .pmp_detach = ahci_pmp_detach, | ||
298 | |||
289 | #ifdef CONFIG_PM | 299 | #ifdef CONFIG_PM |
290 | .port_suspend = ahci_port_suspend, | 300 | .port_suspend = ahci_port_suspend, |
291 | .port_resume = ahci_port_resume, | 301 | .port_resume = ahci_port_resume, |
@@ -296,20 +306,17 @@ static const struct ata_port_operations ahci_ops = { | |||
296 | }; | 306 | }; |
297 | 307 | ||
298 | static const struct ata_port_operations ahci_vt8251_ops = { | 308 | static const struct ata_port_operations ahci_vt8251_ops = { |
299 | .port_disable = ata_port_disable, | ||
300 | |||
301 | .check_status = ahci_check_status, | 309 | .check_status = ahci_check_status, |
302 | .check_altstatus = ahci_check_status, | 310 | .check_altstatus = ahci_check_status, |
303 | .dev_select = ata_noop_dev_select, | 311 | .dev_select = ata_noop_dev_select, |
304 | 312 | ||
305 | .tf_read = ahci_tf_read, | 313 | .tf_read = ahci_tf_read, |
306 | 314 | ||
315 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
307 | .qc_prep = ahci_qc_prep, | 316 | .qc_prep = ahci_qc_prep, |
308 | .qc_issue = ahci_qc_issue, | 317 | .qc_issue = ahci_qc_issue, |
309 | 318 | ||
310 | .irq_clear = ahci_irq_clear, | 319 | .irq_clear = ahci_irq_clear, |
311 | .irq_on = ata_dummy_irq_on, | ||
312 | .irq_ack = ata_dummy_irq_ack, | ||
313 | 320 | ||
314 | .scr_read = ahci_scr_read, | 321 | .scr_read = ahci_scr_read, |
315 | .scr_write = ahci_scr_write, | 322 | .scr_write = ahci_scr_write, |
@@ -320,6 +327,9 @@ static const struct ata_port_operations ahci_vt8251_ops = { | |||
320 | .error_handler = ahci_vt8251_error_handler, | 327 | .error_handler = ahci_vt8251_error_handler, |
321 | .post_internal_cmd = ahci_post_internal_cmd, | 328 | .post_internal_cmd = ahci_post_internal_cmd, |
322 | 329 | ||
330 | .pmp_attach = ahci_pmp_attach, | ||
331 | .pmp_detach = ahci_pmp_detach, | ||
332 | |||
323 | #ifdef CONFIG_PM | 333 | #ifdef CONFIG_PM |
324 | .port_suspend = ahci_port_suspend, | 334 | .port_suspend = ahci_port_suspend, |
325 | .port_resume = ahci_port_resume, | 335 | .port_resume = ahci_port_resume, |
@@ -329,53 +339,52 @@ static const struct ata_port_operations ahci_vt8251_ops = { | |||
329 | .port_stop = ahci_port_stop, | 339 | .port_stop = ahci_port_stop, |
330 | }; | 340 | }; |
331 | 341 | ||
342 | #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) | ||
343 | |||
332 | static const struct ata_port_info ahci_port_info[] = { | 344 | static const struct ata_port_info ahci_port_info[] = { |
333 | /* board_ahci */ | 345 | /* board_ahci */ |
334 | { | 346 | { |
335 | .flags = AHCI_FLAG_COMMON, | 347 | .flags = AHCI_FLAG_COMMON, |
336 | .pio_mask = 0x1f, /* pio0-4 */ | 348 | .link_flags = AHCI_LFLAG_COMMON, |
337 | .udma_mask = ATA_UDMA6, | ||
338 | .port_ops = &ahci_ops, | ||
339 | }, | ||
340 | /* board_ahci_pi */ | ||
341 | { | ||
342 | .flags = AHCI_FLAG_COMMON | AHCI_FLAG_HONOR_PI, | ||
343 | .pio_mask = 0x1f, /* pio0-4 */ | 349 | .pio_mask = 0x1f, /* pio0-4 */ |
344 | .udma_mask = ATA_UDMA6, | 350 | .udma_mask = ATA_UDMA6, |
345 | .port_ops = &ahci_ops, | 351 | .port_ops = &ahci_ops, |
346 | }, | 352 | }, |
347 | /* board_ahci_vt8251 */ | 353 | /* board_ahci_vt8251 */ |
348 | { | 354 | { |
349 | .flags = AHCI_FLAG_COMMON | ATA_FLAG_HRST_TO_RESUME | | 355 | AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), |
350 | AHCI_FLAG_NO_NCQ, | 356 | .flags = AHCI_FLAG_COMMON, |
357 | .link_flags = AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME, | ||
351 | .pio_mask = 0x1f, /* pio0-4 */ | 358 | .pio_mask = 0x1f, /* pio0-4 */ |
352 | .udma_mask = ATA_UDMA6, | 359 | .udma_mask = ATA_UDMA6, |
353 | .port_ops = &ahci_vt8251_ops, | 360 | .port_ops = &ahci_vt8251_ops, |
354 | }, | 361 | }, |
355 | /* board_ahci_ign_iferr */ | 362 | /* board_ahci_ign_iferr */ |
356 | { | 363 | { |
357 | .flags = AHCI_FLAG_COMMON | AHCI_FLAG_IGN_IRQ_IF_ERR, | 364 | AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), |
365 | .flags = AHCI_FLAG_COMMON, | ||
366 | .link_flags = AHCI_LFLAG_COMMON, | ||
358 | .pio_mask = 0x1f, /* pio0-4 */ | 367 | .pio_mask = 0x1f, /* pio0-4 */ |
359 | .udma_mask = ATA_UDMA6, | 368 | .udma_mask = ATA_UDMA6, |
360 | .port_ops = &ahci_ops, | 369 | .port_ops = &ahci_ops, |
361 | }, | 370 | }, |
362 | /* board_ahci_sb600 */ | 371 | /* board_ahci_sb600 */ |
363 | { | 372 | { |
364 | .flags = AHCI_FLAG_COMMON | | 373 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | |
365 | AHCI_FLAG_IGN_SERR_INTERNAL | | 374 | AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), |
366 | AHCI_FLAG_32BIT_ONLY, | 375 | .flags = AHCI_FLAG_COMMON, |
376 | .link_flags = AHCI_LFLAG_COMMON, | ||
367 | .pio_mask = 0x1f, /* pio0-4 */ | 377 | .pio_mask = 0x1f, /* pio0-4 */ |
368 | .udma_mask = ATA_UDMA6, | 378 | .udma_mask = ATA_UDMA6, |
369 | .port_ops = &ahci_ops, | 379 | .port_ops = &ahci_ops, |
370 | }, | 380 | }, |
371 | /* board_ahci_mv */ | 381 | /* board_ahci_mv */ |
372 | { | 382 | { |
373 | .sht = &ahci_sht, | 383 | AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | |
384 | AHCI_HFLAG_MV_PATA), | ||
374 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 385 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
375 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 386 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, |
376 | ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI | | 387 | .link_flags = AHCI_LFLAG_COMMON, |
377 | AHCI_FLAG_NO_NCQ | AHCI_FLAG_NO_MSI | | ||
378 | AHCI_FLAG_MV_PATA, | ||
379 | .pio_mask = 0x1f, /* pio0-4 */ | 388 | .pio_mask = 0x1f, /* pio0-4 */ |
380 | .udma_mask = ATA_UDMA6, | 389 | .udma_mask = ATA_UDMA6, |
381 | .port_ops = &ahci_ops, | 390 | .port_ops = &ahci_ops, |
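The port_info table above is where the AHCI_HFLAGS() trick pays off: board-specific quirks become host-private AHCI_HFLAG_* bits carried in the otherwise unused .private_data member, which ahci_init_one() later folds into hpriv->flags. A minimal standalone sketch of that round trip (plain userspace C; the struct definitions and main() are invented scaffolding, not kernel code):

    #include <stdio.h>

    #define AHCI_HFLAG_NO_NCQ	(1UL << 0)
    #define AHCI_HFLAG_NO_PMP	(1UL << 6)
    #define AHCI_HFLAGS(flags)	.private_data = (void *)(flags)

    struct port_info { void *private_data; };
    struct host_priv { unsigned long flags; };

    static const struct port_info board_vt8251 = {
    	AHCI_HFLAGS(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
    };

    int main(void)
    {
    	struct host_priv hpriv = { 0 };

    	/* probe recovers the flags by casting private_data back */
    	hpriv.flags |= (unsigned long)board_vt8251.private_data;

    	printf("NO_NCQ=%d NO_PMP=%d\n",
    	       !!(hpriv.flags & AHCI_HFLAG_NO_NCQ),
    	       !!(hpriv.flags & AHCI_HFLAG_NO_PMP));
    	return 0;
    }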
@@ -394,23 +403,25 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
394 | { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ | 403 | { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ |
395 | { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ | 404 | { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ |
396 | { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ | 405 | { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ |
397 | { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */ | 406 | { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ |
398 | { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */ | 407 | { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ |
399 | { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */ | 408 | { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ |
400 | { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */ | 409 | { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ |
401 | { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */ | 410 | { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ |
402 | { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */ | 411 | { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ |
403 | { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */ | 412 | { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ |
404 | { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */ | 413 | { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ |
405 | { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */ | 414 | { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ |
406 | { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */ | 415 | { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ |
407 | { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */ | 416 | { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ |
408 | { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */ | 417 | { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ |
409 | { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */ | 418 | { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ |
410 | { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */ | 419 | { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ |
411 | { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */ | 420 | { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ |
412 | { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */ | 421 | { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ |
413 | { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */ | 422 | { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ |
423 | { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ | ||
424 | { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ | ||
414 | 425 | ||
415 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 426 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
416 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 427 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
@@ -474,6 +485,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
474 | { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ | 485 | { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ |
475 | { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ | 486 | { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ |
476 | { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ | 487 | { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ |
488 | { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ | ||
489 | { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ | ||
490 | { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ | ||
491 | { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ | ||
492 | { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ | ||
493 | { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ | ||
494 | { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ | ||
495 | { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ | ||
477 | 496 | ||
478 | /* SiS */ | 497 | /* SiS */ |
479 | { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ | 498 | { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ |
@@ -524,7 +543,6 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap) | |||
524 | /** | 543 | /** |
525 | * ahci_save_initial_config - Save and fixup initial config values | 544 | * ahci_save_initial_config - Save and fixup initial config values |
526 | * @pdev: target PCI device | 545 | * @pdev: target PCI device |
527 | * @pi: associated ATA port info | ||
528 | * @hpriv: host private area to store config values | 546 | * @hpriv: host private area to store config values |
529 | * | 547 | * |
530 | * Some registers containing configuration info might be setup by | 548 | * Some registers containing configuration info might be setup by |
@@ -538,7 +556,6 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap) | |||
538 | * None. | 556 | * None. |
539 | */ | 557 | */ |
540 | static void ahci_save_initial_config(struct pci_dev *pdev, | 558 | static void ahci_save_initial_config(struct pci_dev *pdev, |
541 | const struct ata_port_info *pi, | ||
542 | struct ahci_host_priv *hpriv) | 559 | struct ahci_host_priv *hpriv) |
543 | { | 560 | { |
544 | void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; | 561 | void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; |
@@ -552,26 +569,22 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
552 | hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); | 569 | hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); |
553 | 570 | ||
554 | /* some chips have errata preventing 64bit use */ | 571 | /* some chips have errata preventing 64bit use */ |
555 | if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) { | 572 | if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { |
556 | dev_printk(KERN_INFO, &pdev->dev, | 573 | dev_printk(KERN_INFO, &pdev->dev, |
557 | "controller can't do 64bit DMA, forcing 32bit\n"); | 574 | "controller can't do 64bit DMA, forcing 32bit\n"); |
558 | cap &= ~HOST_CAP_64; | 575 | cap &= ~HOST_CAP_64; |
559 | } | 576 | } |
560 | 577 | ||
561 | if ((cap & HOST_CAP_NCQ) && (pi->flags & AHCI_FLAG_NO_NCQ)) { | 578 | if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { |
562 | dev_printk(KERN_INFO, &pdev->dev, | 579 | dev_printk(KERN_INFO, &pdev->dev, |
563 | "controller can't do NCQ, turning off CAP_NCQ\n"); | 580 | "controller can't do NCQ, turning off CAP_NCQ\n"); |
564 | cap &= ~HOST_CAP_NCQ; | 581 | cap &= ~HOST_CAP_NCQ; |
565 | } | 582 | } |
566 | 583 | ||
567 | /* fixup zero port_map */ | 584 | if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { |
568 | if (!port_map) { | 585 | dev_printk(KERN_INFO, &pdev->dev, |
569 | port_map = (1 << ahci_nr_ports(cap)) - 1; | 586 | "controller can't do PMP, turning off CAP_PMP\n"); |
570 | dev_printk(KERN_WARNING, &pdev->dev, | 587 | cap &= ~HOST_CAP_PMP; |
571 | "PORTS_IMPL is zero, forcing 0x%x\n", port_map); | ||
572 | |||
573 | /* write the fixed up value to the PI register */ | ||
574 | hpriv->saved_port_map = port_map; | ||
575 | } | 588 | } |
576 | 589 | ||
577 | /* | 590 | /* |
@@ -579,7 +592,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
579 | * is asserted through the standard AHCI port | 592 | * is asserted through the standard AHCI port |
580 | * presence register, as bit 4 (counting from 0) | 593 | * presence register, as bit 4 (counting from 0) |
581 | */ | 594 | */ |
582 | if (pi->flags & AHCI_FLAG_MV_PATA) { | 595 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { |
583 | dev_printk(KERN_ERR, &pdev->dev, | 596 | dev_printk(KERN_ERR, &pdev->dev, |
584 | "MV_AHCI HACK: port_map %x -> %x\n", | 597 | "MV_AHCI HACK: port_map %x -> %x\n", |
585 | hpriv->port_map, | 598 | hpriv->port_map, |
@@ -589,7 +602,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
589 | } | 602 | } |
590 | 603 | ||
591 | /* cross check port_map and cap.n_ports */ | 604 | /* cross check port_map and cap.n_ports */ |
592 | if (pi->flags & AHCI_FLAG_HONOR_PI) { | 605 | if (port_map) { |
593 | u32 tmp_port_map = port_map; | 606 | u32 tmp_port_map = port_map; |
594 | int n_ports = ahci_nr_ports(cap); | 607 | int n_ports = ahci_nr_ports(cap); |
595 | 608 | ||
@@ -600,17 +613,26 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
600 | } | 613 | } |
601 | } | 614 | } |
602 | 615 | ||
603 | /* Whine if inconsistent. No need to update cap. | 616 | /* If n_ports and port_map are inconsistent, whine and |
604 | * port_map is used to determine number of ports. | 617 | * clear port_map and let it be generated from n_ports. |
605 | */ | 618 | */ |
606 | if (n_ports || tmp_port_map) | 619 | if (n_ports || tmp_port_map) { |
607 | dev_printk(KERN_WARNING, &pdev->dev, | 620 | dev_printk(KERN_WARNING, &pdev->dev, |
608 | "nr_ports (%u) and implemented port map " | 621 | "nr_ports (%u) and implemented port map " |
609 | "(0x%x) don't match\n", | 622 | "(0x%x) don't match, using nr_ports\n", |
610 | ahci_nr_ports(cap), port_map); | 623 | ahci_nr_ports(cap), port_map); |
611 | } else { | 624 | port_map = 0; |
612 | /* fabricate port_map from cap.nr_ports */ | 625 | } |
626 | } | ||
627 | |||
628 | /* fabricate port_map from cap.nr_ports */ | ||
629 | if (!port_map) { | ||
613 | port_map = (1 << ahci_nr_ports(cap)) - 1; | 630 | port_map = (1 << ahci_nr_ports(cap)) - 1; |
631 | dev_printk(KERN_WARNING, &pdev->dev, | ||
632 | "forcing PORTS_IMPL to 0x%x\n", port_map); | ||
633 | |||
634 | /* write the fixed up value to the PI register */ | ||
635 | hpriv->saved_port_map = port_map; | ||
614 | } | 636 | } |
615 | 637 | ||
616 | /* record values to use during operation */ | 638 | /* record values to use during operation */ |
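The rewritten cross-check above no longer just whines on a PORTS_IMPL/CAP.NP mismatch; it discards the suspect port_map and falls back to fabricating one from the port count. A simplified standalone sketch of that decision (userspace C; the bit-counting loop is paraphrased, not the kernel's exact loop):

    #include <stdio.h>
    #include <stdint.h>

    static int ahci_nr_ports(uint32_t cap)
    {
    	return (cap & 0x1f) + 1;	/* CAP.NP is zero-based */
    }

    static uint32_t fixup_port_map(uint32_t cap, uint32_t port_map)
    {
    	uint32_t tmp = port_map;
    	int n = ahci_nr_ports(cap);

    	while (tmp && n) {		/* consume one set bit per port */
    		tmp &= tmp - 1;
    		n--;
    	}
    	if (n || tmp)			/* inconsistent: ignore PORTS_IMPL */
    		port_map = 0;
    	if (!port_map)			/* fabricate map from CAP.NP */
    		port_map = (1u << ahci_nr_ports(cap)) - 1;
    	return port_map;
    }

    int main(void)
    {
    	printf("0x%x\n", fixup_port_map(0x1, 0x3)); /* 2 ports, 2 bits: kept   */
    	printf("0x%x\n", fixup_port_map(0x3, 0x1)); /* 4 ports, 1 bit: -> 0xf  */
    	return 0;
    }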
@@ -836,8 +858,14 @@ static int ahci_reset_controller(struct ata_host *host) | |||
836 | void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; | 858 | void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; |
837 | u32 tmp; | 859 | u32 tmp; |
838 | 860 | ||
839 | /* global controller reset */ | 861 | /* we must be in AHCI mode, before using anything |
862 | * AHCI-specific, such as HOST_RESET. | ||
863 | */ | ||
840 | tmp = readl(mmio + HOST_CTL); | 864 | tmp = readl(mmio + HOST_CTL); |
865 | if (!(tmp & HOST_AHCI_EN)) | ||
866 | writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL); | ||
867 | |||
868 | /* global controller reset */ | ||
841 | if ((tmp & HOST_RESET) == 0) { | 869 | if ((tmp & HOST_RESET) == 0) { |
842 | writel(tmp | HOST_RESET, mmio + HOST_CTL); | 870 | writel(tmp | HOST_RESET, mmio + HOST_CTL); |
843 | readl(mmio + HOST_CTL); /* flush */ | 871 | readl(mmio + HOST_CTL); /* flush */ |
@@ -904,13 +932,14 @@ static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, | |||
904 | 932 | ||
905 | static void ahci_init_controller(struct ata_host *host) | 933 | static void ahci_init_controller(struct ata_host *host) |
906 | { | 934 | { |
935 | struct ahci_host_priv *hpriv = host->private_data; | ||
907 | struct pci_dev *pdev = to_pci_dev(host->dev); | 936 | struct pci_dev *pdev = to_pci_dev(host->dev); |
908 | void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; | 937 | void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; |
909 | int i; | 938 | int i; |
910 | void __iomem *port_mmio; | 939 | void __iomem *port_mmio; |
911 | u32 tmp; | 940 | u32 tmp; |
912 | 941 | ||
913 | if (host->ports[0]->flags & AHCI_FLAG_MV_PATA) { | 942 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { |
914 | port_mmio = __ahci_port_base(host, 4); | 943 | port_mmio = __ahci_port_base(host, 4); |
915 | 944 | ||
916 | writel(0, port_mmio + PORT_IRQ_MASK); | 945 | writel(0, port_mmio + PORT_IRQ_MASK); |
@@ -1042,9 +1071,10 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, | |||
1042 | return 0; | 1071 | return 0; |
1043 | } | 1072 | } |
1044 | 1073 | ||
1045 | static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, | 1074 | static int ahci_do_softreset(struct ata_link *link, unsigned int *class, |
1046 | int pmp, unsigned long deadline) | 1075 | int pmp, unsigned long deadline) |
1047 | { | 1076 | { |
1077 | struct ata_port *ap = link->ap; | ||
1048 | const char *reason = NULL; | 1078 | const char *reason = NULL; |
1049 | unsigned long now, msecs; | 1079 | unsigned long now, msecs; |
1050 | struct ata_taskfile tf; | 1080 | struct ata_taskfile tf; |
@@ -1052,7 +1082,7 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, | |||
1052 | 1082 | ||
1053 | DPRINTK("ENTER\n"); | 1083 | DPRINTK("ENTER\n"); |
1054 | 1084 | ||
1055 | if (ata_port_offline(ap)) { | 1085 | if (ata_link_offline(link)) { |
1056 | DPRINTK("PHY reports no device\n"); | 1086 | DPRINTK("PHY reports no device\n"); |
1057 | *class = ATA_DEV_NONE; | 1087 | *class = ATA_DEV_NONE; |
1058 | return 0; | 1088 | return 0; |
@@ -1061,10 +1091,10 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, | |||
1061 | /* prepare for SRST (AHCI-1.1 10.4.1) */ | 1091 | /* prepare for SRST (AHCI-1.1 10.4.1) */ |
1062 | rc = ahci_kick_engine(ap, 1); | 1092 | rc = ahci_kick_engine(ap, 1); |
1063 | if (rc) | 1093 | if (rc) |
1064 | ata_port_printk(ap, KERN_WARNING, | 1094 | ata_link_printk(link, KERN_WARNING, |
1065 | "failed to reset engine (errno=%d)", rc); | 1095 | "failed to reset engine (errno=%d)", rc); |
1066 | 1096 | ||
1067 | ata_tf_init(ap->device, &tf); | 1097 | ata_tf_init(link->device, &tf); |
1068 | 1098 | ||
1069 | /* issue the first D2H Register FIS */ | 1099 | /* issue the first D2H Register FIS */ |
1070 | msecs = 0; | 1100 | msecs = 0; |
@@ -1109,19 +1139,25 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, | |||
1109 | return 0; | 1139 | return 0; |
1110 | 1140 | ||
1111 | fail: | 1141 | fail: |
1112 | ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); | 1142 | ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); |
1113 | return rc; | 1143 | return rc; |
1114 | } | 1144 | } |
1115 | 1145 | ||
1116 | static int ahci_softreset(struct ata_port *ap, unsigned int *class, | 1146 | static int ahci_softreset(struct ata_link *link, unsigned int *class, |
1117 | unsigned long deadline) | 1147 | unsigned long deadline) |
1118 | { | 1148 | { |
1119 | return ahci_do_softreset(ap, class, 0, deadline); | 1149 | int pmp = 0; |
1150 | |||
1151 | if (link->ap->flags & ATA_FLAG_PMP) | ||
1152 | pmp = SATA_PMP_CTRL_PORT; | ||
1153 | |||
1154 | return ahci_do_softreset(link, class, pmp, deadline); | ||
1120 | } | 1155 | } |
1121 | 1156 | ||
1122 | static int ahci_hardreset(struct ata_port *ap, unsigned int *class, | 1157 | static int ahci_hardreset(struct ata_link *link, unsigned int *class, |
1123 | unsigned long deadline) | 1158 | unsigned long deadline) |
1124 | { | 1159 | { |
1160 | struct ata_port *ap = link->ap; | ||
1125 | struct ahci_port_priv *pp = ap->private_data; | 1161 | struct ahci_port_priv *pp = ap->private_data; |
1126 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; | 1162 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; |
1127 | struct ata_taskfile tf; | 1163 | struct ata_taskfile tf; |
@@ -1132,26 +1168,27 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class, | |||
1132 | ahci_stop_engine(ap); | 1168 | ahci_stop_engine(ap); |
1133 | 1169 | ||
1134 | /* clear D2H reception area to properly wait for D2H FIS */ | 1170 | /* clear D2H reception area to properly wait for D2H FIS */ |
1135 | ata_tf_init(ap->device, &tf); | 1171 | ata_tf_init(link->device, &tf); |
1136 | tf.command = 0x80; | 1172 | tf.command = 0x80; |
1137 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); | 1173 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
1138 | 1174 | ||
1139 | rc = sata_std_hardreset(ap, class, deadline); | 1175 | rc = sata_std_hardreset(link, class, deadline); |
1140 | 1176 | ||
1141 | ahci_start_engine(ap); | 1177 | ahci_start_engine(ap); |
1142 | 1178 | ||
1143 | if (rc == 0 && ata_port_online(ap)) | 1179 | if (rc == 0 && ata_link_online(link)) |
1144 | *class = ahci_dev_classify(ap); | 1180 | *class = ahci_dev_classify(ap); |
1145 | if (*class == ATA_DEV_UNKNOWN) | 1181 | if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN) |
1146 | *class = ATA_DEV_NONE; | 1182 | *class = ATA_DEV_NONE; |
1147 | 1183 | ||
1148 | DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); | 1184 | DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); |
1149 | return rc; | 1185 | return rc; |
1150 | } | 1186 | } |
1151 | 1187 | ||
1152 | static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, | 1188 | static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, |
1153 | unsigned long deadline) | 1189 | unsigned long deadline) |
1154 | { | 1190 | { |
1191 | struct ata_port *ap = link->ap; | ||
1155 | u32 serror; | 1192 | u32 serror; |
1156 | int rc; | 1193 | int rc; |
1157 | 1194 | ||
@@ -1159,7 +1196,7 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, | |||
1159 | 1196 | ||
1160 | ahci_stop_engine(ap); | 1197 | ahci_stop_engine(ap); |
1161 | 1198 | ||
1162 | rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context), | 1199 | rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), |
1163 | deadline); | 1200 | deadline); |
1164 | 1201 | ||
1165 | /* vt8251 needs SError cleared for the port to operate */ | 1202 | /* vt8251 needs SError cleared for the port to operate */ |
@@ -1176,12 +1213,13 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, | |||
1176 | return rc ?: -EAGAIN; | 1213 | return rc ?: -EAGAIN; |
1177 | } | 1214 | } |
1178 | 1215 | ||
1179 | static void ahci_postreset(struct ata_port *ap, unsigned int *class) | 1216 | static void ahci_postreset(struct ata_link *link, unsigned int *class) |
1180 | { | 1217 | { |
1218 | struct ata_port *ap = link->ap; | ||
1181 | void __iomem *port_mmio = ahci_port_base(ap); | 1219 | void __iomem *port_mmio = ahci_port_base(ap); |
1182 | u32 new_tmp, tmp; | 1220 | u32 new_tmp, tmp; |
1183 | 1221 | ||
1184 | ata_std_postreset(ap, class); | 1222 | ata_std_postreset(link, class); |
1185 | 1223 | ||
1186 | /* Make sure port's ATAPI bit is set appropriately */ | 1224 | /* Make sure port's ATAPI bit is set appropriately */ |
1187 | new_tmp = tmp = readl(port_mmio + PORT_CMD); | 1225 | new_tmp = tmp = readl(port_mmio + PORT_CMD); |
@@ -1195,6 +1233,12 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class) | |||
1195 | } | 1233 | } |
1196 | } | 1234 | } |
1197 | 1235 | ||
1236 | static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class, | ||
1237 | unsigned long deadline) | ||
1238 | { | ||
1239 | return ahci_do_softreset(link, class, link->pmp, deadline); | ||
1240 | } | ||
1241 | |||
1198 | static u8 ahci_check_status(struct ata_port *ap) | 1242 | static u8 ahci_check_status(struct ata_port *ap) |
1199 | { | 1243 | { |
1200 | void __iomem *mmio = ap->ioaddr.cmd_addr; | 1244 | void __iomem *mmio = ap->ioaddr.cmd_addr; |
@@ -1253,7 +1297,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) | |||
1253 | */ | 1297 | */ |
1254 | cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; | 1298 | cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; |
1255 | 1299 | ||
1256 | ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl); | 1300 | ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); |
1257 | if (is_atapi) { | 1301 | if (is_atapi) { |
1258 | memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); | 1302 | memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); |
1259 | memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); | 1303 | memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); |
@@ -1266,7 +1310,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) | |||
1266 | /* | 1310 | /* |
1267 | * Fill in command slot information. | 1311 | * Fill in command slot information. |
1268 | */ | 1312 | */ |
1269 | opts = cmd_fis_len | n_elem << 16; | 1313 | opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); |
1270 | if (qc->tf.flags & ATA_TFLAG_WRITE) | 1314 | if (qc->tf.flags & ATA_TFLAG_WRITE) |
1271 | opts |= AHCI_CMD_WRITE; | 1315 | opts |= AHCI_CMD_WRITE; |
1272 | if (is_atapi) | 1316 | if (is_atapi) |
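With PMP support, ahci_qc_prep() must route each command to the correct device behind the multiplier: the PMP port number is handed to ata_tf_to_fis() and also packed into bits 15:12 of command-header DWORD 0, next to the FIS length and the PRDT entry count. A small illustrative sketch of that DW0 encoding (standalone C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define AHCI_CMD_WRITE	(1 << 6)	/* W bit of DW0 */

    static uint32_t build_opts(unsigned cmd_fis_len, unsigned n_elem,
    			   unsigned pmp, int is_write)
    {
    	/* FIS length (DWORDs) in 4:0, PMP port in 15:12, PRDTL in 31:16 */
    	uint32_t opts = cmd_fis_len | n_elem << 16 | pmp << 12;

    	if (is_write)
    		opts |= AHCI_CMD_WRITE;
    	return opts;
    }

    int main(void)
    {
    	/* 20-byte H2D FIS (5 DWORDs), 8 PRD entries, PMP port 3, write */
    	printf("0x%08x\n", build_opts(5, 8, 3, 1));
    	return 0;
    }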
@@ -1277,66 +1321,87 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) | |||
1277 | 1321 | ||
1278 | static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) | 1322 | static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) |
1279 | { | 1323 | { |
1324 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
1280 | struct ahci_port_priv *pp = ap->private_data; | 1325 | struct ahci_port_priv *pp = ap->private_data; |
1281 | struct ata_eh_info *ehi = &ap->eh_info; | 1326 | struct ata_eh_info *host_ehi = &ap->link.eh_info; |
1282 | unsigned int err_mask = 0, action = 0; | 1327 | struct ata_link *link = NULL; |
1283 | struct ata_queued_cmd *qc; | 1328 | struct ata_queued_cmd *active_qc; |
1329 | struct ata_eh_info *active_ehi; | ||
1284 | u32 serror; | 1330 | u32 serror; |
1285 | 1331 | ||
1286 | ata_ehi_clear_desc(ehi); | 1332 | /* determine active link */ |
1333 | ata_port_for_each_link(link, ap) | ||
1334 | if (ata_link_active(link)) | ||
1335 | break; | ||
1336 | if (!link) | ||
1337 | link = &ap->link; | ||
1338 | |||
1339 | active_qc = ata_qc_from_tag(ap, link->active_tag); | ||
1340 | active_ehi = &link->eh_info; | ||
1341 | |||
1342 | /* record irq stat */ | ||
1343 | ata_ehi_clear_desc(host_ehi); | ||
1344 | ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); | ||
1287 | 1345 | ||
1288 | /* AHCI needs SError cleared; otherwise, it might lock up */ | 1346 | /* AHCI needs SError cleared; otherwise, it might lock up */ |
1289 | ahci_scr_read(ap, SCR_ERROR, &serror); | 1347 | ahci_scr_read(ap, SCR_ERROR, &serror); |
1290 | ahci_scr_write(ap, SCR_ERROR, serror); | 1348 | ahci_scr_write(ap, SCR_ERROR, serror); |
1291 | 1349 | host_ehi->serror |= serror; | |
1292 | /* analyze @irq_stat */ | ||
1293 | ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); | ||
1294 | 1350 | ||
1295 | /* some controllers set IRQ_IF_ERR on device errors, ignore it */ | 1351 | /* some controllers set IRQ_IF_ERR on device errors, ignore it */ |
1296 | if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR) | 1352 | if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) |
1297 | irq_stat &= ~PORT_IRQ_IF_ERR; | 1353 | irq_stat &= ~PORT_IRQ_IF_ERR; |
1298 | 1354 | ||
1299 | if (irq_stat & PORT_IRQ_TF_ERR) { | 1355 | if (irq_stat & PORT_IRQ_TF_ERR) { |
1300 | err_mask |= AC_ERR_DEV; | 1356 | /* If qc is active, charge it; otherwise, the active |
1301 | if (ap->flags & AHCI_FLAG_IGN_SERR_INTERNAL) | 1357 | * link. There's no active qc on NCQ errors. It will |
1302 | serror &= ~SERR_INTERNAL; | 1358 | * be determined by EH by reading log page 10h. |
1359 | */ | ||
1360 | if (active_qc) | ||
1361 | active_qc->err_mask |= AC_ERR_DEV; | ||
1362 | else | ||
1363 | active_ehi->err_mask |= AC_ERR_DEV; | ||
1364 | |||
1365 | if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) | ||
1366 | host_ehi->serror &= ~SERR_INTERNAL; | ||
1367 | } | ||
1368 | |||
1369 | if (irq_stat & PORT_IRQ_UNK_FIS) { | ||
1370 | u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); | ||
1371 | |||
1372 | active_ehi->err_mask |= AC_ERR_HSM; | ||
1373 | active_ehi->action |= ATA_EH_SOFTRESET; | ||
1374 | ata_ehi_push_desc(active_ehi, | ||
1375 | "unknown FIS %08x %08x %08x %08x" , | ||
1376 | unk[0], unk[1], unk[2], unk[3]); | ||
1377 | } | ||
1378 | |||
1379 | if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) { | ||
1380 | active_ehi->err_mask |= AC_ERR_HSM; | ||
1381 | active_ehi->action |= ATA_EH_SOFTRESET; | ||
1382 | ata_ehi_push_desc(active_ehi, "incorrect PMP"); | ||
1303 | } | 1383 | } |
1304 | 1384 | ||
1305 | if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { | 1385 | if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { |
1306 | err_mask |= AC_ERR_HOST_BUS; | 1386 | host_ehi->err_mask |= AC_ERR_HOST_BUS; |
1307 | action |= ATA_EH_SOFTRESET; | 1387 | host_ehi->action |= ATA_EH_SOFTRESET; |
1388 | ata_ehi_push_desc(host_ehi, "host bus error"); | ||
1308 | } | 1389 | } |
1309 | 1390 | ||
1310 | if (irq_stat & PORT_IRQ_IF_ERR) { | 1391 | if (irq_stat & PORT_IRQ_IF_ERR) { |
1311 | err_mask |= AC_ERR_ATA_BUS; | 1392 | host_ehi->err_mask |= AC_ERR_ATA_BUS; |
1312 | action |= ATA_EH_SOFTRESET; | 1393 | host_ehi->action |= ATA_EH_SOFTRESET; |
1313 | ata_ehi_push_desc(ehi, "interface fatal error"); | 1394 | ata_ehi_push_desc(host_ehi, "interface fatal error"); |
1314 | } | 1395 | } |
1315 | 1396 | ||
1316 | if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { | 1397 | if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { |
1317 | ata_ehi_hotplugged(ehi); | 1398 | ata_ehi_hotplugged(host_ehi); |
1318 | ata_ehi_push_desc(ehi, "%s", irq_stat & PORT_IRQ_CONNECT ? | 1399 | ata_ehi_push_desc(host_ehi, "%s", |
1400 | irq_stat & PORT_IRQ_CONNECT ? | ||
1319 | "connection status changed" : "PHY RDY changed"); | 1401 | "connection status changed" : "PHY RDY changed"); |
1320 | } | 1402 | } |
1321 | 1403 | ||
1322 | if (irq_stat & PORT_IRQ_UNK_FIS) { | ||
1323 | u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); | ||
1324 | |||
1325 | err_mask |= AC_ERR_HSM; | ||
1326 | action |= ATA_EH_SOFTRESET; | ||
1327 | ata_ehi_push_desc(ehi, "unknown FIS %08x %08x %08x %08x", | ||
1328 | unk[0], unk[1], unk[2], unk[3]); | ||
1329 | } | ||
1330 | |||
1331 | /* okay, let's hand over to EH */ | 1404 | /* okay, let's hand over to EH */ |
1332 | ehi->serror |= serror; | ||
1333 | ehi->action |= action; | ||
1334 | |||
1335 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1336 | if (qc) | ||
1337 | qc->err_mask |= err_mask; | ||
1338 | else | ||
1339 | ehi->err_mask |= err_mask; | ||
1340 | 1405 | ||
1341 | if (irq_stat & PORT_IRQ_FREEZE) | 1406 | if (irq_stat & PORT_IRQ_FREEZE) |
1342 | ata_port_freeze(ap); | 1407 | ata_port_freeze(ap); |
@@ -1347,25 +1412,64 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) | |||
1347 | static void ahci_port_intr(struct ata_port *ap) | 1412 | static void ahci_port_intr(struct ata_port *ap) |
1348 | { | 1413 | { |
1349 | void __iomem *port_mmio = ap->ioaddr.cmd_addr; | 1414 | void __iomem *port_mmio = ap->ioaddr.cmd_addr; |
1350 | struct ata_eh_info *ehi = &ap->eh_info; | 1415 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1351 | struct ahci_port_priv *pp = ap->private_data; | 1416 | struct ahci_port_priv *pp = ap->private_data; |
1417 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
1418 | int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); | ||
1352 | u32 status, qc_active; | 1419 | u32 status, qc_active; |
1353 | int rc, known_irq = 0; | 1420 | int rc, known_irq = 0; |
1354 | 1421 | ||
1355 | status = readl(port_mmio + PORT_IRQ_STAT); | 1422 | status = readl(port_mmio + PORT_IRQ_STAT); |
1356 | writel(status, port_mmio + PORT_IRQ_STAT); | 1423 | writel(status, port_mmio + PORT_IRQ_STAT); |
1357 | 1424 | ||
1425 | /* ignore BAD_PMP while resetting */ | ||
1426 | if (unlikely(resetting)) | ||
1427 | status &= ~PORT_IRQ_BAD_PMP; | ||
1428 | |||
1358 | if (unlikely(status & PORT_IRQ_ERROR)) { | 1429 | if (unlikely(status & PORT_IRQ_ERROR)) { |
1359 | ahci_error_intr(ap, status); | 1430 | ahci_error_intr(ap, status); |
1360 | return; | 1431 | return; |
1361 | } | 1432 | } |
1362 | 1433 | ||
1363 | if (ap->sactive) | 1434 | if (status & PORT_IRQ_SDB_FIS) { |
1435 | /* If SNotification is available, leave notification | ||
1436 | * handling to sata_async_notification(). If not, | ||
1437 | * emulate it by snooping SDB FIS RX area. | ||
1438 | * | ||
1439 | * Snooping FIS RX area is probably cheaper than | ||
1440 | * poking SNotification but some controllers which | ||
1441 | * implement SNotification, ICH9 for example, don't | ||
1442 | * store AN SDB FIS into receive area. | ||
1443 | */ | ||
1444 | if (hpriv->cap & HOST_CAP_SNTF) | ||
1445 | sata_async_notification(ap); | ||
1446 | else { | ||
1447 | /* If the 'N' bit in word 0 of the FIS is set, | ||
1448 | * we just received asynchronous notification. | ||
1449 | * Tell libata about it. | ||
1450 | */ | ||
1451 | const __le32 *f = pp->rx_fis + RX_FIS_SDB; | ||
1452 | u32 f0 = le32_to_cpu(f[0]); | ||
1453 | |||
1454 | if (f0 & (1 << 15)) | ||
1455 | sata_async_notification(ap); | ||
1456 | } | ||
1457 | } | ||
1458 | |||
1459 | /* pp->active_link is valid iff any command is in flight */ | ||
1460 | if (ap->qc_active && pp->active_link->sactive) | ||
1364 | qc_active = readl(port_mmio + PORT_SCR_ACT); | 1461 | qc_active = readl(port_mmio + PORT_SCR_ACT); |
1365 | else | 1462 | else |
1366 | qc_active = readl(port_mmio + PORT_CMD_ISSUE); | 1463 | qc_active = readl(port_mmio + PORT_CMD_ISSUE); |
1367 | 1464 | ||
1368 | rc = ata_qc_complete_multiple(ap, qc_active, NULL); | 1465 | rc = ata_qc_complete_multiple(ap, qc_active, NULL); |
1466 | |||
1467 | /* If resetting, spurious or invalid completions are expected, | ||
1468 | * return unconditionally. | ||
1469 | */ | ||
1470 | if (resetting) | ||
1471 | return; | ||
1472 | |||
1369 | if (rc > 0) | 1473 | if (rc > 0) |
1370 | return; | 1474 | return; |
1371 | if (rc < 0) { | 1475 | if (rc < 0) { |
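The SDB-FIS branch above adds asynchronous-notification handling: if the controller exposes SNotification, the work is left to sata_async_notification(); otherwise the driver snoops word 0 of the received Set Device Bits FIS and looks for the 'N' bit. A standalone sketch of that fallback check (userspace C, illustrative only; the buffer setup in main() is invented):

    #include <stdio.h>
    #include <stdint.h>

    #define RX_FIS_SDB	0x58	/* offset of the SDB FIS in the RX area */

    static int sdb_fis_signals_an(const uint8_t *rx_fis)
    {
    	const uint8_t *p = rx_fis + RX_FIS_SDB;
    	/* open-coded le32_to_cpu() of FIS word 0 */
    	uint32_t f0 = p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;

    	return !!(f0 & (1u << 15));	/* 'N' (notification) bit */
    }

    int main(void)
    {
    	uint8_t rx[256] = { 0 };

    	rx[RX_FIS_SDB + 1] = 0x80;	/* set bit 15 of FIS word 0 */
    	printf("AN pending: %d\n", sdb_fis_signals_an(rx));
    	return 0;
    }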
@@ -1380,7 +1484,7 @@ static void ahci_port_intr(struct ata_port *ap) | |||
1380 | /* if !NCQ, ignore. No modern ATA device has broken HSM | 1484 | /* if !NCQ, ignore. No modern ATA device has broken HSM |
1381 | * implementation for non-NCQ commands. | 1485 | * implementation for non-NCQ commands. |
1382 | */ | 1486 | */ |
1383 | if (!ap->sactive) | 1487 | if (!ap->link.sactive) |
1384 | return; | 1488 | return; |
1385 | 1489 | ||
1386 | if (status & PORT_IRQ_D2H_REG_FIS) { | 1490 | if (status & PORT_IRQ_D2H_REG_FIS) { |
@@ -1433,7 +1537,7 @@ static void ahci_port_intr(struct ata_port *ap) | |||
1433 | if (!known_irq) | 1537 | if (!known_irq) |
1434 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " | 1538 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " |
1435 | "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", | 1539 | "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", |
1436 | status, ap->active_tag, ap->sactive); | 1540 | status, ap->link.active_tag, ap->link.sactive); |
1437 | } | 1541 | } |
1438 | 1542 | ||
1439 | static void ahci_irq_clear(struct ata_port *ap) | 1543 | static void ahci_irq_clear(struct ata_port *ap) |
@@ -1498,6 +1602,13 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) | |||
1498 | { | 1602 | { |
1499 | struct ata_port *ap = qc->ap; | 1603 | struct ata_port *ap = qc->ap; |
1500 | void __iomem *port_mmio = ahci_port_base(ap); | 1604 | void __iomem *port_mmio = ahci_port_base(ap); |
1605 | struct ahci_port_priv *pp = ap->private_data; | ||
1606 | |||
1607 | /* Keep track of the currently active link. It will be used | ||
1608 | * in completion path to determine whether NCQ phase is in | ||
1609 | * progress. | ||
1610 | */ | ||
1611 | pp->active_link = qc->dev->link; | ||
1501 | 1612 | ||
1502 | if (qc->tf.protocol == ATA_PROT_NCQ) | 1613 | if (qc->tf.protocol == ATA_PROT_NCQ) |
1503 | writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); | 1614 | writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); |
@@ -1520,6 +1631,7 @@ static void ahci_thaw(struct ata_port *ap) | |||
1520 | void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; | 1631 | void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; |
1521 | void __iomem *port_mmio = ahci_port_base(ap); | 1632 | void __iomem *port_mmio = ahci_port_base(ap); |
1522 | u32 tmp; | 1633 | u32 tmp; |
1634 | struct ahci_port_priv *pp = ap->private_data; | ||
1523 | 1635 | ||
1524 | /* clear IRQ */ | 1636 | /* clear IRQ */ |
1525 | tmp = readl(port_mmio + PORT_IRQ_STAT); | 1637 | tmp = readl(port_mmio + PORT_IRQ_STAT); |
@@ -1527,7 +1639,7 @@ static void ahci_thaw(struct ata_port *ap) | |||
1527 | writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); | 1639 | writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); |
1528 | 1640 | ||
1529 | /* turn IRQ back on */ | 1641 | /* turn IRQ back on */ |
1530 | writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK); | 1642 | writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); |
1531 | } | 1643 | } |
1532 | 1644 | ||
1533 | static void ahci_error_handler(struct ata_port *ap) | 1645 | static void ahci_error_handler(struct ata_port *ap) |
@@ -1539,8 +1651,10 @@ static void ahci_error_handler(struct ata_port *ap) | |||
1539 | } | 1651 | } |
1540 | 1652 | ||
1541 | /* perform recovery */ | 1653 | /* perform recovery */ |
1542 | ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset, | 1654 | sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset, |
1543 | ahci_postreset); | 1655 | ahci_hardreset, ahci_postreset, |
1656 | sata_pmp_std_prereset, ahci_pmp_softreset, | ||
1657 | sata_pmp_std_hardreset, sata_pmp_std_postreset); | ||
1544 | } | 1658 | } |
1545 | 1659 | ||
1546 | static void ahci_vt8251_error_handler(struct ata_port *ap) | 1660 | static void ahci_vt8251_error_handler(struct ata_port *ap) |
@@ -1565,11 +1679,44 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) | |||
1565 | ahci_kick_engine(ap, 1); | 1679 | ahci_kick_engine(ap, 1); |
1566 | } | 1680 | } |
1567 | 1681 | ||
1682 | static void ahci_pmp_attach(struct ata_port *ap) | ||
1683 | { | ||
1684 | void __iomem *port_mmio = ahci_port_base(ap); | ||
1685 | struct ahci_port_priv *pp = ap->private_data; | ||
1686 | u32 cmd; | ||
1687 | |||
1688 | cmd = readl(port_mmio + PORT_CMD); | ||
1689 | cmd |= PORT_CMD_PMP; | ||
1690 | writel(cmd, port_mmio + PORT_CMD); | ||
1691 | |||
1692 | pp->intr_mask |= PORT_IRQ_BAD_PMP; | ||
1693 | writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); | ||
1694 | } | ||
1695 | |||
1696 | static void ahci_pmp_detach(struct ata_port *ap) | ||
1697 | { | ||
1698 | void __iomem *port_mmio = ahci_port_base(ap); | ||
1699 | struct ahci_port_priv *pp = ap->private_data; | ||
1700 | u32 cmd; | ||
1701 | |||
1702 | cmd = readl(port_mmio + PORT_CMD); | ||
1703 | cmd &= ~PORT_CMD_PMP; | ||
1704 | writel(cmd, port_mmio + PORT_CMD); | ||
1705 | |||
1706 | pp->intr_mask &= ~PORT_IRQ_BAD_PMP; | ||
1707 | writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); | ||
1708 | } | ||
1709 | |||
1568 | static int ahci_port_resume(struct ata_port *ap) | 1710 | static int ahci_port_resume(struct ata_port *ap) |
1569 | { | 1711 | { |
1570 | ahci_power_up(ap); | 1712 | ahci_power_up(ap); |
1571 | ahci_start_port(ap); | 1713 | ahci_start_port(ap); |
1572 | 1714 | ||
1715 | if (ap->nr_pmp_links) | ||
1716 | ahci_pmp_attach(ap); | ||
1717 | else | ||
1718 | ahci_pmp_detach(ap); | ||
1719 | |||
1573 | return 0; | 1720 | return 0; |
1574 | } | 1721 | } |
1575 | 1722 | ||
@@ -1681,6 +1828,12 @@ static int ahci_port_start(struct ata_port *ap) | |||
1681 | pp->cmd_tbl = mem; | 1828 | pp->cmd_tbl = mem; |
1682 | pp->cmd_tbl_dma = mem_dma; | 1829 | pp->cmd_tbl_dma = mem_dma; |
1683 | 1830 | ||
1831 | /* | ||
1832 | * Save off initial list of interrupts to be enabled. | ||
1833 | * This could be changed later | ||
1834 | */ | ||
1835 | pp->intr_mask = DEF_PORT_IRQ; | ||
1836 | |||
1684 | ap->private_data = pp; | 1837 | ap->private_data = pp; |
1685 | 1838 | ||
1686 | /* engage engines, captain */ | 1839 | /* engage engines, captain */ |
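Note on the hunk above: ahci_port_start() now seeds pp->intr_mask with DEF_PORT_IRQ, and the new ahci_pmp_attach()/ahci_pmp_detach() helpers OR the PORT_IRQ_BAD_PMP bit in or out of that cached mask before rewriting PORT_IRQ_MASK, so ahci_thaw() restores whatever the port currently wants rather than the compile-time default. A minimal sketch of that cached-mask idiom, assuming it sits in ahci.c next to the helpers above (the function name is made up for illustration):

static void example_enable_port_irq(struct ata_port *ap, u32 extra_bits)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);

	pp->intr_mask |= extra_bits;	/* remember the bit for later thaws */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);	/* apply it now */
}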
@@ -1830,20 +1983,24 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1830 | if (rc) | 1983 | if (rc) |
1831 | return rc; | 1984 | return rc; |
1832 | 1985 | ||
1833 | if ((pi.flags & AHCI_FLAG_NO_MSI) || pci_enable_msi(pdev)) | ||
1834 | pci_intx(pdev, 1); | ||
1835 | |||
1836 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); | 1986 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); |
1837 | if (!hpriv) | 1987 | if (!hpriv) |
1838 | return -ENOMEM; | 1988 | return -ENOMEM; |
1989 | hpriv->flags |= (unsigned long)pi.private_data; | ||
1990 | |||
1991 | if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) | ||
1992 | pci_intx(pdev, 1); | ||
1839 | 1993 | ||
1840 | /* save initial config */ | 1994 | /* save initial config */ |
1841 | ahci_save_initial_config(pdev, &pi, hpriv); | 1995 | ahci_save_initial_config(pdev, hpriv); |
1842 | 1996 | ||
1843 | /* prepare host */ | 1997 | /* prepare host */ |
1844 | if (hpriv->cap & HOST_CAP_NCQ) | 1998 | if (hpriv->cap & HOST_CAP_NCQ) |
1845 | pi.flags |= ATA_FLAG_NCQ; | 1999 | pi.flags |= ATA_FLAG_NCQ; |
1846 | 2000 | ||
2001 | if (hpriv->cap & HOST_CAP_PMP) | ||
2002 | pi.flags |= ATA_FLAG_PMP; | ||
2003 | |||
1847 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map)); | 2004 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map)); |
1848 | if (!host) | 2005 | if (!host) |
1849 | return -ENOMEM; | 2006 | return -ENOMEM; |
@@ -1854,6 +2011,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1854 | struct ata_port *ap = host->ports[i]; | 2011 | struct ata_port *ap = host->ports[i]; |
1855 | void __iomem *port_mmio = ahci_port_base(ap); | 2012 | void __iomem *port_mmio = ahci_port_base(ap); |
1856 | 2013 | ||
2014 | ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); | ||
2015 | ata_port_pbar_desc(ap, AHCI_PCI_BAR, | ||
2016 | 0x100 + ap->port_no * 0x80, "port"); | ||
2017 | |||
1857 | /* standard SATA port setup */ | 2018 | /* standard SATA port setup */ |
1858 | if (hpriv->port_map & (1 << i)) | 2019 | if (hpriv->port_map & (1 << i)) |
1859 | ap->ioaddr.cmd_addr = port_mmio; | 2020 | ap->ioaddr.cmd_addr = port_mmio; |
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 945466954724..90329982bef7 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | /** | 35 | /** |
36 | * generic_set_mode - mode setting | 36 | * generic_set_mode - mode setting |
37 | * @ap: interface to set up | 37 | * @link: link to set up |
38 | * @unused: returned device on error | 38 | * @unused: returned device on error |
39 | * | 39 | * |
40 | * Use a non standard set_mode function. We don't want to be tuned. | 40 | * Use a non standard set_mode function. We don't want to be tuned. |
@@ -43,24 +43,24 @@ | |||
43 | * and respect them. | 43 | * and respect them. |
44 | */ | 44 | */ |
45 | 45 | ||
46 | static int generic_set_mode(struct ata_port *ap, struct ata_device **unused) | 46 | static int generic_set_mode(struct ata_link *link, struct ata_device **unused) |
47 | { | 47 | { |
48 | struct ata_port *ap = link->ap; | ||
48 | int dma_enabled = 0; | 49 | int dma_enabled = 0; |
49 | int i; | 50 | struct ata_device *dev; |
50 | 51 | ||
51 | /* Bits 5 and 6 indicate if DMA is active on master/slave */ | 52 | /* Bits 5 and 6 indicate if DMA is active on master/slave */ |
52 | if (ap->ioaddr.bmdma_addr) | 53 | if (ap->ioaddr.bmdma_addr) |
53 | dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 54 | dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
54 | 55 | ||
55 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 56 | ata_link_for_each_dev(dev, link) { |
56 | struct ata_device *dev = &ap->device[i]; | ||
57 | if (ata_dev_enabled(dev)) { | 57 | if (ata_dev_enabled(dev)) { |
58 | /* We don't really care */ | 58 | /* We don't really care */ |
59 | dev->pio_mode = XFER_PIO_0; | 59 | dev->pio_mode = XFER_PIO_0; |
60 | dev->dma_mode = XFER_MW_DMA_0; | 60 | dev->dma_mode = XFER_MW_DMA_0; |
61 | /* We do need the right mode information for DMA or PIO | 61 | /* We do need the right mode information for DMA or PIO |
62 | and this comes from the current configuration flags */ | 62 | and this comes from the current configuration flags */ |
63 | if (dma_enabled & (1 << (5 + i))) { | 63 | if (dma_enabled & (1 << (5 + dev->devno))) { |
64 | ata_id_to_dma_mode(dev, XFER_MW_DMA_0); | 64 | ata_id_to_dma_mode(dev, XFER_MW_DMA_0); |
65 | dev->flags &= ~ATA_DFLAG_PIO; | 65 | dev->flags &= ~ATA_DFLAG_PIO; |
66 | } else { | 66 | } else { |
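The generic_set_mode() conversion above is the pattern repeated throughout this series: hooks now receive a struct ata_link, reach the port through link->ap, and walk devices with ata_link_for_each_dev() instead of indexing ap->device[], with dev->devno taking over the old master/slave loop index. A small sketch of the iteration idiom under those assumptions (the helper name is illustrative only):

static int example_count_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int n = 0;

	ata_link_for_each_dev(dev, link)
		if (ata_dev_enabled(dev))	/* dev->devno: 0 = master, 1 = slave */
			n++;
	return n;
}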
@@ -95,7 +95,6 @@ static struct scsi_host_template generic_sht = { | |||
95 | static struct ata_port_operations generic_port_ops = { | 95 | static struct ata_port_operations generic_port_ops = { |
96 | .set_mode = generic_set_mode, | 96 | .set_mode = generic_set_mode, |
97 | 97 | ||
98 | .port_disable = ata_port_disable, | ||
99 | .tf_load = ata_tf_load, | 98 | .tf_load = ata_tf_load, |
100 | .tf_read = ata_tf_read, | 99 | .tf_read = ata_tf_read, |
101 | .check_status = ata_check_status, | 100 | .check_status = ata_check_status, |
@@ -121,9 +120,8 @@ static struct ata_port_operations generic_port_ops = { | |||
121 | .irq_handler = ata_interrupt, | 120 | .irq_handler = ata_interrupt, |
122 | .irq_clear = ata_bmdma_irq_clear, | 121 | .irq_clear = ata_bmdma_irq_clear, |
123 | .irq_on = ata_irq_on, | 122 | .irq_on = ata_irq_on, |
124 | .irq_ack = ata_irq_ack, | ||
125 | 123 | ||
126 | .port_start = ata_port_start, | 124 | .port_start = ata_sff_port_start, |
127 | }; | 125 | }; |
128 | 126 | ||
129 | static int all_generic_ide; /* Set to claim all devices */ | 127 | static int all_generic_ide; /* Set to claim all devices */ |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 92c2d5082bef..e783e678acf5 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -123,7 +123,6 @@ enum { | |||
123 | ich_pata_33 = 1, /* ICH up to UDMA 33 only */ | 123 | ich_pata_33 = 1, /* ICH up to UDMA 33 only */ |
124 | ich_pata_66 = 2, /* ICH up to 66 Mhz */ | 124 | ich_pata_66 = 2, /* ICH up to 66 Mhz */ |
125 | ich_pata_100 = 3, /* ICH up to UDMA 100 */ | 125 | ich_pata_100 = 3, /* ICH up to UDMA 100 */ |
126 | ich_pata_133 = 4, /* ICH up to UDMA 133 */ | ||
127 | ich5_sata = 5, | 126 | ich5_sata = 5, |
128 | ich6_sata = 6, | 127 | ich6_sata = 6, |
129 | ich6_sata_ahci = 7, | 128 | ich6_sata_ahci = 7, |
@@ -199,7 +198,7 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
199 | { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 198 | { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
200 | { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 199 | { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
201 | /* Intel ICH5 */ | 200 | /* Intel ICH5 */ |
202 | { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, | 201 | { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
203 | /* C-ICH (i810E2) */ | 202 | /* C-ICH (i810E2) */ |
204 | { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 203 | { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
205 | /* ESB (855GME/875P + 6300ESB) UDMA 100 */ | 204 | /* ESB (855GME/875P + 6300ESB) UDMA 100 */ |
@@ -207,7 +206,7 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
207 | /* ICH6 (and 6) (i915) UDMA 100 */ | 206 | /* ICH6 (and 6) (i915) UDMA 100 */ |
208 | { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 207 | { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
209 | /* ICH7/7-R (i945, i975) UDMA 100*/ | 208 | /* ICH7/7-R (i945, i975) UDMA 100*/ |
210 | { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, | 209 | { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
211 | { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 210 | { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
212 | /* ICH8 Mobile PATA Controller */ | 211 | /* ICH8 Mobile PATA Controller */ |
213 | { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, | 212 | { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, |
@@ -290,7 +289,6 @@ static struct scsi_host_template piix_sht = { | |||
290 | }; | 289 | }; |
291 | 290 | ||
292 | static const struct ata_port_operations piix_pata_ops = { | 291 | static const struct ata_port_operations piix_pata_ops = { |
293 | .port_disable = ata_port_disable, | ||
294 | .set_piomode = piix_set_piomode, | 292 | .set_piomode = piix_set_piomode, |
295 | .set_dmamode = piix_set_dmamode, | 293 | .set_dmamode = piix_set_dmamode, |
296 | .mode_filter = ata_pci_default_filter, | 294 | .mode_filter = ata_pci_default_filter, |
@@ -318,13 +316,11 @@ static const struct ata_port_operations piix_pata_ops = { | |||
318 | .irq_handler = ata_interrupt, | 316 | .irq_handler = ata_interrupt, |
319 | .irq_clear = ata_bmdma_irq_clear, | 317 | .irq_clear = ata_bmdma_irq_clear, |
320 | .irq_on = ata_irq_on, | 318 | .irq_on = ata_irq_on, |
321 | .irq_ack = ata_irq_ack, | ||
322 | 319 | ||
323 | .port_start = ata_port_start, | 320 | .port_start = ata_port_start, |
324 | }; | 321 | }; |
325 | 322 | ||
326 | static const struct ata_port_operations ich_pata_ops = { | 323 | static const struct ata_port_operations ich_pata_ops = { |
327 | .port_disable = ata_port_disable, | ||
328 | .set_piomode = piix_set_piomode, | 324 | .set_piomode = piix_set_piomode, |
329 | .set_dmamode = ich_set_dmamode, | 325 | .set_dmamode = ich_set_dmamode, |
330 | .mode_filter = ata_pci_default_filter, | 326 | .mode_filter = ata_pci_default_filter, |
@@ -352,14 +348,11 @@ static const struct ata_port_operations ich_pata_ops = { | |||
352 | .irq_handler = ata_interrupt, | 348 | .irq_handler = ata_interrupt, |
353 | .irq_clear = ata_bmdma_irq_clear, | 349 | .irq_clear = ata_bmdma_irq_clear, |
354 | .irq_on = ata_irq_on, | 350 | .irq_on = ata_irq_on, |
355 | .irq_ack = ata_irq_ack, | ||
356 | 351 | ||
357 | .port_start = ata_port_start, | 352 | .port_start = ata_port_start, |
358 | }; | 353 | }; |
359 | 354 | ||
360 | static const struct ata_port_operations piix_sata_ops = { | 355 | static const struct ata_port_operations piix_sata_ops = { |
361 | .port_disable = ata_port_disable, | ||
362 | |||
363 | .tf_load = ata_tf_load, | 356 | .tf_load = ata_tf_load, |
364 | .tf_read = ata_tf_read, | 357 | .tf_read = ata_tf_read, |
365 | .check_status = ata_check_status, | 358 | .check_status = ata_check_status, |
@@ -382,7 +375,6 @@ static const struct ata_port_operations piix_sata_ops = { | |||
382 | .irq_handler = ata_interrupt, | 375 | .irq_handler = ata_interrupt, |
383 | .irq_clear = ata_bmdma_irq_clear, | 376 | .irq_clear = ata_bmdma_irq_clear, |
384 | .irq_on = ata_irq_on, | 377 | .irq_on = ata_irq_on, |
385 | .irq_ack = ata_irq_ack, | ||
386 | 378 | ||
387 | .port_start = ata_port_start, | 379 | .port_start = ata_port_start, |
388 | }; | 380 | }; |
@@ -445,15 +437,15 @@ static const struct piix_map_db ich8_map_db = { | |||
445 | }; | 437 | }; |
446 | 438 | ||
447 | static const struct piix_map_db tolapai_map_db = { | 439 | static const struct piix_map_db tolapai_map_db = { |
448 | .mask = 0x3, | 440 | .mask = 0x3, |
449 | .port_enable = 0x3, | 441 | .port_enable = 0x3, |
450 | .map = { | 442 | .map = { |
451 | /* PM PS SM SS MAP */ | 443 | /* PM PS SM SS MAP */ |
452 | { P0, NA, P1, NA }, /* 00b */ | 444 | { P0, NA, P1, NA }, /* 00b */ |
453 | { RV, RV, RV, RV }, /* 01b */ | 445 | { RV, RV, RV, RV }, /* 01b */ |
454 | { RV, RV, RV, RV }, /* 10b */ | 446 | { RV, RV, RV, RV }, /* 10b */ |
455 | { RV, RV, RV, RV }, | 447 | { RV, RV, RV, RV }, |
456 | }, | 448 | }, |
457 | }; | 449 | }; |
458 | 450 | ||
459 | static const struct piix_map_db *piix_map_db_table[] = { | 451 | static const struct piix_map_db *piix_map_db_table[] = { |
@@ -466,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = { | |||
466 | }; | 458 | }; |
467 | 459 | ||
468 | static struct ata_port_info piix_port_info[] = { | 460 | static struct ata_port_info piix_port_info[] = { |
469 | /* piix_pata_33: 0: PIIX4 at 33MHz */ | 461 | [piix_pata_33] = /* PIIX4 at 33MHz */ |
470 | { | 462 | { |
471 | .sht = &piix_sht, | 463 | .sht = &piix_sht, |
472 | .flags = PIIX_PATA_FLAGS, | 464 | .flags = PIIX_PATA_FLAGS, |
@@ -476,7 +468,7 @@ static struct ata_port_info piix_port_info[] = { | |||
476 | .port_ops = &piix_pata_ops, | 468 | .port_ops = &piix_pata_ops, |
477 | }, | 469 | }, |
478 | 470 | ||
479 | /* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/ | 471 | [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/ |
480 | { | 472 | { |
481 | .sht = &piix_sht, | 473 | .sht = &piix_sht, |
482 | .flags = PIIX_PATA_FLAGS, | 474 | .flags = PIIX_PATA_FLAGS, |
@@ -485,7 +477,8 @@ static struct ata_port_info piix_port_info[] = { | |||
485 | .udma_mask = ATA_UDMA2, /* UDMA33 */ | 477 | .udma_mask = ATA_UDMA2, /* UDMA33 */ |
486 | .port_ops = &ich_pata_ops, | 478 | .port_ops = &ich_pata_ops, |
487 | }, | 479 | }, |
488 | /* ich_pata_66: 2 ICH controllers up to 66MHz */ | 480 | |
481 | [ich_pata_66] = /* ICH controllers up to 66MHz */ | ||
489 | { | 482 | { |
490 | .sht = &piix_sht, | 483 | .sht = &piix_sht, |
491 | .flags = PIIX_PATA_FLAGS, | 484 | .flags = PIIX_PATA_FLAGS, |
@@ -495,7 +488,7 @@ static struct ata_port_info piix_port_info[] = { | |||
495 | .port_ops = &ich_pata_ops, | 488 | .port_ops = &ich_pata_ops, |
496 | }, | 489 | }, |
497 | 490 | ||
498 | /* ich_pata_100: 3 */ | 491 | [ich_pata_100] = |
499 | { | 492 | { |
500 | .sht = &piix_sht, | 493 | .sht = &piix_sht, |
501 | .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, | 494 | .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, |
@@ -505,17 +498,7 @@ static struct ata_port_info piix_port_info[] = { | |||
505 | .port_ops = &ich_pata_ops, | 498 | .port_ops = &ich_pata_ops, |
506 | }, | 499 | }, |
507 | 500 | ||
508 | /* ich_pata_133: 4 ICH with full UDMA6 */ | 501 | [ich5_sata] = |
509 | { | ||
510 | .sht = &piix_sht, | ||
511 | .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, | ||
512 | .pio_mask = 0x1f, /* pio 0-4 */ | ||
513 | .mwdma_mask = 0x06, /* Check: maybe 0x07 */ | ||
514 | .udma_mask = ATA_UDMA6, /* UDMA133 */ | ||
515 | .port_ops = &ich_pata_ops, | ||
516 | }, | ||
517 | |||
518 | /* ich5_sata: 5 */ | ||
519 | { | 502 | { |
520 | .sht = &piix_sht, | 503 | .sht = &piix_sht, |
521 | .flags = PIIX_SATA_FLAGS, | 504 | .flags = PIIX_SATA_FLAGS, |
@@ -525,7 +508,7 @@ static struct ata_port_info piix_port_info[] = { | |||
525 | .port_ops = &piix_sata_ops, | 508 | .port_ops = &piix_sata_ops, |
526 | }, | 509 | }, |
527 | 510 | ||
528 | /* ich6_sata: 6 */ | 511 | [ich6_sata] = |
529 | { | 512 | { |
530 | .sht = &piix_sht, | 513 | .sht = &piix_sht, |
531 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR, | 514 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR, |
@@ -535,7 +518,7 @@ static struct ata_port_info piix_port_info[] = { | |||
535 | .port_ops = &piix_sata_ops, | 518 | .port_ops = &piix_sata_ops, |
536 | }, | 519 | }, |
537 | 520 | ||
538 | /* ich6_sata_ahci: 7 */ | 521 | [ich6_sata_ahci] = |
539 | { | 522 | { |
540 | .sht = &piix_sht, | 523 | .sht = &piix_sht, |
541 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | | 524 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | |
@@ -546,7 +529,7 @@ static struct ata_port_info piix_port_info[] = { | |||
546 | .port_ops = &piix_sata_ops, | 529 | .port_ops = &piix_sata_ops, |
547 | }, | 530 | }, |
548 | 531 | ||
549 | /* ich6m_sata_ahci: 8 */ | 532 | [ich6m_sata_ahci] = |
550 | { | 533 | { |
551 | .sht = &piix_sht, | 534 | .sht = &piix_sht, |
552 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | | 535 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | |
@@ -557,7 +540,7 @@ static struct ata_port_info piix_port_info[] = { | |||
557 | .port_ops = &piix_sata_ops, | 540 | .port_ops = &piix_sata_ops, |
558 | }, | 541 | }, |
559 | 542 | ||
560 | /* ich8_sata_ahci: 9 */ | 543 | [ich8_sata_ahci] = |
561 | { | 544 | { |
562 | .sht = &piix_sht, | 545 | .sht = &piix_sht, |
563 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | | 546 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | |
@@ -568,7 +551,7 @@ static struct ata_port_info piix_port_info[] = { | |||
568 | .port_ops = &piix_sata_ops, | 551 | .port_ops = &piix_sata_ops, |
569 | }, | 552 | }, |
570 | 553 | ||
571 | /* piix_pata_mwdma: 10: PIIX3 MWDMA only */ | 554 | [piix_pata_mwdma] = /* PIIX3 MWDMA only */ |
572 | { | 555 | { |
573 | .sht = &piix_sht, | 556 | .sht = &piix_sht, |
574 | .flags = PIIX_PATA_FLAGS, | 557 | .flags = PIIX_PATA_FLAGS, |
@@ -577,7 +560,7 @@ static struct ata_port_info piix_port_info[] = { | |||
577 | .port_ops = &piix_pata_ops, | 560 | .port_ops = &piix_pata_ops, |
578 | }, | 561 | }, |
579 | 562 | ||
580 | /* tolapai_sata_ahci: 11: */ | 563 | [tolapai_sata_ahci] = |
581 | { | 564 | { |
582 | .sht = &piix_sht, | 565 | .sht = &piix_sht, |
583 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | | 566 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | |
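The piix_port_info[] rework above replaces the positional "/* index */" comments with C99 designated initializers keyed by the controller enum, so dropping an entry (such as ich_pata_133 here) can no longer silently shift the remaining boards out of step with their indices. A tiny sketch of the same pattern with hypothetical names:

enum { board_foo, board_bar };	/* hypothetical controller ids */

static const struct ata_port_info example_info[] = {
	[board_foo] = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f },
	[board_bar] = { .flags = ATA_FLAG_SATA,       .pio_mask = 0x1f },
};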
@@ -615,6 +598,7 @@ static const struct ich_laptop ich_laptop[] = { | |||
615 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ | 598 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ |
616 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ | 599 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ |
617 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ | 600 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ |
601 | { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ | ||
618 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ | 602 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ |
619 | /* end marker */ | 603 | /* end marker */ |
620 | { 0, } | 604 | { 0, } |
@@ -657,19 +641,20 @@ static int ich_pata_cable_detect(struct ata_port *ap) | |||
657 | 641 | ||
658 | /** | 642 | /** |
659 | * piix_pata_prereset - prereset for PATA host controller | 643 | * piix_pata_prereset - prereset for PATA host controller |
660 | * @ap: Target port | 644 | * @link: Target link |
661 | * @deadline: deadline jiffies for the operation | 645 | * @deadline: deadline jiffies for the operation |
662 | * | 646 | * |
663 | * LOCKING: | 647 | * LOCKING: |
664 | * None (inherited from caller). | 648 | * None (inherited from caller). |
665 | */ | 649 | */ |
666 | static int piix_pata_prereset(struct ata_port *ap, unsigned long deadline) | 650 | static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) |
667 | { | 651 | { |
652 | struct ata_port *ap = link->ap; | ||
668 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 653 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
669 | 654 | ||
670 | if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) | 655 | if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) |
671 | return -ENOENT; | 656 | return -ENOENT; |
672 | return ata_std_prereset(ap, deadline); | 657 | return ata_std_prereset(link, deadline); |
673 | } | 658 | } |
674 | 659 | ||
675 | static void piix_pata_error_handler(struct ata_port *ap) | 660 | static void piix_pata_error_handler(struct ata_port *ap) |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index c059f78ad944..3f7533589041 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/acpi.h> | 14 | #include <linux/acpi.h> |
15 | #include <linux/libata.h> | 15 | #include <linux/libata.h> |
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | #include <scsi/scsi_device.h> | ||
17 | #include "libata.h" | 18 | #include "libata.h" |
18 | 19 | ||
19 | #include <acpi/acpi_bus.h> | 20 | #include <acpi/acpi_bus.h> |
@@ -40,11 +41,40 @@ static int is_pci_dev(struct device *dev) | |||
40 | return (dev->bus == &pci_bus_type); | 41 | return (dev->bus == &pci_bus_type); |
41 | } | 42 | } |
42 | 43 | ||
43 | static void ata_acpi_associate_sata_port(struct ata_port *ap) | 44 | /** |
45 | * ata_acpi_associate_sata_port - associate SATA port with ACPI objects | ||
46 | * @ap: target SATA port | ||
47 | * | ||
48 | * Look up ACPI objects associated with @ap and initialize acpi_handle | ||
49 | * fields of @ap, the port and devices accordingly. | ||
50 | * | ||
51 | * LOCKING: | ||
52 | * EH context. | ||
53 | * | ||
54 | * RETURNS: | ||
55 | * 0 on success, -errno on failure. | ||
56 | */ | ||
57 | void ata_acpi_associate_sata_port(struct ata_port *ap) | ||
44 | { | 58 | { |
45 | acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT); | 59 | WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA)); |
60 | |||
61 | if (!ap->nr_pmp_links) { | ||
62 | acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT); | ||
63 | |||
64 | ap->link.device->acpi_handle = | ||
65 | acpi_get_child(ap->host->acpi_handle, adr); | ||
66 | } else { | ||
67 | struct ata_link *link; | ||
68 | |||
69 | ap->link.device->acpi_handle = NULL; | ||
46 | 70 | ||
47 | ap->device->acpi_handle = acpi_get_child(ap->host->acpi_handle, adr); | 71 | ata_port_for_each_link(link, ap) { |
72 | acpi_integer adr = SATA_ADR(ap->port_no, link->pmp); | ||
73 | |||
74 | link->device->acpi_handle = | ||
75 | acpi_get_child(ap->host->acpi_handle, adr); | ||
76 | } | ||
77 | } | ||
48 | } | 78 | } |
49 | 79 | ||
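ata_acpi_associate_sata_port() above now walks every link so devices behind a port multiplier each get their own firmware node. The SATA_ADR() encoding it relies on follows ACPI's SATA addressing, which is assumed here to pack the host port into the high 16 bits of _ADR and the PMP port into the low 16 bits, with 0xffff meaning no port multiplier:

/* illustrative restatement of the assumed _ADR packing */
static inline u64 example_sata_adr(unsigned int port_no, unsigned int pmp)
{
	return ((u64)port_no << 16) | pmp;	/* pmp == 0xffff: direct attach */
}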
50 | static void ata_acpi_associate_ide_port(struct ata_port *ap) | 80 | static void ata_acpi_associate_ide_port(struct ata_port *ap) |
@@ -60,12 +90,53 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) | |||
60 | max_devices++; | 90 | max_devices++; |
61 | 91 | ||
62 | for (i = 0; i < max_devices; i++) { | 92 | for (i = 0; i < max_devices; i++) { |
63 | struct ata_device *dev = &ap->device[i]; | 93 | struct ata_device *dev = &ap->link.device[i]; |
64 | 94 | ||
65 | dev->acpi_handle = acpi_get_child(ap->acpi_handle, i); | 95 | dev->acpi_handle = acpi_get_child(ap->acpi_handle, i); |
66 | } | 96 | } |
67 | } | 97 | } |
68 | 98 | ||
99 | static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj, | ||
100 | u32 event) | ||
101 | { | ||
102 | char event_string[12]; | ||
103 | char *envp[] = { event_string, NULL }; | ||
104 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
105 | |||
106 | if (event == 0 || event == 1) { | ||
107 | unsigned long flags; | ||
108 | spin_lock_irqsave(ap->lock, flags); | ||
109 | ata_ehi_clear_desc(ehi); | ||
110 | ata_ehi_push_desc(ehi, "ACPI event"); | ||
111 | ata_ehi_hotplugged(ehi); | ||
112 | ata_port_freeze(ap); | ||
113 | spin_unlock_irqrestore(ap->lock, flags); | ||
114 | } | ||
115 | |||
116 | if (kobj) { | ||
117 | sprintf(event_string, "BAY_EVENT=%d", event); | ||
118 | kobject_uevent_env(kobj, KOBJ_CHANGE, envp); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) | ||
123 | { | ||
124 | struct ata_device *dev = data; | ||
125 | struct kobject *kobj = NULL; | ||
126 | |||
127 | if (dev->sdev) | ||
128 | kobj = &dev->sdev->sdev_gendev.kobj; | ||
129 | |||
130 | ata_acpi_handle_hotplug (dev->link->ap, kobj, event); | ||
131 | } | ||
132 | |||
133 | static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) | ||
134 | { | ||
135 | struct ata_port *ap = data; | ||
136 | |||
137 | ata_acpi_handle_hotplug (ap, &ap->dev->kobj, event); | ||
138 | } | ||
139 | |||
69 | /** | 140 | /** |
70 | * ata_acpi_associate - associate ATA host with ACPI objects | 141 | * ata_acpi_associate - associate ATA host with ACPI objects |
71 | * @host: target ATA host | 142 | * @host: target ATA host |
@@ -81,7 +152,7 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) | |||
81 | */ | 152 | */ |
82 | void ata_acpi_associate(struct ata_host *host) | 153 | void ata_acpi_associate(struct ata_host *host) |
83 | { | 154 | { |
84 | int i; | 155 | int i, j; |
85 | 156 | ||
86 | if (!is_pci_dev(host->dev) || libata_noacpi) | 157 | if (!is_pci_dev(host->dev) || libata_noacpi) |
87 | return; | 158 | return; |
@@ -97,6 +168,22 @@ void ata_acpi_associate(struct ata_host *host) | |||
97 | ata_acpi_associate_sata_port(ap); | 168 | ata_acpi_associate_sata_port(ap); |
98 | else | 169 | else |
99 | ata_acpi_associate_ide_port(ap); | 170 | ata_acpi_associate_ide_port(ap); |
171 | |||
172 | if (ap->acpi_handle) | ||
173 | acpi_install_notify_handler (ap->acpi_handle, | ||
174 | ACPI_SYSTEM_NOTIFY, | ||
175 | ata_acpi_ap_notify, | ||
176 | ap); | ||
177 | |||
178 | for (j = 0; j < ata_link_max_devices(&ap->link); j++) { | ||
179 | struct ata_device *dev = &ap->link.device[j]; | ||
180 | |||
181 | if (dev->acpi_handle) | ||
182 | acpi_install_notify_handler (dev->acpi_handle, | ||
183 | ACPI_SYSTEM_NOTIFY, | ||
184 | ata_acpi_dev_notify, | ||
185 | dev); | ||
186 | } | ||
100 | } | 187 | } |
101 | } | 188 | } |
102 | 189 | ||
@@ -113,7 +200,7 @@ void ata_acpi_associate(struct ata_host *host) | |||
113 | * RETURNS: | 200 | * RETURNS: |
114 | * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. | 201 | * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. |
115 | */ | 202 | */ |
116 | static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm) | 203 | int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm) |
117 | { | 204 | { |
118 | struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; | 205 | struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; |
119 | union acpi_object *out_obj; | 206 | union acpi_object *out_obj; |
@@ -157,6 +244,8 @@ static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm) | |||
157 | return rc; | 244 | return rc; |
158 | } | 245 | } |
159 | 246 | ||
247 | EXPORT_SYMBOL_GPL(ata_acpi_gtm); | ||
248 | |||
160 | /** | 249 | /** |
161 | * ata_acpi_stm - execute _STM | 250 | * ata_acpi_stm - execute _STM |
162 | * @ap: target ATA port | 251 | * @ap: target ATA port |
@@ -170,7 +259,7 @@ static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm) | |||
170 | * RETURNS: | 259 | * RETURNS: |
171 | * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure. | 260 | * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure. |
172 | */ | 261 | */ |
173 | static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) | 262 | int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) |
174 | { | 263 | { |
175 | acpi_status status; | 264 | acpi_status status; |
176 | struct acpi_object_list input; | 265 | struct acpi_object_list input; |
@@ -182,10 +271,10 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) | |||
182 | /* Buffers for id may need byteswapping ? */ | 271 | /* Buffers for id may need byteswapping ? */ |
183 | in_params[1].type = ACPI_TYPE_BUFFER; | 272 | in_params[1].type = ACPI_TYPE_BUFFER; |
184 | in_params[1].buffer.length = 512; | 273 | in_params[1].buffer.length = 512; |
185 | in_params[1].buffer.pointer = (u8 *)ap->device[0].id; | 274 | in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id; |
186 | in_params[2].type = ACPI_TYPE_BUFFER; | 275 | in_params[2].type = ACPI_TYPE_BUFFER; |
187 | in_params[2].buffer.length = 512; | 276 | in_params[2].buffer.length = 512; |
188 | in_params[2].buffer.pointer = (u8 *)ap->device[1].id; | 277 | in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id; |
189 | 278 | ||
190 | input.count = 3; | 279 | input.count = 3; |
191 | input.pointer = in_params; | 280 | input.pointer = in_params; |
@@ -202,6 +291,8 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) | |||
202 | return 0; | 291 | return 0; |
203 | } | 292 | } |
204 | 293 | ||
294 | EXPORT_SYMBOL_GPL(ata_acpi_stm); | ||
295 | |||
205 | /** | 296 | /** |
206 | * ata_dev_get_GTF - get the drive bootup default taskfile settings | 297 | * ata_dev_get_GTF - get the drive bootup default taskfile settings |
207 | * @dev: target ATA device | 298 | * @dev: target ATA device |
@@ -226,7 +317,7 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) | |||
226 | static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf, | 317 | static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf, |
227 | void **ptr_to_free) | 318 | void **ptr_to_free) |
228 | { | 319 | { |
229 | struct ata_port *ap = dev->ap; | 320 | struct ata_port *ap = dev->link->ap; |
230 | acpi_status status; | 321 | acpi_status status; |
231 | struct acpi_buffer output; | 322 | struct acpi_buffer output; |
232 | union acpi_object *out_obj; | 323 | union acpi_object *out_obj; |
@@ -296,6 +387,44 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf, | |||
296 | } | 387 | } |
297 | 388 | ||
298 | /** | 389 | /** |
390 | * ata_acpi_cbl_80wire - Check for 80 wire cable | ||
391 | * @ap: Port to check | ||
392 | * | ||
393 | * Return 1 if the ACPI mode data for this port indicates the BIOS selected | ||
394 | * an 80wire mode. | ||
395 | */ | ||
396 | |||
397 | int ata_acpi_cbl_80wire(struct ata_port *ap) | ||
398 | { | ||
399 | struct ata_acpi_gtm gtm; | ||
400 | int valid = 0; | ||
401 | |||
402 | /* No _GTM data, no information */ | ||
403 | if (ata_acpi_gtm(ap, &gtm) < 0) | ||

404 | return 0; | ||
405 | |||
406 | /* Split timing, DMA enabled */ | ||
407 | if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) | ||
408 | valid |= 1; | ||
409 | if ((gtm.flags & 0x14) == 0x14 && gtm.drive[1].dma < 55) | ||
410 | valid |= 2; | ||
411 | /* Shared timing, DMA enabled */ | ||
412 | if ((gtm.flags & 0x11) == 0x01 && gtm.drive[0].dma < 55) | ||
413 | valid |= 1; | ||
414 | if ((gtm.flags & 0x14) == 0x04 && gtm.drive[0].dma < 55) | ||
415 | valid |= 2; | ||
416 | |||
417 | /* Drive check */ | ||
418 | if ((valid & 1) && ata_dev_enabled(&ap->link.device[0])) | ||
419 | return 1; | ||
420 | if ((valid & 2) && ata_dev_enabled(&ap->link.device[1])) | ||
421 | return 1; | ||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); | ||
426 | |||
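ata_acpi_cbl_80wire() is exported so host drivers with unreliable cable-detect registers can fall back on the BIOS-programmed _GTM timings. A hedged sketch of how a PATA driver's cable_detect hook might use it, with all other driver boilerplate omitted:

static int example_cable_detect(struct ata_port *ap)
{
	if (ata_acpi_cbl_80wire(ap))
		return ATA_CBL_PATA80;	/* firmware timings imply UDMA above 33 */
	return ATA_CBL_PATA40;
}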
427 | /** | ||
299 | * taskfile_load_raw - send taskfile registers to host controller | 428 | * taskfile_load_raw - send taskfile registers to host controller |
300 | * @dev: target ATA device | 429 | * @dev: target ATA device |
301 | * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) | 430 | * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) |
@@ -320,7 +449,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf, | |||
320 | static int taskfile_load_raw(struct ata_device *dev, | 449 | static int taskfile_load_raw(struct ata_device *dev, |
321 | const struct ata_acpi_gtf *gtf) | 450 | const struct ata_acpi_gtf *gtf) |
322 | { | 451 | { |
323 | struct ata_port *ap = dev->ap; | 452 | struct ata_port *ap = dev->link->ap; |
324 | struct ata_taskfile tf, rtf; | 453 | struct ata_taskfile tf, rtf; |
325 | unsigned int err_mask; | 454 | unsigned int err_mask; |
326 | 455 | ||
@@ -349,7 +478,7 @@ static int taskfile_load_raw(struct ata_device *dev, | |||
349 | tf.lbal, tf.lbam, tf.lbah, tf.device); | 478 | tf.lbal, tf.lbam, tf.lbah, tf.device); |
350 | 479 | ||
351 | rtf = tf; | 480 | rtf = tf; |
352 | err_mask = ata_exec_internal(dev, &rtf, NULL, DMA_NONE, NULL, 0); | 481 | err_mask = ata_exec_internal(dev, &rtf, NULL, DMA_NONE, NULL, 0, 0); |
353 | if (err_mask) { | 482 | if (err_mask) { |
354 | ata_dev_printk(dev, KERN_ERR, | 483 | ata_dev_printk(dev, KERN_ERR, |
355 | "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x failed " | 484 | "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x failed " |
@@ -424,7 +553,7 @@ static int ata_acpi_exec_tfs(struct ata_device *dev) | |||
424 | */ | 553 | */ |
425 | static int ata_acpi_push_id(struct ata_device *dev) | 554 | static int ata_acpi_push_id(struct ata_device *dev) |
426 | { | 555 | { |
427 | struct ata_port *ap = dev->ap; | 556 | struct ata_port *ap = dev->link->ap; |
428 | int err; | 557 | int err; |
429 | acpi_status status; | 558 | acpi_status status; |
430 | struct acpi_object_list input; | 559 | struct acpi_object_list input; |
@@ -508,7 +637,7 @@ int ata_acpi_on_suspend(struct ata_port *ap) | |||
508 | */ | 637 | */ |
509 | void ata_acpi_on_resume(struct ata_port *ap) | 638 | void ata_acpi_on_resume(struct ata_port *ap) |
510 | { | 639 | { |
511 | int i; | 640 | struct ata_device *dev; |
512 | 641 | ||
513 | if (ap->acpi_handle && (ap->pflags & ATA_PFLAG_GTM_VALID)) { | 642 | if (ap->acpi_handle && (ap->pflags & ATA_PFLAG_GTM_VALID)) { |
514 | BUG_ON(ap->flags & ATA_FLAG_ACPI_SATA); | 643 | BUG_ON(ap->flags & ATA_FLAG_ACPI_SATA); |
@@ -518,8 +647,8 @@ void ata_acpi_on_resume(struct ata_port *ap) | |||
518 | } | 647 | } |
519 | 648 | ||
520 | /* schedule _GTF */ | 649 | /* schedule _GTF */ |
521 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 650 | ata_link_for_each_dev(dev, &ap->link) |
522 | ap->device[i].flags |= ATA_DFLAG_ACPI_PENDING; | 651 | dev->flags |= ATA_DFLAG_ACPI_PENDING; |
523 | } | 652 | } |
524 | 653 | ||
525 | /** | 654 | /** |
@@ -538,8 +667,8 @@ void ata_acpi_on_resume(struct ata_port *ap) | |||
538 | */ | 667 | */ |
539 | int ata_acpi_on_devcfg(struct ata_device *dev) | 668 | int ata_acpi_on_devcfg(struct ata_device *dev) |
540 | { | 669 | { |
541 | struct ata_port *ap = dev->ap; | 670 | struct ata_port *ap = dev->link->ap; |
542 | struct ata_eh_context *ehc = &ap->eh_context; | 671 | struct ata_eh_context *ehc = &ap->link.eh_context; |
543 | int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; | 672 | int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; |
544 | int rc; | 673 | int rc; |
545 | 674 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 772be09b4689..b05384a8c326 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -59,8 +59,6 @@ | |||
59 | 59 | ||
60 | #include "libata.h" | 60 | #include "libata.h" |
61 | 61 | ||
62 | #define DRV_VERSION "2.21" /* must be exactly four chars */ | ||
63 | |||
64 | 62 | ||
65 | /* debounce timing parameters in msecs { interval, duration, timeout } */ | 63 | /* debounce timing parameters in msecs { interval, duration, timeout } */ |
66 | const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; | 64 | const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; |
@@ -70,6 +68,7 @@ const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; | |||
70 | static unsigned int ata_dev_init_params(struct ata_device *dev, | 68 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
71 | u16 heads, u16 sectors); | 69 | u16 heads, u16 sectors); |
72 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); | 70 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); |
71 | static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable); | ||
73 | static void ata_dev_xfermask(struct ata_device *dev); | 72 | static void ata_dev_xfermask(struct ata_device *dev); |
74 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); | 73 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); |
75 | 74 | ||
@@ -86,6 +85,10 @@ int atapi_dmadir = 0; | |||
86 | module_param(atapi_dmadir, int, 0444); | 85 | module_param(atapi_dmadir, int, 0444); |
87 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 86 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); |
88 | 87 | ||
88 | int atapi_passthru16 = 1; | ||
89 | module_param(atapi_passthru16, int, 0444); | ||
90 | MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); | ||
91 | |||
89 | int libata_fua = 0; | 92 | int libata_fua = 0; |
90 | module_param_named(fua, libata_fua, int, 0444); | 93 | module_param_named(fua, libata_fua, int, 0444); |
91 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); | 94 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); |
@@ -94,13 +97,17 @@ static int ata_ignore_hpa = 0; | |||
94 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); | 97 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); |
95 | MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); | 98 | MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); |
96 | 99 | ||
100 | static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; | ||
101 | module_param_named(dma, libata_dma_mask, int, 0444); | ||
102 | MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); | ||
103 | |||
97 | static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; | 104 | static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; |
98 | module_param(ata_probe_timeout, int, 0444); | 105 | module_param(ata_probe_timeout, int, 0444); |
99 | MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); | 106 | MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); |
100 | 107 | ||
101 | int libata_noacpi = 1; | 108 | int libata_noacpi = 0; |
102 | module_param_named(noacpi, libata_noacpi, int, 0444); | 109 | module_param_named(noacpi, libata_noacpi, int, 0444); |
103 | MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set"); | 110 | MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); |
104 | 111 | ||
105 | MODULE_AUTHOR("Jeff Garzik"); | 112 | MODULE_AUTHOR("Jeff Garzik"); |
106 | MODULE_DESCRIPTION("Library module for ATA devices"); | 113 | MODULE_DESCRIPTION("Library module for ATA devices"); |
@@ -235,7 +242,7 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) | |||
235 | if (dev->flags & ATA_DFLAG_PIO) { | 242 | if (dev->flags & ATA_DFLAG_PIO) { |
236 | tf->protocol = ATA_PROT_PIO; | 243 | tf->protocol = ATA_PROT_PIO; |
237 | index = dev->multi_count ? 0 : 8; | 244 | index = dev->multi_count ? 0 : 8; |
238 | } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) { | 245 | } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { |
239 | /* Unable to use DMA due to host limitation */ | 246 | /* Unable to use DMA due to host limitation */ |
240 | tf->protocol = ATA_PROT_PIO; | 247 | tf->protocol = ATA_PROT_PIO; |
241 | index = dev->multi_count ? 0 : 8; | 248 | index = dev->multi_count ? 0 : 8; |
@@ -604,7 +611,7 @@ static const char *sata_spd_string(unsigned int spd) | |||
604 | void ata_dev_disable(struct ata_device *dev) | 611 | void ata_dev_disable(struct ata_device *dev) |
605 | { | 612 | { |
606 | if (ata_dev_enabled(dev)) { | 613 | if (ata_dev_enabled(dev)) { |
607 | if (ata_msg_drv(dev->ap)) | 614 | if (ata_msg_drv(dev->link->ap)) |
608 | ata_dev_printk(dev, KERN_WARNING, "disabled\n"); | 615 | ata_dev_printk(dev, KERN_WARNING, "disabled\n"); |
609 | ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | | 616 | ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | |
610 | ATA_DNXFER_QUIET); | 617 | ATA_DNXFER_QUIET); |
@@ -667,37 +674,57 @@ static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) | |||
667 | * None. | 674 | * None. |
668 | * | 675 | * |
669 | * RETURNS: | 676 | * RETURNS: |
670 | * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN | 677 | * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or |
671 | * the event of failure. | 678 | * %ATA_DEV_UNKNOWN the event of failure. |
672 | */ | 679 | */ |
673 | |||
674 | unsigned int ata_dev_classify(const struct ata_taskfile *tf) | 680 | unsigned int ata_dev_classify(const struct ata_taskfile *tf) |
675 | { | 681 | { |
676 | /* Apple's open source Darwin code hints that some devices only | 682 | /* Apple's open source Darwin code hints that some devices only |
677 | * put a proper signature into the LBA mid/high registers, | 683 | * put a proper signature into the LBA mid/high registers, |
678 | * So, we only check those. It's sufficient for uniqueness. | 684 | * So, we only check those. It's sufficient for uniqueness. |
685 | * | ||
686 | * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate | ||
687 | * signatures for ATA and ATAPI devices attached on SerialATA, | ||
688 | * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA | ||
689 | * spec has never mentioned about using different signatures | ||
690 | * for ATA/ATAPI devices. Then, Serial ATA II: Port | ||
691 | * Multiplier specification began to use 0x69/0x96 to identify | ||
692 | * port multpliers and 0x3c/0xc3 to identify SEMB device. | ||
693 | * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and | ||
694 | * 0x69/0x96 shortly and described them as reserved for | ||
695 | * SerialATA. | ||
696 | * | ||
697 | * We follow the current spec and consider that 0x69/0x96 | ||
698 | * identifies a port multiplier and 0x3c/0xc3 a SEMB device. | ||
679 | */ | 699 | */ |
680 | 700 | if ((tf->lbam == 0) && (tf->lbah == 0)) { | |
681 | if (((tf->lbam == 0) && (tf->lbah == 0)) || | ||
682 | ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { | ||
683 | DPRINTK("found ATA device by sig\n"); | 701 | DPRINTK("found ATA device by sig\n"); |
684 | return ATA_DEV_ATA; | 702 | return ATA_DEV_ATA; |
685 | } | 703 | } |
686 | 704 | ||
687 | if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || | 705 | if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { |
688 | ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { | ||
689 | DPRINTK("found ATAPI device by sig\n"); | 706 | DPRINTK("found ATAPI device by sig\n"); |
690 | return ATA_DEV_ATAPI; | 707 | return ATA_DEV_ATAPI; |
691 | } | 708 | } |
692 | 709 | ||
710 | if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { | ||
711 | DPRINTK("found PMP device by sig\n"); | ||
712 | return ATA_DEV_PMP; | ||
713 | } | ||
714 | |||
715 | if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { | ||
716 | printk("ata: SEMB device ignored\n"); | ||
717 | return ATA_DEV_SEMB_UNSUP; /* not yet */ | ||
718 | } | ||
719 | |||
693 | DPRINTK("unknown device\n"); | 720 | DPRINTK("unknown device\n"); |
694 | return ATA_DEV_UNKNOWN; | 721 | return ATA_DEV_UNKNOWN; |
695 | } | 722 | } |
696 | 723 | ||
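For quick reference, the signature pairs that the rewritten ata_dev_classify() above checks in the LBA mid/high registers can be restated as a small lookup; this helper is purely illustrative and not part of the patch:

static const char *example_sig_name(u8 lbam, u8 lbah)
{
	if (lbam == 0x00 && lbah == 0x00) return "ATA";
	if (lbam == 0x14 && lbah == 0xeb) return "ATAPI";
	if (lbam == 0x69 && lbah == 0x96) return "port multiplier";
	if (lbam == 0x3c && lbah == 0xc3) return "SEMB (unsupported)";
	return "unknown";
}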
697 | /** | 724 | /** |
698 | * ata_dev_try_classify - Parse returned ATA device signature | 725 | * ata_dev_try_classify - Parse returned ATA device signature |
699 | * @ap: ATA channel to examine | 726 | * @dev: ATA device to classify (starting at zero) |
700 | * @device: Device to examine (starting at zero) | 727 | * @present: device seems present |
701 | * @r_err: Value of error register on completion | 728 | * @r_err: Value of error register on completion |
702 | * | 729 | * |
703 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, | 730 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, |
@@ -715,15 +742,15 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) | |||
715 | * RETURNS: | 742 | * RETURNS: |
716 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. | 743 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. |
717 | */ | 744 | */ |
718 | 745 | unsigned int ata_dev_try_classify(struct ata_device *dev, int present, | |
719 | unsigned int | 746 | u8 *r_err) |
720 | ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err) | ||
721 | { | 747 | { |
748 | struct ata_port *ap = dev->link->ap; | ||
722 | struct ata_taskfile tf; | 749 | struct ata_taskfile tf; |
723 | unsigned int class; | 750 | unsigned int class; |
724 | u8 err; | 751 | u8 err; |
725 | 752 | ||
726 | ap->ops->dev_select(ap, device); | 753 | ap->ops->dev_select(ap, dev->devno); |
727 | 754 | ||
728 | memset(&tf, 0, sizeof(tf)); | 755 | memset(&tf, 0, sizeof(tf)); |
729 | 756 | ||
@@ -733,12 +760,12 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err) | |||
733 | *r_err = err; | 760 | *r_err = err; |
734 | 761 | ||
735 | /* see if device passed diags: if master then continue and warn later */ | 762 | /* see if device passed diags: if master then continue and warn later */ |
736 | if (err == 0 && device == 0) | 763 | if (err == 0 && dev->devno == 0) |
737 | /* diagnostic fail : do nothing _YET_ */ | 764 | /* diagnostic fail : do nothing _YET_ */ |
738 | ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC; | 765 | dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; |
739 | else if (err == 1) | 766 | else if (err == 1) |
740 | /* do nothing */ ; | 767 | /* do nothing */ ; |
741 | else if ((device == 0) && (err == 0x81)) | 768 | else if ((dev->devno == 0) && (err == 0x81)) |
742 | /* do nothing */ ; | 769 | /* do nothing */ ; |
743 | else | 770 | else |
744 | return ATA_DEV_NONE; | 771 | return ATA_DEV_NONE; |
@@ -746,10 +773,20 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err) | |||
746 | /* determine if device is ATA or ATAPI */ | 773 | /* determine if device is ATA or ATAPI */ |
747 | class = ata_dev_classify(&tf); | 774 | class = ata_dev_classify(&tf); |
748 | 775 | ||
749 | if (class == ATA_DEV_UNKNOWN) | 776 | if (class == ATA_DEV_UNKNOWN) { |
750 | return ATA_DEV_NONE; | 777 | /* If the device failed diagnostic, it's likely to |
751 | if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) | 778 | * have reported incorrect device signature too. |
752 | return ATA_DEV_NONE; | 779 | * Assume ATA device if the device seems present but |
780 | * device signature is invalid with diagnostic | ||
781 | * failure. | ||
782 | */ | ||
783 | if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) | ||
784 | class = ATA_DEV_ATA; | ||
785 | else | ||
786 | class = ATA_DEV_NONE; | ||
787 | } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) | ||
788 | class = ATA_DEV_NONE; | ||
789 | |||
753 | return class; | 790 | return class; |
754 | } | 791 | } |
755 | 792 | ||
@@ -816,6 +853,21 @@ void ata_id_c_string(const u16 *id, unsigned char *s, | |||
816 | *p = '\0'; | 853 | *p = '\0'; |
817 | } | 854 | } |
818 | 855 | ||
856 | static u64 ata_id_n_sectors(const u16 *id) | ||
857 | { | ||
858 | if (ata_id_has_lba(id)) { | ||
859 | if (ata_id_has_lba48(id)) | ||
860 | return ata_id_u64(id, 100); | ||
861 | else | ||
862 | return ata_id_u32(id, 60); | ||
863 | } else { | ||
864 | if (ata_id_current_chs_valid(id)) | ||
865 | return ata_id_u32(id, 57); | ||
866 | else | ||
867 | return id[1] * id[3] * id[6]; | ||
868 | } | ||
869 | } | ||
870 | |||
819 | static u64 ata_tf_to_lba48(struct ata_taskfile *tf) | 871 | static u64 ata_tf_to_lba48(struct ata_taskfile *tf) |
820 | { | 872 | { |
821 | u64 sectors = 0; | 873 | u64 sectors = 0; |
@@ -843,129 +895,110 @@ static u64 ata_tf_to_lba(struct ata_taskfile *tf) | |||
843 | } | 895 | } |
844 | 896 | ||
845 | /** | 897 | /** |
846 | * ata_read_native_max_address_ext - LBA48 native max query | 898 | * ata_read_native_max_address - Read native max address |
847 | * @dev: Device to query | 899 | * @dev: target device |
900 | * @max_sectors: out parameter for the result native max address | ||
848 | * | 901 | * |
849 | * Perform an LBA48 size query upon the device in question. Return the | 902 | * Perform an LBA48 or LBA28 native size query upon the device in |
850 | * actual LBA48 size or zero if the command fails. | 903 | * question. |
851 | */ | ||
852 | |||
853 | static u64 ata_read_native_max_address_ext(struct ata_device *dev) | ||
854 | { | ||
855 | unsigned int err; | ||
856 | struct ata_taskfile tf; | ||
857 | |||
858 | ata_tf_init(dev, &tf); | ||
859 | |||
860 | tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; | ||
861 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR; | ||
862 | tf.protocol |= ATA_PROT_NODATA; | ||
863 | tf.device |= 0x40; | ||
864 | |||
865 | err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | ||
866 | if (err) | ||
867 | return 0; | ||
868 | |||
869 | return ata_tf_to_lba48(&tf); | ||
870 | } | ||
871 | |||
872 | /** | ||
873 | * ata_read_native_max_address - LBA28 native max query | ||
874 | * @dev: Device to query | ||
875 | * | 904 | * |
876 | * Performa an LBA28 size query upon the device in question. Return the | 905 | * RETURNS: |
877 | * actual LBA28 size or zero if the command fails. | 906 | * 0 on success, -EACCES if command is aborted by the drive. |
907 | * -EIO on other errors. | ||
878 | */ | 908 | */ |
879 | 909 | static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) | |
880 | static u64 ata_read_native_max_address(struct ata_device *dev) | ||
881 | { | 910 | { |
882 | unsigned int err; | 911 | unsigned int err_mask; |
883 | struct ata_taskfile tf; | 912 | struct ata_taskfile tf; |
913 | int lba48 = ata_id_has_lba48(dev->id); | ||
884 | 914 | ||
885 | ata_tf_init(dev, &tf); | 915 | ata_tf_init(dev, &tf); |
886 | 916 | ||
887 | tf.command = ATA_CMD_READ_NATIVE_MAX; | 917 | /* always clear all address registers */ |
888 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; | 918 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; |
919 | |||
920 | if (lba48) { | ||
921 | tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; | ||
922 | tf.flags |= ATA_TFLAG_LBA48; | ||
923 | } else | ||
924 | tf.command = ATA_CMD_READ_NATIVE_MAX; | ||
925 | |||
889 | tf.protocol |= ATA_PROT_NODATA; | 926 | tf.protocol |= ATA_PROT_NODATA; |
890 | tf.device |= 0x40; | 927 | tf.device |= ATA_LBA; |
891 | 928 | ||
892 | err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 929 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
893 | if (err) | 930 | if (err_mask) { |
894 | return 0; | 931 | ata_dev_printk(dev, KERN_WARNING, "failed to read native " |
932 | "max address (err_mask=0x%x)\n", err_mask); | ||
933 | if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) | ||
934 | return -EACCES; | ||
935 | return -EIO; | ||
936 | } | ||
895 | 937 | ||
896 | return ata_tf_to_lba(&tf); | 938 | if (lba48) |
939 | *max_sectors = ata_tf_to_lba48(&tf); | ||
940 | else | ||
941 | *max_sectors = ata_tf_to_lba(&tf); | ||
942 | if (dev->horkage & ATA_HORKAGE_HPA_SIZE) | ||
943 | (*max_sectors)--; | ||
944 | return 0; | ||
897 | } | 945 | } |
898 | 946 | ||
899 | /** | 947 | /** |
900 | * ata_set_native_max_address_ext - LBA48 native max set | 948 | * ata_set_max_sectors - Set max sectors |
901 | * @dev: Device to query | 949 | * @dev: target device |
902 | * @new_sectors: new max sectors value to set for the device | 950 | * @new_sectors: new max sectors value to set for the device |
903 | * | 951 | * |
904 | * Perform an LBA48 size set max upon the device in question. Return the | 952 | * Set max sectors of @dev to @new_sectors. |
905 | * actual LBA48 size or zero if the command fails. | 953 | * |
954 | * RETURNS: | ||
955 | * 0 on success, -EACCES if command is aborted or denied (due to | ||
956 | * previous non-volatile SET_MAX) by the drive. -EIO on other | ||
957 | * errors. | ||
906 | */ | 958 | */ |
907 | 959 | static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) | |
908 | static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors) | ||
909 | { | 960 | { |
910 | unsigned int err; | 961 | unsigned int err_mask; |
911 | struct ata_taskfile tf; | 962 | struct ata_taskfile tf; |
963 | int lba48 = ata_id_has_lba48(dev->id); | ||
912 | 964 | ||
913 | new_sectors--; | 965 | new_sectors--; |
914 | 966 | ||
915 | ata_tf_init(dev, &tf); | 967 | ata_tf_init(dev, &tf); |
916 | 968 | ||
917 | tf.command = ATA_CMD_SET_MAX_EXT; | 969 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; |
918 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR; | ||
919 | tf.protocol |= ATA_PROT_NODATA; | ||
920 | tf.device |= 0x40; | ||
921 | |||
922 | tf.lbal = (new_sectors >> 0) & 0xff; | ||
923 | tf.lbam = (new_sectors >> 8) & 0xff; | ||
924 | tf.lbah = (new_sectors >> 16) & 0xff; | ||
925 | |||
926 | tf.hob_lbal = (new_sectors >> 24) & 0xff; | ||
927 | tf.hob_lbam = (new_sectors >> 32) & 0xff; | ||
928 | tf.hob_lbah = (new_sectors >> 40) & 0xff; | ||
929 | |||
930 | err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | ||
931 | if (err) | ||
932 | return 0; | ||
933 | |||
934 | return ata_tf_to_lba48(&tf); | ||
935 | } | ||
936 | |||
937 | /** | ||
938 | * ata_set_native_max_address - LBA28 native max set | ||
939 | * @dev: Device to query | ||
940 | * @new_sectors: new max sectors value to set for the device | ||
941 | * | ||
942 | * Perform an LBA28 size set max upon the device in question. Return the | ||
943 | * actual LBA28 size or zero if the command fails. | ||
944 | */ | ||
945 | 970 | ||
946 | static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors) | 971 | if (lba48) { |
947 | { | 972 | tf.command = ATA_CMD_SET_MAX_EXT; |
948 | unsigned int err; | 973 | tf.flags |= ATA_TFLAG_LBA48; |
949 | struct ata_taskfile tf; | ||
950 | 974 | ||
951 | new_sectors--; | 975 | tf.hob_lbal = (new_sectors >> 24) & 0xff; |
976 | tf.hob_lbam = (new_sectors >> 32) & 0xff; | ||
977 | tf.hob_lbah = (new_sectors >> 40) & 0xff; | ||
978 | } else { | ||
979 | tf.command = ATA_CMD_SET_MAX; | ||
952 | 980 | ||
953 | ata_tf_init(dev, &tf); | 981 | tf.device |= (new_sectors >> 24) & 0xf; |
982 | } | ||
954 | 983 | ||
955 | tf.command = ATA_CMD_SET_MAX; | ||
956 | tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; | ||
957 | tf.protocol |= ATA_PROT_NODATA; | 984 | tf.protocol |= ATA_PROT_NODATA; |
985 | tf.device |= ATA_LBA; | ||
958 | 986 | ||
959 | tf.lbal = (new_sectors >> 0) & 0xff; | 987 | tf.lbal = (new_sectors >> 0) & 0xff; |
960 | tf.lbam = (new_sectors >> 8) & 0xff; | 988 | tf.lbam = (new_sectors >> 8) & 0xff; |
961 | tf.lbah = (new_sectors >> 16) & 0xff; | 989 | tf.lbah = (new_sectors >> 16) & 0xff; |
962 | tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40; | ||
963 | 990 | ||
964 | err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 991 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
965 | if (err) | 992 | if (err_mask) { |
966 | return 0; | 993 | ata_dev_printk(dev, KERN_WARNING, "failed to set " |
994 | "max address (err_mask=0x%x)\n", err_mask); | ||
995 | if (err_mask == AC_ERR_DEV && | ||
996 | (tf.feature & (ATA_ABORTED | ATA_IDNF))) | ||
997 | return -EACCES; | ||
998 | return -EIO; | ||
999 | } | ||
967 | 1000 | ||
968 | return ata_tf_to_lba(&tf); | 1001 | return 0; |
969 | } | 1002 | } |
970 | 1003 | ||
971 | /** | 1004 | /** |
@@ -975,60 +1008,93 @@ static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors) | |||
975 | * Read the size of an LBA28 or LBA48 disk with HPA features and resize | 1008 | * Read the size of an LBA28 or LBA48 disk with HPA features and resize |
976 | * it if required to the full size of the media. The caller must check | 1009 | * it if required to the full size of the media. The caller must check |
977 | * the drive has the HPA feature set enabled. | 1010 | * the drive has the HPA feature set enabled. |
1011 | * | ||
1012 | * RETURNS: | ||
1013 | * 0 on success, -errno on failure. | ||
978 | */ | 1014 | */ |
979 | 1015 | static int ata_hpa_resize(struct ata_device *dev) | |
980 | static u64 ata_hpa_resize(struct ata_device *dev) | ||
981 | { | 1016 | { |
982 | u64 sectors = dev->n_sectors; | 1017 | struct ata_eh_context *ehc = &dev->link->eh_context; |
983 | u64 hpa_sectors; | 1018 | int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; |
1019 | u64 sectors = ata_id_n_sectors(dev->id); | ||
1020 | u64 native_sectors; | ||
1021 | int rc; | ||
984 | 1022 | ||
985 | if (ata_id_has_lba48(dev->id)) | 1023 | /* do we need to do it? */ |
986 | hpa_sectors = ata_read_native_max_address_ext(dev); | 1024 | if (dev->class != ATA_DEV_ATA || |
987 | else | 1025 | !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || |
988 | hpa_sectors = ata_read_native_max_address(dev); | 1026 | (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) |
1027 | return 0; | ||
989 | 1028 | ||
990 | if (hpa_sectors > sectors) { | 1029 | /* read native max address */ |
991 | ata_dev_printk(dev, KERN_INFO, | 1030 | rc = ata_read_native_max_address(dev, &native_sectors); |
992 | "Host Protected Area detected:\n" | 1031 | if (rc) { |
993 | "\tcurrent size: %lld sectors\n" | 1032 | /* If HPA isn't going to be unlocked, skip HPA |
994 | "\tnative size: %lld sectors\n", | 1033 | * resizing from the next try. |
995 | (long long)sectors, (long long)hpa_sectors); | 1034 | */ |
996 | 1035 | if (!ata_ignore_hpa) { | |
997 | if (ata_ignore_hpa) { | 1036 | ata_dev_printk(dev, KERN_WARNING, "HPA support seems " |
998 | if (ata_id_has_lba48(dev->id)) | 1037 | "broken, will skip HPA handling\n"); |
999 | hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors); | 1038 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; |
1000 | else | 1039 | |
1001 | hpa_sectors = ata_set_native_max_address(dev, | 1040 | /* we can continue if device aborted the command */ |
1002 | hpa_sectors); | 1041 | if (rc == -EACCES) |
1003 | 1042 | rc = 0; | |
1004 | if (hpa_sectors) { | ||
1005 | ata_dev_printk(dev, KERN_INFO, "native size " | ||
1006 | "increased to %lld sectors\n", | ||
1007 | (long long)hpa_sectors); | ||
1008 | return hpa_sectors; | ||
1009 | } | ||
1010 | } | 1043 | } |
1011 | } else if (hpa_sectors < sectors) | ||
1012 | ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) " | ||
1013 | "is smaller than sectors (%lld)\n", __FUNCTION__, | ||
1014 | (long long)hpa_sectors, (long long)sectors); | ||
1015 | 1044 | ||
1016 | return sectors; | 1045 | return rc; |
1017 | } | 1046 | } |
1018 | 1047 | ||
1019 | static u64 ata_id_n_sectors(const u16 *id) | 1048 | /* nothing to do? */ |
1020 | { | 1049 | if (native_sectors <= sectors || !ata_ignore_hpa) { |
1021 | if (ata_id_has_lba(id)) { | 1050 | if (!print_info || native_sectors == sectors) |
1022 | if (ata_id_has_lba48(id)) | 1051 | return 0; |
1023 | return ata_id_u64(id, 100); | 1052 | |
1024 | else | 1053 | if (native_sectors > sectors) |
1025 | return ata_id_u32(id, 60); | 1054 | ata_dev_printk(dev, KERN_INFO, |
1026 | } else { | 1055 | "HPA detected: current %llu, native %llu\n", |
1027 | if (ata_id_current_chs_valid(id)) | 1056 | (unsigned long long)sectors, |
1028 | return ata_id_u32(id, 57); | 1057 | (unsigned long long)native_sectors); |
1029 | else | 1058 | else if (native_sectors < sectors) |
1030 | return id[1] * id[3] * id[6]; | 1059 | ata_dev_printk(dev, KERN_WARNING, |
1060 | "native sectors (%llu) is smaller than " | ||
1061 | "sectors (%llu)\n", | ||
1062 | (unsigned long long)native_sectors, | ||
1063 | (unsigned long long)sectors); | ||
1064 | return 0; | ||
1031 | } | 1065 | } |
1066 | |||
1067 | /* let's unlock HPA */ | ||
1068 | rc = ata_set_max_sectors(dev, native_sectors); | ||
1069 | if (rc == -EACCES) { | ||
1070 | /* if device aborted the command, skip HPA resizing */ | ||
1071 | ata_dev_printk(dev, KERN_WARNING, "device aborted resize " | ||
1072 | "(%llu -> %llu), skipping HPA handling\n", | ||
1073 | (unsigned long long)sectors, | ||
1074 | (unsigned long long)native_sectors); | ||
1075 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; | ||
1076 | return 0; | ||
1077 | } else if (rc) | ||
1078 | return rc; | ||
1079 | |||
1080 | /* re-read IDENTIFY data */ | ||
1081 | rc = ata_dev_reread_id(dev, 0); | ||
1082 | if (rc) { | ||
1083 | ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY " | ||
1084 | "data after HPA resizing\n"); | ||
1085 | return rc; | ||
1086 | } | ||
1087 | |||
1088 | if (print_info) { | ||
1089 | u64 new_sectors = ata_id_n_sectors(dev->id); | ||
1090 | ata_dev_printk(dev, KERN_INFO, | ||
1091 | "HPA unlocked: %llu -> %llu, native %llu\n", | ||
1092 | (unsigned long long)sectors, | ||
1093 | (unsigned long long)new_sectors, | ||
1094 | (unsigned long long)native_sectors); | ||
1095 | } | ||
1096 | |||
1097 | return 0; | ||
1032 | } | 1098 | } |
1033 | 1099 | ||
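In words, the rewritten resize path works in three stages: bail out early for non-disks, non-LBA devices, drives without HPA enabled, or devices already flagged broken; otherwise read the native max address and, unless unlocking was requested via ata_ignore_hpa, only report the current and native sizes; only when unlocking is requested and the native size is larger does it issue SET MAX and re-read IDENTIFY. A compressed standalone sketch of that decision order (function and parameter names here are stand-ins, not kernel identifiers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Rough decision order of the rewritten resize path. */
enum hpa_action { HPA_SKIP, HPA_REPORT_ONLY, HPA_UNLOCK };

static enum hpa_action hpa_decide(bool is_ata_disk, bool has_lba,
				  bool hpa_enabled, bool quirk_broken,
				  bool unlock_requested,
				  uint64_t current_sectors,
				  uint64_t native_sectors)
{
	if (!is_ata_disk || !has_lba || !hpa_enabled || quirk_broken)
		return HPA_SKIP;		/* nothing to do */
	if (native_sectors <= current_sectors || !unlock_requested)
		return HPA_REPORT_ONLY;		/* just print the sizes */
	return HPA_UNLOCK;			/* SET MAX, then re-IDENTIFY */
}

int main(void)
{
	/* e.g. an HPA-limited disk with unlocking enabled */
	printf("%d\n", hpa_decide(true, true, true, false, true,
				  1000000, 1048576));	/* -> HPA_UNLOCK */
	return 0;
}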
1034 | /** | 1100 | /** |
@@ -1150,7 +1216,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, | |||
1150 | ap->ops->dev_select(ap, device); | 1216 | ap->ops->dev_select(ap, device); |
1151 | 1217 | ||
1152 | if (wait) { | 1218 | if (wait) { |
1153 | if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) | 1219 | if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) |
1154 | msleep(150); | 1220 | msleep(150); |
1155 | ata_wait_idle(ap); | 1221 | ata_wait_idle(ap); |
1156 | } | 1222 | } |
@@ -1328,6 +1394,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) | |||
1328 | * @dma_dir: Data transfer direction of the command | 1394 | * @dma_dir: Data transfer direction of the command |
1329 | * @sg: sg list for the data buffer of the command | 1395 | * @sg: sg list for the data buffer of the command |
1330 | * @n_elem: Number of sg entries | 1396 | * @n_elem: Number of sg entries |
1397 | * @timeout: Timeout in msecs (0 for default) | ||
1331 | * | 1398 | * |
1332 | * Executes libata internal command with timeout. @tf contains | 1399 | * Executes libata internal command with timeout. @tf contains |
1333 | * command on entry and result on return. Timeout and error | 1400 | * command on entry and result on return. Timeout and error |
@@ -1344,13 +1411,15 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) | |||
1344 | unsigned ata_exec_internal_sg(struct ata_device *dev, | 1411 | unsigned ata_exec_internal_sg(struct ata_device *dev, |
1345 | struct ata_taskfile *tf, const u8 *cdb, | 1412 | struct ata_taskfile *tf, const u8 *cdb, |
1346 | int dma_dir, struct scatterlist *sg, | 1413 | int dma_dir, struct scatterlist *sg, |
1347 | unsigned int n_elem) | 1414 | unsigned int n_elem, unsigned long timeout) |
1348 | { | 1415 | { |
1349 | struct ata_port *ap = dev->ap; | 1416 | struct ata_link *link = dev->link; |
1417 | struct ata_port *ap = link->ap; | ||
1350 | u8 command = tf->command; | 1418 | u8 command = tf->command; |
1351 | struct ata_queued_cmd *qc; | 1419 | struct ata_queued_cmd *qc; |
1352 | unsigned int tag, preempted_tag; | 1420 | unsigned int tag, preempted_tag; |
1353 | u32 preempted_sactive, preempted_qc_active; | 1421 | u32 preempted_sactive, preempted_qc_active; |
1422 | int preempted_nr_active_links; | ||
1354 | DECLARE_COMPLETION_ONSTACK(wait); | 1423 | DECLARE_COMPLETION_ONSTACK(wait); |
1355 | unsigned long flags; | 1424 | unsigned long flags; |
1356 | unsigned int err_mask; | 1425 | unsigned int err_mask; |
@@ -1386,12 +1455,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1386 | qc->dev = dev; | 1455 | qc->dev = dev; |
1387 | ata_qc_reinit(qc); | 1456 | ata_qc_reinit(qc); |
1388 | 1457 | ||
1389 | preempted_tag = ap->active_tag; | 1458 | preempted_tag = link->active_tag; |
1390 | preempted_sactive = ap->sactive; | 1459 | preempted_sactive = link->sactive; |
1391 | preempted_qc_active = ap->qc_active; | 1460 | preempted_qc_active = ap->qc_active; |
1392 | ap->active_tag = ATA_TAG_POISON; | 1461 | preempted_nr_active_links = ap->nr_active_links; |
1393 | ap->sactive = 0; | 1462 | link->active_tag = ATA_TAG_POISON; |
1463 | link->sactive = 0; | ||
1394 | ap->qc_active = 0; | 1464 | ap->qc_active = 0; |
1465 | ap->nr_active_links = 0; | ||
1395 | 1466 | ||
1396 | /* prepare & issue qc */ | 1467 | /* prepare & issue qc */ |
1397 | qc->tf = *tf; | 1468 | qc->tf = *tf; |
@@ -1416,7 +1487,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1416 | 1487 | ||
1417 | spin_unlock_irqrestore(ap->lock, flags); | 1488 | spin_unlock_irqrestore(ap->lock, flags); |
1418 | 1489 | ||
1419 | rc = wait_for_completion_timeout(&wait, ata_probe_timeout); | 1490 | if (!timeout) |
1491 | timeout = ata_probe_timeout * 1000 / HZ; | ||
1492 | |||
1493 | rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); | ||
1420 | 1494 | ||
1421 | ata_port_flush_task(ap); | 1495 | ata_port_flush_task(ap); |
1422 | 1496 | ||
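The internal-command path now takes its timeout in milliseconds and falls back to the old ata_probe_timeout value (kept in jiffies) when a caller passes 0, so the default is first converted jiffies-to-msec and the final wait always goes through msecs_to_jiffies(). A small userspace illustration of that round trip, assuming HZ is 250 and using a simplified stand-in for the conversion helper:

#include <stdio.h>

#define HZ 250				/* assumed tick rate for the example */

/* Simplified stand-in for the kernel helper: msec -> ticks, rounding up. */
static unsigned long msecs_to_jiffies(unsigned long msecs)
{
	return (msecs * HZ + 999) / 1000;
}

int main(void)
{
	unsigned long probe_timeout_jiffies = 30 * HZ;	/* e.g. 30 s default */
	unsigned long timeout_msecs = 0;		/* caller asked for default */

	if (!timeout_msecs)
		timeout_msecs = probe_timeout_jiffies * 1000 / HZ;

	printf("wait for %lu ms (%lu jiffies)\n",
	       timeout_msecs, msecs_to_jiffies(timeout_msecs));
	return 0;
}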
@@ -1467,9 +1541,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1467 | err_mask = qc->err_mask; | 1541 | err_mask = qc->err_mask; |
1468 | 1542 | ||
1469 | ata_qc_free(qc); | 1543 | ata_qc_free(qc); |
1470 | ap->active_tag = preempted_tag; | 1544 | link->active_tag = preempted_tag; |
1471 | ap->sactive = preempted_sactive; | 1545 | link->sactive = preempted_sactive; |
1472 | ap->qc_active = preempted_qc_active; | 1546 | ap->qc_active = preempted_qc_active; |
1547 | ap->nr_active_links = preempted_nr_active_links; | ||
1473 | 1548 | ||
1474 | /* XXX - Some LLDDs (sata_mv) disable port on command failure. | 1549 | /* XXX - Some LLDDs (sata_mv) disable port on command failure. |
1475 | * Until those drivers are fixed, we detect the condition | 1550 | * Until those drivers are fixed, we detect the condition |
@@ -1500,6 +1575,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1500 | * @dma_dir: Data transfer direction of the command | 1575 | * @dma_dir: Data transfer direction of the command |
1501 | * @buf: Data buffer of the command | 1576 | * @buf: Data buffer of the command |
1502 | * @buflen: Length of data buffer | 1577 | * @buflen: Length of data buffer |
1578 | * @timeout: Timeout in msecs (0 for default) | ||
1503 | * | 1579 | * |
1504 | * Wrapper around ata_exec_internal_sg() which takes simple | 1580 | * Wrapper around ata_exec_internal_sg() which takes simple |
1505 | * buffer instead of sg list. | 1581 | * buffer instead of sg list. |
@@ -1512,7 +1588,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1512 | */ | 1588 | */ |
1513 | unsigned ata_exec_internal(struct ata_device *dev, | 1589 | unsigned ata_exec_internal(struct ata_device *dev, |
1514 | struct ata_taskfile *tf, const u8 *cdb, | 1590 | struct ata_taskfile *tf, const u8 *cdb, |
1515 | int dma_dir, void *buf, unsigned int buflen) | 1591 | int dma_dir, void *buf, unsigned int buflen, |
1592 | unsigned long timeout) | ||
1516 | { | 1593 | { |
1517 | struct scatterlist *psg = NULL, sg; | 1594 | struct scatterlist *psg = NULL, sg; |
1518 | unsigned int n_elem = 0; | 1595 | unsigned int n_elem = 0; |
@@ -1524,7 +1601,8 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1524 | n_elem++; | 1601 | n_elem++; |
1525 | } | 1602 | } |
1526 | 1603 | ||
1527 | return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem); | 1604 | return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, |
1605 | timeout); | ||
1528 | } | 1606 | } |
1529 | 1607 | ||
1530 | /** | 1608 | /** |
@@ -1551,7 +1629,7 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) | |||
1551 | tf.flags |= ATA_TFLAG_DEVICE; | 1629 | tf.flags |= ATA_TFLAG_DEVICE; |
1552 | tf.protocol = ATA_PROT_NODATA; | 1630 | tf.protocol = ATA_PROT_NODATA; |
1553 | 1631 | ||
1554 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 1632 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
1555 | } | 1633 | } |
1556 | 1634 | ||
1557 | /** | 1635 | /** |
@@ -1566,7 +1644,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) | |||
1566 | { | 1644 | { |
1567 | /* Controller doesn't support IORDY. Probably a pointless check | 1645 | /* Controller doesn't support IORDY. Probably a pointless check |
1568 | as the caller should know this */ | 1646 | as the caller should know this */ |
1569 | if (adev->ap->flags & ATA_FLAG_NO_IORDY) | 1647 | if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) |
1570 | return 0; | 1648 | return 0; |
1571 | /* PIO3 and higher it is mandatory */ | 1649 | /* PIO3 and higher it is mandatory */ |
1572 | if (adev->pio_mode > XFER_PIO_2) | 1650 | if (adev->pio_mode > XFER_PIO_2) |
@@ -1613,6 +1691,9 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) | |||
1613 | * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS | 1691 | * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS |
1614 | * for pre-ATA4 drives. | 1692 | * for pre-ATA4 drives. |
1615 | * | 1693 | * |
1694 | * FIXME: ATA_CMD_ID_ATA is optional for early drives and right | ||
1695 | * now we abort if we hit that case. | ||
1696 | * | ||
1616 | * LOCKING: | 1697 | * LOCKING: |
1617 | * Kernel thread context (may sleep) | 1698 | * Kernel thread context (may sleep) |
1618 | * | 1699 | * |
@@ -1622,7 +1703,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) | |||
1622 | int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | 1703 | int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
1623 | unsigned int flags, u16 *id) | 1704 | unsigned int flags, u16 *id) |
1624 | { | 1705 | { |
1625 | struct ata_port *ap = dev->ap; | 1706 | struct ata_port *ap = dev->link->ap; |
1626 | unsigned int class = *p_class; | 1707 | unsigned int class = *p_class; |
1627 | struct ata_taskfile tf; | 1708 | struct ata_taskfile tf; |
1628 | unsigned int err_mask = 0; | 1709 | unsigned int err_mask = 0; |
@@ -1663,7 +1744,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1663 | tf.flags |= ATA_TFLAG_POLLING; | 1744 | tf.flags |= ATA_TFLAG_POLLING; |
1664 | 1745 | ||
1665 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, | 1746 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, |
1666 | id, sizeof(id[0]) * ATA_ID_WORDS); | 1747 | id, sizeof(id[0]) * ATA_ID_WORDS, 0); |
1667 | if (err_mask) { | 1748 | if (err_mask) { |
1668 | if (err_mask & AC_ERR_NODEV_HINT) { | 1749 | if (err_mask & AC_ERR_NODEV_HINT) { |
1669 | DPRINTK("ata%u.%d: NODEV after polling detection\n", | 1750 | DPRINTK("ata%u.%d: NODEV after polling detection\n", |
@@ -1722,7 +1803,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1722 | tf.feature = SETFEATURES_SPINUP; | 1803 | tf.feature = SETFEATURES_SPINUP; |
1723 | tf.protocol = ATA_PROT_NODATA; | 1804 | tf.protocol = ATA_PROT_NODATA; |
1724 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 1805 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
1725 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 1806 | err_mask = ata_exec_internal(dev, &tf, NULL, |
1807 | DMA_NONE, NULL, 0, 0); | ||
1726 | if (err_mask && id[2] != 0x738c) { | 1808 | if (err_mask && id[2] != 0x738c) { |
1727 | rc = -EIO; | 1809 | rc = -EIO; |
1728 | reason = "SPINUP failed"; | 1810 | reason = "SPINUP failed"; |
@@ -1740,10 +1822,13 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1740 | /* | 1822 | /* |
1741 | * The exact sequence expected by certain pre-ATA4 drives is: | 1823 | * The exact sequence expected by certain pre-ATA4 drives is: |
1742 | * SRST RESET | 1824 | * SRST RESET |
1743 | * IDENTIFY | 1825 | * IDENTIFY (optional in early ATA) |
1744 | * INITIALIZE DEVICE PARAMETERS | 1826 | * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) |
1745 | * anything else.. | 1827 | * anything else.. |
1746 | * Some drives were very specific about that exact sequence. | 1828 | * Some drives were very specific about that exact sequence. |
1829 | * | ||
1830 | * Note that ATA4 says lba is mandatory so the second check | ||
1831 | * should never trigger. | ||
1747 | */ | 1832 | */ |
1748 | if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { | 1833 | if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { |
1749 | err_mask = ata_dev_init_params(dev, id[3], id[6]); | 1834 | err_mask = ata_dev_init_params(dev, id[3], id[6]); |
@@ -1774,13 +1859,14 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1774 | 1859 | ||
1775 | static inline u8 ata_dev_knobble(struct ata_device *dev) | 1860 | static inline u8 ata_dev_knobble(struct ata_device *dev) |
1776 | { | 1861 | { |
1777 | return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); | 1862 | struct ata_port *ap = dev->link->ap; |
1863 | return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); | ||
1778 | } | 1864 | } |
1779 | 1865 | ||
1780 | static void ata_dev_config_ncq(struct ata_device *dev, | 1866 | static void ata_dev_config_ncq(struct ata_device *dev, |
1781 | char *desc, size_t desc_sz) | 1867 | char *desc, size_t desc_sz) |
1782 | { | 1868 | { |
1783 | struct ata_port *ap = dev->ap; | 1869 | struct ata_port *ap = dev->link->ap; |
1784 | int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); | 1870 | int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); |
1785 | 1871 | ||
1786 | if (!ata_id_has_ncq(dev->id)) { | 1872 | if (!ata_id_has_ncq(dev->id)) { |
@@ -1817,8 +1903,8 @@ static void ata_dev_config_ncq(struct ata_device *dev, | |||
1817 | */ | 1903 | */ |
1818 | int ata_dev_configure(struct ata_device *dev) | 1904 | int ata_dev_configure(struct ata_device *dev) |
1819 | { | 1905 | { |
1820 | struct ata_port *ap = dev->ap; | 1906 | struct ata_port *ap = dev->link->ap; |
1821 | struct ata_eh_context *ehc = &ap->eh_context; | 1907 | struct ata_eh_context *ehc = &dev->link->eh_context; |
1822 | int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; | 1908 | int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; |
1823 | const u16 *id = dev->id; | 1909 | const u16 *id = dev->id; |
1824 | unsigned int xfer_mask; | 1910 | unsigned int xfer_mask; |
@@ -1844,6 +1930,11 @@ int ata_dev_configure(struct ata_device *dev) | |||
1844 | if (rc) | 1930 | if (rc) |
1845 | return rc; | 1931 | return rc; |
1846 | 1932 | ||
1933 | /* massage HPA, do it early as it might change IDENTIFY data */ | ||
1934 | rc = ata_hpa_resize(dev); | ||
1935 | if (rc) | ||
1936 | return rc; | ||
1937 | |||
1847 | /* print device capabilities */ | 1938 | /* print device capabilities */ |
1848 | if (ata_msg_probe(ap)) | 1939 | if (ata_msg_probe(ap)) |
1849 | ata_dev_printk(dev, KERN_DEBUG, | 1940 | ata_dev_printk(dev, KERN_DEBUG, |
@@ -1911,10 +2002,6 @@ int ata_dev_configure(struct ata_device *dev) | |||
1911 | dev->flags |= ATA_DFLAG_FLUSH_EXT; | 2002 | dev->flags |= ATA_DFLAG_FLUSH_EXT; |
1912 | } | 2003 | } |
1913 | 2004 | ||
1914 | if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) && | ||
1915 | ata_id_hpa_enabled(dev->id)) | ||
1916 | dev->n_sectors = ata_hpa_resize(dev); | ||
1917 | |||
1918 | /* config NCQ */ | 2005 | /* config NCQ */ |
1919 | ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); | 2006 | ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); |
1920 | 2007 | ||
@@ -1963,7 +2050,9 @@ int ata_dev_configure(struct ata_device *dev) | |||
1963 | 2050 | ||
1964 | /* ATAPI-specific feature tests */ | 2051 | /* ATAPI-specific feature tests */ |
1965 | else if (dev->class == ATA_DEV_ATAPI) { | 2052 | else if (dev->class == ATA_DEV_ATAPI) { |
1966 | char *cdb_intr_string = ""; | 2053 | const char *cdb_intr_string = ""; |
2054 | const char *atapi_an_string = ""; | ||
2055 | u32 sntf; | ||
1967 | 2056 | ||
1968 | rc = atapi_cdb_len(id); | 2057 | rc = atapi_cdb_len(id); |
1969 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { | 2058 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { |
@@ -1975,6 +2064,28 @@ int ata_dev_configure(struct ata_device *dev) | |||
1975 | } | 2064 | } |
1976 | dev->cdb_len = (unsigned int) rc; | 2065 | dev->cdb_len = (unsigned int) rc; |
1977 | 2066 | ||
2067 | /* Enable ATAPI AN if both the host and device have | ||
2068 | * the support. If PMP is attached, SNTF is required | ||
2069 | * to enable ATAPI AN to discern between PHY status | ||
2070 | * changed notifications and ATAPI ANs. | ||
2071 | */ | ||
2072 | if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && | ||
2073 | (!ap->nr_pmp_links || | ||
2074 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { | ||
2075 | unsigned int err_mask; | ||
2076 | |||
2077 | /* issue SET feature command to turn this on */ | ||
2078 | err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE); | ||
2079 | if (err_mask) | ||
2080 | ata_dev_printk(dev, KERN_ERR, | ||
2081 | "failed to enable ATAPI AN " | ||
2082 | "(err_mask=0x%x)\n", err_mask); | ||
2083 | else { | ||
2084 | dev->flags |= ATA_DFLAG_AN; | ||
2085 | atapi_an_string = ", ATAPI AN"; | ||
2086 | } | ||
2087 | } | ||
2088 | |||
1978 | if (ata_id_cdb_intr(dev->id)) { | 2089 | if (ata_id_cdb_intr(dev->id)) { |
1979 | dev->flags |= ATA_DFLAG_CDB_INTR; | 2090 | dev->flags |= ATA_DFLAG_CDB_INTR; |
1980 | cdb_intr_string = ", CDB intr"; | 2091 | cdb_intr_string = ", CDB intr"; |
@@ -1983,10 +2094,10 @@ int ata_dev_configure(struct ata_device *dev) | |||
1983 | /* print device info to dmesg */ | 2094 | /* print device info to dmesg */ |
1984 | if (ata_msg_drv(ap) && print_info) | 2095 | if (ata_msg_drv(ap) && print_info) |
1985 | ata_dev_printk(dev, KERN_INFO, | 2096 | ata_dev_printk(dev, KERN_INFO, |
1986 | "ATAPI: %s, %s, max %s%s\n", | 2097 | "ATAPI: %s, %s, max %s%s%s\n", |
1987 | modelbuf, fwrevbuf, | 2098 | modelbuf, fwrevbuf, |
1988 | ata_mode_string(xfer_mask), | 2099 | ata_mode_string(xfer_mask), |
1989 | cdb_intr_string); | 2100 | cdb_intr_string, atapi_an_string); |
1990 | } | 2101 | } |
1991 | 2102 | ||
1992 | /* determine max_sectors */ | 2103 | /* determine max_sectors */ |
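The ATAPI AN hunk above only issues the SET FEATURES enable when three things line up: the host advertises AN support, the device's IDENTIFY data claims AN, and, if a port multiplier is attached, the SNotification register is readable so AN events can be told apart from PHY status change notifications. A standalone sketch of that gating check (parameter names are illustrative, not libata identifiers):

#include <stdbool.h>
#include <stdio.h>

/* Condition checked before issuing SET FEATURES to enable ATAPI AN. */
static bool should_enable_atapi_an(bool host_supports_an,
				   bool device_claims_an,
				   bool pmp_attached,
				   bool sntf_readable)
{
	if (!host_supports_an || !device_claims_an)
		return false;
	/* behind a port multiplier, SNotification is required to tell
	 * ATAPI ANs apart from PHY status change notifications */
	if (pmp_attached && !sntf_readable)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_enable_atapi_an(true, true, true, false));	/* 0 */
	printf("%d\n", should_enable_atapi_an(true, true, false, false));	/* 1 */
	return 0;
}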
@@ -2103,21 +2214,19 @@ int ata_bus_probe(struct ata_port *ap) | |||
2103 | { | 2214 | { |
2104 | unsigned int classes[ATA_MAX_DEVICES]; | 2215 | unsigned int classes[ATA_MAX_DEVICES]; |
2105 | int tries[ATA_MAX_DEVICES]; | 2216 | int tries[ATA_MAX_DEVICES]; |
2106 | int i, rc; | 2217 | int rc; |
2107 | struct ata_device *dev; | 2218 | struct ata_device *dev; |
2108 | 2219 | ||
2109 | ata_port_probe(ap); | 2220 | ata_port_probe(ap); |
2110 | 2221 | ||
2111 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2222 | ata_link_for_each_dev(dev, &ap->link) |
2112 | tries[i] = ATA_PROBE_MAX_TRIES; | 2223 | tries[dev->devno] = ATA_PROBE_MAX_TRIES; |
2113 | 2224 | ||
2114 | retry: | 2225 | retry: |
2115 | /* reset and determine device classes */ | 2226 | /* reset and determine device classes */ |
2116 | ap->ops->phy_reset(ap); | 2227 | ap->ops->phy_reset(ap); |
2117 | 2228 | ||
2118 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2229 | ata_link_for_each_dev(dev, &ap->link) { |
2119 | dev = &ap->device[i]; | ||
2120 | |||
2121 | if (!(ap->flags & ATA_FLAG_DISABLED) && | 2230 | if (!(ap->flags & ATA_FLAG_DISABLED) && |
2122 | dev->class != ATA_DEV_UNKNOWN) | 2231 | dev->class != ATA_DEV_UNKNOWN) |
2123 | classes[dev->devno] = dev->class; | 2232 | classes[dev->devno] = dev->class; |
@@ -2132,18 +2241,16 @@ int ata_bus_probe(struct ata_port *ap) | |||
2132 | /* after the reset the device state is PIO 0 and the controller | 2241 | /* after the reset the device state is PIO 0 and the controller |
2133 | state is undefined. Record the mode */ | 2242 | state is undefined. Record the mode */ |
2134 | 2243 | ||
2135 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2244 | ata_link_for_each_dev(dev, &ap->link) |
2136 | ap->device[i].pio_mode = XFER_PIO_0; | 2245 | dev->pio_mode = XFER_PIO_0; |
2137 | 2246 | ||
2138 | /* read IDENTIFY page and configure devices. We have to do the identify | 2247 | /* read IDENTIFY page and configure devices. We have to do the identify |
2139 | specific sequence bass-ackwards so that PDIAG- is released by | 2248 | specific sequence bass-ackwards so that PDIAG- is released by |
2140 | the slave device */ | 2249 | the slave device */ |
2141 | 2250 | ||
2142 | for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) { | 2251 | ata_link_for_each_dev(dev, &ap->link) { |
2143 | dev = &ap->device[i]; | 2252 | if (tries[dev->devno]) |
2144 | 2253 | dev->class = classes[dev->devno]; | |
2145 | if (tries[i]) | ||
2146 | dev->class = classes[i]; | ||
2147 | 2254 | ||
2148 | if (!ata_dev_enabled(dev)) | 2255 | if (!ata_dev_enabled(dev)) |
2149 | continue; | 2256 | continue; |
@@ -2158,33 +2265,42 @@ int ata_bus_probe(struct ata_port *ap) | |||
2158 | if (ap->ops->cable_detect) | 2265 | if (ap->ops->cable_detect) |
2159 | ap->cbl = ap->ops->cable_detect(ap); | 2266 | ap->cbl = ap->ops->cable_detect(ap); |
2160 | 2267 | ||
2268 | /* We may have SATA bridge glue hiding here irrespective of the | ||
2269 | reported cable types and sensed types */ | ||
2270 | ata_link_for_each_dev(dev, &ap->link) { | ||
2271 | if (!ata_dev_enabled(dev)) | ||
2272 | continue; | ||
2273 | /* SATA drives indicate we have a bridge. We don't know which | ||
2274 | end of the link the bridge is on, which is a problem */ | ||
2275 | if (ata_id_is_sata(dev->id)) | ||
2276 | ap->cbl = ATA_CBL_SATA; | ||
2277 | } | ||
2278 | |||
2161 | /* After the identify sequence we can now set up the devices. We do | 2279 | /* After the identify sequence we can now set up the devices. We do |
2162 | this in the normal order so that the user doesn't get confused */ | 2280 | this in the normal order so that the user doesn't get confused */ |
2163 | 2281 | ||
2164 | for(i = 0; i < ATA_MAX_DEVICES; i++) { | 2282 | ata_link_for_each_dev(dev, &ap->link) { |
2165 | dev = &ap->device[i]; | ||
2166 | if (!ata_dev_enabled(dev)) | 2283 | if (!ata_dev_enabled(dev)) |
2167 | continue; | 2284 | continue; |
2168 | 2285 | ||
2169 | ap->eh_context.i.flags |= ATA_EHI_PRINTINFO; | 2286 | ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; |
2170 | rc = ata_dev_configure(dev); | 2287 | rc = ata_dev_configure(dev); |
2171 | ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO; | 2288 | ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; |
2172 | if (rc) | 2289 | if (rc) |
2173 | goto fail; | 2290 | goto fail; |
2174 | } | 2291 | } |
2175 | 2292 | ||
2176 | /* configure transfer mode */ | 2293 | /* configure transfer mode */ |
2177 | rc = ata_set_mode(ap, &dev); | 2294 | rc = ata_set_mode(&ap->link, &dev); |
2178 | if (rc) | 2295 | if (rc) |
2179 | goto fail; | 2296 | goto fail; |
2180 | 2297 | ||
2181 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2298 | ata_link_for_each_dev(dev, &ap->link) |
2182 | if (ata_dev_enabled(&ap->device[i])) | 2299 | if (ata_dev_enabled(dev)) |
2183 | return 0; | 2300 | return 0; |
2184 | 2301 | ||
2185 | /* no device present, disable port */ | 2302 | /* no device present, disable port */ |
2186 | ata_port_disable(ap); | 2303 | ata_port_disable(ap); |
2187 | ap->ops->port_disable(ap); | ||
2188 | return -ENODEV; | 2304 | return -ENODEV; |
2189 | 2305 | ||
2190 | fail: | 2306 | fail: |
@@ -2204,7 +2320,7 @@ int ata_bus_probe(struct ata_port *ap) | |||
2204 | /* This is the last chance, better to slow | 2320 | /* This is the last chance, better to slow |
2205 | * down than lose it. | 2321 | * down than lose it. |
2206 | */ | 2322 | */ |
2207 | sata_down_spd_limit(ap); | 2323 | sata_down_spd_limit(&ap->link); |
2208 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); | 2324 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); |
2209 | } | 2325 | } |
2210 | } | 2326 | } |
@@ -2233,28 +2349,28 @@ void ata_port_probe(struct ata_port *ap) | |||
2233 | 2349 | ||
2234 | /** | 2350 | /** |
2235 | * sata_print_link_status - Print SATA link status | 2351 | * sata_print_link_status - Print SATA link status |
2236 | * @ap: SATA port to printk link status about | 2352 | * @link: SATA link to printk link status about |
2237 | * | 2353 | * |
2238 | * This function prints link speed and status of a SATA link. | 2354 | * This function prints link speed and status of a SATA link. |
2239 | * | 2355 | * |
2240 | * LOCKING: | 2356 | * LOCKING: |
2241 | * None. | 2357 | * None. |
2242 | */ | 2358 | */ |
2243 | void sata_print_link_status(struct ata_port *ap) | 2359 | void sata_print_link_status(struct ata_link *link) |
2244 | { | 2360 | { |
2245 | u32 sstatus, scontrol, tmp; | 2361 | u32 sstatus, scontrol, tmp; |
2246 | 2362 | ||
2247 | if (sata_scr_read(ap, SCR_STATUS, &sstatus)) | 2363 | if (sata_scr_read(link, SCR_STATUS, &sstatus)) |
2248 | return; | 2364 | return; |
2249 | sata_scr_read(ap, SCR_CONTROL, &scontrol); | 2365 | sata_scr_read(link, SCR_CONTROL, &scontrol); |
2250 | 2366 | ||
2251 | if (ata_port_online(ap)) { | 2367 | if (ata_link_online(link)) { |
2252 | tmp = (sstatus >> 4) & 0xf; | 2368 | tmp = (sstatus >> 4) & 0xf; |
2253 | ata_port_printk(ap, KERN_INFO, | 2369 | ata_link_printk(link, KERN_INFO, |
2254 | "SATA link up %s (SStatus %X SControl %X)\n", | 2370 | "SATA link up %s (SStatus %X SControl %X)\n", |
2255 | sata_spd_string(tmp), sstatus, scontrol); | 2371 | sata_spd_string(tmp), sstatus, scontrol); |
2256 | } else { | 2372 | } else { |
2257 | ata_port_printk(ap, KERN_INFO, | 2373 | ata_link_printk(link, KERN_INFO, |
2258 | "SATA link down (SStatus %X SControl %X)\n", | 2374 | "SATA link down (SStatus %X SControl %X)\n", |
2259 | sstatus, scontrol); | 2375 | sstatus, scontrol); |
2260 | } | 2376 | } |
@@ -2274,32 +2390,33 @@ void sata_print_link_status(struct ata_port *ap) | |||
2274 | */ | 2390 | */ |
2275 | void __sata_phy_reset(struct ata_port *ap) | 2391 | void __sata_phy_reset(struct ata_port *ap) |
2276 | { | 2392 | { |
2277 | u32 sstatus; | 2393 | struct ata_link *link = &ap->link; |
2278 | unsigned long timeout = jiffies + (HZ * 5); | 2394 | unsigned long timeout = jiffies + (HZ * 5); |
2395 | u32 sstatus; | ||
2279 | 2396 | ||
2280 | if (ap->flags & ATA_FLAG_SATA_RESET) { | 2397 | if (ap->flags & ATA_FLAG_SATA_RESET) { |
2281 | /* issue phy wake/reset */ | 2398 | /* issue phy wake/reset */ |
2282 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); | 2399 | sata_scr_write_flush(link, SCR_CONTROL, 0x301); |
2283 | /* Couldn't find anything in SATA I/II specs, but | 2400 | /* Couldn't find anything in SATA I/II specs, but |
2284 | * AHCI-1.1 10.4.2 says at least 1 ms. */ | 2401 | * AHCI-1.1 10.4.2 says at least 1 ms. */ |
2285 | mdelay(1); | 2402 | mdelay(1); |
2286 | } | 2403 | } |
2287 | /* phy wake/clear reset */ | 2404 | /* phy wake/clear reset */ |
2288 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); | 2405 | sata_scr_write_flush(link, SCR_CONTROL, 0x300); |
2289 | 2406 | ||
2290 | /* wait for phy to become ready, if necessary */ | 2407 | /* wait for phy to become ready, if necessary */ |
2291 | do { | 2408 | do { |
2292 | msleep(200); | 2409 | msleep(200); |
2293 | sata_scr_read(ap, SCR_STATUS, &sstatus); | 2410 | sata_scr_read(link, SCR_STATUS, &sstatus); |
2294 | if ((sstatus & 0xf) != 1) | 2411 | if ((sstatus & 0xf) != 1) |
2295 | break; | 2412 | break; |
2296 | } while (time_before(jiffies, timeout)); | 2413 | } while (time_before(jiffies, timeout)); |
2297 | 2414 | ||
2298 | /* print link status */ | 2415 | /* print link status */ |
2299 | sata_print_link_status(ap); | 2416 | sata_print_link_status(link); |
2300 | 2417 | ||
2301 | /* TODO: phy layer with polling, timeouts, etc. */ | 2418 | /* TODO: phy layer with polling, timeouts, etc. */ |
2302 | if (!ata_port_offline(ap)) | 2419 | if (!ata_link_offline(link)) |
2303 | ata_port_probe(ap); | 2420 | ata_port_probe(ap); |
2304 | else | 2421 | else |
2305 | ata_port_disable(ap); | 2422 | ata_port_disable(ap); |
@@ -2344,8 +2461,8 @@ void sata_phy_reset(struct ata_port *ap) | |||
2344 | 2461 | ||
2345 | struct ata_device *ata_dev_pair(struct ata_device *adev) | 2462 | struct ata_device *ata_dev_pair(struct ata_device *adev) |
2346 | { | 2463 | { |
2347 | struct ata_port *ap = adev->ap; | 2464 | struct ata_link *link = adev->link; |
2348 | struct ata_device *pair = &ap->device[1 - adev->devno]; | 2465 | struct ata_device *pair = &link->device[1 - adev->devno]; |
2349 | if (!ata_dev_enabled(pair)) | 2466 | if (!ata_dev_enabled(pair)) |
2350 | return NULL; | 2467 | return NULL; |
2351 | return pair; | 2468 | return pair; |
@@ -2366,16 +2483,16 @@ struct ata_device *ata_dev_pair(struct ata_device *adev) | |||
2366 | 2483 | ||
2367 | void ata_port_disable(struct ata_port *ap) | 2484 | void ata_port_disable(struct ata_port *ap) |
2368 | { | 2485 | { |
2369 | ap->device[0].class = ATA_DEV_NONE; | 2486 | ap->link.device[0].class = ATA_DEV_NONE; |
2370 | ap->device[1].class = ATA_DEV_NONE; | 2487 | ap->link.device[1].class = ATA_DEV_NONE; |
2371 | ap->flags |= ATA_FLAG_DISABLED; | 2488 | ap->flags |= ATA_FLAG_DISABLED; |
2372 | } | 2489 | } |
2373 | 2490 | ||
2374 | /** | 2491 | /** |
2375 | * sata_down_spd_limit - adjust SATA spd limit downward | 2492 | * sata_down_spd_limit - adjust SATA spd limit downward |
2376 | * @ap: Port to adjust SATA spd limit for | 2493 | * @link: Link to adjust SATA spd limit for |
2377 | * | 2494 | * |
2378 | * Adjust SATA spd limit of @ap downward. Note that this | 2495 | * Adjust SATA spd limit of @link downward. Note that this |
2379 | * function only adjusts the limit. The change must be applied | 2496 | * function only adjusts the limit. The change must be applied |
2380 | * using sata_set_spd(). | 2497 | * using sata_set_spd(). |
2381 | * | 2498 | * |
@@ -2385,24 +2502,24 @@ void ata_port_disable(struct ata_port *ap) | |||
2385 | * RETURNS: | 2502 | * RETURNS: |
2386 | * 0 on success, negative errno on failure | 2503 | * 0 on success, negative errno on failure |
2387 | */ | 2504 | */ |
2388 | int sata_down_spd_limit(struct ata_port *ap) | 2505 | int sata_down_spd_limit(struct ata_link *link) |
2389 | { | 2506 | { |
2390 | u32 sstatus, spd, mask; | 2507 | u32 sstatus, spd, mask; |
2391 | int rc, highbit; | 2508 | int rc, highbit; |
2392 | 2509 | ||
2393 | if (!sata_scr_valid(ap)) | 2510 | if (!sata_scr_valid(link)) |
2394 | return -EOPNOTSUPP; | 2511 | return -EOPNOTSUPP; |
2395 | 2512 | ||
2396 | /* If SCR can be read, use it to determine the current SPD. | 2513 | /* If SCR can be read, use it to determine the current SPD. |
2397 | * If not, use cached value in ap->sata_spd. | 2514 | * If not, use cached value in link->sata_spd. |
2398 | */ | 2515 | */ |
2399 | rc = sata_scr_read(ap, SCR_STATUS, &sstatus); | 2516 | rc = sata_scr_read(link, SCR_STATUS, &sstatus); |
2400 | if (rc == 0) | 2517 | if (rc == 0) |
2401 | spd = (sstatus >> 4) & 0xf; | 2518 | spd = (sstatus >> 4) & 0xf; |
2402 | else | 2519 | else |
2403 | spd = ap->sata_spd; | 2520 | spd = link->sata_spd; |
2404 | 2521 | ||
2405 | mask = ap->sata_spd_limit; | 2522 | mask = link->sata_spd_limit; |
2406 | if (mask <= 1) | 2523 | if (mask <= 1) |
2407 | return -EINVAL; | 2524 | return -EINVAL; |
2408 | 2525 | ||
@@ -2422,22 +2539,22 @@ int sata_down_spd_limit(struct ata_port *ap) | |||
2422 | if (!mask) | 2539 | if (!mask) |
2423 | return -EINVAL; | 2540 | return -EINVAL; |
2424 | 2541 | ||
2425 | ap->sata_spd_limit = mask; | 2542 | link->sata_spd_limit = mask; |
2426 | 2543 | ||
2427 | ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n", | 2544 | ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", |
2428 | sata_spd_string(fls(mask))); | 2545 | sata_spd_string(fls(mask))); |
2429 | 2546 | ||
2430 | return 0; | 2547 | return 0; |
2431 | } | 2548 | } |
2432 | 2549 | ||
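sata_down_spd_limit() keeps the allowed link speeds as a bitmask (bit 0 for Gen1, bit 1 for Gen2, and so on); lowering the limit amounts to dropping the topmost allowed generation before the new ceiling is reported with fls(). The masking arithmetic itself falls between the hunks shown here, so the following is only a sketch of the idea, not the exact kernel expression:

#include <stdio.h>

/* fls(): index of the highest set bit, 1-based; 0 if no bit is set. */
static int fls32(unsigned int x)
{
	int i = 0;

	while (x) {
		i++;
		x >>= 1;
	}
	return i;
}

/* Drop the highest generation still allowed by @mask (bit0=Gen1, bit1=Gen2...).
 * This mirrors the idea of the limit-lowering step; the kernel additionally
 * caps the mask below the currently negotiated speed when SStatus is readable. */
static unsigned int lower_spd_mask(unsigned int mask)
{
	if (mask <= 1)
		return 0;			/* can't go any lower */
	return mask & ~(1u << (fls32(mask) - 1));
}

int main(void)
{
	unsigned int mask = 0x3;		/* Gen1 + Gen2 allowed */

	mask = lower_spd_mask(mask);
	printf("new limit: Gen%d (mask 0x%x)\n", fls32(mask), mask);	/* Gen1 */
	return 0;
}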
2433 | static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) | 2550 | static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) |
2434 | { | 2551 | { |
2435 | u32 spd, limit; | 2552 | u32 spd, limit; |
2436 | 2553 | ||
2437 | if (ap->sata_spd_limit == UINT_MAX) | 2554 | if (link->sata_spd_limit == UINT_MAX) |
2438 | limit = 0; | 2555 | limit = 0; |
2439 | else | 2556 | else |
2440 | limit = fls(ap->sata_spd_limit); | 2557 | limit = fls(link->sata_spd_limit); |
2441 | 2558 | ||
2442 | spd = (*scontrol >> 4) & 0xf; | 2559 | spd = (*scontrol >> 4) & 0xf; |
2443 | *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); | 2560 | *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); |
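The helper above treats bits 7:4 of SControl as the speed-limit field: it reads the current value, substitutes the desired ceiling, and reports whether the register would actually change, which is what decides whether a hardreset is needed to apply the limit. A standalone read-modify-write sketch of the same field handling:

#include <stdio.h>

/* Replace the SPD limit field (SControl bits 7:4) and report whether
 * the register value actually changes. */
static int set_spd_field(unsigned int *scontrol, unsigned int limit)
{
	unsigned int old = (*scontrol >> 4) & 0xf;

	*scontrol = (*scontrol & ~0xf0u) | ((limit & 0xf) << 4);
	return old != limit;		/* 1 means a reset is needed to apply */
}

int main(void)
{
	unsigned int scontrol = 0x300;	/* IPM bits set, no SPD limit */
	int changed = set_spd_field(&scontrol, 1);	/* limit to Gen1 */

	printf("changed=%d scontrol=0x%x\n", changed, scontrol);
	return 0;
}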
@@ -2447,10 +2564,10 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) | |||
2447 | 2564 | ||
2448 | /** | 2565 | /** |
2449 | * sata_set_spd_needed - is SATA spd configuration needed | 2566 | * sata_set_spd_needed - is SATA spd configuration needed |
2450 | * @ap: Port in question | 2567 | * @link: Link in question |
2451 | * | 2568 | * |
2452 | * Test whether the spd limit in SControl matches | 2569 | * Test whether the spd limit in SControl matches |
2453 | * @ap->sata_spd_limit. This function is used to determine | 2570 | * @link->sata_spd_limit. This function is used to determine |
2454 | * whether hardreset is necessary to apply SATA spd | 2571 | * whether hardreset is necessary to apply SATA spd |
2455 | * configuration. | 2572 | * configuration. |
2456 | * | 2573 | * |
@@ -2460,21 +2577,21 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) | |||
2460 | * RETURNS: | 2577 | * RETURNS: |
2461 | * 1 if SATA spd configuration is needed, 0 otherwise. | 2578 | * 1 if SATA spd configuration is needed, 0 otherwise. |
2462 | */ | 2579 | */ |
2463 | int sata_set_spd_needed(struct ata_port *ap) | 2580 | int sata_set_spd_needed(struct ata_link *link) |
2464 | { | 2581 | { |
2465 | u32 scontrol; | 2582 | u32 scontrol; |
2466 | 2583 | ||
2467 | if (sata_scr_read(ap, SCR_CONTROL, &scontrol)) | 2584 | if (sata_scr_read(link, SCR_CONTROL, &scontrol)) |
2468 | return 0; | 2585 | return 0; |
2469 | 2586 | ||
2470 | return __sata_set_spd_needed(ap, &scontrol); | 2587 | return __sata_set_spd_needed(link, &scontrol); |
2471 | } | 2588 | } |
2472 | 2589 | ||
2473 | /** | 2590 | /** |
2474 | * sata_set_spd - set SATA spd according to spd limit | 2591 | * sata_set_spd - set SATA spd according to spd limit |
2475 | * @ap: Port to set SATA spd for | 2592 | * @link: Link to set SATA spd for |
2476 | * | 2593 | * |
2477 | * Set SATA spd of @ap according to sata_spd_limit. | 2594 | * Set SATA spd of @link according to sata_spd_limit. |
2478 | * | 2595 | * |
2479 | * LOCKING: | 2596 | * LOCKING: |
2480 | * Inherited from caller. | 2597 | * Inherited from caller. |
@@ -2483,18 +2600,18 @@ int sata_set_spd_needed(struct ata_port *ap) | |||
2483 | * 0 if spd doesn't need to be changed, 1 if spd has been | 2600 | * 0 if spd doesn't need to be changed, 1 if spd has been |
2484 | * changed. Negative errno if SCR registers are inaccessible. | 2601 | * changed. Negative errno if SCR registers are inaccessible. |
2485 | */ | 2602 | */ |
2486 | int sata_set_spd(struct ata_port *ap) | 2603 | int sata_set_spd(struct ata_link *link) |
2487 | { | 2604 | { |
2488 | u32 scontrol; | 2605 | u32 scontrol; |
2489 | int rc; | 2606 | int rc; |
2490 | 2607 | ||
2491 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) | 2608 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
2492 | return rc; | 2609 | return rc; |
2493 | 2610 | ||
2494 | if (!__sata_set_spd_needed(ap, &scontrol)) | 2611 | if (!__sata_set_spd_needed(link, &scontrol)) |
2495 | return 0; | 2612 | return 0; |
2496 | 2613 | ||
2497 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) | 2614 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
2498 | return rc; | 2615 | return rc; |
2499 | 2616 | ||
2500 | return 1; | 2617 | return 1; |
@@ -2749,7 +2866,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) | |||
2749 | 2866 | ||
2750 | static int ata_dev_set_mode(struct ata_device *dev) | 2867 | static int ata_dev_set_mode(struct ata_device *dev) |
2751 | { | 2868 | { |
2752 | struct ata_eh_context *ehc = &dev->ap->eh_context; | 2869 | struct ata_eh_context *ehc = &dev->link->eh_context; |
2753 | unsigned int err_mask; | 2870 | unsigned int err_mask; |
2754 | int rc; | 2871 | int rc; |
2755 | 2872 | ||
@@ -2761,7 +2878,11 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2761 | /* Old CFA may refuse this command, which is just fine */ | 2878 | /* Old CFA may refuse this command, which is just fine */ |
2762 | if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) | 2879 | if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) |
2763 | err_mask &= ~AC_ERR_DEV; | 2880 | err_mask &= ~AC_ERR_DEV; |
2764 | 2881 | /* Some very old devices and some bad newer ones fail any kind of | |
2882 | SET_XFERMODE request but support PIO0-2 timings and no IORDY */ | ||
2883 | if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && | ||
2884 | dev->pio_mode <= XFER_PIO_2) | ||
2885 | err_mask &= ~AC_ERR_DEV; | ||
2765 | if (err_mask) { | 2886 | if (err_mask) { |
2766 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " | 2887 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " |
2767 | "(err_mask=0x%x)\n", err_mask); | 2888 | "(err_mask=0x%x)\n", err_mask); |
@@ -2769,7 +2890,7 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2769 | } | 2890 | } |
2770 | 2891 | ||
2771 | ehc->i.flags |= ATA_EHI_POST_SETMODE; | 2892 | ehc->i.flags |= ATA_EHI_POST_SETMODE; |
2772 | rc = ata_dev_revalidate(dev, 0); | 2893 | rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); |
2773 | ehc->i.flags &= ~ATA_EHI_POST_SETMODE; | 2894 | ehc->i.flags &= ~ATA_EHI_POST_SETMODE; |
2774 | if (rc) | 2895 | if (rc) |
2775 | return rc; | 2896 | return rc; |
@@ -2784,7 +2905,7 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2784 | 2905 | ||
2785 | /** | 2906 | /** |
2786 | * ata_do_set_mode - Program timings and issue SET FEATURES - XFER | 2907 | * ata_do_set_mode - Program timings and issue SET FEATURES - XFER |
2787 | * @ap: port on which timings will be programmed | 2908 | * @link: link on which timings will be programmed |
2788 | * @r_failed_dev: out parameter for failed device | 2909 | * @r_failed_dev: out parameter for failed device |
2789 | * | 2910 | * |
2790 | * Standard implementation of the function used to tune and set | 2911 | * Standard implementation of the function used to tune and set |
@@ -2799,25 +2920,36 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2799 | * 0 on success, negative errno otherwise | 2920 | * 0 on success, negative errno otherwise |
2800 | */ | 2921 | */ |
2801 | 2922 | ||
2802 | int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | 2923 | int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) |
2803 | { | 2924 | { |
2925 | struct ata_port *ap = link->ap; | ||
2804 | struct ata_device *dev; | 2926 | struct ata_device *dev; |
2805 | int i, rc = 0, used_dma = 0, found = 0; | 2927 | int rc = 0, used_dma = 0, found = 0; |
2806 | |||
2807 | 2928 | ||
2808 | /* step 1: calculate xfer_mask */ | 2929 | /* step 1: calculate xfer_mask */ |
2809 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2930 | ata_link_for_each_dev(dev, link) { |
2810 | unsigned int pio_mask, dma_mask; | 2931 | unsigned int pio_mask, dma_mask; |
2811 | 2932 | unsigned int mode_mask; | |
2812 | dev = &ap->device[i]; | ||
2813 | 2933 | ||
2814 | if (!ata_dev_enabled(dev)) | 2934 | if (!ata_dev_enabled(dev)) |
2815 | continue; | 2935 | continue; |
2816 | 2936 | ||
2937 | mode_mask = ATA_DMA_MASK_ATA; | ||
2938 | if (dev->class == ATA_DEV_ATAPI) | ||
2939 | mode_mask = ATA_DMA_MASK_ATAPI; | ||
2940 | else if (ata_id_is_cfa(dev->id)) | ||
2941 | mode_mask = ATA_DMA_MASK_CFA; | ||
2942 | |||
2817 | ata_dev_xfermask(dev); | 2943 | ata_dev_xfermask(dev); |
2818 | 2944 | ||
2819 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); | 2945 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); |
2820 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); | 2946 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); |
2947 | |||
2948 | if (libata_dma_mask & mode_mask) | ||
2949 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); | ||
2950 | else | ||
2951 | dma_mask = 0; | ||
2952 | |||
2821 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); | 2953 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); |
2822 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); | 2954 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); |
2823 | 2955 | ||
@@ -2829,8 +2961,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2829 | goto out; | 2961 | goto out; |
2830 | 2962 | ||
2831 | /* step 2: always set host PIO timings */ | 2963 | /* step 2: always set host PIO timings */ |
2832 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2964 | ata_link_for_each_dev(dev, link) { |
2833 | dev = &ap->device[i]; | ||
2834 | if (!ata_dev_enabled(dev)) | 2965 | if (!ata_dev_enabled(dev)) |
2835 | continue; | 2966 | continue; |
2836 | 2967 | ||
@@ -2847,9 +2978,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2847 | } | 2978 | } |
2848 | 2979 | ||
2849 | /* step 3: set host DMA timings */ | 2980 | /* step 3: set host DMA timings */ |
2850 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2981 | ata_link_for_each_dev(dev, link) { |
2851 | dev = &ap->device[i]; | ||
2852 | |||
2853 | if (!ata_dev_enabled(dev) || !dev->dma_mode) | 2982 | if (!ata_dev_enabled(dev) || !dev->dma_mode) |
2854 | continue; | 2983 | continue; |
2855 | 2984 | ||
@@ -2860,9 +2989,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2860 | } | 2989 | } |
2861 | 2990 | ||
2862 | /* step 4: update devices' xfer mode */ | 2991 | /* step 4: update devices' xfer mode */ |
2863 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2992 | ata_link_for_each_dev(dev, link) { |
2864 | dev = &ap->device[i]; | ||
2865 | |||
2866 | /* don't update suspended devices' xfer mode */ | 2993 | /* don't update suspended devices' xfer mode */ |
2867 | if (!ata_dev_enabled(dev)) | 2994 | if (!ata_dev_enabled(dev)) |
2868 | continue; | 2995 | continue; |
@@ -2886,7 +3013,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2886 | 3013 | ||
2887 | /** | 3014 | /** |
2888 | * ata_set_mode - Program timings and issue SET FEATURES - XFER | 3015 | * ata_set_mode - Program timings and issue SET FEATURES - XFER |
2889 | * @ap: port on which timings will be programmed | 3016 | * @link: link on which timings will be programmed |
2890 | * @r_failed_dev: out parameter for failed device | 3017 | * @r_failed_dev: out parameter for failed device |
2891 | * | 3018 | * |
2892 | * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If | 3019 | * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If |
@@ -2899,12 +3026,14 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2899 | * RETURNS: | 3026 | * RETURNS: |
2900 | * 0 on success, negative errno otherwise | 3027 | * 0 on success, negative errno otherwise |
2901 | */ | 3028 | */ |
2902 | int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | 3029 | int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) |
2903 | { | 3030 | { |
3031 | struct ata_port *ap = link->ap; | ||
3032 | |||
2904 | /* has private set_mode? */ | 3033 | /* has private set_mode? */ |
2905 | if (ap->ops->set_mode) | 3034 | if (ap->ops->set_mode) |
2906 | return ap->ops->set_mode(ap, r_failed_dev); | 3035 | return ap->ops->set_mode(link, r_failed_dev); |
2907 | return ata_do_set_mode(ap, r_failed_dev); | 3036 | return ata_do_set_mode(link, r_failed_dev); |
2908 | } | 3037 | } |
2909 | 3038 | ||
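The new mode_mask handling in ata_do_set_mode() lets a global DMA mask (libata_dma_mask, checked against the per-class bits ATA_DMA_MASK_ATA / _ATAPI / _CFA seen above) switch DMA off for whole device classes: each device is classified, and if its class bit is clear in the mask its MWDMA/UDMA transfer mask is simply zeroed so the device falls back to PIO. A standalone illustration of that filter; the bit values below are made up for the example and are not the kernel's definitions:

#include <stdio.h>

/* Per-class DMA enable bits; the values here are illustrative. */
#define DMA_MASK_ATA	(1 << 0)
#define DMA_MASK_ATAPI	(1 << 1)
#define DMA_MASK_CFA	(1 << 2)

enum dev_class { CLASS_ATA, CLASS_ATAPI, CLASS_CFA };

/* Return the device's DMA xfer mask, or 0 if its class is disabled. */
static unsigned int filter_dma_mask(unsigned int global_mask,
				    enum dev_class cls,
				    unsigned int dev_dma_mask)
{
	unsigned int class_bit = DMA_MASK_ATA;

	if (cls == CLASS_ATAPI)
		class_bit = DMA_MASK_ATAPI;
	else if (cls == CLASS_CFA)
		class_bit = DMA_MASK_CFA;

	return (global_mask & class_bit) ? dev_dma_mask : 0;
}

int main(void)
{
	/* DMA allowed for ATA disks only: the ATAPI device drops to PIO */
	unsigned int global = DMA_MASK_ATA;

	printf("atapi dma mask: 0x%x\n",
	       filter_dma_mask(global, CLASS_ATAPI, 0x7f));	/* 0 */
	printf("ata dma mask:   0x%x\n",
	       filter_dma_mask(global, CLASS_ATA, 0x7f));	/* 0x7f */
	return 0;
}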
2910 | /** | 3039 | /** |
@@ -3007,7 +3136,7 @@ int ata_wait_ready(struct ata_port *ap, unsigned long deadline) | |||
3007 | 3136 | ||
3008 | if (!(status & ATA_BUSY)) | 3137 | if (!(status & ATA_BUSY)) |
3009 | return 0; | 3138 | return 0; |
3010 | if (!ata_port_online(ap) && status == 0xff) | 3139 | if (!ata_link_online(&ap->link) && status == 0xff) |
3011 | return -ENODEV; | 3140 | return -ENODEV; |
3012 | if (time_after(now, deadline)) | 3141 | if (time_after(now, deadline)) |
3013 | return -EBUSY; | 3142 | return -EBUSY; |
@@ -3088,6 +3217,8 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3088 | unsigned long deadline) | 3217 | unsigned long deadline) |
3089 | { | 3218 | { |
3090 | struct ata_ioports *ioaddr = &ap->ioaddr; | 3219 | struct ata_ioports *ioaddr = &ap->ioaddr; |
3220 | struct ata_device *dev; | ||
3221 | int i = 0; | ||
3091 | 3222 | ||
3092 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); | 3223 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); |
3093 | 3224 | ||
@@ -3098,6 +3229,25 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3098 | udelay(20); /* FIXME: flush */ | 3229 | udelay(20); /* FIXME: flush */ |
3099 | iowrite8(ap->ctl, ioaddr->ctl_addr); | 3230 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
3100 | 3231 | ||
3232 | /* If we issued an SRST then an ATA drive (not ATAPI) | ||
3233 | * may have changed configuration and be in PIO0 timing. If | ||
3234 | * we did a hard reset (or are coming from power on) this is | ||
3235 | * true for ATA or ATAPI. Until we've set a suitable controller | ||
3236 | * mode we should not touch the bus as we may be talking too fast. | ||
3237 | */ | ||
3238 | |||
3239 | ata_link_for_each_dev(dev, &ap->link) | ||
3240 | dev->pio_mode = XFER_PIO_0; | ||
3241 | |||
3242 | /* If the controller has a pio mode setup function then use | ||
3243 | it to set the chipset to rights. Don't touch the DMA setup | ||
3244 | as that will be dealt with when revalidating */ | ||
3245 | if (ap->ops->set_piomode) { | ||
3246 | ata_link_for_each_dev(dev, &ap->link) | ||
3247 | if (devmask & (1 << i++)) | ||
3248 | ap->ops->set_piomode(ap, dev); | ||
3249 | } | ||
3250 | |||
3101 | /* spec mandates ">= 2ms" before checking status. | 3251 | /* spec mandates ">= 2ms" before checking status. |
3102 | * We wait 150ms, because that was the magic delay used for | 3252 | * We wait 150ms, because that was the magic delay used for |
3103 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time | 3253 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time |
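The softreset path above now assumes every device fell back to PIO0 timing after SRST and, when the driver supplies a set_piomode hook, reprograms the controller for each device covered by devmask before any further commands are issued. A standalone sketch of that per-device loop; the types and names below are stand-ins, not libata structures:

#include <stdio.h>

#define MAX_DEVICES 2
#define PIO_0 0

struct fake_dev { int present; int pio_mode; };

/* Stand-in for a controller's set_piomode hook. */
static void set_piomode(int devno, const struct fake_dev *dev)
{
	printf("dev%d: program controller for PIO mode %d\n",
	       devno, dev->pio_mode);
}

/* After SRST every device is back at PIO0; reprogram only the devices
 * covered by @devmask, mirroring the loop added in the hunk above. */
static void reset_timings(struct fake_dev *devs, unsigned int devmask)
{
	for (int i = 0; i < MAX_DEVICES; i++)
		devs[i].pio_mode = PIO_0;

	for (int i = 0; i < MAX_DEVICES; i++)
		if (devmask & (1u << i))
			set_piomode(i, &devs[i]);
}

int main(void)
{
	struct fake_dev devs[MAX_DEVICES] = { { 1, 4 }, { 0, 0 } };

	reset_timings(devs, 0x1);	/* only the master responded */
	return 0;
}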
@@ -3142,6 +3292,7 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3142 | 3292 | ||
3143 | void ata_bus_reset(struct ata_port *ap) | 3293 | void ata_bus_reset(struct ata_port *ap) |
3144 | { | 3294 | { |
3295 | struct ata_device *device = ap->link.device; | ||
3145 | struct ata_ioports *ioaddr = &ap->ioaddr; | 3296 | struct ata_ioports *ioaddr = &ap->ioaddr; |
3146 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 3297 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
3147 | u8 err; | 3298 | u8 err; |
@@ -3177,19 +3328,19 @@ void ata_bus_reset(struct ata_port *ap) | |||
3177 | /* | 3328 | /* |
3178 | * determine by signature whether we have ATA or ATAPI devices | 3329 | * determine by signature whether we have ATA or ATAPI devices |
3179 | */ | 3330 | */ |
3180 | ap->device[0].class = ata_dev_try_classify(ap, 0, &err); | 3331 | device[0].class = ata_dev_try_classify(&device[0], dev0, &err); |
3181 | if ((slave_possible) && (err != 0x81)) | 3332 | if ((slave_possible) && (err != 0x81)) |
3182 | ap->device[1].class = ata_dev_try_classify(ap, 1, &err); | 3333 | device[1].class = ata_dev_try_classify(&device[1], dev1, &err); |
3183 | 3334 | ||
3184 | /* is double-select really necessary? */ | 3335 | /* is double-select really necessary? */ |
3185 | if (ap->device[1].class != ATA_DEV_NONE) | 3336 | if (device[1].class != ATA_DEV_NONE) |
3186 | ap->ops->dev_select(ap, 1); | 3337 | ap->ops->dev_select(ap, 1); |
3187 | if (ap->device[0].class != ATA_DEV_NONE) | 3338 | if (device[0].class != ATA_DEV_NONE) |
3188 | ap->ops->dev_select(ap, 0); | 3339 | ap->ops->dev_select(ap, 0); |
3189 | 3340 | ||
3190 | /* if no devices were detected, disable this port */ | 3341 | /* if no devices were detected, disable this port */ |
3191 | if ((ap->device[0].class == ATA_DEV_NONE) && | 3342 | if ((device[0].class == ATA_DEV_NONE) && |
3192 | (ap->device[1].class == ATA_DEV_NONE)) | 3343 | (device[1].class == ATA_DEV_NONE)) |
3193 | goto err_out; | 3344 | goto err_out; |
3194 | 3345 | ||
3195 | if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { | 3346 | if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { |
@@ -3202,18 +3353,18 @@ void ata_bus_reset(struct ata_port *ap) | |||
3202 | 3353 | ||
3203 | err_out: | 3354 | err_out: |
3204 | ata_port_printk(ap, KERN_ERR, "disabling port\n"); | 3355 | ata_port_printk(ap, KERN_ERR, "disabling port\n"); |
3205 | ap->ops->port_disable(ap); | 3356 | ata_port_disable(ap); |
3206 | 3357 | ||
3207 | DPRINTK("EXIT\n"); | 3358 | DPRINTK("EXIT\n"); |
3208 | } | 3359 | } |
3209 | 3360 | ||
3210 | /** | 3361 | /** |
3211 | * sata_phy_debounce - debounce SATA phy status | 3362 | * sata_link_debounce - debounce SATA phy status |
3212 | * @ap: ATA port to debounce SATA phy status for | 3363 | * @link: ATA link to debounce SATA phy status for |
3213 | * @params: timing parameters { interval, duration, timeout } in msec | 3364 | * @params: timing parameters { interval, duration, timeout } in msec |
3214 | * @deadline: deadline jiffies for the operation | 3365 | * @deadline: deadline jiffies for the operation |
3215 | * | 3366 | * |
3216 | * Make sure SStatus of @ap reaches stable state, determined by | 3367 | * Make sure SStatus of @link reaches stable state, determined by |
3217 | * holding the same value where DET is not 1 for @duration polled | 3368 | * holding the same value where DET is not 1 for @duration polled |
3218 | * every @interval, before @timeout. Timeout constrains the | 3369 | * every @interval, before @timeout. Timeout constrains the |
3219 | * beginning of the stable state. Because DET gets stuck at 1 on | 3370 | * beginning of the stable state. Because DET gets stuck at 1 on |
@@ -3229,8 +3380,8 @@ err_out: | |||
3229 | * RETURNS: | 3380 | * RETURNS: |
3230 | * 0 on success, -errno on failure. | 3381 | * 0 on success, -errno on failure. |
3231 | */ | 3382 | */ |
3232 | int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, | 3383 | int sata_link_debounce(struct ata_link *link, const unsigned long *params, |
3233 | unsigned long deadline) | 3384 | unsigned long deadline) |
3234 | { | 3385 | { |
3235 | unsigned long interval_msec = params[0]; | 3386 | unsigned long interval_msec = params[0]; |
3236 | unsigned long duration = msecs_to_jiffies(params[1]); | 3387 | unsigned long duration = msecs_to_jiffies(params[1]); |
@@ -3242,7 +3393,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, | |||
3242 | if (time_before(t, deadline)) | 3393 | if (time_before(t, deadline)) |
3243 | deadline = t; | 3394 | deadline = t; |
3244 | 3395 | ||
3245 | if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) | 3396 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) |
3246 | return rc; | 3397 | return rc; |
3247 | cur &= 0xf; | 3398 | cur &= 0xf; |
3248 | 3399 | ||
@@ -3251,7 +3402,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, | |||
3251 | 3402 | ||
3252 | while (1) { | 3403 | while (1) { |
3253 | msleep(interval_msec); | 3404 | msleep(interval_msec); |
3254 | if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) | 3405 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) |
3255 | return rc; | 3406 | return rc; |
3256 | cur &= 0xf; | 3407 | cur &= 0xf; |
3257 | 3408 | ||
@@ -3277,12 +3428,12 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, | |||
3277 | } | 3428 | } |
3278 | 3429 | ||
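Pulled out of the diff context, the debounce rule described in the comment above is: keep sampling DET, and succeed once a value other than 1 has held unchanged for the whole @duration window, with that stable run having started before @timeout. A compressed userspace sketch of such a loop, with the register read stubbed out and times counted in poll ticks rather than jiffies:

#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend to read SStatus.DET; a real caller would poll hardware. */
static unsigned int read_det(int sample)
{
	static const unsigned int trace[] = { 1, 1, 3, 3, 3, 3, 3 };

	return trace[sample < 6 ? sample : 6];
}

/* Debounce: succeed once DET (!= 1) holds the same value for @duration
 * consecutive samples, with the stable run starting before @timeout. */
static bool debounce(int duration, int timeout)
{
	unsigned int last = read_det(0);
	int stable_since = 0;

	for (int t = 1; ; t++) {
		unsigned int cur = read_det(t);

		if (cur == last && cur != 1) {
			if (t - stable_since >= duration)
				return true;		/* held long enough */
		} else {
			if (t > timeout)
				return false;		/* never settled in time */
			stable_since = t;
			last = cur;
		}
	}
}

int main(void)
{
	printf("link %s\n", debounce(3, 5) ? "stable" : "unstable");
	return 0;
}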
3279 | /** | 3430 | /** |
3280 | * sata_phy_resume - resume SATA phy | 3431 | * sata_link_resume - resume SATA link |
3281 | * @ap: ATA port to resume SATA phy for | 3432 | * @link: ATA link to resume |
3282 | * @params: timing parameters { interval, duration, timeout } in msec | 3433 | * @params: timing parameters { interval, duration, timeout } in msec |
3283 | * @deadline: deadline jiffies for the operation | 3434 | * @deadline: deadline jiffies for the operation |
3284 | * | 3435 | * |
3285 | * Resume SATA phy of @ap and debounce it. | 3436 | * Resume SATA phy of @link and debounce it. |
3286 | * | 3437 | * |
3287 | * LOCKING: | 3438 | * LOCKING: |
3288 | * Kernel thread context (may sleep) | 3439 | * Kernel thread context (may sleep) |
@@ -3290,18 +3441,18 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, | |||
3290 | * RETURNS: | 3441 | * RETURNS: |
3291 | * 0 on success, -errno on failure. | 3442 | * 0 on success, -errno on failure. |
3292 | */ | 3443 | */ |
3293 | int sata_phy_resume(struct ata_port *ap, const unsigned long *params, | 3444 | int sata_link_resume(struct ata_link *link, const unsigned long *params, |
3294 | unsigned long deadline) | 3445 | unsigned long deadline) |
3295 | { | 3446 | { |
3296 | u32 scontrol; | 3447 | u32 scontrol; |
3297 | int rc; | 3448 | int rc; |
3298 | 3449 | ||
3299 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) | 3450 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3300 | return rc; | 3451 | return rc; |
3301 | 3452 | ||
3302 | scontrol = (scontrol & 0x0f0) | 0x300; | 3453 | scontrol = (scontrol & 0x0f0) | 0x300; |
3303 | 3454 | ||
3304 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) | 3455 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
3305 | return rc; | 3456 | return rc; |
3306 | 3457 | ||
3307 | /* Some PHYs react badly if SStatus is pounded immediately | 3458 | /* Some PHYs react badly if SStatus is pounded immediately |
@@ -3309,15 +3460,15 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params, | |||
3309 | */ | 3460 | */ |
3310 | msleep(200); | 3461 | msleep(200); |
3311 | 3462 | ||
3312 | return sata_phy_debounce(ap, params, deadline); | 3463 | return sata_link_debounce(link, params, deadline); |
3313 | } | 3464 | } |
3314 | 3465 | ||
3315 | /** | 3466 | /** |
3316 | * ata_std_prereset - prepare for reset | 3467 | * ata_std_prereset - prepare for reset |
3317 | * @ap: ATA port to be reset | 3468 | * @link: ATA link to be reset |
3318 | * @deadline: deadline jiffies for the operation | 3469 | * @deadline: deadline jiffies for the operation |
3319 | * | 3470 | * |
3320 | * @ap is about to be reset. Initialize it. Failure from | 3471 | * @link is about to be reset. Initialize it. Failure from |
3321 | * prereset makes libata abort the whole reset sequence and give up | 3472 | * prereset makes libata abort the whole reset sequence and give up |
3322 | * that port, so prereset should be best-effort. It does its | 3473 | * that port, so prereset should be best-effort. It does its |
3323 | * best to prepare for reset sequence but if things go wrong, it | 3474 | * best to prepare for reset sequence but if things go wrong, it |
@@ -3329,37 +3480,44 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params, | |||
3329 | * RETURNS: | 3480 | * RETURNS: |
3330 | * 0 on success, -errno otherwise. | 3481 | * 0 on success, -errno otherwise. |
3331 | */ | 3482 | */ |
3332 | int ata_std_prereset(struct ata_port *ap, unsigned long deadline) | 3483 | int ata_std_prereset(struct ata_link *link, unsigned long deadline) |
3333 | { | 3484 | { |
3334 | struct ata_eh_context *ehc = &ap->eh_context; | 3485 | struct ata_port *ap = link->ap; |
3486 | struct ata_eh_context *ehc = &link->eh_context; | ||
3335 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | 3487 | const unsigned long *timing = sata_ehc_deb_timing(ehc); |
3336 | int rc; | 3488 | int rc; |
3337 | 3489 | ||
3338 | /* handle link resume */ | 3490 | /* handle link resume */ |
3339 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && | 3491 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && |
3340 | (ap->flags & ATA_FLAG_HRST_TO_RESUME)) | 3492 | (link->flags & ATA_LFLAG_HRST_TO_RESUME)) |
3493 | ehc->i.action |= ATA_EH_HARDRESET; | ||
3494 | |||
3495 | /* Some PMPs don't work with only SRST, force hardreset if PMP | ||
3496 | * is supported. | ||
3497 | */ | ||
3498 | if (ap->flags & ATA_FLAG_PMP) | ||
3341 | ehc->i.action |= ATA_EH_HARDRESET; | 3499 | ehc->i.action |= ATA_EH_HARDRESET; |
3342 | 3500 | ||
3343 | /* if we're about to do hardreset, nothing more to do */ | 3501 | /* if we're about to do hardreset, nothing more to do */ |
3344 | if (ehc->i.action & ATA_EH_HARDRESET) | 3502 | if (ehc->i.action & ATA_EH_HARDRESET) |
3345 | return 0; | 3503 | return 0; |
3346 | 3504 | ||
3347 | /* if SATA, resume phy */ | 3505 | /* if SATA, resume link */ |
3348 | if (ap->flags & ATA_FLAG_SATA) { | 3506 | if (ap->flags & ATA_FLAG_SATA) { |
3349 | rc = sata_phy_resume(ap, timing, deadline); | 3507 | rc = sata_link_resume(link, timing, deadline); |
3350 | /* whine about phy resume failure but proceed */ | 3508 | /* whine about phy resume failure but proceed */ |
3351 | if (rc && rc != -EOPNOTSUPP) | 3509 | if (rc && rc != -EOPNOTSUPP) |
3352 | ata_port_printk(ap, KERN_WARNING, "failed to resume " | 3510 | ata_link_printk(link, KERN_WARNING, "failed to resume " |
3353 | "link for reset (errno=%d)\n", rc); | 3511 | "link for reset (errno=%d)\n", rc); |
3354 | } | 3512 | } |
3355 | 3513 | ||
3356 | /* Wait for !BSY if the controller can wait for the first D2H | 3514 | /* Wait for !BSY if the controller can wait for the first D2H |
3357 | * Reg FIS and we don't know that no device is attached. | 3515 | * Reg FIS and we don't know that no device is attached. |
3358 | */ | 3516 | */ |
3359 | if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) { | 3517 | if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) { |
3360 | rc = ata_wait_ready(ap, deadline); | 3518 | rc = ata_wait_ready(ap, deadline); |
3361 | if (rc && rc != -ENODEV) { | 3519 | if (rc && rc != -ENODEV) { |
3362 | ata_port_printk(ap, KERN_WARNING, "device not ready " | 3520 | ata_link_printk(link, KERN_WARNING, "device not ready " |
3363 | "(errno=%d), forcing hardreset\n", rc); | 3521 | "(errno=%d), forcing hardreset\n", rc); |
3364 | ehc->i.action |= ATA_EH_HARDRESET; | 3522 | ehc->i.action |= ATA_EH_HARDRESET; |
3365 | } | 3523 | } |
@@ -3370,7 +3528,7 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline) | |||
3370 | 3528 | ||
3371 | /** | 3529 | /** |
3372 | * ata_std_softreset - reset host port via ATA SRST | 3530 | * ata_std_softreset - reset host port via ATA SRST |
3373 | * @ap: port to reset | 3531 | * @link: ATA link to reset |
3374 | * @classes: resulting classes of attached devices | 3532 | * @classes: resulting classes of attached devices |
3375 | * @deadline: deadline jiffies for the operation | 3533 | * @deadline: deadline jiffies for the operation |
3376 | * | 3534 | * |
@@ -3382,9 +3540,10 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline) | |||
3382 | * RETURNS: | 3540 | * RETURNS: |
3383 | * 0 on success, -errno otherwise. | 3541 | * 0 on success, -errno otherwise. |
3384 | */ | 3542 | */ |
3385 | int ata_std_softreset(struct ata_port *ap, unsigned int *classes, | 3543 | int ata_std_softreset(struct ata_link *link, unsigned int *classes, |
3386 | unsigned long deadline) | 3544 | unsigned long deadline) |
3387 | { | 3545 | { |
3546 | struct ata_port *ap = link->ap; | ||
3388 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 3547 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
3389 | unsigned int devmask = 0; | 3548 | unsigned int devmask = 0; |
3390 | int rc; | 3549 | int rc; |
@@ -3392,7 +3551,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes, | |||
3392 | 3551 | ||
3393 | DPRINTK("ENTER\n"); | 3552 | DPRINTK("ENTER\n"); |
3394 | 3553 | ||
3395 | if (ata_port_offline(ap)) { | 3554 | if (ata_link_offline(link)) { |
3396 | classes[0] = ATA_DEV_NONE; | 3555 | classes[0] = ATA_DEV_NONE; |
3397 | goto out; | 3556 | goto out; |
3398 | } | 3557 | } |
@@ -3410,15 +3569,17 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes, | |||
3410 | DPRINTK("about to softreset, devmask=%x\n", devmask); | 3569 | DPRINTK("about to softreset, devmask=%x\n", devmask); |
3411 | rc = ata_bus_softreset(ap, devmask, deadline); | 3570 | rc = ata_bus_softreset(ap, devmask, deadline); |
3412 | /* if link is occupied, -ENODEV too is an error */ | 3571 | /* if link is occupied, -ENODEV too is an error */ |
3413 | if (rc && (rc != -ENODEV || sata_scr_valid(ap))) { | 3572 | if (rc && (rc != -ENODEV || sata_scr_valid(link))) { |
3414 | ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc); | 3573 | ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); |
3415 | return rc; | 3574 | return rc; |
3416 | } | 3575 | } |
3417 | 3576 | ||
3418 | /* determine by signature whether we have ATA or ATAPI devices */ | 3577 | /* determine by signature whether we have ATA or ATAPI devices */ |
3419 | classes[0] = ata_dev_try_classify(ap, 0, &err); | 3578 | classes[0] = ata_dev_try_classify(&link->device[0], |
3579 | devmask & (1 << 0), &err); | ||
3420 | if (slave_possible && err != 0x81) | 3580 | if (slave_possible && err != 0x81) |
3421 | classes[1] = ata_dev_try_classify(ap, 1, &err); | 3581 | classes[1] = ata_dev_try_classify(&link->device[1], |
3582 | devmask & (1 << 1), &err); | ||
3422 | 3583 | ||
3423 | out: | 3584 | out: |
3424 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | 3585 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); |
@@ -3426,12 +3587,12 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes, | |||
3426 | } | 3587 | } |
3427 | 3588 | ||
3428 | /** | 3589 | /** |
3429 | * sata_port_hardreset - reset port via SATA phy reset | 3590 | * sata_link_hardreset - reset link via SATA phy reset |
3430 | * @ap: port to reset | 3591 | * @link: link to reset |
3431 | * @timing: timing parameters { interval, duration, timeout } in msec | 3592 | * @timing: timing parameters { interval, duration, timeout } in msec |
3432 | * @deadline: deadline jiffies for the operation | 3593 | * @deadline: deadline jiffies for the operation |
3433 | * | 3594 | * |
3434 | * SATA phy-reset host port using DET bits of SControl register. | 3595 | * SATA phy-reset @link using DET bits of SControl register. |
3435 | * | 3596 | * |
3436 | * LOCKING: | 3597 | * LOCKING: |
3437 | * Kernel thread context (may sleep) | 3598 | * Kernel thread context (may sleep) |
@@ -3439,7 +3600,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes, | |||
3439 | * RETURNS: | 3600 | * RETURNS: |
3440 | * 0 on success, -errno otherwise. | 3601 | * 0 on success, -errno otherwise. |
3441 | */ | 3602 | */ |
3442 | int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, | 3603 | int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, |
3443 | unsigned long deadline) | 3604 | unsigned long deadline) |
3444 | { | 3605 | { |
3445 | u32 scontrol; | 3606 | u32 scontrol; |
@@ -3447,30 +3608,30 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, | |||
3447 | 3608 | ||
3448 | DPRINTK("ENTER\n"); | 3609 | DPRINTK("ENTER\n"); |
3449 | 3610 | ||
3450 | if (sata_set_spd_needed(ap)) { | 3611 | if (sata_set_spd_needed(link)) { |
3451 | /* SATA spec says nothing about how to reconfigure | 3612 | /* SATA spec says nothing about how to reconfigure |
3452 | * spd. To be on the safe side, turn off phy during | 3613 | * spd. To be on the safe side, turn off phy during |
3453 | * reconfiguration. This works for at least ICH7 AHCI | 3614 | * reconfiguration. This works for at least ICH7 AHCI |
3454 | * and Sil3124. | 3615 | * and Sil3124. |
3455 | */ | 3616 | */ |
3456 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) | 3617 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3457 | goto out; | 3618 | goto out; |
3458 | 3619 | ||
3459 | scontrol = (scontrol & 0x0f0) | 0x304; | 3620 | scontrol = (scontrol & 0x0f0) | 0x304; |
3460 | 3621 | ||
3461 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) | 3622 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
3462 | goto out; | 3623 | goto out; |
3463 | 3624 | ||
3464 | sata_set_spd(ap); | 3625 | sata_set_spd(link); |
3465 | } | 3626 | } |
3466 | 3627 | ||
3467 | /* issue phy wake/reset */ | 3628 | /* issue phy wake/reset */ |
3468 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) | 3629 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3469 | goto out; | 3630 | goto out; |
3470 | 3631 | ||
3471 | scontrol = (scontrol & 0x0f0) | 0x301; | 3632 | scontrol = (scontrol & 0x0f0) | 0x301; |
3472 | 3633 | ||
3473 | if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol))) | 3634 | if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) |
3474 | goto out; | 3635 | goto out; |
3475 | 3636 | ||
3476 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 | 3637 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 |
@@ -3478,8 +3639,8 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, | |||
3478 | */ | 3639 | */ |
3479 | msleep(1); | 3640 | msleep(1); |
3480 | 3641 | ||
3481 | /* bring phy back */ | 3642 | /* bring link back */ |
3482 | rc = sata_phy_resume(ap, timing, deadline); | 3643 | rc = sata_link_resume(link, timing, deadline); |
3483 | out: | 3644 | out: |
3484 | DPRINTK("EXIT, rc=%d\n", rc); | 3645 | DPRINTK("EXIT, rc=%d\n", rc); |
3485 | return rc; | 3646 | return rc; |
@@ -3487,7 +3648,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, | |||
3487 | 3648 | ||
3488 | /** | 3649 | /** |
3489 | * sata_std_hardreset - reset host port via SATA phy reset | 3650 | * sata_std_hardreset - reset host port via SATA phy reset |
3490 | * @ap: port to reset | 3651 | * @link: link to reset |
3491 | * @class: resulting class of attached device | 3652 | * @class: resulting class of attached device |
3492 | * @deadline: deadline jiffies for the operation | 3653 | * @deadline: deadline jiffies for the operation |
3493 | * | 3654 | * |
@@ -3500,24 +3661,25 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, | |||
3500 | * RETURNS: | 3661 | * RETURNS: |
3501 | * 0 on success, -errno otherwise. | 3662 | * 0 on success, -errno otherwise. |
3502 | */ | 3663 | */ |
3503 | int sata_std_hardreset(struct ata_port *ap, unsigned int *class, | 3664 | int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
3504 | unsigned long deadline) | 3665 | unsigned long deadline) |
3505 | { | 3666 | { |
3506 | const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); | 3667 | struct ata_port *ap = link->ap; |
3668 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | ||
3507 | int rc; | 3669 | int rc; |
3508 | 3670 | ||
3509 | DPRINTK("ENTER\n"); | 3671 | DPRINTK("ENTER\n"); |
3510 | 3672 | ||
3511 | /* do hardreset */ | 3673 | /* do hardreset */ |
3512 | rc = sata_port_hardreset(ap, timing, deadline); | 3674 | rc = sata_link_hardreset(link, timing, deadline); |
3513 | if (rc) { | 3675 | if (rc) { |
3514 | ata_port_printk(ap, KERN_ERR, | 3676 | ata_link_printk(link, KERN_ERR, |
3515 | "COMRESET failed (errno=%d)\n", rc); | 3677 | "COMRESET failed (errno=%d)\n", rc); |
3516 | return rc; | 3678 | return rc; |
3517 | } | 3679 | } |
3518 | 3680 | ||
3519 | /* TODO: phy layer with polling, timeouts, etc. */ | 3681 | /* TODO: phy layer with polling, timeouts, etc. */ |
3520 | if (ata_port_offline(ap)) { | 3682 | if (ata_link_offline(link)) { |
3521 | *class = ATA_DEV_NONE; | 3683 | *class = ATA_DEV_NONE; |
3522 | DPRINTK("EXIT, link offline\n"); | 3684 | DPRINTK("EXIT, link offline\n"); |
3523 | return 0; | 3685 | return 0; |
@@ -3526,17 +3688,27 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class, | |||
3526 | /* wait a while before checking status, see SRST for more info */ | 3688 | /* wait a while before checking status, see SRST for more info */ |
3527 | msleep(150); | 3689 | msleep(150); |
3528 | 3690 | ||
3691 | /* If PMP is supported, we have to do follow-up SRST. Note | ||
3692 | * that some PMPs don't send D2H Reg FIS after hardreset at | ||
3693 | * all if the first port is empty. Wait for it just for a | ||
3694 | * second and request follow-up SRST. | ||
3695 | */ | ||
3696 | if (ap->flags & ATA_FLAG_PMP) { | ||
3697 | ata_wait_ready(ap, jiffies + HZ); | ||
3698 | return -EAGAIN; | ||
3699 | } | ||
3700 | |||
3529 | rc = ata_wait_ready(ap, deadline); | 3701 | rc = ata_wait_ready(ap, deadline); |
3530 | /* link occupied, -ENODEV too is an error */ | 3702 | /* link occupied, -ENODEV too is an error */ |
3531 | if (rc) { | 3703 | if (rc) { |
3532 | ata_port_printk(ap, KERN_ERR, | 3704 | ata_link_printk(link, KERN_ERR, |
3533 | "COMRESET failed (errno=%d)\n", rc); | 3705 | "COMRESET failed (errno=%d)\n", rc); |
3534 | return rc; | 3706 | return rc; |
3535 | } | 3707 | } |
3536 | 3708 | ||
3537 | ap->ops->dev_select(ap, 0); /* probably unnecessary */ | 3709 | ap->ops->dev_select(ap, 0); /* probably unnecessary */ |
3538 | 3710 | ||
3539 | *class = ata_dev_try_classify(ap, 0, NULL); | 3711 | *class = ata_dev_try_classify(link->device, 1, NULL); |
3540 | 3712 | ||
3541 | DPRINTK("EXIT, class=%u\n", *class); | 3713 | DPRINTK("EXIT, class=%u\n", *class); |
3542 | return 0; | 3714 | return 0; |
@@ -3544,7 +3716,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class, | |||
3544 | 3716 | ||
3545 | /** | 3717 | /** |
3546 | * ata_std_postreset - standard postreset callback | 3718 | * ata_std_postreset - standard postreset callback |
3547 | * @ap: the target ata_port | 3719 | * @link: the target ata_link |
3548 | * @classes: classes of attached devices | 3720 | * @classes: classes of attached devices |
3549 | * | 3721 | * |
3550 | * This function is invoked after a successful reset. Note that | 3722 | * This function is invoked after a successful reset. Note that |
@@ -3554,18 +3726,19 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class, | |||
3554 | * LOCKING: | 3726 | * LOCKING: |
3555 | * Kernel thread context (may sleep) | 3727 | * Kernel thread context (may sleep) |
3556 | */ | 3728 | */ |
3557 | void ata_std_postreset(struct ata_port *ap, unsigned int *classes) | 3729 | void ata_std_postreset(struct ata_link *link, unsigned int *classes) |
3558 | { | 3730 | { |
3731 | struct ata_port *ap = link->ap; | ||
3559 | u32 serror; | 3732 | u32 serror; |
3560 | 3733 | ||
3561 | DPRINTK("ENTER\n"); | 3734 | DPRINTK("ENTER\n"); |
3562 | 3735 | ||
3563 | /* print link status */ | 3736 | /* print link status */ |
3564 | sata_print_link_status(ap); | 3737 | sata_print_link_status(link); |
3565 | 3738 | ||
3566 | /* clear SError */ | 3739 | /* clear SError */ |
3567 | if (sata_scr_read(ap, SCR_ERROR, &serror) == 0) | 3740 | if (sata_scr_read(link, SCR_ERROR, &serror) == 0) |
3568 | sata_scr_write(ap, SCR_ERROR, serror); | 3741 | sata_scr_write(link, SCR_ERROR, serror); |
3569 | 3742 | ||
3570 | /* is double-select really necessary? */ | 3743 | /* is double-select really necessary? */ |
3571 | if (classes[0] != ATA_DEV_NONE) | 3744 | if (classes[0] != ATA_DEV_NONE) |
@@ -3652,7 +3825,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, | |||
3652 | int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) | 3825 | int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) |
3653 | { | 3826 | { |
3654 | unsigned int class = dev->class; | 3827 | unsigned int class = dev->class; |
3655 | u16 *id = (void *)dev->ap->sector_buf; | 3828 | u16 *id = (void *)dev->link->ap->sector_buf; |
3656 | int rc; | 3829 | int rc; |
3657 | 3830 | ||
3658 | /* read ID data */ | 3831 | /* read ID data */ |
@@ -3671,6 +3844,7 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) | |||
3671 | /** | 3844 | /** |
3672 | * ata_dev_revalidate - Revalidate ATA device | 3845 | * ata_dev_revalidate - Revalidate ATA device |
3673 | * @dev: device to revalidate | 3846 | * @dev: device to revalidate |
3847 | * @new_class: new class code | ||
3674 | * @readid_flags: read ID flags | 3848 | * @readid_flags: read ID flags |
3675 | * | 3849 | * |
3676 | * Re-read IDENTIFY page, make sure @dev is still attached to the | 3850 | * Re-read IDENTIFY page, make sure @dev is still attached to the |
@@ -3682,7 +3856,8 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) | |||
3682 | * RETURNS: | 3856 | * RETURNS: |
3683 | * 0 on success, negative errno otherwise | 3857 | * 0 on success, negative errno otherwise |
3684 | */ | 3858 | */ |
3685 | int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags) | 3859 | int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, |
3860 | unsigned int readid_flags) | ||
3686 | { | 3861 | { |
3687 | u64 n_sectors = dev->n_sectors; | 3862 | u64 n_sectors = dev->n_sectors; |
3688 | int rc; | 3863 | int rc; |
@@ -3690,6 +3865,15 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags) | |||
3690 | if (!ata_dev_enabled(dev)) | 3865 | if (!ata_dev_enabled(dev)) |
3691 | return -ENODEV; | 3866 | return -ENODEV; |
3692 | 3867 | ||
3868 | /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ | ||
3869 | if (ata_class_enabled(new_class) && | ||
3870 | new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { | ||
3871 | ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", | ||
3872 | dev->class, new_class); | ||
3873 | rc = -ENODEV; | ||
3874 | goto fail; | ||
3875 | } | ||
3876 | |||
3693 | /* re-read ID */ | 3877 | /* re-read ID */ |
3694 | rc = ata_dev_reread_id(dev, readid_flags); | 3878 | rc = ata_dev_reread_id(dev, readid_flags); |
3695 | if (rc) | 3879 | if (rc) |
@@ -3763,6 +3947,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3763 | { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ | 3947 | { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ |
3764 | { "IOMEGA ZIP 250 ATAPI Floppy", | 3948 | { "IOMEGA ZIP 250 ATAPI Floppy", |
3765 | NULL, ATA_HORKAGE_NODMA }, | 3949 | NULL, ATA_HORKAGE_NODMA }, |
3950 | /* Odd clown on sil3726/4726 PMPs */ | ||
3951 | { "Config Disk", NULL, ATA_HORKAGE_NODMA | | ||
3952 | ATA_HORKAGE_SKIP_PM }, | ||
3766 | 3953 | ||
3767 | /* Weird ATAPI devices */ | 3954 | /* Weird ATAPI devices */ |
3768 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, | 3955 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, |
@@ -3775,16 +3962,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3775 | /* http://thread.gmane.org/gmane.linux.ide/14907 */ | 3962 | /* http://thread.gmane.org/gmane.linux.ide/14907 */ |
3776 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, | 3963 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, |
3777 | /* NCQ is broken */ | 3964 | /* NCQ is broken */ |
3778 | { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, | 3965 | { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, |
3779 | { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ }, | ||
3780 | { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, | ||
3781 | { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, }, | ||
3782 | { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ }, | ||
3783 | { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, | 3966 | { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, |
3784 | { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", | 3967 | { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ }, |
3785 | ATA_HORKAGE_NONCQ }, | 3968 | { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ }, |
3786 | /* NCQ hard hangs device under heavier load, needs hard power cycle */ | 3969 | { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, |
3787 | { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, | 3970 | |
3788 | /* Blacklist entries taken from Silicon Image 3124/3132 | 3971 | /* Blacklist entries taken from Silicon Image 3124/3132 |
3789 | Windows driver .inf file - also several Linux problem reports */ | 3972 | Windows driver .inf file - also several Linux problem reports */ |
3790 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, | 3973 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, |
@@ -3793,11 +3976,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3793 | /* Drives which do spurious command completion */ | 3976 | /* Drives which do spurious command completion */ |
3794 | { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, | 3977 | { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, |
3795 | { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, | 3978 | { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, |
3979 | { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, }, | ||
3796 | { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, | 3980 | { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, |
3797 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, | 3981 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, |
3982 | { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, }, | ||
3798 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, | 3983 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, |
3984 | { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, }, | ||
3799 | { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, | 3985 | { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, |
3800 | { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, }, | 3986 | { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, }, |
3987 | { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, }, | ||
3988 | { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, }, | ||
3801 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, | 3989 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, |
3802 | 3990 | ||
3803 | /* devices which puke on READ_NATIVE_MAX */ | 3991 | /* devices which puke on READ_NATIVE_MAX */ |
@@ -3806,10 +3994,31 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3806 | { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, | 3994 | { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, |
3807 | { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, | 3995 | { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, |
3808 | 3996 | ||
3997 | /* Devices which report 1 sector over size HPA */ | ||
3998 | { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, | ||
3999 | { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, | ||
4000 | |||
3809 | /* End Marker */ | 4001 | /* End Marker */ |
3810 | { } | 4002 | { } |
3811 | }; | 4003 | }; |
3812 | 4004 | ||
4005 | int strn_pattern_cmp(const char *patt, const char *name, int wildchar) | ||
4006 | { | ||
4007 | const char *p; | ||
4008 | int len; | ||
4009 | |||
4010 | /* | ||
4011 | * check for trailing wildcard: *\0 | ||
4012 | */ | ||
4013 | p = strchr(patt, wildchar); | ||
4014 | if (p && ((*(p + 1)) == 0)) | ||
4015 | len = p - patt; | ||
4016 | else | ||
4017 | len = strlen(name); | ||
4018 | |||
4019 | return strncmp(patt, name, len); | ||
4020 | } | ||
4021 | |||
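Editor's note: the blacklist entries above now use a trailing '*' wildcard ("Maxtor *", "BANC*", "HITACHI HDS7250SASUN500G*"), and strn_pattern_cmp() returns 0 on a match by comparing only the characters before the wildcard. A self-contained check of that behaviour; the helper is copied verbatim so the example stands alone outside the kernel.

#include <assert.h>
#include <string.h>

static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/* check for trailing wildcard: *\0 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else
		len = strlen(name);

	return strncmp(patt, name, len);
}

int main(void)
{
	/* "Maxtor *" matches any Maxtor model string up to the wildcard */
	assert(strn_pattern_cmp("Maxtor *", "Maxtor 6B200M0", '*') == 0);
	/* firmware pattern "BANC*" matches revision "BANC1BM0" */
	assert(strn_pattern_cmp("BANC*", "BANC1BM0", '*') == 0);
	/* no wildcard: compared against the full device string */
	assert(strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0);
	assert(strn_pattern_cmp("ST380817AS", "ST3808110AS", '*') != 0);
	return 0;
}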
3813 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev) | 4022 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev) |
3814 | { | 4023 | { |
3815 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; | 4024 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; |
@@ -3820,10 +4029,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev) | |||
3820 | ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); | 4029 | ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); |
3821 | 4030 | ||
3822 | while (ad->model_num) { | 4031 | while (ad->model_num) { |
3823 | if (!strcmp(ad->model_num, model_num)) { | 4032 | if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { |
3824 | if (ad->model_rev == NULL) | 4033 | if (ad->model_rev == NULL) |
3825 | return ad->horkage; | 4034 | return ad->horkage; |
3826 | if (!strcmp(ad->model_rev, model_rev)) | 4035 | if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) |
3827 | return ad->horkage; | 4036 | return ad->horkage; |
3828 | } | 4037 | } |
3829 | ad++; | 4038 | ad++; |
@@ -3837,7 +4046,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev) | |||
3837 | * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) | 4046 | * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) |
3838 | * if the LLDD handles only interrupts in the HSM_ST_LAST state. | 4047 | * if the LLDD handles only interrupts in the HSM_ST_LAST state. |
3839 | */ | 4048 | */ |
3840 | if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) && | 4049 | if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && |
3841 | (dev->flags & ATA_DFLAG_CDB_INTR)) | 4050 | (dev->flags & ATA_DFLAG_CDB_INTR)) |
3842 | return 1; | 4051 | return 1; |
3843 | return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; | 4052 | return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; |
@@ -3857,7 +4066,8 @@ static int ata_dma_blacklisted(const struct ata_device *dev) | |||
3857 | */ | 4066 | */ |
3858 | static void ata_dev_xfermask(struct ata_device *dev) | 4067 | static void ata_dev_xfermask(struct ata_device *dev) |
3859 | { | 4068 | { |
3860 | struct ata_port *ap = dev->ap; | 4069 | struct ata_link *link = dev->link; |
4070 | struct ata_port *ap = link->ap; | ||
3861 | struct ata_host *host = ap->host; | 4071 | struct ata_host *host = ap->host; |
3862 | unsigned long xfer_mask; | 4072 | unsigned long xfer_mask; |
3863 | 4073 | ||
@@ -3955,7 +4165,43 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
3955 | tf.protocol = ATA_PROT_NODATA; | 4165 | tf.protocol = ATA_PROT_NODATA; |
3956 | tf.nsect = dev->xfer_mode; | 4166 | tf.nsect = dev->xfer_mode; |
3957 | 4167 | ||
3958 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 4168 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4169 | |||
4170 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | ||
4171 | return err_mask; | ||
4172 | } | ||
4173 | |||
4174 | /** | ||
4175 | * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES | ||
4176 | * @dev: Device to which command will be sent | ||
4177 | * @enable: Whether to enable or disable the feature | ||
4178 | * | ||
4179 | * Issue SET FEATURES - SATA FEATURES command to device @dev | ||
4180 | * with the sector count set to indicate the Asynchronous | ||
4181 | * Notification feature. | ||
4182 | * | ||
4183 | * LOCKING: | ||
4184 | * PCI/etc. bus probe sem. | ||
4185 | * | ||
4186 | * RETURNS: | ||
4187 | * 0 on success, AC_ERR_* mask otherwise. | ||
4188 | */ | ||
4189 | static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) | ||
4190 | { | ||
4191 | struct ata_taskfile tf; | ||
4192 | unsigned int err_mask; | ||
4193 | |||
4194 | /* set up set-features taskfile */ | ||
4195 | DPRINTK("set features - SATA features\n"); | ||
4196 | |||
4197 | ata_tf_init(dev, &tf); | ||
4198 | tf.command = ATA_CMD_SET_FEATURES; | ||
4199 | tf.feature = enable; | ||
4200 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
4201 | tf.protocol = ATA_PROT_NODATA; | ||
4202 | tf.nsect = SATA_AN; | ||
4203 | |||
4204 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); | ||
3959 | 4205 | ||
3960 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 4206 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
3961 | return err_mask; | 4207 | return err_mask; |
@@ -3993,7 +4239,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, | |||
3993 | tf.nsect = sectors; | 4239 | tf.nsect = sectors; |
3994 | tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ | 4240 | tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ |
3995 | 4241 | ||
3996 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | 4242 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
3997 | /* A clean abort indicates an original or just out of spec drive | 4243 | /* A clean abort indicates an original or just out of spec drive |
3998 | and we should continue as we issue the setup based on the | 4244 | and we should continue as we issue the setup based on the |
3999 | drive reported working geometry */ | 4245 | drive reported working geometry */ |
@@ -4207,6 +4453,36 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc) | |||
4207 | } | 4453 | } |
4208 | 4454 | ||
4209 | /** | 4455 | /** |
4456 | * ata_std_qc_defer - Check whether a qc needs to be deferred | ||
4457 | * @qc: ATA command in question | ||
4458 | * | ||
4459 | * Non-NCQ commands cannot run with any other command, NCQ or | ||
4460 | * not. As the upper layer only knows the queue depth, we are | ||
4461 | * responsible for maintaining exclusion. This function checks | ||
4462 | * whether a new command @qc can be issued. | ||
4463 | * | ||
4464 | * LOCKING: | ||
4465 | * spin_lock_irqsave(host lock) | ||
4466 | * | ||
4467 | * RETURNS: | ||
4468 | * ATA_DEFER_* if deferring is needed, 0 otherwise. | ||
4469 | */ | ||
4470 | int ata_std_qc_defer(struct ata_queued_cmd *qc) | ||
4471 | { | ||
4472 | struct ata_link *link = qc->dev->link; | ||
4473 | |||
4474 | if (qc->tf.protocol == ATA_PROT_NCQ) { | ||
4475 | if (!ata_tag_valid(link->active_tag)) | ||
4476 | return 0; | ||
4477 | } else { | ||
4478 | if (!ata_tag_valid(link->active_tag) && !link->sactive) | ||
4479 | return 0; | ||
4480 | } | ||
4481 | |||
4482 | return ATA_DEFER_LINK; | ||
4483 | } | ||
4484 | |||
4485 | /** | ||
4210 | * ata_qc_prep - Prepare taskfile for submission | 4486 | * ata_qc_prep - Prepare taskfile for submission |
4211 | * @qc: Metadata associated with taskfile to be prepared | 4487 | * @qc: Metadata associated with taskfile to be prepared |
4212 | * | 4488 | * |
@@ -4482,7 +4758,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) | |||
4482 | void ata_data_xfer(struct ata_device *adev, unsigned char *buf, | 4758 | void ata_data_xfer(struct ata_device *adev, unsigned char *buf, |
4483 | unsigned int buflen, int write_data) | 4759 | unsigned int buflen, int write_data) |
4484 | { | 4760 | { |
4485 | struct ata_port *ap = adev->ap; | 4761 | struct ata_port *ap = adev->link->ap; |
4486 | unsigned int words = buflen >> 1; | 4762 | unsigned int words = buflen >> 1; |
4487 | 4763 | ||
4488 | /* Transfer multiple of 2 bytes */ | 4764 | /* Transfer multiple of 2 bytes */ |
@@ -4611,6 +4887,8 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) | |||
4611 | ata_pio_sector(qc); | 4887 | ata_pio_sector(qc); |
4612 | } else | 4888 | } else |
4613 | ata_pio_sector(qc); | 4889 | ata_pio_sector(qc); |
4890 | |||
4891 | ata_altstatus(qc->ap); /* flush */ | ||
4614 | } | 4892 | } |
4615 | 4893 | ||
4616 | /** | 4894 | /** |
@@ -4785,6 +5063,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) | |||
4785 | VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); | 5063 | VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); |
4786 | 5064 | ||
4787 | __atapi_pio_bytes(qc, bytes); | 5065 | __atapi_pio_bytes(qc, bytes); |
5066 | ata_altstatus(ap); /* flush */ | ||
4788 | 5067 | ||
4789 | return; | 5068 | return; |
4790 | 5069 | ||
@@ -4956,7 +5235,6 @@ fsm_start: | |||
4956 | */ | 5235 | */ |
4957 | ap->hsm_task_state = HSM_ST; | 5236 | ap->hsm_task_state = HSM_ST; |
4958 | ata_pio_sectors(qc); | 5237 | ata_pio_sectors(qc); |
4959 | ata_altstatus(ap); /* flush */ | ||
4960 | } else | 5238 | } else |
4961 | /* send CDB */ | 5239 | /* send CDB */ |
4962 | atapi_send_cdb(ap, qc); | 5240 | atapi_send_cdb(ap, qc); |
@@ -5037,7 +5315,6 @@ fsm_start: | |||
5037 | 5315 | ||
5038 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | 5316 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { |
5039 | ata_pio_sectors(qc); | 5317 | ata_pio_sectors(qc); |
5040 | ata_altstatus(ap); | ||
5041 | status = ata_wait_idle(ap); | 5318 | status = ata_wait_idle(ap); |
5042 | } | 5319 | } |
5043 | 5320 | ||
@@ -5057,13 +5334,11 @@ fsm_start: | |||
5057 | if (ap->hsm_task_state == HSM_ST_LAST && | 5334 | if (ap->hsm_task_state == HSM_ST_LAST && |
5058 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | 5335 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { |
5059 | /* all data read */ | 5336 | /* all data read */ |
5060 | ata_altstatus(ap); | ||
5061 | status = ata_wait_idle(ap); | 5337 | status = ata_wait_idle(ap); |
5062 | goto fsm_start; | 5338 | goto fsm_start; |
5063 | } | 5339 | } |
5064 | } | 5340 | } |
5065 | 5341 | ||
5066 | ata_altstatus(ap); /* flush */ | ||
5067 | poll_next = 1; | 5342 | poll_next = 1; |
5068 | break; | 5343 | break; |
5069 | 5344 | ||
@@ -5188,7 +5463,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) | |||
5188 | 5463 | ||
5189 | struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) | 5464 | struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) |
5190 | { | 5465 | { |
5191 | struct ata_port *ap = dev->ap; | 5466 | struct ata_port *ap = dev->link->ap; |
5192 | struct ata_queued_cmd *qc; | 5467 | struct ata_queued_cmd *qc; |
5193 | 5468 | ||
5194 | qc = ata_qc_new(ap); | 5469 | qc = ata_qc_new(ap); |
@@ -5231,6 +5506,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
5231 | void __ata_qc_complete(struct ata_queued_cmd *qc) | 5506 | void __ata_qc_complete(struct ata_queued_cmd *qc) |
5232 | { | 5507 | { |
5233 | struct ata_port *ap = qc->ap; | 5508 | struct ata_port *ap = qc->ap; |
5509 | struct ata_link *link = qc->dev->link; | ||
5234 | 5510 | ||
5235 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ | 5511 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
5236 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 5512 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
@@ -5239,10 +5515,19 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) | |||
5239 | ata_sg_clean(qc); | 5515 | ata_sg_clean(qc); |
5240 | 5516 | ||
5241 | /* command should be marked inactive atomically with qc completion */ | 5517 | /* command should be marked inactive atomically with qc completion */ |
5242 | if (qc->tf.protocol == ATA_PROT_NCQ) | 5518 | if (qc->tf.protocol == ATA_PROT_NCQ) { |
5243 | ap->sactive &= ~(1 << qc->tag); | 5519 | link->sactive &= ~(1 << qc->tag); |
5244 | else | 5520 | if (!link->sactive) |
5245 | ap->active_tag = ATA_TAG_POISON; | 5521 | ap->nr_active_links--; |
5522 | } else { | ||
5523 | link->active_tag = ATA_TAG_POISON; | ||
5524 | ap->nr_active_links--; | ||
5525 | } | ||
5526 | |||
5527 | /* clear exclusive status */ | ||
5528 | if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && | ||
5529 | ap->excl_link == link)) | ||
5530 | ap->excl_link = NULL; | ||
5246 | 5531 | ||
5247 | /* atapi: mark qc as inactive to prevent the interrupt handler | 5532 | /* atapi: mark qc as inactive to prevent the interrupt handler |
5248 | * from completing the command twice later, before the error handler | 5533 | * from completing the command twice later, before the error handler |
@@ -5411,19 +5696,25 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc) | |||
5411 | void ata_qc_issue(struct ata_queued_cmd *qc) | 5696 | void ata_qc_issue(struct ata_queued_cmd *qc) |
5412 | { | 5697 | { |
5413 | struct ata_port *ap = qc->ap; | 5698 | struct ata_port *ap = qc->ap; |
5699 | struct ata_link *link = qc->dev->link; | ||
5414 | 5700 | ||
5415 | /* Make sure only one non-NCQ command is outstanding. The | 5701 | /* Make sure only one non-NCQ command is outstanding. The |
5416 | * check is skipped for old EH because it reuses active qc to | 5702 | * check is skipped for old EH because it reuses active qc to |
5417 | * request ATAPI sense. | 5703 | * request ATAPI sense. |
5418 | */ | 5704 | */ |
5419 | WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag)); | 5705 | WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); |
5420 | 5706 | ||
5421 | if (qc->tf.protocol == ATA_PROT_NCQ) { | 5707 | if (qc->tf.protocol == ATA_PROT_NCQ) { |
5422 | WARN_ON(ap->sactive & (1 << qc->tag)); | 5708 | WARN_ON(link->sactive & (1 << qc->tag)); |
5423 | ap->sactive |= 1 << qc->tag; | 5709 | |
5710 | if (!link->sactive) | ||
5711 | ap->nr_active_links++; | ||
5712 | link->sactive |= 1 << qc->tag; | ||
5424 | } else { | 5713 | } else { |
5425 | WARN_ON(ap->sactive); | 5714 | WARN_ON(link->sactive); |
5426 | ap->active_tag = qc->tag; | 5715 | |
5716 | ap->nr_active_links++; | ||
5717 | link->active_tag = qc->tag; | ||
5427 | } | 5718 | } |
5428 | 5719 | ||
5429 | qc->flags |= ATA_QCFLAG_ACTIVE; | 5720 | qc->flags |= ATA_QCFLAG_ACTIVE; |
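Editor's note: the issue/complete pair above keeps ap->nr_active_links in step with per-link state: the counter rises when a link receives its first NCQ tag or any non-NCQ command, and falls when the link's last NCQ tag completes or the exclusive command finishes. A stand-alone sketch of that bookkeeping using toy types, not the kernel structures.

#include <assert.h>

struct toy_link { unsigned int sactive; int exclusive; };
struct toy_port { int nr_active_links; };

static void issue(struct toy_port *ap, struct toy_link *link, int ncq, int tag)
{
	if (ncq) {
		if (!link->sactive)		/* first NCQ tag makes the link active */
			ap->nr_active_links++;
		link->sactive |= 1u << tag;
	} else {
		ap->nr_active_links++;		/* every non-NCQ command is exclusive */
		link->exclusive = 1;
	}
}

static void complete(struct toy_port *ap, struct toy_link *link, int ncq, int tag)
{
	if (ncq) {
		link->sactive &= ~(1u << tag);
		if (!link->sactive)		/* last NCQ tag retires the link */
			ap->nr_active_links--;
	} else {
		link->exclusive = 0;
		ap->nr_active_links--;
	}
}

int main(void)
{
	struct toy_port ap = { 0 };
	struct toy_link link = { 0, 0 };

	issue(&ap, &link, 1, 0);	/* first NCQ tag */
	issue(&ap, &link, 1, 1);	/* second tag on the same link */
	assert(ap.nr_active_links == 1);

	complete(&ap, &link, 1, 0);
	complete(&ap, &link, 1, 1);
	assert(ap.nr_active_links == 0);
	return 0;
}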
@@ -5606,7 +5897,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
5606 | inline unsigned int ata_host_intr (struct ata_port *ap, | 5897 | inline unsigned int ata_host_intr (struct ata_port *ap, |
5607 | struct ata_queued_cmd *qc) | 5898 | struct ata_queued_cmd *qc) |
5608 | { | 5899 | { |
5609 | struct ata_eh_info *ehi = &ap->eh_info; | 5900 | struct ata_eh_info *ehi = &ap->link.eh_info; |
5610 | u8 status, host_stat = 0; | 5901 | u8 status, host_stat = 0; |
5611 | 5902 | ||
5612 | VPRINTK("ata%u: protocol %d task_state %d\n", | 5903 | VPRINTK("ata%u: protocol %d task_state %d\n", |
@@ -5680,7 +5971,8 @@ idle_irq: | |||
5680 | 5971 | ||
5681 | #ifdef ATA_IRQ_TRAP | 5972 | #ifdef ATA_IRQ_TRAP |
5682 | if ((ap->stats.idle_irq % 1000) == 0) { | 5973 | if ((ap->stats.idle_irq % 1000) == 0) { |
5683 | ap->ops->irq_ack(ap, 0); /* debug trap */ | 5974 | ata_chk_status(ap); |
5975 | ap->ops->irq_clear(ap); | ||
5684 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | 5976 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); |
5685 | return 1; | 5977 | return 1; |
5686 | } | 5978 | } |
@@ -5721,7 +6013,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance) | |||
5721 | !(ap->flags & ATA_FLAG_DISABLED)) { | 6013 | !(ap->flags & ATA_FLAG_DISABLED)) { |
5722 | struct ata_queued_cmd *qc; | 6014 | struct ata_queued_cmd *qc; |
5723 | 6015 | ||
5724 | qc = ata_qc_from_tag(ap, ap->active_tag); | 6016 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
5725 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && | 6017 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && |
5726 | (qc->flags & ATA_QCFLAG_ACTIVE)) | 6018 | (qc->flags & ATA_QCFLAG_ACTIVE)) |
5727 | handled |= ata_host_intr(ap, qc); | 6019 | handled |= ata_host_intr(ap, qc); |
@@ -5735,9 +6027,9 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance) | |||
5735 | 6027 | ||
5736 | /** | 6028 | /** |
5737 | * sata_scr_valid - test whether SCRs are accessible | 6029 | * sata_scr_valid - test whether SCRs are accessible |
5738 | * @ap: ATA port to test SCR accessibility for | 6030 | * @link: ATA link to test SCR accessibility for |
5739 | * | 6031 | * |
5740 | * Test whether SCRs are accessible for @ap. | 6032 | * Test whether SCRs are accessible for @link. |
5741 | * | 6033 | * |
5742 | * LOCKING: | 6034 | * LOCKING: |
5743 | * None. | 6035 | * None. |
@@ -5745,60 +6037,74 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance) | |||
5745 | * RETURNS: | 6037 | * RETURNS: |
5746 | * 1 if SCRs are accessible, 0 otherwise. | 6038 | * 1 if SCRs are accessible, 0 otherwise. |
5747 | */ | 6039 | */ |
5748 | int sata_scr_valid(struct ata_port *ap) | 6040 | int sata_scr_valid(struct ata_link *link) |
5749 | { | 6041 | { |
6042 | struct ata_port *ap = link->ap; | ||
6043 | |||
5750 | return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; | 6044 | return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; |
5751 | } | 6045 | } |
5752 | 6046 | ||
5753 | /** | 6047 | /** |
5754 | * sata_scr_read - read SCR register of the specified port | 6048 | * sata_scr_read - read SCR register of the specified port |
5755 | * @ap: ATA port to read SCR for | 6049 | * @link: ATA link to read SCR for |
5756 | * @reg: SCR to read | 6050 | * @reg: SCR to read |
5757 | * @val: Place to store read value | 6051 | * @val: Place to store read value |
5758 | * | 6052 | * |
5759 | * Read SCR register @reg of @ap into *@val. This function is | 6053 | * Read SCR register @reg of @link into *@val. This function is |
5760 | * guaranteed to succeed if the cable type of the port is SATA | 6054 | * guaranteed to succeed if @link is ap->link, the cable type of |
5761 | * and the port implements ->scr_read. | 6055 | * the port is SATA and the port implements ->scr_read. |
5762 | * | 6056 | * |
5763 | * LOCKING: | 6057 | * LOCKING: |
5764 | * None. | 6058 | * None if @link is ap->link. Kernel thread context otherwise. |
5765 | * | 6059 | * |
5766 | * RETURNS: | 6060 | * RETURNS: |
5767 | * 0 on success, negative errno on failure. | 6061 | * 0 on success, negative errno on failure. |
5768 | */ | 6062 | */ |
5769 | int sata_scr_read(struct ata_port *ap, int reg, u32 *val) | 6063 | int sata_scr_read(struct ata_link *link, int reg, u32 *val) |
5770 | { | 6064 | { |
5771 | if (sata_scr_valid(ap)) | 6065 | if (ata_is_host_link(link)) { |
5772 | return ap->ops->scr_read(ap, reg, val); | 6066 | struct ata_port *ap = link->ap; |
5773 | return -EOPNOTSUPP; | 6067 | |
6068 | if (sata_scr_valid(link)) | ||
6069 | return ap->ops->scr_read(ap, reg, val); | ||
6070 | return -EOPNOTSUPP; | ||
6071 | } | ||
6072 | |||
6073 | return sata_pmp_scr_read(link, reg, val); | ||
5774 | } | 6074 | } |
5775 | 6075 | ||
5776 | /** | 6076 | /** |
5777 | * sata_scr_write - write SCR register of the specified port | 6077 | * sata_scr_write - write SCR register of the specified port |
5778 | * @ap: ATA port to write SCR for | 6078 | * @link: ATA link to write SCR for |
5779 | * @reg: SCR to write | 6079 | * @reg: SCR to write |
5780 | * @val: value to write | 6080 | * @val: value to write |
5781 | * | 6081 | * |
5782 | * Write @val to SCR register @reg of @ap. This function is | 6082 | * Write @val to SCR register @reg of @link. This function is |
5783 | * guaranteed to succeed if the cable type of the port is SATA | 6083 | * guaranteed to succeed if @link is ap->link, the cable type of |
5784 | * and the port implements ->scr_write. | 6084 | * the port is SATA and the port implements ->scr_write. |
5785 | * | 6085 | * |
5786 | * LOCKING: | 6086 | * LOCKING: |
5787 | * None. | 6087 | * None if @link is ap->link. Kernel thread context otherwise. |
5788 | * | 6088 | * |
5789 | * RETURNS: | 6089 | * RETURNS: |
5790 | * 0 on success, negative errno on failure. | 6090 | * 0 on success, negative errno on failure. |
5791 | */ | 6091 | */ |
5792 | int sata_scr_write(struct ata_port *ap, int reg, u32 val) | 6092 | int sata_scr_write(struct ata_link *link, int reg, u32 val) |
5793 | { | 6093 | { |
5794 | if (sata_scr_valid(ap)) | 6094 | if (ata_is_host_link(link)) { |
5795 | return ap->ops->scr_write(ap, reg, val); | 6095 | struct ata_port *ap = link->ap; |
5796 | return -EOPNOTSUPP; | 6096 | |
6097 | if (sata_scr_valid(link)) | ||
6098 | return ap->ops->scr_write(ap, reg, val); | ||
6099 | return -EOPNOTSUPP; | ||
6100 | } | ||
6101 | |||
6102 | return sata_pmp_scr_write(link, reg, val); | ||
5797 | } | 6103 | } |
5798 | 6104 | ||
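Editor's note: with port multiplier support the SCR helpers now dispatch per link: the host link uses the controller's ->scr_read/->scr_write methods directly, while a PMP link is routed through sata_pmp_scr_read()/sata_pmp_scr_write(), which reach the register through the port multiplier. A toy, stand-alone illustration of that dispatch shape; the PMP path is only a stub and the names here are invented for the sketch.

#include <stdio.h>

struct toy_link { int is_host_link; int pmp; };

/* Host link: talk to the controller's SCR block directly. */
static int host_scr_read(int reg, unsigned int *val)
{
	*val = 0x113;			/* pretend SStatus: Gen1 speed, link online */
	return 0;
}

/* PMP link: the register lives behind the port multiplier (stubbed here). */
static int pmp_scr_read(int pmp, int reg, unsigned int *val)
{
	printf("would read SCR %d of PMP port %d through the port multiplier\n", reg, pmp);
	*val = 0;
	return 0;
}

static int toy_scr_read(const struct toy_link *link, int reg, unsigned int *val)
{
	if (link->is_host_link)
		return host_scr_read(reg, val);
	return pmp_scr_read(link->pmp, reg, val);
}

int main(void)
{
	struct toy_link host = { 1, 0 }, fan_out = { 0, 2 };
	unsigned int v;

	toy_scr_read(&host, 0, &v);	/* direct controller access */
	toy_scr_read(&fan_out, 0, &v);	/* routed through the PMP */
	return 0;
}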
5799 | /** | 6105 | /** |
5800 | * sata_scr_write_flush - write SCR register of the specified port and flush | 6106 | * sata_scr_write_flush - write SCR register of the specified port and flush |
5801 | * @ap: ATA port to write SCR for | 6107 | * @link: ATA link to write SCR for |
5802 | * @reg: SCR to write | 6108 | * @reg: SCR to write |
5803 | * @val: value to write | 6109 | * @val: value to write |
5804 | * | 6110 | * |
@@ -5806,31 +6112,36 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val) | |||
5806 | * function performs flush after writing to the register. | 6112 | * function performs flush after writing to the register. |
5807 | * | 6113 | * |
5808 | * LOCKING: | 6114 | * LOCKING: |
5809 | * None. | 6115 | * None if @link is ap->link. Kernel thread context otherwise. |
5810 | * | 6116 | * |
5811 | * RETURNS: | 6117 | * RETURNS: |
5812 | * 0 on success, negative errno on failure. | 6118 | * 0 on success, negative errno on failure. |
5813 | */ | 6119 | */ |
5814 | int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) | 6120 | int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) |
5815 | { | 6121 | { |
5816 | int rc; | 6122 | if (ata_is_host_link(link)) { |
6123 | struct ata_port *ap = link->ap; | ||
6124 | int rc; | ||
5817 | 6125 | ||
5818 | if (sata_scr_valid(ap)) { | 6126 | if (sata_scr_valid(link)) { |
5819 | rc = ap->ops->scr_write(ap, reg, val); | 6127 | rc = ap->ops->scr_write(ap, reg, val); |
5820 | if (rc == 0) | 6128 | if (rc == 0) |
5821 | rc = ap->ops->scr_read(ap, reg, &val); | 6129 | rc = ap->ops->scr_read(ap, reg, &val); |
5822 | return rc; | 6130 | return rc; |
6131 | } | ||
6132 | return -EOPNOTSUPP; | ||
5823 | } | 6133 | } |
5824 | return -EOPNOTSUPP; | 6134 | |
6135 | return sata_pmp_scr_write(link, reg, val); | ||
5825 | } | 6136 | } |
5826 | 6137 | ||
5827 | /** | 6138 | /** |
5828 | * ata_port_online - test whether the given port is online | 6139 | * ata_link_online - test whether the given link is online |
5829 | * @ap: ATA port to test | 6140 | * @link: ATA link to test |
5830 | * | 6141 | * |
5831 | * Test whether @ap is online. Note that this function returns 0 | 6142 | * Test whether @link is online. Note that this function returns |
5832 | * if online status of @ap cannot be obtained, so | 6143 | * 0 if online status of @link cannot be obtained, so |
5833 | * ata_port_online(ap) != !ata_port_offline(ap). | 6144 | * ata_link_online(link) != !ata_link_offline(link). |
5834 | * | 6145 | * |
5835 | * LOCKING: | 6146 | * LOCKING: |
5836 | * None. | 6147 | * None. |
@@ -5838,22 +6149,23 @@ int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) | |||
5838 | * RETURNS: | 6149 | * RETURNS: |
5839 | * 1 if the port online status is available and online. | 6150 | * 1 if the port online status is available and online. |
5840 | */ | 6151 | */ |
5841 | int ata_port_online(struct ata_port *ap) | 6152 | int ata_link_online(struct ata_link *link) |
5842 | { | 6153 | { |
5843 | u32 sstatus; | 6154 | u32 sstatus; |
5844 | 6155 | ||
5845 | if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3) | 6156 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
6157 | (sstatus & 0xf) == 0x3) | ||
5846 | return 1; | 6158 | return 1; |
5847 | return 0; | 6159 | return 0; |
5848 | } | 6160 | } |
5849 | 6161 | ||
5850 | /** | 6162 | /** |
5851 | * ata_port_offline - test whether the given port is offline | 6163 | * ata_link_offline - test whether the given link is offline |
5852 | * @ap: ATA port to test | 6164 | * @link: ATA link to test |
5853 | * | 6165 | * |
5854 | * Test whether @ap is offline. Note that this function returns | 6166 | * Test whether @link is offline. Note that this function |
5855 | * 0 if offline status of @ap cannot be obtained, so | 6167 | * returns 0 if offline status of @link cannot be obtained, so |
5856 | * ata_port_online(ap) != !ata_port_offline(ap). | 6168 | * ata_link_online(link) != !ata_link_offline(link). |
5857 | * | 6169 | * |
5858 | * LOCKING: | 6170 | * LOCKING: |
5859 | * None. | 6171 | * None. |
@@ -5861,11 +6173,12 @@ int ata_port_online(struct ata_port *ap) | |||
5861 | * RETURNS: | 6173 | * RETURNS: |
5862 | * 1 if the port offline status is available and offline. | 6174 | * 1 if the port offline status is available and offline. |
5863 | */ | 6175 | */ |
5864 | int ata_port_offline(struct ata_port *ap) | 6176 | int ata_link_offline(struct ata_link *link) |
5865 | { | 6177 | { |
5866 | u32 sstatus; | 6178 | u32 sstatus; |
5867 | 6179 | ||
5868 | if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3) | 6180 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
6181 | (sstatus & 0xf) != 0x3) | ||
5869 | return 1; | 6182 | return 1; |
5870 | return 0; | 6183 | return 0; |
5871 | } | 6184 | } |
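Editor's note: both helpers key off the DET field (bits 3:0) of SStatus. ata_link_online() returns 1 only when the SCR is readable and DET reads 0x3; ata_link_offline() returns 1 whenever the SCR is readable and DET is anything else; when SStatus cannot be read at all, both return 0, which is why the two are not simple negations of each other. A stand-alone decode of the DET values involved, per the SATA SStatus definition.

#include <stdio.h>

/* SStatus DET values; 0x3 is the only state ata_link_online() accepts. */
static const char *sstatus_det_state(unsigned int sstatus)
{
	switch (sstatus & 0xf) {
	case 0x0: return "no device detected";
	case 0x1: return "device present, no PHY communication";
	case 0x3: return "device present, PHY communication established";
	case 0x4: return "PHY in offline mode";
	default:  return "reserved/unknown";
	}
}

int main(void)
{
	printf("0x113: %s\n", sstatus_det_state(0x113));	/* online */
	printf("0x000: %s\n", sstatus_det_state(0x000));	/* reported offline */
	printf("0x001: %s\n", sstatus_det_state(0x001));	/* not online; also reported offline */
	return 0;
}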
@@ -5883,6 +6196,10 @@ int ata_flush_cache(struct ata_device *dev) | |||
5883 | else | 6196 | else |
5884 | cmd = ATA_CMD_FLUSH; | 6197 | cmd = ATA_CMD_FLUSH; |
5885 | 6198 | ||
6199 | /* This is wrong. On a failed flush we get back the LBA of the lost | ||
6200 | sector and we should (assuming it wasn't aborted as unknown) issue | ||
6201 | a further flush command to continue the writeback until it | ||
6202 | does not error */ | ||
5886 | err_mask = ata_do_simple_cmd(dev, cmd); | 6203 | err_mask = ata_do_simple_cmd(dev, cmd); |
5887 | if (err_mask) { | 6204 | if (err_mask) { |
5888 | ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); | 6205 | ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); |
@@ -5902,6 +6219,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, | |||
5902 | 6219 | ||
5903 | for (i = 0; i < host->n_ports; i++) { | 6220 | for (i = 0; i < host->n_ports; i++) { |
5904 | struct ata_port *ap = host->ports[i]; | 6221 | struct ata_port *ap = host->ports[i]; |
6222 | struct ata_link *link; | ||
5905 | 6223 | ||
5906 | /* Previous resume operation might still be in | 6224 | /* Previous resume operation might still be in |
5907 | * progress. Wait for PM_PENDING to clear. | 6225 | * progress. Wait for PM_PENDING to clear. |
@@ -5921,8 +6239,10 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, | |||
5921 | } | 6239 | } |
5922 | 6240 | ||
5923 | ap->pflags |= ATA_PFLAG_PM_PENDING; | 6241 | ap->pflags |= ATA_PFLAG_PM_PENDING; |
5924 | ap->eh_info.action |= action; | 6242 | __ata_port_for_each_link(link, ap) { |
5925 | ap->eh_info.flags |= ehi_flags; | 6243 | link->eh_info.action |= action; |
6244 | link->eh_info.flags |= ehi_flags; | ||
6245 | } | ||
5926 | 6246 | ||
5927 | ata_port_schedule_eh(ap); | 6247 | ata_port_schedule_eh(ap); |
5928 | 6248 | ||
@@ -6026,12 +6346,13 @@ int ata_port_start(struct ata_port *ap) | |||
6026 | */ | 6346 | */ |
6027 | void ata_dev_init(struct ata_device *dev) | 6347 | void ata_dev_init(struct ata_device *dev) |
6028 | { | 6348 | { |
6029 | struct ata_port *ap = dev->ap; | 6349 | struct ata_link *link = dev->link; |
6350 | struct ata_port *ap = link->ap; | ||
6030 | unsigned long flags; | 6351 | unsigned long flags; |
6031 | 6352 | ||
6032 | /* SATA spd limit is bound to the first device */ | 6353 | /* SATA spd limit is bound to the first device */ |
6033 | ap->sata_spd_limit = ap->hw_sata_spd_limit; | 6354 | link->sata_spd_limit = link->hw_sata_spd_limit; |
6034 | ap->sata_spd = 0; | 6355 | link->sata_spd = 0; |
6035 | 6356 | ||
6036 | /* High bits of dev->flags are used to record warm plug | 6357 | /* High bits of dev->flags are used to record warm plug |
6037 | * requests which occur asynchronously. Synchronize using | 6358 | * requests which occur asynchronously. Synchronize using |
@@ -6050,6 +6371,70 @@ void ata_dev_init(struct ata_device *dev) | |||
6050 | } | 6371 | } |
6051 | 6372 | ||
6052 | /** | 6373 | /** |
6374 | * ata_link_init - Initialize an ata_link structure | ||
6375 | * @ap: ATA port link is attached to | ||
6376 | * @link: Link structure to initialize | ||
6377 | * @pmp: Port multiplier port number | ||
6378 | * | ||
6379 | * Initialize @link. | ||
6380 | * | ||
6381 | * LOCKING: | ||
6382 | * Kernel thread context (may sleep) | ||
6383 | */ | ||
6384 | void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) | ||
6385 | { | ||
6386 | int i; | ||
6387 | |||
6388 | /* clear everything except for devices */ | ||
6389 | memset(link, 0, offsetof(struct ata_link, device[0])); | ||
6390 | |||
6391 | link->ap = ap; | ||
6392 | link->pmp = pmp; | ||
6393 | link->active_tag = ATA_TAG_POISON; | ||
6394 | link->hw_sata_spd_limit = UINT_MAX; | ||
6395 | |||
6396 | /* can't use iterator, ap isn't initialized yet */ | ||
6397 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
6398 | struct ata_device *dev = &link->device[i]; | ||
6399 | |||
6400 | dev->link = link; | ||
6401 | dev->devno = dev - link->device; | ||
6402 | ata_dev_init(dev); | ||
6403 | } | ||
6404 | } | ||
6405 | |||
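Editor's note: ata_link_init() clears only the fields ahead of the device array, via memset(link, 0, offsetof(struct ata_link, device[0])); the devices themselves are then set up by ata_dev_init(), which preserves the persistent fields it needs to keep (see the comment about the high bits of dev->flags earlier in this diff). A stand-alone demonstration of that offsetof idiom on a toy struct, not the kernel types.

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct toy_device { int persistent_cookie; };

struct toy_link {
	int active_tag;
	unsigned int sactive;
	struct toy_device device[2];	/* must survive the memset below */
};

int main(void)
{
	struct toy_link link;

	link.device[0].persistent_cookie = 42;
	link.device[1].persistent_cookie = 7;
	link.active_tag = 5;
	link.sactive = 0xff;

	/* clear everything except for devices, as ata_link_init() does */
	memset(&link, 0, offsetof(struct toy_link, device[0]));

	assert(link.active_tag == 0 && link.sactive == 0);
	assert(link.device[0].persistent_cookie == 42);
	assert(link.device[1].persistent_cookie == 7);
	return 0;
}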
6406 | /** | ||
6407 | * sata_link_init_spd - Initialize link->sata_spd_limit | ||
6408 | * @link: Link to configure sata_spd_limit for | ||
6409 | * | ||
6410 | * Initialize @link->[hw_]sata_spd_limit to the currently | ||
6411 | * configured value. | ||
6412 | * | ||
6413 | * LOCKING: | ||
6414 | * Kernel thread context (may sleep). | ||
6415 | * | ||
6416 | * RETURNS: | ||
6417 | * 0 on success, -errno on failure. | ||
6418 | */ | ||
6419 | int sata_link_init_spd(struct ata_link *link) | ||
6420 | { | ||
6421 | u32 scontrol, spd; | ||
6422 | int rc; | ||
6423 | |||
6424 | rc = sata_scr_read(link, SCR_CONTROL, &scontrol); | ||
6425 | if (rc) | ||
6426 | return rc; | ||
6427 | |||
6428 | spd = (scontrol >> 4) & 0xf; | ||
6429 | if (spd) | ||
6430 | link->hw_sata_spd_limit &= (1 << spd) - 1; | ||
6431 | |||
6432 | link->sata_spd_limit = link->hw_sata_spd_limit; | ||
6433 | |||
6434 | return 0; | ||
6435 | } | ||
6436 | |||
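Editor's note: the mask computation in sata_link_init_spd() deserves a worked example: if the SPD field of SControl reads 2 (limit to Gen2), hw_sata_spd_limit is ANDed with (1 << 2) - 1 = 0x3, leaving only bits 0 and 1 set, i.e. 1.5 Gbps and 3.0 Gbps remain allowed; an SPD of 0 means no limit and leaves the mask untouched. A self-contained check of that arithmetic.

#include <assert.h>
#include <limits.h>

static unsigned int spd_limit_from_scontrol(unsigned int scontrol)
{
	unsigned int limit = UINT_MAX;		/* hw_sata_spd_limit starts unrestricted */
	unsigned int spd = (scontrol >> 4) & 0xf;

	if (spd)
		limit &= (1u << spd) - 1;	/* keep only generations below the limit */
	return limit;
}

int main(void)
{
	assert(spd_limit_from_scontrol(0x320) == 0x3);		/* SPD=2: Gen1+Gen2 allowed */
	assert(spd_limit_from_scontrol(0x310) == 0x1);		/* SPD=1: Gen1 only */
	assert(spd_limit_from_scontrol(0x300) == UINT_MAX);	/* SPD=0: no limit */
	return 0;
}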
6437 | /** | ||
6053 | * ata_port_alloc - allocate and initialize basic ATA port resources | 6438 | * ata_port_alloc - allocate and initialize basic ATA port resources |
6054 | * @host: ATA host this allocated port belongs to | 6439 | * @host: ATA host this allocated port belongs to |
6055 | * | 6440 | * |
@@ -6064,7 +6449,6 @@ void ata_dev_init(struct ata_device *dev) | |||
6064 | struct ata_port *ata_port_alloc(struct ata_host *host) | 6449 | struct ata_port *ata_port_alloc(struct ata_host *host) |
6065 | { | 6450 | { |
6066 | struct ata_port *ap; | 6451 | struct ata_port *ap; |
6067 | unsigned int i; | ||
6068 | 6452 | ||
6069 | DPRINTK("ENTER\n"); | 6453 | DPRINTK("ENTER\n"); |
6070 | 6454 | ||
@@ -6079,9 +6463,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host) | |||
6079 | ap->ctl = ATA_DEVCTL_OBS; | 6463 | ap->ctl = ATA_DEVCTL_OBS; |
6080 | ap->host = host; | 6464 | ap->host = host; |
6081 | ap->dev = host->dev; | 6465 | ap->dev = host->dev; |
6082 | |||
6083 | ap->hw_sata_spd_limit = UINT_MAX; | ||
6084 | ap->active_tag = ATA_TAG_POISON; | ||
6085 | ap->last_ctl = 0xFF; | 6466 | ap->last_ctl = 0xFF; |
6086 | 6467 | ||
6087 | #if defined(ATA_VERBOSE_DEBUG) | 6468 | #if defined(ATA_VERBOSE_DEBUG) |
@@ -6104,12 +6485,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) | |||
6104 | 6485 | ||
6105 | ap->cbl = ATA_CBL_NONE; | 6486 | ap->cbl = ATA_CBL_NONE; |
6106 | 6487 | ||
6107 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 6488 | ata_link_init(ap, &ap->link, 0); |
6108 | struct ata_device *dev = &ap->device[i]; | ||
6109 | dev->ap = ap; | ||
6110 | dev->devno = i; | ||
6111 | ata_dev_init(dev); | ||
6112 | } | ||
6113 | 6489 | ||
6114 | #ifdef ATA_IRQ_TRAP | 6490 | #ifdef ATA_IRQ_TRAP |
6115 | ap->stats.unhandled_irq = 1; | 6491 | ap->stats.unhandled_irq = 1; |
@@ -6145,6 +6521,7 @@ static void ata_host_release(struct device *gendev, void *res) | |||
6145 | if (ap->scsi_host) | 6521 | if (ap->scsi_host) |
6146 | scsi_host_put(ap->scsi_host); | 6522 | scsi_host_put(ap->scsi_host); |
6147 | 6523 | ||
6524 | kfree(ap->pmp_link); | ||
6148 | kfree(ap); | 6525 | kfree(ap); |
6149 | host->ports[i] = NULL; | 6526 | host->ports[i] = NULL; |
6150 | } | 6527 | } |
@@ -6255,6 +6632,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, | |||
6255 | ap->mwdma_mask = pi->mwdma_mask; | 6632 | ap->mwdma_mask = pi->mwdma_mask; |
6256 | ap->udma_mask = pi->udma_mask; | 6633 | ap->udma_mask = pi->udma_mask; |
6257 | ap->flags |= pi->flags; | 6634 | ap->flags |= pi->flags; |
6635 | ap->link.flags |= pi->link_flags; | ||
6258 | ap->ops = pi->port_ops; | 6636 | ap->ops = pi->port_ops; |
6259 | 6637 | ||
6260 | if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) | 6638 | if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) |
@@ -6390,8 +6768,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6390 | /* set cable, sata_spd_limit and report */ | 6768 | /* set cable, sata_spd_limit and report */ |
6391 | for (i = 0; i < host->n_ports; i++) { | 6769 | for (i = 0; i < host->n_ports; i++) { |
6392 | struct ata_port *ap = host->ports[i]; | 6770 | struct ata_port *ap = host->ports[i]; |
6393 | int irq_line; | ||
6394 | u32 scontrol; | ||
6395 | unsigned long xfer_mask; | 6771 | unsigned long xfer_mask; |
6396 | 6772 | ||
6397 | /* set SATA cable type if still unset */ | 6773 | /* set SATA cable type if still unset */ |
@@ -6399,32 +6775,20 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6399 | ap->cbl = ATA_CBL_SATA; | 6775 | ap->cbl = ATA_CBL_SATA; |
6400 | 6776 | ||
6401 | /* init sata_spd_limit to the current value */ | 6777 | /* init sata_spd_limit to the current value */ |
6402 | if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) { | 6778 | sata_link_init_spd(&ap->link); |
6403 | int spd = (scontrol >> 4) & 0xf; | ||
6404 | if (spd) | ||
6405 | ap->hw_sata_spd_limit &= (1 << spd) - 1; | ||
6406 | } | ||
6407 | ap->sata_spd_limit = ap->hw_sata_spd_limit; | ||
6408 | |||
6409 | /* report the secondary IRQ for second channel legacy */ | ||
6410 | irq_line = host->irq; | ||
6411 | if (i == 1 && host->irq2) | ||
6412 | irq_line = host->irq2; | ||
6413 | 6779 | ||
6780 | /* print per-port info to dmesg */ | ||
6414 | xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, | 6781 | xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, |
6415 | ap->udma_mask); | 6782 | ap->udma_mask); |
6416 | 6783 | ||
6417 | /* print per-port info to dmesg */ | 6784 | if (!ata_port_is_dummy(ap)) { |
6418 | if (!ata_port_is_dummy(ap)) | 6785 | ata_port_printk(ap, KERN_INFO, |
6419 | ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p " | 6786 | "%cATA max %s %s\n", |
6420 | "ctl 0x%p bmdma 0x%p irq %d\n", | ||
6421 | (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', | 6787 | (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', |
6422 | ata_mode_string(xfer_mask), | 6788 | ata_mode_string(xfer_mask), |
6423 | ap->ioaddr.cmd_addr, | 6789 | ap->link.eh_info.desc); |
6424 | ap->ioaddr.ctl_addr, | 6790 | ata_ehi_clear_desc(&ap->link.eh_info); |
6425 | ap->ioaddr.bmdma_addr, | 6791 | } else |
6426 | irq_line); | ||
6427 | else | ||
6428 | ata_port_printk(ap, KERN_INFO, "DUMMY\n"); | 6792 | ata_port_printk(ap, KERN_INFO, "DUMMY\n"); |
6429 | } | 6793 | } |
6430 | 6794 | ||
@@ -6436,7 +6800,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6436 | 6800 | ||
6437 | /* probe */ | 6801 | /* probe */ |
6438 | if (ap->ops->error_handler) { | 6802 | if (ap->ops->error_handler) { |
6439 | struct ata_eh_info *ehi = &ap->eh_info; | 6803 | struct ata_eh_info *ehi = &ap->link.eh_info; |
6440 | unsigned long flags; | 6804 | unsigned long flags; |
6441 | 6805 | ||
6442 | ata_port_probe(ap); | 6806 | ata_port_probe(ap); |
@@ -6444,7 +6808,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6444 | /* kick EH for boot probing */ | 6808 | /* kick EH for boot probing */ |
6445 | spin_lock_irqsave(ap->lock, flags); | 6809 | spin_lock_irqsave(ap->lock, flags); |
6446 | 6810 | ||
6447 | ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1; | 6811 | ehi->probe_mask = |
6812 | (1 << ata_link_max_devices(&ap->link)) - 1; | ||
6448 | ehi->action |= ATA_EH_SOFTRESET; | 6813 | ehi->action |= ATA_EH_SOFTRESET; |
6449 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; | 6814 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; |
6450 | 6815 | ||
@@ -6506,7 +6871,7 @@ int ata_host_activate(struct ata_host *host, int irq, | |||
6506 | irq_handler_t irq_handler, unsigned long irq_flags, | 6871 | irq_handler_t irq_handler, unsigned long irq_flags, |
6507 | struct scsi_host_template *sht) | 6872 | struct scsi_host_template *sht) |
6508 | { | 6873 | { |
6509 | int rc; | 6874 | int i, rc; |
6510 | 6875 | ||
6511 | rc = ata_host_start(host); | 6876 | rc = ata_host_start(host); |
6512 | if (rc) | 6877 | if (rc) |
@@ -6517,8 +6882,8 @@ int ata_host_activate(struct ata_host *host, int irq, | |||
6517 | if (rc) | 6882 | if (rc) |
6518 | return rc; | 6883 | return rc; |
6519 | 6884 | ||
6520 | /* Used to print device info at probe */ | 6885 | for (i = 0; i < host->n_ports; i++) |
6521 | host->irq = irq; | 6886 | ata_port_desc(host->ports[i], "irq %d", irq); |
6522 | 6887 | ||
6523 | rc = ata_host_register(host, sht); | 6888 | rc = ata_host_register(host, sht); |
6524 | /* if failed, just free the IRQ and leave ports alone */ | 6889 | /* if failed, just free the IRQ and leave ports alone */ |
@@ -6542,7 +6907,8 @@ int ata_host_activate(struct ata_host *host, int irq, | |||
6542 | void ata_port_detach(struct ata_port *ap) | 6907 | void ata_port_detach(struct ata_port *ap) |
6543 | { | 6908 | { |
6544 | unsigned long flags; | 6909 | unsigned long flags; |
6545 | int i; | 6910 | struct ata_link *link; |
6911 | struct ata_device *dev; | ||
6546 | 6912 | ||
6547 | if (!ap->ops->error_handler) | 6913 | if (!ap->ops->error_handler) |
6548 | goto skip_eh; | 6914 | goto skip_eh; |
@@ -6559,8 +6925,10 @@ void ata_port_detach(struct ata_port *ap) | |||
6559 | */ | 6925 | */ |
6560 | spin_lock_irqsave(ap->lock, flags); | 6926 | spin_lock_irqsave(ap->lock, flags); |
6561 | 6927 | ||
6562 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 6928 | ata_port_for_each_link(link, ap) { |
6563 | ata_dev_disable(&ap->device[i]); | 6929 | ata_link_for_each_dev(dev, link) |
6930 | ata_dev_disable(dev); | ||
6931 | } | ||
6564 | 6932 | ||
6565 | spin_unlock_irqrestore(ap->lock, flags); | 6933 | spin_unlock_irqrestore(ap->lock, flags); |
6566 | 6934 | ||
@@ -6639,7 +7007,7 @@ void ata_std_ports(struct ata_ioports *ioaddr) | |||
6639 | */ | 7007 | */ |
6640 | void ata_pci_remove_one(struct pci_dev *pdev) | 7008 | void ata_pci_remove_one(struct pci_dev *pdev) |
6641 | { | 7009 | { |
6642 | struct device *dev = pci_dev_to_dev(pdev); | 7010 | struct device *dev = &pdev->dev; |
6643 | struct ata_host *host = dev_get_drvdata(dev); | 7011 | struct ata_host *host = dev_get_drvdata(dev); |
6644 | 7012 | ||
6645 | ata_host_detach(host); | 7013 | ata_host_detach(host); |
@@ -6847,7 +7215,6 @@ static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) | |||
6847 | } | 7215 | } |
6848 | 7216 | ||
6849 | const struct ata_port_operations ata_dummy_port_ops = { | 7217 | const struct ata_port_operations ata_dummy_port_ops = { |
6850 | .port_disable = ata_port_disable, | ||
6851 | .check_status = ata_dummy_check_status, | 7218 | .check_status = ata_dummy_check_status, |
6852 | .check_altstatus = ata_dummy_check_status, | 7219 | .check_altstatus = ata_dummy_check_status, |
6853 | .dev_select = ata_noop_dev_select, | 7220 | .dev_select = ata_noop_dev_select, |
@@ -6909,6 +7276,7 @@ EXPORT_SYMBOL_GPL(ata_interrupt); | |||
6909 | EXPORT_SYMBOL_GPL(ata_do_set_mode); | 7276 | EXPORT_SYMBOL_GPL(ata_do_set_mode); |
6910 | EXPORT_SYMBOL_GPL(ata_data_xfer); | 7277 | EXPORT_SYMBOL_GPL(ata_data_xfer); |
6911 | EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); | 7278 | EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); |
7279 | EXPORT_SYMBOL_GPL(ata_std_qc_defer); | ||
6912 | EXPORT_SYMBOL_GPL(ata_qc_prep); | 7280 | EXPORT_SYMBOL_GPL(ata_qc_prep); |
6913 | EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); | 7281 | EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); |
6914 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); | 7282 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); |
@@ -6925,14 +7293,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); | |||
6925 | EXPORT_SYMBOL_GPL(ata_port_probe); | 7293 | EXPORT_SYMBOL_GPL(ata_port_probe); |
6926 | EXPORT_SYMBOL_GPL(ata_dev_disable); | 7294 | EXPORT_SYMBOL_GPL(ata_dev_disable); |
6927 | EXPORT_SYMBOL_GPL(sata_set_spd); | 7295 | EXPORT_SYMBOL_GPL(sata_set_spd); |
6928 | EXPORT_SYMBOL_GPL(sata_phy_debounce); | 7296 | EXPORT_SYMBOL_GPL(sata_link_debounce); |
6929 | EXPORT_SYMBOL_GPL(sata_phy_resume); | 7297 | EXPORT_SYMBOL_GPL(sata_link_resume); |
6930 | EXPORT_SYMBOL_GPL(sata_phy_reset); | 7298 | EXPORT_SYMBOL_GPL(sata_phy_reset); |
6931 | EXPORT_SYMBOL_GPL(__sata_phy_reset); | 7299 | EXPORT_SYMBOL_GPL(__sata_phy_reset); |
6932 | EXPORT_SYMBOL_GPL(ata_bus_reset); | 7300 | EXPORT_SYMBOL_GPL(ata_bus_reset); |
6933 | EXPORT_SYMBOL_GPL(ata_std_prereset); | 7301 | EXPORT_SYMBOL_GPL(ata_std_prereset); |
6934 | EXPORT_SYMBOL_GPL(ata_std_softreset); | 7302 | EXPORT_SYMBOL_GPL(ata_std_softreset); |
6935 | EXPORT_SYMBOL_GPL(sata_port_hardreset); | 7303 | EXPORT_SYMBOL_GPL(sata_link_hardreset); |
6936 | EXPORT_SYMBOL_GPL(sata_std_hardreset); | 7304 | EXPORT_SYMBOL_GPL(sata_std_hardreset); |
6937 | EXPORT_SYMBOL_GPL(ata_std_postreset); | 7305 | EXPORT_SYMBOL_GPL(ata_std_postreset); |
6938 | EXPORT_SYMBOL_GPL(ata_dev_classify); | 7306 | EXPORT_SYMBOL_GPL(ata_dev_classify); |
@@ -6953,8 +7321,8 @@ EXPORT_SYMBOL_GPL(sata_scr_valid); | |||
6953 | EXPORT_SYMBOL_GPL(sata_scr_read); | 7321 | EXPORT_SYMBOL_GPL(sata_scr_read); |
6954 | EXPORT_SYMBOL_GPL(sata_scr_write); | 7322 | EXPORT_SYMBOL_GPL(sata_scr_write); |
6955 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); | 7323 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); |
6956 | EXPORT_SYMBOL_GPL(ata_port_online); | 7324 | EXPORT_SYMBOL_GPL(ata_link_online); |
6957 | EXPORT_SYMBOL_GPL(ata_port_offline); | 7325 | EXPORT_SYMBOL_GPL(ata_link_offline); |
6958 | #ifdef CONFIG_PM | 7326 | #ifdef CONFIG_PM |
6959 | EXPORT_SYMBOL_GPL(ata_host_suspend); | 7327 | EXPORT_SYMBOL_GPL(ata_host_suspend); |
6960 | EXPORT_SYMBOL_GPL(ata_host_resume); | 7328 | EXPORT_SYMBOL_GPL(ata_host_resume); |
@@ -6985,22 +7353,31 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter); | |||
6985 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); | 7353 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); |
6986 | #endif /* CONFIG_PCI */ | 7354 | #endif /* CONFIG_PCI */ |
6987 | 7355 | ||
7356 | EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); | ||
7357 | EXPORT_SYMBOL_GPL(sata_pmp_std_prereset); | ||
7358 | EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset); | ||
7359 | EXPORT_SYMBOL_GPL(sata_pmp_std_postreset); | ||
7360 | EXPORT_SYMBOL_GPL(sata_pmp_do_eh); | ||
7361 | |||
6988 | EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); | 7362 | EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); |
6989 | EXPORT_SYMBOL_GPL(ata_ehi_push_desc); | 7363 | EXPORT_SYMBOL_GPL(ata_ehi_push_desc); |
6990 | EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); | 7364 | EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); |
7365 | EXPORT_SYMBOL_GPL(ata_port_desc); | ||
7366 | #ifdef CONFIG_PCI | ||
7367 | EXPORT_SYMBOL_GPL(ata_port_pbar_desc); | ||
7368 | #endif /* CONFIG_PCI */ | ||
6991 | EXPORT_SYMBOL_GPL(ata_eng_timeout); | 7369 | EXPORT_SYMBOL_GPL(ata_eng_timeout); |
6992 | EXPORT_SYMBOL_GPL(ata_port_schedule_eh); | 7370 | EXPORT_SYMBOL_GPL(ata_port_schedule_eh); |
7371 | EXPORT_SYMBOL_GPL(ata_link_abort); | ||
6993 | EXPORT_SYMBOL_GPL(ata_port_abort); | 7372 | EXPORT_SYMBOL_GPL(ata_port_abort); |
6994 | EXPORT_SYMBOL_GPL(ata_port_freeze); | 7373 | EXPORT_SYMBOL_GPL(ata_port_freeze); |
7374 | EXPORT_SYMBOL_GPL(sata_async_notification); | ||
6995 | EXPORT_SYMBOL_GPL(ata_eh_freeze_port); | 7375 | EXPORT_SYMBOL_GPL(ata_eh_freeze_port); |
6996 | EXPORT_SYMBOL_GPL(ata_eh_thaw_port); | 7376 | EXPORT_SYMBOL_GPL(ata_eh_thaw_port); |
6997 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); | 7377 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); |
6998 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); | 7378 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); |
6999 | EXPORT_SYMBOL_GPL(ata_do_eh); | 7379 | EXPORT_SYMBOL_GPL(ata_do_eh); |
7000 | EXPORT_SYMBOL_GPL(ata_irq_on); | 7380 | EXPORT_SYMBOL_GPL(ata_irq_on); |
7001 | EXPORT_SYMBOL_GPL(ata_dummy_irq_on); | ||
7002 | EXPORT_SYMBOL_GPL(ata_irq_ack); | ||
7003 | EXPORT_SYMBOL_GPL(ata_dummy_irq_ack); | ||
7004 | EXPORT_SYMBOL_GPL(ata_dev_try_classify); | 7381 | EXPORT_SYMBOL_GPL(ata_dev_try_classify); |
7005 | 7382 | ||
7006 | EXPORT_SYMBOL_GPL(ata_cable_40wire); | 7383 | EXPORT_SYMBOL_GPL(ata_cable_40wire); |
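The export table above completes the rename from port-wide PHY helpers to per-link ones (sata_link_debounce, sata_link_resume, sata_link_hardreset, ata_link_online/offline). As a rough sketch only, a low-level driver's hardreset method written against the new names could look like the following; "demo_hardreset" is invented, while sata_ehc_deb_timing(), sata_link_hardreset() and ata_link_online() are the real interfaces, and the classification handling is deliberately simplified:

static int demo_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	/* debounce timing comes from the link's EH context */
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	/* bring the PHY up on this link, not on the whole port */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc)
		return rc;

	/* nothing behind the link: report it and skip classification */
	if (!ata_link_online(link))
		*class = ATA_DEV_NONE;

	return 0;
}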
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index ac6ceed4bb60..2eaa39fc65d0 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -33,6 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/pci.h> | ||
36 | #include <scsi/scsi.h> | 37 | #include <scsi/scsi.h> |
37 | #include <scsi/scsi_host.h> | 38 | #include <scsi/scsi_host.h> |
38 | #include <scsi/scsi_eh.h> | 39 | #include <scsi/scsi_eh.h> |
@@ -74,7 +75,6 @@ static const unsigned long ata_eh_reset_timeouts[] = { | |||
74 | }; | 75 | }; |
75 | 76 | ||
76 | static void __ata_port_freeze(struct ata_port *ap); | 77 | static void __ata_port_freeze(struct ata_port *ap); |
77 | static void ata_eh_finish(struct ata_port *ap); | ||
78 | #ifdef CONFIG_PM | 78 | #ifdef CONFIG_PM |
79 | static void ata_eh_handle_port_suspend(struct ata_port *ap); | 79 | static void ata_eh_handle_port_suspend(struct ata_port *ap); |
80 | static void ata_eh_handle_port_resume(struct ata_port *ap); | 80 | static void ata_eh_handle_port_resume(struct ata_port *ap); |
@@ -151,6 +151,73 @@ void ata_ehi_clear_desc(struct ata_eh_info *ehi) | |||
151 | ehi->desc_len = 0; | 151 | ehi->desc_len = 0; |
152 | } | 152 | } |
153 | 153 | ||
154 | /** | ||
155 | * ata_port_desc - append port description | ||
156 | * @ap: target ATA port | ||
157 | * @fmt: printf format string | ||
158 | * | ||
159 | * Format string according to @fmt and append it to the port | ||
160 | * description. If the port description is not empty, " " is added | ||
161 | * in-between. This function is to be used while initializing | ||
162 | * ata_host. The description is printed on host registration. | ||
163 | * | ||
164 | * LOCKING: | ||
165 | * None. | ||
166 | */ | ||
167 | void ata_port_desc(struct ata_port *ap, const char *fmt, ...) | ||
168 | { | ||
169 | va_list args; | ||
170 | |||
171 | WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); | ||
172 | |||
173 | if (ap->link.eh_info.desc_len) | ||
174 | __ata_ehi_push_desc(&ap->link.eh_info, " "); | ||
175 | |||
176 | va_start(args, fmt); | ||
177 | __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); | ||
178 | va_end(args); | ||
179 | } | ||
180 | |||
181 | #ifdef CONFIG_PCI | ||
182 | |||
183 | /** | ||
184 | * ata_port_pbar_desc - append PCI BAR description | ||
185 | * @ap: target ATA port | ||
186 | * @bar: target PCI BAR | ||
187 | * @offset: offset into PCI BAR | ||
188 | * @name: name of the area | ||
189 | * | ||
190 | * If @offset is negative, this function formats a string which | ||
191 | * contains the name, address, size and type of the BAR and | ||
192 | * appends it to the port description. If @offset is zero or | ||
193 | * positive, only the name and the offset address are appended. | ||
194 | * | ||
195 | * LOCKING: | ||
196 | * None. | ||
197 | */ | ||
198 | void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, | ||
199 | const char *name) | ||
200 | { | ||
201 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
202 | char *type = ""; | ||
203 | unsigned long long start, len; | ||
204 | |||
205 | if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) | ||
206 | type = "m"; | ||
207 | else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) | ||
208 | type = "i"; | ||
209 | |||
210 | start = (unsigned long long)pci_resource_start(pdev, bar); | ||
211 | len = (unsigned long long)pci_resource_len(pdev, bar); | ||
212 | |||
213 | if (offset < 0) | ||
214 | ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); | ||
215 | else | ||
216 | ata_port_desc(ap, "%s 0x%llx", name, start + offset); | ||
217 | } | ||
218 | |||
219 | #endif /* CONFIG_PCI */ | ||
220 | |||
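ata_port_desc() and ata_port_pbar_desc() above replace the fixed cmd/ctl/bmdma/irq line that ata_host_register() used to print. As a hedged sketch, a hypothetical PCI driver could build its per-port description during probe, before registration, along these lines; the BAR number and offsets are made up for the example, and the resulting text is what the "%cATA max %s %s" registration message later prints:

static void demo_describe_port(struct ata_port *ap, int port_no, int irq)
{
	/* whole BAR: appends e.g. "mmio m8192@0xfebf8000" */
	ata_port_pbar_desc(ap, 5, -1, "mmio");

	/* offset into the BAR: appends e.g. "port 0xfebf8100" */
	ata_port_pbar_desc(ap, 5, 0x100 + port_no * 0x80, "port");

	/* free-form text; a space is inserted between entries */
	ata_port_desc(ap, "irq %d", irq);
}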
154 | static void ata_ering_record(struct ata_ering *ering, int is_io, | 221 | static void ata_ering_record(struct ata_ering *ering, int is_io, |
155 | unsigned int err_mask) | 222 | unsigned int err_mask) |
156 | { | 223 | { |
@@ -195,28 +262,29 @@ static int ata_ering_map(struct ata_ering *ering, | |||
195 | 262 | ||
196 | static unsigned int ata_eh_dev_action(struct ata_device *dev) | 263 | static unsigned int ata_eh_dev_action(struct ata_device *dev) |
197 | { | 264 | { |
198 | struct ata_eh_context *ehc = &dev->ap->eh_context; | 265 | struct ata_eh_context *ehc = &dev->link->eh_context; |
199 | 266 | ||
200 | return ehc->i.action | ehc->i.dev_action[dev->devno]; | 267 | return ehc->i.action | ehc->i.dev_action[dev->devno]; |
201 | } | 268 | } |
202 | 269 | ||
203 | static void ata_eh_clear_action(struct ata_device *dev, | 270 | static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, |
204 | struct ata_eh_info *ehi, unsigned int action) | 271 | struct ata_eh_info *ehi, unsigned int action) |
205 | { | 272 | { |
206 | int i; | 273 | struct ata_device *tdev; |
207 | 274 | ||
208 | if (!dev) { | 275 | if (!dev) { |
209 | ehi->action &= ~action; | 276 | ehi->action &= ~action; |
210 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 277 | ata_link_for_each_dev(tdev, link) |
211 | ehi->dev_action[i] &= ~action; | 278 | ehi->dev_action[tdev->devno] &= ~action; |
212 | } else { | 279 | } else { |
213 | /* doesn't make sense for port-wide EH actions */ | 280 | /* doesn't make sense for port-wide EH actions */ |
214 | WARN_ON(!(action & ATA_EH_PERDEV_MASK)); | 281 | WARN_ON(!(action & ATA_EH_PERDEV_MASK)); |
215 | 282 | ||
216 | /* break ehi->action into ehi->dev_action */ | 283 | /* break ehi->action into ehi->dev_action */ |
217 | if (ehi->action & action) { | 284 | if (ehi->action & action) { |
218 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 285 | ata_link_for_each_dev(tdev, link) |
219 | ehi->dev_action[i] |= ehi->action & action; | 286 | ehi->dev_action[tdev->devno] |= |
287 | ehi->action & action; | ||
220 | ehi->action &= ~action; | 288 | ehi->action &= ~action; |
221 | } | 289 | } |
222 | 290 | ||
@@ -261,7 +329,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) | |||
261 | 329 | ||
262 | ret = EH_HANDLED; | 330 | ret = EH_HANDLED; |
263 | spin_lock_irqsave(ap->lock, flags); | 331 | spin_lock_irqsave(ap->lock, flags); |
264 | qc = ata_qc_from_tag(ap, ap->active_tag); | 332 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
265 | if (qc) { | 333 | if (qc) { |
266 | WARN_ON(qc->scsicmd != cmd); | 334 | WARN_ON(qc->scsicmd != cmd); |
267 | qc->flags |= ATA_QCFLAG_EH_SCHEDULED; | 335 | qc->flags |= ATA_QCFLAG_EH_SCHEDULED; |
@@ -290,7 +358,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) | |||
290 | void ata_scsi_error(struct Scsi_Host *host) | 358 | void ata_scsi_error(struct Scsi_Host *host) |
291 | { | 359 | { |
292 | struct ata_port *ap = ata_shost_to_port(host); | 360 | struct ata_port *ap = ata_shost_to_port(host); |
293 | int i, repeat_cnt = ATA_EH_MAX_REPEAT; | 361 | int i; |
294 | unsigned long flags; | 362 | unsigned long flags; |
295 | 363 | ||
296 | DPRINTK("ENTER\n"); | 364 | DPRINTK("ENTER\n"); |
@@ -356,12 +424,17 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
356 | __ata_port_freeze(ap); | 424 | __ata_port_freeze(ap); |
357 | 425 | ||
358 | spin_unlock_irqrestore(ap->lock, flags); | 426 | spin_unlock_irqrestore(ap->lock, flags); |
427 | |||
428 | /* initialize eh_tries */ | ||
429 | ap->eh_tries = ATA_EH_MAX_TRIES; | ||
359 | } else | 430 | } else |
360 | spin_unlock_wait(ap->lock); | 431 | spin_unlock_wait(ap->lock); |
361 | 432 | ||
362 | repeat: | 433 | repeat: |
363 | /* invoke error handler */ | 434 | /* invoke error handler */ |
364 | if (ap->ops->error_handler) { | 435 | if (ap->ops->error_handler) { |
436 | struct ata_link *link; | ||
437 | |||
365 | /* kill fast drain timer */ | 438 | /* kill fast drain timer */ |
366 | del_timer_sync(&ap->fastdrain_timer); | 439 | del_timer_sync(&ap->fastdrain_timer); |
367 | 440 | ||
@@ -371,12 +444,15 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
371 | /* fetch & clear EH info */ | 444 | /* fetch & clear EH info */ |
372 | spin_lock_irqsave(ap->lock, flags); | 445 | spin_lock_irqsave(ap->lock, flags); |
373 | 446 | ||
374 | memset(&ap->eh_context, 0, sizeof(ap->eh_context)); | 447 | __ata_port_for_each_link(link, ap) { |
375 | ap->eh_context.i = ap->eh_info; | 448 | memset(&link->eh_context, 0, sizeof(link->eh_context)); |
376 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); | 449 | link->eh_context.i = link->eh_info; |
450 | memset(&link->eh_info, 0, sizeof(link->eh_info)); | ||
451 | } | ||
377 | 452 | ||
378 | ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; | 453 | ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; |
379 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; | 454 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; |
455 | ap->excl_link = NULL; /* don't maintain exclusion over EH */ | ||
380 | 456 | ||
381 | spin_unlock_irqrestore(ap->lock, flags); | 457 | spin_unlock_irqrestore(ap->lock, flags); |
382 | 458 | ||
@@ -396,20 +472,18 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
396 | spin_lock_irqsave(ap->lock, flags); | 472 | spin_lock_irqsave(ap->lock, flags); |
397 | 473 | ||
398 | if (ap->pflags & ATA_PFLAG_EH_PENDING) { | 474 | if (ap->pflags & ATA_PFLAG_EH_PENDING) { |
399 | if (--repeat_cnt) { | 475 | if (--ap->eh_tries) { |
400 | ata_port_printk(ap, KERN_INFO, | ||
401 | "EH pending after completion, " | ||
402 | "repeating EH (cnt=%d)\n", repeat_cnt); | ||
403 | spin_unlock_irqrestore(ap->lock, flags); | 476 | spin_unlock_irqrestore(ap->lock, flags); |
404 | goto repeat; | 477 | goto repeat; |
405 | } | 478 | } |
406 | ata_port_printk(ap, KERN_ERR, "EH pending after %d " | 479 | ata_port_printk(ap, KERN_ERR, "EH pending after %d " |
407 | "tries, giving up\n", ATA_EH_MAX_REPEAT); | 480 | "tries, giving up\n", ATA_EH_MAX_TRIES); |
408 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; | 481 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; |
409 | } | 482 | } |
410 | 483 | ||
411 | /* this run is complete, make sure EH info is clear */ | 484 | /* this run is complete, make sure EH info is clear */ |
412 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); | 485 | __ata_port_for_each_link(link, ap) |
486 | memset(&link->eh_info, 0, sizeof(link->eh_info)); | ||
413 | 487 | ||
414 | /* Clear host_eh_scheduled while holding ap->lock such | 488 | /* Clear host_eh_scheduled while holding ap->lock such |
415 | * that if exception occurs after this point but | 489 | * that if exception occurs after this point but |
@@ -420,7 +494,7 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
420 | 494 | ||
421 | spin_unlock_irqrestore(ap->lock, flags); | 495 | spin_unlock_irqrestore(ap->lock, flags); |
422 | } else { | 496 | } else { |
423 | WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); | 497 | WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); |
424 | ap->ops->eng_timeout(ap); | 498 | ap->ops->eng_timeout(ap); |
425 | } | 499 | } |
426 | 500 | ||
@@ -575,7 +649,7 @@ void ata_eng_timeout(struct ata_port *ap) | |||
575 | { | 649 | { |
576 | DPRINTK("ENTER\n"); | 650 | DPRINTK("ENTER\n"); |
577 | 651 | ||
578 | ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); | 652 | ata_qc_timeout(ata_qc_from_tag(ap, ap->link.active_tag)); |
579 | 653 | ||
580 | DPRINTK("EXIT\n"); | 654 | DPRINTK("EXIT\n"); |
581 | } | 655 | } |
@@ -718,19 +792,7 @@ void ata_port_schedule_eh(struct ata_port *ap) | |||
718 | DPRINTK("port EH scheduled\n"); | 792 | DPRINTK("port EH scheduled\n"); |
719 | } | 793 | } |
720 | 794 | ||
721 | /** | 795 | static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) |
722 | * ata_port_abort - abort all qc's on the port | ||
723 | * @ap: ATA port to abort qc's for | ||
724 | * | ||
725 | * Abort all active qc's of @ap and schedule EH. | ||
726 | * | ||
727 | * LOCKING: | ||
728 | * spin_lock_irqsave(host lock) | ||
729 | * | ||
730 | * RETURNS: | ||
731 | * Number of aborted qc's. | ||
732 | */ | ||
733 | int ata_port_abort(struct ata_port *ap) | ||
734 | { | 796 | { |
735 | int tag, nr_aborted = 0; | 797 | int tag, nr_aborted = 0; |
736 | 798 | ||
@@ -742,7 +804,7 @@ int ata_port_abort(struct ata_port *ap) | |||
742 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 804 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
743 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); | 805 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); |
744 | 806 | ||
745 | if (qc) { | 807 | if (qc && (!link || qc->dev->link == link)) { |
746 | qc->flags |= ATA_QCFLAG_FAILED; | 808 | qc->flags |= ATA_QCFLAG_FAILED; |
747 | ata_qc_complete(qc); | 809 | ata_qc_complete(qc); |
748 | nr_aborted++; | 810 | nr_aborted++; |
@@ -756,6 +818,40 @@ int ata_port_abort(struct ata_port *ap) | |||
756 | } | 818 | } |
757 | 819 | ||
758 | /** | 820 | /** |
821 | * ata_link_abort - abort all qc's on the link | ||
822 | * @link: ATA link to abort qc's for | ||
823 | * | ||
824 | * Abort all qc's active on @link and schedule EH. | ||
825 | * | ||
826 | * LOCKING: | ||
827 | * spin_lock_irqsave(host lock) | ||
828 | * | ||
829 | * RETURNS: | ||
830 | * Number of aborted qc's. | ||
831 | */ | ||
832 | int ata_link_abort(struct ata_link *link) | ||
833 | { | ||
834 | return ata_do_link_abort(link->ap, link); | ||
835 | } | ||
836 | |||
837 | /** | ||
838 | * ata_port_abort - abort all qc's on the port | ||
839 | * @ap: ATA port to abort qc's for | ||
840 | * | ||
841 | * Abort all active qc's of @ap and schedule EH. | ||
842 | * | ||
843 | * LOCKING: | ||
844 | * spin_lock_irqsave(host lock) | ||
845 | * | ||
846 | * RETURNS: | ||
847 | * Number of aborted qc's. | ||
848 | */ | ||
849 | int ata_port_abort(struct ata_port *ap) | ||
850 | { | ||
851 | return ata_do_link_abort(ap, NULL); | ||
852 | } | ||
853 | |||
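For orientation, a minimal sketch of an interrupt-time error path using the new ata_link_abort() next to the existing ata_port_freeze(). DEMO_ERR_FATAL and the decode logic are invented for the example; the caller is assumed to hold the host lock, as the LOCKING notes above require:

#define DEMO_ERR_FATAL	(1 << 0)	/* placeholder status bit, not a real register */

static void demo_error_intr(struct ata_port *ap, u32 err_cause)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "err_cause=0x%08x", err_cause);

	if (err_cause & DEMO_ERR_FATAL) {
		/* unrecoverable: freeze the port and let EH reset it */
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_HARDRESET;
		ata_port_freeze(ap);
	} else {
		/* recoverable: fail only the commands queued on this link */
		ehi->err_mask |= AC_ERR_OTHER;
		ata_link_abort(&ap->link);
	}
}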
854 | /** | ||
759 | * __ata_port_freeze - freeze port | 855 | * __ata_port_freeze - freeze port |
760 | * @ap: ATA port to freeze | 856 | * @ap: ATA port to freeze |
761 | * | 857 | * |
@@ -810,6 +906,79 @@ int ata_port_freeze(struct ata_port *ap) | |||
810 | } | 906 | } |
811 | 907 | ||
812 | /** | 908 | /** |
909 | * sata_async_notification - SATA async notification handler | ||
910 | * @ap: ATA port where async notification is received | ||
911 | * | ||
912 | * Handler to be called when async notification via SDB FIS is | ||
913 | * received. This function schedules EH if necessary. | ||
914 | * | ||
915 | * LOCKING: | ||
916 | * spin_lock_irqsave(host lock) | ||
917 | * | ||
918 | * RETURNS: | ||
919 | * 1 if EH is scheduled, 0 otherwise. | ||
920 | */ | ||
921 | int sata_async_notification(struct ata_port *ap) | ||
922 | { | ||
923 | u32 sntf; | ||
924 | int rc; | ||
925 | |||
926 | if (!(ap->flags & ATA_FLAG_AN)) | ||
927 | return 0; | ||
928 | |||
929 | rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); | ||
930 | if (rc == 0) | ||
931 | sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); | ||
932 | |||
933 | if (!ap->nr_pmp_links || rc) { | ||
934 | /* PMP is not attached or SNTF is not available */ | ||
935 | if (!ap->nr_pmp_links) { | ||
936 | /* PMP is not attached. Check whether ATAPI | ||
937 | * AN is configured. If so, notify media | ||
938 | * change. | ||
939 | */ | ||
940 | struct ata_device *dev = ap->link.device; | ||
941 | |||
942 | if ((dev->class == ATA_DEV_ATAPI) && | ||
943 | (dev->flags & ATA_DFLAG_AN)) | ||
944 | ata_scsi_media_change_notify(dev); | ||
945 | return 0; | ||
946 | } else { | ||
947 | /* PMP is attached but SNTF is not available. | ||
948 | * ATAPI async media change notification is | ||
949 | * not used. The PMP must be reporting PHY | ||
950 | * status change, schedule EH. | ||
951 | */ | ||
952 | ata_port_schedule_eh(ap); | ||
953 | return 1; | ||
954 | } | ||
955 | } else { | ||
956 | /* PMP is attached and SNTF is available */ | ||
957 | struct ata_link *link; | ||
958 | |||
959 | /* check and notify ATAPI AN */ | ||
960 | ata_port_for_each_link(link, ap) { | ||
961 | if (!(sntf & (1 << link->pmp))) | ||
962 | continue; | ||
963 | |||
964 | if ((link->device->class == ATA_DEV_ATAPI) && | ||
965 | (link->device->flags & ATA_DFLAG_AN)) | ||
966 | ata_scsi_media_change_notify(link->device); | ||
967 | } | ||
968 | |||
969 | /* If PMP is reporting that PHY status of some | ||
970 | * downstream ports has changed, schedule EH. | ||
971 | */ | ||
972 | if (sntf & (1 << SATA_PMP_CTRL_PORT)) { | ||
973 | ata_port_schedule_eh(ap); | ||
974 | return 1; | ||
975 | } | ||
976 | |||
977 | return 0; | ||
978 | } | ||
979 | } | ||
980 | |||
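A sketch of where sata_async_notification() is meant to be hooked in: an LLD interrupt handler that has just seen a Set Device Bits FIS with the notification bit set. The interrupt-status bit below is a placeholder, not a real register definition, and the call is made with the host lock held per the LOCKING note:

#define DEMO_IRQ_SDB_NOTIFY	(1 << 3)	/* placeholder: SDB FIS with N bit */

static void demo_host_intr(struct ata_port *ap, u32 irq_stat)
{
	if (irq_stat & DEMO_IRQ_SDB_NOTIFY)
		/* reads/rearms SNTF, notifies ATAPI AN or kicks EH for PMP */
		sata_async_notification(ap);
}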
981 | /** | ||
813 | * ata_eh_freeze_port - EH helper to freeze port | 982 | * ata_eh_freeze_port - EH helper to freeze port |
814 | * @ap: ATA port to freeze | 983 | * @ap: ATA port to freeze |
815 | * | 984 | * |
@@ -920,9 +1089,10 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc) | |||
920 | * LOCKING: | 1089 | * LOCKING: |
921 | * None. | 1090 | * None. |
922 | */ | 1091 | */ |
923 | static void ata_eh_detach_dev(struct ata_device *dev) | 1092 | void ata_eh_detach_dev(struct ata_device *dev) |
924 | { | 1093 | { |
925 | struct ata_port *ap = dev->ap; | 1094 | struct ata_link *link = dev->link; |
1095 | struct ata_port *ap = link->ap; | ||
926 | unsigned long flags; | 1096 | unsigned long flags; |
927 | 1097 | ||
928 | ata_dev_disable(dev); | 1098 | ata_dev_disable(dev); |
@@ -937,31 +1107,32 @@ static void ata_eh_detach_dev(struct ata_device *dev) | |||
937 | } | 1107 | } |
938 | 1108 | ||
939 | /* clear per-dev EH actions */ | 1109 | /* clear per-dev EH actions */ |
940 | ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK); | 1110 | ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); |
941 | ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK); | 1111 | ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); |
942 | 1112 | ||
943 | spin_unlock_irqrestore(ap->lock, flags); | 1113 | spin_unlock_irqrestore(ap->lock, flags); |
944 | } | 1114 | } |
945 | 1115 | ||
946 | /** | 1116 | /** |
947 | * ata_eh_about_to_do - about to perform eh_action | 1117 | * ata_eh_about_to_do - about to perform eh_action |
948 | * @ap: target ATA port | 1118 | * @link: target ATA link |
949 | * @dev: target ATA dev for per-dev action (can be NULL) | 1119 | * @dev: target ATA dev for per-dev action (can be NULL) |
950 | * @action: action about to be performed | 1120 | * @action: action about to be performed |
951 | * | 1121 | * |
952 | * Called just before performing EH actions to clear related bits | 1122 | * Called just before performing EH actions to clear related bits |
953 | * in @ap->eh_info such that eh actions are not unnecessarily | 1123 | * in @link->eh_info such that eh actions are not unnecessarily |
954 | * repeated. | 1124 | * repeated. |
955 | * | 1125 | * |
956 | * LOCKING: | 1126 | * LOCKING: |
957 | * None. | 1127 | * None. |
958 | */ | 1128 | */ |
959 | static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, | 1129 | void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, |
960 | unsigned int action) | 1130 | unsigned int action) |
961 | { | 1131 | { |
1132 | struct ata_port *ap = link->ap; | ||
1133 | struct ata_eh_info *ehi = &link->eh_info; | ||
1134 | struct ata_eh_context *ehc = &link->eh_context; | ||
962 | unsigned long flags; | 1135 | unsigned long flags; |
963 | struct ata_eh_info *ehi = &ap->eh_info; | ||
964 | struct ata_eh_context *ehc = &ap->eh_context; | ||
965 | 1136 | ||
966 | spin_lock_irqsave(ap->lock, flags); | 1137 | spin_lock_irqsave(ap->lock, flags); |
967 | 1138 | ||
@@ -978,7 +1149,7 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, | |||
978 | ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | 1149 | ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; |
979 | } | 1150 | } |
980 | 1151 | ||
981 | ata_eh_clear_action(dev, ehi, action); | 1152 | ata_eh_clear_action(link, dev, ehi, action); |
982 | 1153 | ||
983 | if (!(ehc->i.flags & ATA_EHI_QUIET)) | 1154 | if (!(ehc->i.flags & ATA_EHI_QUIET)) |
984 | ap->pflags |= ATA_PFLAG_RECOVERED; | 1155 | ap->pflags |= ATA_PFLAG_RECOVERED; |
@@ -988,26 +1159,28 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, | |||
988 | 1159 | ||
989 | /** | 1160 | /** |
990 | * ata_eh_done - EH action complete | 1161 | * ata_eh_done - EH action complete |
991 | * @ap: target ATA port | 1162 | * @link: target ATA link |
992 | * @dev: target ATA dev for per-dev action (can be NULL) | 1163 | * @dev: target ATA dev for per-dev action (can be NULL) |
993 | * @action: action just completed | 1164 | * @action: action just completed |
994 | * | 1165 | * |
995 | * Called right after performing EH actions to clear related bits | 1166 | * Called right after performing EH actions to clear related bits |
996 | * in @ap->eh_context. | 1167 | * in @link->eh_context. |
997 | * | 1168 | * |
998 | * LOCKING: | 1169 | * LOCKING: |
999 | * None. | 1170 | * None. |
1000 | */ | 1171 | */ |
1001 | static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, | 1172 | void ata_eh_done(struct ata_link *link, struct ata_device *dev, |
1002 | unsigned int action) | 1173 | unsigned int action) |
1003 | { | 1174 | { |
1175 | struct ata_eh_context *ehc = &link->eh_context; | ||
1176 | |||
1004 | /* if reset is complete, clear all reset actions & reset modifier */ | 1177 | /* if reset is complete, clear all reset actions & reset modifier */ |
1005 | if (action & ATA_EH_RESET_MASK) { | 1178 | if (action & ATA_EH_RESET_MASK) { |
1006 | action |= ATA_EH_RESET_MASK; | 1179 | action |= ATA_EH_RESET_MASK; |
1007 | ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | 1180 | ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; |
1008 | } | 1181 | } |
1009 | 1182 | ||
1010 | ata_eh_clear_action(dev, &ap->eh_context.i, action); | 1183 | ata_eh_clear_action(link, dev, &ehc->i, action); |
1011 | } | 1184 | } |
1012 | 1185 | ||
1013 | /** | 1186 | /** |
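Since ata_eh_about_to_do() and ata_eh_done() are made non-static here so the PMP code can share them, a rough sketch of the usual bracketing pattern around a per-device EH action follows; demo_configure_dev() is a stand-in stub, not a real libata helper:

static int demo_configure_dev(struct ata_device *dev)
{
	return 0;		/* placeholder for the real per-device work */
}

static int demo_eh_step(struct ata_link *link, struct ata_device *dev)
{
	int rc;

	/* clear the pending bit first so the action is not repeated */
	ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);

	rc = demo_configure_dev(dev);
	if (rc == 0)
		/* record completion in link->eh_context */
		ata_eh_done(link, dev, ATA_EH_REVALIDATE);

	return rc;
}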
@@ -1077,7 +1250,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev, | |||
1077 | tf.protocol = ATA_PROT_PIO; | 1250 | tf.protocol = ATA_PROT_PIO; |
1078 | 1251 | ||
1079 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, | 1252 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, |
1080 | buf, sectors * ATA_SECT_SIZE); | 1253 | buf, sectors * ATA_SECT_SIZE, 0); |
1081 | 1254 | ||
1082 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 1255 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
1083 | return err_mask; | 1256 | return err_mask; |
@@ -1101,7 +1274,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev, | |||
1101 | static int ata_eh_read_log_10h(struct ata_device *dev, | 1274 | static int ata_eh_read_log_10h(struct ata_device *dev, |
1102 | int *tag, struct ata_taskfile *tf) | 1275 | int *tag, struct ata_taskfile *tf) |
1103 | { | 1276 | { |
1104 | u8 *buf = dev->ap->sector_buf; | 1277 | u8 *buf = dev->link->ap->sector_buf; |
1105 | unsigned int err_mask; | 1278 | unsigned int err_mask; |
1106 | u8 csum; | 1279 | u8 csum; |
1107 | int i; | 1280 | int i; |
@@ -1155,7 +1328,7 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) | |||
1155 | { | 1328 | { |
1156 | struct ata_device *dev = qc->dev; | 1329 | struct ata_device *dev = qc->dev; |
1157 | unsigned char *sense_buf = qc->scsicmd->sense_buffer; | 1330 | unsigned char *sense_buf = qc->scsicmd->sense_buffer; |
1158 | struct ata_port *ap = dev->ap; | 1331 | struct ata_port *ap = dev->link->ap; |
1159 | struct ata_taskfile tf; | 1332 | struct ata_taskfile tf; |
1160 | u8 cdb[ATAPI_CDB_LEN]; | 1333 | u8 cdb[ATAPI_CDB_LEN]; |
1161 | 1334 | ||
@@ -1191,12 +1364,12 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) | |||
1191 | } | 1364 | } |
1192 | 1365 | ||
1193 | return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, | 1366 | return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, |
1194 | sense_buf, SCSI_SENSE_BUFFERSIZE); | 1367 | sense_buf, SCSI_SENSE_BUFFERSIZE, 0); |
1195 | } | 1368 | } |
1196 | 1369 | ||
1197 | /** | 1370 | /** |
1198 | * ata_eh_analyze_serror - analyze SError for a failed port | 1371 | * ata_eh_analyze_serror - analyze SError for a failed port |
1199 | * @ap: ATA port to analyze SError for | 1372 | * @link: ATA link to analyze SError for |
1200 | * | 1373 | * |
1201 | * Analyze SError if available and further determine cause of | 1374 | * Analyze SError if available and further determine cause of |
1202 | * failure. | 1375 | * failure. |
@@ -1204,11 +1377,12 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) | |||
1204 | * LOCKING: | 1377 | * LOCKING: |
1205 | * None. | 1378 | * None. |
1206 | */ | 1379 | */ |
1207 | static void ata_eh_analyze_serror(struct ata_port *ap) | 1380 | static void ata_eh_analyze_serror(struct ata_link *link) |
1208 | { | 1381 | { |
1209 | struct ata_eh_context *ehc = &ap->eh_context; | 1382 | struct ata_eh_context *ehc = &link->eh_context; |
1210 | u32 serror = ehc->i.serror; | 1383 | u32 serror = ehc->i.serror; |
1211 | unsigned int err_mask = 0, action = 0; | 1384 | unsigned int err_mask = 0, action = 0; |
1385 | u32 hotplug_mask; | ||
1212 | 1386 | ||
1213 | if (serror & SERR_PERSISTENT) { | 1387 | if (serror & SERR_PERSISTENT) { |
1214 | err_mask |= AC_ERR_ATA_BUS; | 1388 | err_mask |= AC_ERR_ATA_BUS; |
@@ -1227,7 +1401,20 @@ static void ata_eh_analyze_serror(struct ata_port *ap) | |||
1227 | err_mask |= AC_ERR_SYSTEM; | 1401 | err_mask |= AC_ERR_SYSTEM; |
1228 | action |= ATA_EH_HARDRESET; | 1402 | action |= ATA_EH_HARDRESET; |
1229 | } | 1403 | } |
1230 | if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) | 1404 | |
1405 | /* Determine whether a hotplug event has occurred. Both | ||
1406 | * SError.N/X are considered hotplug events for enabled or | ||
1407 | * host links. For disabled PMP links, only the N bit is | ||
1408 | * considered, as the X bit is left at 1 for link plugging. | ||
1409 | */ | ||
1410 | hotplug_mask = 0; | ||
1411 | |||
1412 | if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) | ||
1413 | hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; | ||
1414 | else | ||
1415 | hotplug_mask = SERR_PHYRDY_CHG; | ||
1416 | |||
1417 | if (serror & hotplug_mask) | ||
1231 | ata_ehi_hotplugged(&ehc->i); | 1418 | ata_ehi_hotplugged(&ehc->i); |
1232 | 1419 | ||
1233 | ehc->i.err_mask |= err_mask; | 1420 | ehc->i.err_mask |= err_mask; |
@@ -1236,7 +1423,7 @@ static void ata_eh_analyze_serror(struct ata_port *ap) | |||
1236 | 1423 | ||
1237 | /** | 1424 | /** |
1238 | * ata_eh_analyze_ncq_error - analyze NCQ error | 1425 | * ata_eh_analyze_ncq_error - analyze NCQ error |
1239 | * @ap: ATA port to analyze NCQ error for | 1426 | * @link: ATA link to analyze NCQ error for |
1240 | * | 1427 | * |
1241 | * Read log page 10h, determine the offending qc and acquire | 1428 | * Read log page 10h, determine the offending qc and acquire |
1242 | * error status TF. For NCQ device errors, all LLDDs have to do | 1429 | * error status TF. For NCQ device errors, all LLDDs have to do |
@@ -1246,10 +1433,11 @@ static void ata_eh_analyze_serror(struct ata_port *ap) | |||
1246 | * LOCKING: | 1433 | * LOCKING: |
1247 | * Kernel thread context (may sleep). | 1434 | * Kernel thread context (may sleep). |
1248 | */ | 1435 | */ |
1249 | static void ata_eh_analyze_ncq_error(struct ata_port *ap) | 1436 | static void ata_eh_analyze_ncq_error(struct ata_link *link) |
1250 | { | 1437 | { |
1251 | struct ata_eh_context *ehc = &ap->eh_context; | 1438 | struct ata_port *ap = link->ap; |
1252 | struct ata_device *dev = ap->device; | 1439 | struct ata_eh_context *ehc = &link->eh_context; |
1440 | struct ata_device *dev = link->device; | ||
1253 | struct ata_queued_cmd *qc; | 1441 | struct ata_queued_cmd *qc; |
1254 | struct ata_taskfile tf; | 1442 | struct ata_taskfile tf; |
1255 | int tag, rc; | 1443 | int tag, rc; |
@@ -1259,7 +1447,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) | |||
1259 | return; | 1447 | return; |
1260 | 1448 | ||
1261 | /* is it NCQ device error? */ | 1449 | /* is it NCQ device error? */ |
1262 | if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) | 1450 | if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) |
1263 | return; | 1451 | return; |
1264 | 1452 | ||
1265 | /* has LLDD analyzed already? */ | 1453 | /* has LLDD analyzed already? */ |
@@ -1276,13 +1464,13 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) | |||
1276 | /* okay, this error is ours */ | 1464 | /* okay, this error is ours */ |
1277 | rc = ata_eh_read_log_10h(dev, &tag, &tf); | 1465 | rc = ata_eh_read_log_10h(dev, &tag, &tf); |
1278 | if (rc) { | 1466 | if (rc) { |
1279 | ata_port_printk(ap, KERN_ERR, "failed to read log page 10h " | 1467 | ata_link_printk(link, KERN_ERR, "failed to read log page 10h " |
1280 | "(errno=%d)\n", rc); | 1468 | "(errno=%d)\n", rc); |
1281 | return; | 1469 | return; |
1282 | } | 1470 | } |
1283 | 1471 | ||
1284 | if (!(ap->sactive & (1 << tag))) { | 1472 | if (!(link->sactive & (1 << tag))) { |
1285 | ata_port_printk(ap, KERN_ERR, "log page 10h reported " | 1473 | ata_link_printk(link, KERN_ERR, "log page 10h reported " |
1286 | "inactive tag %d\n", tag); | 1474 | "inactive tag %d\n", tag); |
1287 | return; | 1475 | return; |
1288 | } | 1476 | } |
@@ -1497,7 +1685,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io, | |||
1497 | /* speed down? */ | 1685 | /* speed down? */ |
1498 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { | 1686 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { |
1499 | /* speed down SATA link speed if possible */ | 1687 | /* speed down SATA link speed if possible */ |
1500 | if (sata_down_spd_limit(dev->ap) == 0) { | 1688 | if (sata_down_spd_limit(dev->link) == 0) { |
1501 | action |= ATA_EH_HARDRESET; | 1689 | action |= ATA_EH_HARDRESET; |
1502 | goto done; | 1690 | goto done; |
1503 | } | 1691 | } |
@@ -1528,7 +1716,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io, | |||
1528 | * SATA. Consider it only for PATA. | 1716 | * SATA. Consider it only for PATA. |
1529 | */ | 1717 | */ |
1530 | if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && | 1718 | if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && |
1531 | (dev->ap->cbl != ATA_CBL_SATA) && | 1719 | (dev->link->ap->cbl != ATA_CBL_SATA) && |
1532 | (dev->xfer_shift != ATA_SHIFT_PIO)) { | 1720 | (dev->xfer_shift != ATA_SHIFT_PIO)) { |
1533 | if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { | 1721 | if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { |
1534 | dev->spdn_cnt = 0; | 1722 | dev->spdn_cnt = 0; |
@@ -1545,19 +1733,20 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io, | |||
1545 | } | 1733 | } |
1546 | 1734 | ||
1547 | /** | 1735 | /** |
1548 | * ata_eh_autopsy - analyze error and determine recovery action | 1736 | * ata_eh_link_autopsy - analyze error and determine recovery action |
1549 | * @ap: ATA port to perform autopsy on | 1737 | * @link: host link to perform autopsy on |
1550 | * | 1738 | * |
1551 | * Analyze why @ap failed and determine which recovery action is | 1739 | * Analyze why @link failed and determine which recovery actions |
1552 | * needed. This function also sets more detailed AC_ERR_* values | 1740 | * are needed. This function also sets more detailed AC_ERR_* |
1553 | * and fills sense data for ATAPI CHECK SENSE. | 1741 | * values and fills sense data for ATAPI CHECK SENSE. |
1554 | * | 1742 | * |
1555 | * LOCKING: | 1743 | * LOCKING: |
1556 | * Kernel thread context (may sleep). | 1744 | * Kernel thread context (may sleep). |
1557 | */ | 1745 | */ |
1558 | static void ata_eh_autopsy(struct ata_port *ap) | 1746 | static void ata_eh_link_autopsy(struct ata_link *link) |
1559 | { | 1747 | { |
1560 | struct ata_eh_context *ehc = &ap->eh_context; | 1748 | struct ata_port *ap = link->ap; |
1749 | struct ata_eh_context *ehc = &link->eh_context; | ||
1561 | unsigned int all_err_mask = 0; | 1750 | unsigned int all_err_mask = 0; |
1562 | int tag, is_io = 0; | 1751 | int tag, is_io = 0; |
1563 | u32 serror; | 1752 | u32 serror; |
@@ -1569,10 +1758,10 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1569 | return; | 1758 | return; |
1570 | 1759 | ||
1571 | /* obtain and analyze SError */ | 1760 | /* obtain and analyze SError */ |
1572 | rc = sata_scr_read(ap, SCR_ERROR, &serror); | 1761 | rc = sata_scr_read(link, SCR_ERROR, &serror); |
1573 | if (rc == 0) { | 1762 | if (rc == 0) { |
1574 | ehc->i.serror |= serror; | 1763 | ehc->i.serror |= serror; |
1575 | ata_eh_analyze_serror(ap); | 1764 | ata_eh_analyze_serror(link); |
1576 | } else if (rc != -EOPNOTSUPP) { | 1765 | } else if (rc != -EOPNOTSUPP) { |
1577 | /* SError read failed, force hardreset and probing */ | 1766 | /* SError read failed, force hardreset and probing */ |
1578 | ata_ehi_schedule_probe(&ehc->i); | 1767 | ata_ehi_schedule_probe(&ehc->i); |
@@ -1581,7 +1770,7 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1581 | } | 1770 | } |
1582 | 1771 | ||
1583 | /* analyze NCQ failure */ | 1772 | /* analyze NCQ failure */ |
1584 | ata_eh_analyze_ncq_error(ap); | 1773 | ata_eh_analyze_ncq_error(link); |
1585 | 1774 | ||
1586 | /* any real error trumps AC_ERR_OTHER */ | 1775 | /* any real error trumps AC_ERR_OTHER */ |
1587 | if (ehc->i.err_mask & ~AC_ERR_OTHER) | 1776 | if (ehc->i.err_mask & ~AC_ERR_OTHER) |
@@ -1592,7 +1781,7 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1592 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1781 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
1593 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | 1782 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); |
1594 | 1783 | ||
1595 | if (!(qc->flags & ATA_QCFLAG_FAILED)) | 1784 | if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link) |
1596 | continue; | 1785 | continue; |
1597 | 1786 | ||
1598 | /* inherit upper level err_mask */ | 1787 | /* inherit upper level err_mask */ |
@@ -1646,20 +1835,43 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1646 | } | 1835 | } |
1647 | 1836 | ||
1648 | /** | 1837 | /** |
1649 | * ata_eh_report - report error handling to user | 1838 | * ata_eh_autopsy - analyze error and determine recovery action |
1650 | * @ap: ATA port EH is going on | 1839 | * @ap: host port to perform autopsy on |
1840 | * | ||
1841 | * Analyze all links of @ap and determine why they failed and | ||
1842 | * which recovery actions are needed. | ||
1843 | * | ||
1844 | * LOCKING: | ||
1845 | * Kernel thread context (may sleep). | ||
1846 | */ | ||
1847 | void ata_eh_autopsy(struct ata_port *ap) | ||
1848 | { | ||
1849 | struct ata_link *link; | ||
1850 | |||
1851 | __ata_port_for_each_link(link, ap) | ||
1852 | ata_eh_link_autopsy(link); | ||
1853 | } | ||
1854 | |||
1855 | /** | ||
1856 | * ata_eh_link_report - report error handling to user | ||
1857 | * @link: ATA link EH is going on | ||
1651 | * | 1858 | * |
1652 | * Report EH to user. | 1859 | * Report EH to user. |
1653 | * | 1860 | * |
1654 | * LOCKING: | 1861 | * LOCKING: |
1655 | * None. | 1862 | * None. |
1656 | */ | 1863 | */ |
1657 | static void ata_eh_report(struct ata_port *ap) | 1864 | static void ata_eh_link_report(struct ata_link *link) |
1658 | { | 1865 | { |
1659 | struct ata_eh_context *ehc = &ap->eh_context; | 1866 | struct ata_port *ap = link->ap; |
1867 | struct ata_eh_context *ehc = &link->eh_context; | ||
1660 | const char *frozen, *desc; | 1868 | const char *frozen, *desc; |
1869 | char tries_buf[6]; | ||
1661 | int tag, nr_failed = 0; | 1870 | int tag, nr_failed = 0; |
1662 | 1871 | ||
1872 | if (ehc->i.flags & ATA_EHI_QUIET) | ||
1873 | return; | ||
1874 | |||
1663 | desc = NULL; | 1875 | desc = NULL; |
1664 | if (ehc->i.desc[0] != '\0') | 1876 | if (ehc->i.desc[0] != '\0') |
1665 | desc = ehc->i.desc; | 1877 | desc = ehc->i.desc; |
@@ -1667,7 +1879,7 @@ static void ata_eh_report(struct ata_port *ap) | |||
1667 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1879 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
1668 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | 1880 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); |
1669 | 1881 | ||
1670 | if (!(qc->flags & ATA_QCFLAG_FAILED)) | 1882 | if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link) |
1671 | continue; | 1883 | continue; |
1672 | if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) | 1884 | if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) |
1673 | continue; | 1885 | continue; |
@@ -1682,22 +1894,48 @@ static void ata_eh_report(struct ata_port *ap) | |||
1682 | if (ap->pflags & ATA_PFLAG_FROZEN) | 1894 | if (ap->pflags & ATA_PFLAG_FROZEN) |
1683 | frozen = " frozen"; | 1895 | frozen = " frozen"; |
1684 | 1896 | ||
1897 | memset(tries_buf, 0, sizeof(tries_buf)); | ||
1898 | if (ap->eh_tries < ATA_EH_MAX_TRIES) | ||
1899 | snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", | ||
1900 | ap->eh_tries); | ||
1901 | |||
1685 | if (ehc->i.dev) { | 1902 | if (ehc->i.dev) { |
1686 | ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " | 1903 | ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " |
1687 | "SAct 0x%x SErr 0x%x action 0x%x%s\n", | 1904 | "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", |
1688 | ehc->i.err_mask, ap->sactive, ehc->i.serror, | 1905 | ehc->i.err_mask, link->sactive, ehc->i.serror, |
1689 | ehc->i.action, frozen); | 1906 | ehc->i.action, frozen, tries_buf); |
1690 | if (desc) | 1907 | if (desc) |
1691 | ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); | 1908 | ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); |
1692 | } else { | 1909 | } else { |
1693 | ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " | 1910 | ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " |
1694 | "SAct 0x%x SErr 0x%x action 0x%x%s\n", | 1911 | "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", |
1695 | ehc->i.err_mask, ap->sactive, ehc->i.serror, | 1912 | ehc->i.err_mask, link->sactive, ehc->i.serror, |
1696 | ehc->i.action, frozen); | 1913 | ehc->i.action, frozen, tries_buf); |
1697 | if (desc) | 1914 | if (desc) |
1698 | ata_port_printk(ap, KERN_ERR, "%s\n", desc); | 1915 | ata_link_printk(link, KERN_ERR, "%s\n", desc); |
1699 | } | 1916 | } |
1700 | 1917 | ||
1918 | if (ehc->i.serror) | ||
1919 | ata_port_printk(ap, KERN_ERR, | ||
1920 | "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", | ||
1921 | ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", | ||
1922 | ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", | ||
1923 | ehc->i.serror & SERR_DATA ? "UnrecovData " : "", | ||
1924 | ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", | ||
1925 | ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", | ||
1926 | ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", | ||
1927 | ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", | ||
1928 | ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", | ||
1929 | ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", | ||
1930 | ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", | ||
1931 | ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", | ||
1932 | ehc->i.serror & SERR_CRC ? "BadCRC " : "", | ||
1933 | ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", | ||
1934 | ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", | ||
1935 | ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", | ||
1936 | ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", | ||
1937 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "" ); | ||
1938 | |||
1701 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1939 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
1702 | static const char *dma_str[] = { | 1940 | static const char *dma_str[] = { |
1703 | [DMA_BIDIRECTIONAL] = "bidi", | 1941 | [DMA_BIDIRECTIONAL] = "bidi", |
@@ -1708,7 +1946,8 @@ static void ata_eh_report(struct ata_port *ap) | |||
1708 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | 1946 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); |
1709 | struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; | 1947 | struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; |
1710 | 1948 | ||
1711 | if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) | 1949 | if (!(qc->flags & ATA_QCFLAG_FAILED) || |
1950 | qc->dev->link != link || !qc->err_mask) | ||
1712 | continue; | 1951 | continue; |
1713 | 1952 | ||
1714 | ata_dev_printk(qc->dev, KERN_ERR, | 1953 | ata_dev_printk(qc->dev, KERN_ERR, |
@@ -1728,18 +1967,60 @@ static void ata_eh_report(struct ata_port *ap) | |||
1728 | res->hob_lbal, res->hob_lbam, res->hob_lbah, | 1967 | res->hob_lbal, res->hob_lbam, res->hob_lbah, |
1729 | res->device, qc->err_mask, ata_err_string(qc->err_mask), | 1968 | res->device, qc->err_mask, ata_err_string(qc->err_mask), |
1730 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); | 1969 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); |
1970 | |||
1971 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | | ||
1972 | ATA_ERR) ) { | ||
1973 | if (res->command & ATA_BUSY) | ||
1974 | ata_dev_printk(qc->dev, KERN_ERR, | ||
1975 | "status: { Busy }\n" ); | ||
1976 | else | ||
1977 | ata_dev_printk(qc->dev, KERN_ERR, | ||
1978 | "status: { %s%s%s%s}\n", | ||
1979 | res->command & ATA_DRDY ? "DRDY " : "", | ||
1980 | res->command & ATA_DF ? "DF " : "", | ||
1981 | res->command & ATA_DRQ ? "DRQ " : "", | ||
1982 | res->command & ATA_ERR ? "ERR " : "" ); | ||
1983 | } | ||
1984 | |||
1985 | if (cmd->command != ATA_CMD_PACKET && | ||
1986 | (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | | ||
1987 | ATA_ABORTED))) | ||
1988 | ata_dev_printk(qc->dev, KERN_ERR, | ||
1989 | "error: { %s%s%s%s}\n", | ||
1990 | res->feature & ATA_ICRC ? "ICRC " : "", | ||
1991 | res->feature & ATA_UNC ? "UNC " : "", | ||
1992 | res->feature & ATA_IDNF ? "IDNF " : "", | ||
1993 | res->feature & ATA_ABORTED ? "ABRT " : "" ); | ||
1731 | } | 1994 | } |
1732 | } | 1995 | } |
1733 | 1996 | ||
1734 | static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, | 1997 | /** |
1998 | * ata_eh_report - report error handling to user | ||
1999 | * @ap: ATA port to report EH about | ||
2000 | * | ||
2001 | * Report EH to user. | ||
2002 | * | ||
2003 | * LOCKING: | ||
2004 | * None. | ||
2005 | */ | ||
2006 | void ata_eh_report(struct ata_port *ap) | ||
2007 | { | ||
2008 | struct ata_link *link; | ||
2009 | |||
2010 | __ata_port_for_each_link(link, ap) | ||
2011 | ata_eh_link_report(link); | ||
2012 | } | ||
2013 | |||
2014 | static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, | ||
1735 | unsigned int *classes, unsigned long deadline) | 2015 | unsigned int *classes, unsigned long deadline) |
1736 | { | 2016 | { |
1737 | int i, rc; | 2017 | struct ata_device *dev; |
2018 | int rc; | ||
1738 | 2019 | ||
1739 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2020 | ata_link_for_each_dev(dev, link) |
1740 | classes[i] = ATA_DEV_UNKNOWN; | 2021 | classes[dev->devno] = ATA_DEV_UNKNOWN; |
1741 | 2022 | ||
1742 | rc = reset(ap, classes, deadline); | 2023 | rc = reset(link, classes, deadline); |
1743 | if (rc) | 2024 | if (rc) |
1744 | return rc; | 2025 | return rc; |
1745 | 2026 | ||
@@ -1747,71 +2028,87 @@ static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, | |||
1747 | * is complete and convert all ATA_DEV_UNKNOWN to | 2028 | * is complete and convert all ATA_DEV_UNKNOWN to |
1748 | * ATA_DEV_NONE. | 2029 | * ATA_DEV_NONE. |
1749 | */ | 2030 | */ |
1750 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2031 | ata_link_for_each_dev(dev, link) |
1751 | if (classes[i] != ATA_DEV_UNKNOWN) | 2032 | if (classes[dev->devno] != ATA_DEV_UNKNOWN) |
1752 | break; | 2033 | break; |
1753 | 2034 | ||
1754 | if (i < ATA_MAX_DEVICES) | 2035 | if (dev) { |
1755 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2036 | ata_link_for_each_dev(dev, link) { |
1756 | if (classes[i] == ATA_DEV_UNKNOWN) | 2037 | if (classes[dev->devno] == ATA_DEV_UNKNOWN) |
1757 | classes[i] = ATA_DEV_NONE; | 2038 | classes[dev->devno] = ATA_DEV_NONE; |
2039 | } | ||
2040 | } | ||
1758 | 2041 | ||
1759 | return 0; | 2042 | return 0; |
1760 | } | 2043 | } |
1761 | 2044 | ||
1762 | static int ata_eh_followup_srst_needed(int rc, int classify, | 2045 | static int ata_eh_followup_srst_needed(struct ata_link *link, |
2046 | int rc, int classify, | ||
1763 | const unsigned int *classes) | 2047 | const unsigned int *classes) |
1764 | { | 2048 | { |
2049 | if (link->flags & ATA_LFLAG_NO_SRST) | ||
2050 | return 0; | ||
1765 | if (rc == -EAGAIN) | 2051 | if (rc == -EAGAIN) |
1766 | return 1; | 2052 | return 1; |
1767 | if (rc != 0) | 2053 | if (rc != 0) |
1768 | return 0; | 2054 | return 0; |
1769 | if (classify && classes[0] == ATA_DEV_UNKNOWN) | 2055 | if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) |
2056 | return 1; | ||
2057 | if (classify && !(link->flags & ATA_LFLAG_ASSUME_CLASS) && | ||
2058 | classes[0] == ATA_DEV_UNKNOWN) | ||
1770 | return 1; | 2059 | return 1; |
1771 | return 0; | 2060 | return 0; |
1772 | } | 2061 | } |
1773 | 2062 | ||
1774 | static int ata_eh_reset(struct ata_port *ap, int classify, | 2063 | int ata_eh_reset(struct ata_link *link, int classify, |
1775 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | 2064 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, |
1776 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) | 2065 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) |
1777 | { | 2066 | { |
1778 | struct ata_eh_context *ehc = &ap->eh_context; | 2067 | struct ata_port *ap = link->ap; |
2068 | struct ata_eh_context *ehc = &link->eh_context; | ||
1779 | unsigned int *classes = ehc->classes; | 2069 | unsigned int *classes = ehc->classes; |
1780 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); | 2070 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); |
1781 | int try = 0; | 2071 | int try = 0; |
2072 | struct ata_device *dev; | ||
1782 | unsigned long deadline; | 2073 | unsigned long deadline; |
1783 | unsigned int action; | 2074 | unsigned int action; |
1784 | ata_reset_fn_t reset; | 2075 | ata_reset_fn_t reset; |
1785 | int i, rc; | 2076 | unsigned long flags; |
2077 | int rc; | ||
1786 | 2078 | ||
1787 | /* about to reset */ | 2079 | /* about to reset */ |
1788 | ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); | 2080 | spin_lock_irqsave(ap->lock, flags); |
2081 | ap->pflags |= ATA_PFLAG_RESETTING; | ||
2082 | spin_unlock_irqrestore(ap->lock, flags); | ||
2083 | |||
2084 | ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); | ||
1789 | 2085 | ||
1790 | /* Determine which reset to use and record in ehc->i.action. | 2086 | /* Determine which reset to use and record in ehc->i.action. |
1791 | * prereset() may examine and modify it. | 2087 | * prereset() may examine and modify it. |
1792 | */ | 2088 | */ |
1793 | action = ehc->i.action; | 2089 | action = ehc->i.action; |
1794 | ehc->i.action &= ~ATA_EH_RESET_MASK; | 2090 | ehc->i.action &= ~ATA_EH_RESET_MASK; |
1795 | if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && | 2091 | if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) && |
2092 | !sata_set_spd_needed(link) && | ||
1796 | !(action & ATA_EH_HARDRESET)))) | 2093 | !(action & ATA_EH_HARDRESET)))) |
1797 | ehc->i.action |= ATA_EH_SOFTRESET; | 2094 | ehc->i.action |= ATA_EH_SOFTRESET; |
1798 | else | 2095 | else |
1799 | ehc->i.action |= ATA_EH_HARDRESET; | 2096 | ehc->i.action |= ATA_EH_HARDRESET; |
1800 | 2097 | ||
1801 | if (prereset) { | 2098 | if (prereset) { |
1802 | rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT); | 2099 | rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); |
1803 | if (rc) { | 2100 | if (rc) { |
1804 | if (rc == -ENOENT) { | 2101 | if (rc == -ENOENT) { |
1805 | ata_port_printk(ap, KERN_DEBUG, | 2102 | ata_link_printk(link, KERN_DEBUG, |
1806 | "port disabled. ignoring.\n"); | 2103 | "port disabled. ignoring.\n"); |
1807 | ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; | 2104 | ehc->i.action &= ~ATA_EH_RESET_MASK; |
1808 | 2105 | ||
1809 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2106 | ata_link_for_each_dev(dev, link) |
1810 | classes[i] = ATA_DEV_NONE; | 2107 | classes[dev->devno] = ATA_DEV_NONE; |
1811 | 2108 | ||
1812 | rc = 0; | 2109 | rc = 0; |
1813 | } else | 2110 | } else |
1814 | ata_port_printk(ap, KERN_ERR, | 2111 | ata_link_printk(link, KERN_ERR, |
1815 | "prereset failed (errno=%d)\n", rc); | 2112 | "prereset failed (errno=%d)\n", rc); |
1816 | goto out; | 2113 | goto out; |
1817 | } | 2114 | } |
@@ -1824,8 +2121,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1824 | reset = softreset; | 2121 | reset = softreset; |
1825 | else { | 2122 | else { |
1826 | /* prereset told us not to reset, bang classes and return */ | 2123 | /* prereset told us not to reset, bang classes and return */ |
1827 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2124 | ata_link_for_each_dev(dev, link) |
1828 | classes[i] = ATA_DEV_NONE; | 2125 | classes[dev->devno] = ATA_DEV_NONE; |
1829 | rc = 0; | 2126 | rc = 0; |
1830 | goto out; | 2127 | goto out; |
1831 | } | 2128 | } |
@@ -1843,7 +2140,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1843 | 2140 | ||
1844 | /* shut up during boot probing */ | 2141 | /* shut up during boot probing */ |
1845 | if (verbose) | 2142 | if (verbose) |
1846 | ata_port_printk(ap, KERN_INFO, "%s resetting port\n", | 2143 | ata_link_printk(link, KERN_INFO, "%s resetting link\n", |
1847 | reset == softreset ? "soft" : "hard"); | 2144 | reset == softreset ? "soft" : "hard"); |
1848 | 2145 | ||
1849 | /* mark that this EH session started with reset */ | 2146 | /* mark that this EH session started with reset */ |
@@ -1852,49 +2149,54 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1852 | else | 2149 | else |
1853 | ehc->i.flags |= ATA_EHI_DID_SOFTRESET; | 2150 | ehc->i.flags |= ATA_EHI_DID_SOFTRESET; |
1854 | 2151 | ||
1855 | rc = ata_do_reset(ap, reset, classes, deadline); | 2152 | rc = ata_do_reset(link, reset, classes, deadline); |
1856 | 2153 | ||
1857 | if (reset == hardreset && | 2154 | if (reset == hardreset && |
1858 | ata_eh_followup_srst_needed(rc, classify, classes)) { | 2155 | ata_eh_followup_srst_needed(link, rc, classify, classes)) { |
1859 | /* okay, let's do follow-up softreset */ | 2156 | /* okay, let's do follow-up softreset */ |
1860 | reset = softreset; | 2157 | reset = softreset; |
1861 | 2158 | ||
1862 | if (!reset) { | 2159 | if (!reset) { |
1863 | ata_port_printk(ap, KERN_ERR, | 2160 | ata_link_printk(link, KERN_ERR, |
1864 | "follow-up softreset required " | 2161 | "follow-up softreset required " |
1865 | "but no softreset avaliable\n"); | 2162 | "but no softreset avaliable\n"); |
1866 | rc = -EINVAL; | 2163 | rc = -EINVAL; |
1867 | goto out; | 2164 | goto out; |
1868 | } | 2165 | } |
1869 | 2166 | ||
1870 | ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); | 2167 | ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK); |
1871 | rc = ata_do_reset(ap, reset, classes, deadline); | 2168 | rc = ata_do_reset(link, reset, classes, deadline); |
1872 | 2169 | ||
1873 | if (rc == 0 && classify && | 2170 | if (rc == 0 && classify && classes[0] == ATA_DEV_UNKNOWN && |
1874 | classes[0] == ATA_DEV_UNKNOWN) { | 2171 | !(link->flags & ATA_LFLAG_ASSUME_CLASS)) { |
1875 | ata_port_printk(ap, KERN_ERR, | 2172 | ata_link_printk(link, KERN_ERR, |
1876 | "classification failed\n"); | 2173 | "classification failed\n"); |
1877 | rc = -EINVAL; | 2174 | rc = -EINVAL; |
1878 | goto out; | 2175 | goto out; |
1879 | } | 2176 | } |
1880 | } | 2177 | } |
1881 | 2178 | ||
1882 | if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) { | 2179 | /* if we skipped follow-up srst, clear rc */ |
2180 | if (rc == -EAGAIN) | ||
2181 | rc = 0; | ||
2182 | |||
2183 | if (rc && rc != -ERESTART && try < ARRAY_SIZE(ata_eh_reset_timeouts)) { | ||
1883 | unsigned long now = jiffies; | 2184 | unsigned long now = jiffies; |
1884 | 2185 | ||
1885 | if (time_before(now, deadline)) { | 2186 | if (time_before(now, deadline)) { |
1886 | unsigned long delta = deadline - jiffies; | 2187 | unsigned long delta = deadline - jiffies; |
1887 | 2188 | ||
1888 | ata_port_printk(ap, KERN_WARNING, "reset failed " | 2189 | ata_link_printk(link, KERN_WARNING, "reset failed " |
1889 | "(errno=%d), retrying in %u secs\n", | 2190 | "(errno=%d), retrying in %u secs\n", |
1890 | rc, (jiffies_to_msecs(delta) + 999) / 1000); | 2191 | rc, (jiffies_to_msecs(delta) + 999) / 1000); |
1891 | 2192 | ||
1892 | schedule_timeout_uninterruptible(delta); | 2193 | while (delta) |
2194 | delta = schedule_timeout_uninterruptible(delta); | ||
1893 | } | 2195 | } |
1894 | 2196 | ||
1895 | if (rc == -EPIPE || | 2197 | if (rc == -EPIPE || |
1896 | try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) | 2198 | try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) |
1897 | sata_down_spd_limit(ap); | 2199 | sata_down_spd_limit(link); |
1898 | if (hardreset) | 2200 | if (hardreset) |
1899 | reset = hardreset; | 2201 | reset = hardreset; |
1900 | goto retry; | 2202 | goto retry; |
@@ -1903,37 +2205,56 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1903 | if (rc == 0) { | 2205 | if (rc == 0) { |
1904 | u32 sstatus; | 2206 | u32 sstatus; |
1905 | 2207 | ||
1906 | /* After the reset, the device state is PIO 0 and the | 2208 | ata_link_for_each_dev(dev, link) { |
1907 | * controller state is undefined. Record the mode. | 2209 | /* After the reset, the device state is PIO 0 |
1908 | */ | 2210 | * and the controller state is undefined. |
1909 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2211 | * Record the mode. |
1910 | ap->device[i].pio_mode = XFER_PIO_0; | 2212 | */ |
2213 | dev->pio_mode = XFER_PIO_0; | ||
2214 | |||
2215 | if (ata_link_offline(link)) | ||
2216 | continue; | ||
2217 | |||
2218 | /* apply class override and convert UNKNOWN to NONE */ | ||
2219 | if (link->flags & ATA_LFLAG_ASSUME_ATA) | ||
2220 | classes[dev->devno] = ATA_DEV_ATA; | ||
2221 | else if (link->flags & ATA_LFLAG_ASSUME_SEMB) | ||
2222 | classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ | ||
2223 | else if (classes[dev->devno] == ATA_DEV_UNKNOWN) | ||
2224 | classes[dev->devno] = ATA_DEV_NONE; | ||
2225 | } | ||
1911 | 2226 | ||
1912 | /* record current link speed */ | 2227 | /* record current link speed */ |
1913 | if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0) | 2228 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) |
1914 | ap->sata_spd = (sstatus >> 4) & 0xf; | 2229 | link->sata_spd = (sstatus >> 4) & 0xf; |
1915 | 2230 | ||
1916 | if (postreset) | 2231 | if (postreset) |
1917 | postreset(ap, classes); | 2232 | postreset(link, classes); |
1918 | 2233 | ||
1919 | /* reset successful, schedule revalidation */ | 2234 | /* reset successful, schedule revalidation */ |
1920 | ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); | 2235 | ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); |
1921 | ehc->i.action |= ATA_EH_REVALIDATE; | 2236 | ehc->i.action |= ATA_EH_REVALIDATE; |
1922 | } | 2237 | } |
1923 | out: | 2238 | out: |
1924 | /* clear hotplug flag */ | 2239 | /* clear hotplug flag */ |
1925 | ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; | 2240 | ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; |
2241 | |||
2242 | spin_lock_irqsave(ap->lock, flags); | ||
2243 | ap->pflags &= ~ATA_PFLAG_RESETTING; | ||
2244 | spin_unlock_irqrestore(ap->lock, flags); | ||
2245 | |||
1926 | return rc; | 2246 | return rc; |
1927 | } | 2247 | } |
1928 | 2248 | ||
1929 | static int ata_eh_revalidate_and_attach(struct ata_port *ap, | 2249 | static int ata_eh_revalidate_and_attach(struct ata_link *link, |
1930 | struct ata_device **r_failed_dev) | 2250 | struct ata_device **r_failed_dev) |
1931 | { | 2251 | { |
1932 | struct ata_eh_context *ehc = &ap->eh_context; | 2252 | struct ata_port *ap = link->ap; |
2253 | struct ata_eh_context *ehc = &link->eh_context; | ||
1933 | struct ata_device *dev; | 2254 | struct ata_device *dev; |
1934 | unsigned int new_mask = 0; | 2255 | unsigned int new_mask = 0; |
1935 | unsigned long flags; | 2256 | unsigned long flags; |
1936 | int i, rc = 0; | 2257 | int rc = 0; |
1937 | 2258 | ||
1938 | DPRINTK("ENTER\n"); | 2259 | DPRINTK("ENTER\n"); |
1939 | 2260 | ||
@@ -1941,27 +2262,28 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1941 | * be done backwards such that PDIAG- is released by the slave | 2262 | * be done backwards such that PDIAG- is released by the slave |
1942 | * device before the master device is identified. | 2263 | * device before the master device is identified. |
1943 | */ | 2264 | */ |
1944 | for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) { | 2265 | ata_link_for_each_dev_reverse(dev, link) { |
1945 | unsigned int action, readid_flags = 0; | 2266 | unsigned int action = ata_eh_dev_action(dev); |
1946 | 2267 | unsigned int readid_flags = 0; | |
1947 | dev = &ap->device[i]; | ||
1948 | action = ata_eh_dev_action(dev); | ||
1949 | 2268 | ||
1950 | if (ehc->i.flags & ATA_EHI_DID_RESET) | 2269 | if (ehc->i.flags & ATA_EHI_DID_RESET) |
1951 | readid_flags |= ATA_READID_POSTRESET; | 2270 | readid_flags |= ATA_READID_POSTRESET; |
1952 | 2271 | ||
1953 | if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { | 2272 | if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { |
1954 | if (ata_port_offline(ap)) { | 2273 | WARN_ON(dev->class == ATA_DEV_PMP); |
2274 | |||
2275 | if (ata_link_offline(link)) { | ||
1955 | rc = -EIO; | 2276 | rc = -EIO; |
1956 | goto err; | 2277 | goto err; |
1957 | } | 2278 | } |
1958 | 2279 | ||
1959 | ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); | 2280 | ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); |
1960 | rc = ata_dev_revalidate(dev, readid_flags); | 2281 | rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], |
2282 | readid_flags); | ||
1961 | if (rc) | 2283 | if (rc) |
1962 | goto err; | 2284 | goto err; |
1963 | 2285 | ||
1964 | ata_eh_done(ap, dev, ATA_EH_REVALIDATE); | 2286 | ata_eh_done(link, dev, ATA_EH_REVALIDATE); |
1965 | 2287 | ||
1966 | /* Configuration may have changed, reconfigure | 2288 | /* Configuration may have changed, reconfigure |
1967 | * transfer mode. | 2289 | * transfer mode. |
@@ -1975,11 +2297,14 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1975 | ata_class_enabled(ehc->classes[dev->devno])) { | 2297 | ata_class_enabled(ehc->classes[dev->devno])) { |
1976 | dev->class = ehc->classes[dev->devno]; | 2298 | dev->class = ehc->classes[dev->devno]; |
1977 | 2299 | ||
1978 | rc = ata_dev_read_id(dev, &dev->class, readid_flags, | 2300 | if (dev->class == ATA_DEV_PMP) |
1979 | dev->id); | 2301 | rc = sata_pmp_attach(dev); |
2302 | else | ||
2303 | rc = ata_dev_read_id(dev, &dev->class, | ||
2304 | readid_flags, dev->id); | ||
1980 | switch (rc) { | 2305 | switch (rc) { |
1981 | case 0: | 2306 | case 0: |
1982 | new_mask |= 1 << i; | 2307 | new_mask |= 1 << dev->devno; |
1983 | break; | 2308 | break; |
1984 | case -ENOENT: | 2309 | case -ENOENT: |
1985 | /* IDENTIFY was issued to non-existent | 2310 | /* IDENTIFY was issued to non-existent |
@@ -1997,16 +2322,16 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1997 | } | 2322 | } |
1998 | 2323 | ||
1999 | /* PDIAG- should have been released, ask cable type if post-reset */ | 2324 | /* PDIAG- should have been released, ask cable type if post-reset */ |
2000 | if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect) | 2325 | if (ata_is_host_link(link) && ap->ops->cable_detect && |
2326 | (ehc->i.flags & ATA_EHI_DID_RESET)) | ||
2001 | ap->cbl = ap->ops->cable_detect(ap); | 2327 | ap->cbl = ap->ops->cable_detect(ap); |
2002 | 2328 | ||
2003 | /* Configure new devices forward such that user doesn't see | 2329 | /* Configure new devices forward such that user doesn't see |
2004 | * device detection messages backwards. | 2330 | * device detection messages backwards. |
2005 | */ | 2331 | */ |
2006 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2332 | ata_link_for_each_dev(dev, link) { |
2007 | dev = &ap->device[i]; | 2333 | if (!(new_mask & (1 << dev->devno)) || |
2008 | 2334 | dev->class == ATA_DEV_PMP) | |
2009 | if (!(new_mask & (1 << i))) | ||
2010 | continue; | 2335 | continue; |
2011 | 2336 | ||
2012 | ehc->i.flags |= ATA_EHI_PRINTINFO; | 2337 | ehc->i.flags |= ATA_EHI_PRINTINFO; |
@@ -2031,40 +2356,44 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
2031 | return rc; | 2356 | return rc; |
2032 | } | 2357 | } |
2033 | 2358 | ||
2034 | static int ata_port_nr_enabled(struct ata_port *ap) | 2359 | static int ata_link_nr_enabled(struct ata_link *link) |
2035 | { | 2360 | { |
2036 | int i, cnt = 0; | 2361 | struct ata_device *dev; |
2362 | int cnt = 0; | ||
2037 | 2363 | ||
2038 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2364 | ata_link_for_each_dev(dev, link) |
2039 | if (ata_dev_enabled(&ap->device[i])) | 2365 | if (ata_dev_enabled(dev)) |
2040 | cnt++; | 2366 | cnt++; |
2041 | return cnt; | 2367 | return cnt; |
2042 | } | 2368 | } |
2043 | 2369 | ||
2044 | static int ata_port_nr_vacant(struct ata_port *ap) | 2370 | static int ata_link_nr_vacant(struct ata_link *link) |
2045 | { | 2371 | { |
2046 | int i, cnt = 0; | 2372 | struct ata_device *dev; |
2373 | int cnt = 0; | ||
2047 | 2374 | ||
2048 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2375 | ata_link_for_each_dev(dev, link) |
2049 | if (ap->device[i].class == ATA_DEV_UNKNOWN) | 2376 | if (dev->class == ATA_DEV_UNKNOWN) |
2050 | cnt++; | 2377 | cnt++; |
2051 | return cnt; | 2378 | return cnt; |
2052 | } | 2379 | } |
2053 | 2380 | ||
2054 | static int ata_eh_skip_recovery(struct ata_port *ap) | 2381 | static int ata_eh_skip_recovery(struct ata_link *link) |
2055 | { | 2382 | { |
2056 | struct ata_eh_context *ehc = &ap->eh_context; | 2383 | struct ata_eh_context *ehc = &link->eh_context; |
2057 | int i; | 2384 | struct ata_device *dev; |
2385 | |||
2386 | /* skip disabled links */ | ||
2387 | if (link->flags & ATA_LFLAG_DISABLED) | ||
2388 | return 1; | ||
2058 | 2389 | ||
2059 | /* thaw frozen port, resume link and recover failed devices */ | 2390 | /* thaw frozen port, resume link and recover failed devices */ |
2060 | if ((ap->pflags & ATA_PFLAG_FROZEN) || | 2391 | if ((link->ap->pflags & ATA_PFLAG_FROZEN) || |
2061 | (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) | 2392 | (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link)) |
2062 | return 0; | 2393 | return 0; |
2063 | 2394 | ||
2064 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ | 2395 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ |
2065 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2396 | ata_link_for_each_dev(dev, link) { |
2066 | struct ata_device *dev = &ap->device[i]; | ||
2067 | |||
2068 | if (dev->class == ATA_DEV_UNKNOWN && | 2397 | if (dev->class == ATA_DEV_UNKNOWN && |
2069 | ehc->classes[dev->devno] != ATA_DEV_NONE) | 2398 | ehc->classes[dev->devno] != ATA_DEV_NONE) |
2070 | return 0; | 2399 | return 0; |
@@ -2073,10 +2402,9 @@ static int ata_eh_skip_recovery(struct ata_port *ap) | |||
2073 | return 1; | 2402 | return 1; |
2074 | } | 2403 | } |
2075 | 2404 | ||
2076 | static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | 2405 | static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) |
2077 | { | 2406 | { |
2078 | struct ata_port *ap = dev->ap; | 2407 | struct ata_eh_context *ehc = &dev->link->eh_context; |
2079 | struct ata_eh_context *ehc = &ap->eh_context; | ||
2080 | 2408 | ||
2081 | ehc->tries[dev->devno]--; | 2409 | ehc->tries[dev->devno]--; |
2082 | 2410 | ||
@@ -2092,7 +2420,7 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2092 | /* This is the last chance, better to slow | 2420 | /* This is the last chance, better to slow |
2093 | * down than lose it. | 2421 | * down than lose it. |
2094 | */ | 2422 | */ |
2095 | sata_down_spd_limit(ap); | 2423 | sata_down_spd_limit(dev->link); |
2096 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); | 2424 | ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); |
2097 | } | 2425 | } |
2098 | } | 2426 | } |
@@ -2102,7 +2430,7 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2102 | ata_dev_disable(dev); | 2430 | ata_dev_disable(dev); |
2103 | 2431 | ||
2104 | /* detach if offline */ | 2432 | /* detach if offline */ |
2105 | if (ata_port_offline(ap)) | 2433 | if (ata_link_offline(dev->link)) |
2106 | ata_eh_detach_dev(dev); | 2434 | ata_eh_detach_dev(dev); |
2107 | 2435 | ||
2108 | /* probe if requested */ | 2436 | /* probe if requested */ |
@@ -2115,12 +2443,16 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2115 | ehc->did_probe_mask |= (1 << dev->devno); | 2443 | ehc->did_probe_mask |= (1 << dev->devno); |
2116 | ehc->i.action |= ATA_EH_SOFTRESET; | 2444 | ehc->i.action |= ATA_EH_SOFTRESET; |
2117 | } | 2445 | } |
2446 | |||
2447 | return 1; | ||
2118 | } else { | 2448 | } else { |
2119 | /* soft didn't work? be haaaaard */ | 2449 | /* soft didn't work? be haaaaard */ |
2120 | if (ehc->i.flags & ATA_EHI_DID_RESET) | 2450 | if (ehc->i.flags & ATA_EHI_DID_RESET) |
2121 | ehc->i.action |= ATA_EH_HARDRESET; | 2451 | ehc->i.action |= ATA_EH_HARDRESET; |
2122 | else | 2452 | else |
2123 | ehc->i.action |= ATA_EH_SOFTRESET; | 2453 | ehc->i.action |= ATA_EH_SOFTRESET; |
2454 | |||
2455 | return 0; | ||
2124 | } | 2456 | } |
2125 | } | 2457 | } |
2126 | 2458 | ||
@@ -2131,12 +2463,13 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2131 | * @softreset: softreset method (can be NULL) | 2463 | * @softreset: softreset method (can be NULL) |
2132 | * @hardreset: hardreset method (can be NULL) | 2464 | * @hardreset: hardreset method (can be NULL) |
2133 | * @postreset: postreset method (can be NULL) | 2465 | * @postreset: postreset method (can be NULL) |
2466 | * @r_failed_link: out parameter for failed link | ||
2134 | * | 2467 | * |
2135 | * This is the alpha and omega, eum and yang, heart and soul of | 2468 | * This is the alpha and omega, eum and yang, heart and soul of |
2136 | * libata exception handling. On entry, actions required to | 2469 | * libata exception handling. On entry, actions required to |
2137 | * recover the port and hotplug requests are recorded in | 2470 | * recover each link and hotplug requests are recorded in the |
2138 | * eh_context. This function executes all the operations with | 2471 | * link's eh_context. This function executes all the operations |
2139 | * appropriate retries and fallbacks to resurrect failed | 2472 | * with appropriate retries and fallbacks to resurrect failed |
2140 | * devices, detach goners and greet newcomers. | 2473 | * devices, detach goners and greet newcomers. |
2141 | * | 2474 | * |
2142 | * LOCKING: | 2475 | * LOCKING: |
@@ -2145,104 +2478,171 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2145 | * RETURNS: | 2478 | * RETURNS: |
2146 | * 0 on success, -errno on failure. | 2479 | * 0 on success, -errno on failure. |
2147 | */ | 2480 | */ |
2148 | static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | 2481 | int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, |
2149 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 2482 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
2150 | ata_postreset_fn_t postreset) | 2483 | ata_postreset_fn_t postreset, |
2484 | struct ata_link **r_failed_link) | ||
2151 | { | 2485 | { |
2152 | struct ata_eh_context *ehc = &ap->eh_context; | 2486 | struct ata_link *link; |
2153 | struct ata_device *dev; | 2487 | struct ata_device *dev; |
2154 | int i, rc; | 2488 | int nr_failed_devs, nr_disabled_devs; |
2489 | int reset, rc; | ||
2490 | unsigned long flags; | ||
2155 | 2491 | ||
2156 | DPRINTK("ENTER\n"); | 2492 | DPRINTK("ENTER\n"); |
2157 | 2493 | ||
2158 | /* prep for recovery */ | 2494 | /* prep for recovery */ |
2159 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2495 | ata_port_for_each_link(link, ap) { |
2160 | dev = &ap->device[i]; | 2496 | struct ata_eh_context *ehc = &link->eh_context; |
2161 | 2497 | ||
2162 | ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; | 2498 | /* re-enable link? */ |
2163 | 2499 | if (ehc->i.action & ATA_EH_ENABLE_LINK) { | |
2164 | /* collect port action mask recorded in dev actions */ | 2500 | ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); |
2165 | ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK; | 2501 | spin_lock_irqsave(ap->lock, flags); |
2166 | ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK; | 2502 | link->flags &= ~ATA_LFLAG_DISABLED; |
2167 | 2503 | spin_unlock_irqrestore(ap->lock, flags); | |
2168 | /* process hotplug request */ | 2504 | ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); |
2169 | if (dev->flags & ATA_DFLAG_DETACH) | 2505 | } |
2170 | ata_eh_detach_dev(dev); | ||
2171 | 2506 | ||
2172 | if (!ata_dev_enabled(dev) && | 2507 | ata_link_for_each_dev(dev, link) { |
2173 | ((ehc->i.probe_mask & (1 << dev->devno)) && | 2508 | if (link->flags & ATA_LFLAG_NO_RETRY) |
2174 | !(ehc->did_probe_mask & (1 << dev->devno)))) { | 2509 | ehc->tries[dev->devno] = 1; |
2175 | ata_eh_detach_dev(dev); | 2510 | else |
2176 | ata_dev_init(dev); | 2511 | ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; |
2177 | ehc->did_probe_mask |= (1 << dev->devno); | 2512 | |
2178 | ehc->i.action |= ATA_EH_SOFTRESET; | 2513 | /* collect port action mask recorded in dev actions */ |
2514 | ehc->i.action |= ehc->i.dev_action[dev->devno] & | ||
2515 | ~ATA_EH_PERDEV_MASK; | ||
2516 | ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; | ||
2517 | |||
2518 | /* process hotplug request */ | ||
2519 | if (dev->flags & ATA_DFLAG_DETACH) | ||
2520 | ata_eh_detach_dev(dev); | ||
2521 | |||
2522 | if (!ata_dev_enabled(dev) && | ||
2523 | ((ehc->i.probe_mask & (1 << dev->devno)) && | ||
2524 | !(ehc->did_probe_mask & (1 << dev->devno)))) { | ||
2525 | ata_eh_detach_dev(dev); | ||
2526 | ata_dev_init(dev); | ||
2527 | ehc->did_probe_mask |= (1 << dev->devno); | ||
2528 | ehc->i.action |= ATA_EH_SOFTRESET; | ||
2529 | } | ||
2179 | } | 2530 | } |
2180 | } | 2531 | } |
2181 | 2532 | ||
2182 | retry: | 2533 | retry: |
2183 | rc = 0; | 2534 | rc = 0; |
2535 | nr_failed_devs = 0; | ||
2536 | nr_disabled_devs = 0; | ||
2537 | reset = 0; | ||
2184 | 2538 | ||
2185 | /* if UNLOADING, finish immediately */ | 2539 | /* if UNLOADING, finish immediately */ |
2186 | if (ap->pflags & ATA_PFLAG_UNLOADING) | 2540 | if (ap->pflags & ATA_PFLAG_UNLOADING) |
2187 | goto out; | 2541 | goto out; |
2188 | 2542 | ||
2189 | /* skip EH if possible. */ | 2543 | /* prep for EH */ |
2190 | if (ata_eh_skip_recovery(ap)) | 2544 | ata_port_for_each_link(link, ap) { |
2191 | ehc->i.action = 0; | 2545 | struct ata_eh_context *ehc = &link->eh_context; |
2192 | 2546 | ||
2193 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2547 | /* skip EH if possible. */ |
2194 | ehc->classes[i] = ATA_DEV_UNKNOWN; | 2548 | if (ata_eh_skip_recovery(link)) |
2549 | ehc->i.action = 0; | ||
2195 | 2550 | ||
2196 | /* reset */ | 2551 | /* do we need to reset? */ |
2197 | if (ehc->i.action & ATA_EH_RESET_MASK) { | 2552 | if (ehc->i.action & ATA_EH_RESET_MASK) |
2198 | ata_eh_freeze_port(ap); | 2553 | reset = 1; |
2199 | 2554 | ||
2200 | rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, | 2555 | ata_link_for_each_dev(dev, link) |
2201 | softreset, hardreset, postreset); | 2556 | ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; |
2202 | if (rc) { | 2557 | } |
2203 | ata_port_printk(ap, KERN_ERR, | 2558 | |
2204 | "reset failed, giving up\n"); | 2559 | /* reset */ |
2205 | goto out; | 2560 | if (reset) { |
2561 | /* if PMP is attached, this function only deals with | ||
2562 | * downstream links, port should stay thawed. | ||
2563 | */ | ||
2564 | if (!ap->nr_pmp_links) | ||
2565 | ata_eh_freeze_port(ap); | ||
2566 | |||
2567 | ata_port_for_each_link(link, ap) { | ||
2568 | struct ata_eh_context *ehc = &link->eh_context; | ||
2569 | |||
2570 | if (!(ehc->i.action & ATA_EH_RESET_MASK)) | ||
2571 | continue; | ||
2572 | |||
2573 | rc = ata_eh_reset(link, ata_link_nr_vacant(link), | ||
2574 | prereset, softreset, hardreset, | ||
2575 | postreset); | ||
2576 | if (rc) { | ||
2577 | ata_link_printk(link, KERN_ERR, | ||
2578 | "reset failed, giving up\n"); | ||
2579 | goto out; | ||
2580 | } | ||
2206 | } | 2581 | } |
2207 | 2582 | ||
2208 | ata_eh_thaw_port(ap); | 2583 | if (!ap->nr_pmp_links) |
2584 | ata_eh_thaw_port(ap); | ||
2209 | } | 2585 | } |
2210 | 2586 | ||
2211 | /* revalidate existing devices and attach new ones */ | 2587 | /* the rest */ |
2212 | rc = ata_eh_revalidate_and_attach(ap, &dev); | 2588 | ata_port_for_each_link(link, ap) { |
2213 | if (rc) | 2589 | struct ata_eh_context *ehc = &link->eh_context; |
2214 | goto dev_fail; | ||
2215 | 2590 | ||
2216 | /* configure transfer mode if necessary */ | 2591 | /* revalidate existing devices and attach new ones */ |
2217 | if (ehc->i.flags & ATA_EHI_SETMODE) { | 2592 | rc = ata_eh_revalidate_and_attach(link, &dev); |
2218 | rc = ata_set_mode(ap, &dev); | ||
2219 | if (rc) | 2593 | if (rc) |
2220 | goto dev_fail; | 2594 | goto dev_fail; |
2221 | ehc->i.flags &= ~ATA_EHI_SETMODE; | ||
2222 | } | ||
2223 | 2595 | ||
2224 | goto out; | 2596 | /* if PMP got attached, return, pmp EH will take care of it */ |
2597 | if (link->device->class == ATA_DEV_PMP) { | ||
2598 | ehc->i.action = 0; | ||
2599 | return 0; | ||
2600 | } | ||
2225 | 2601 | ||
2226 | dev_fail: | 2602 | /* configure transfer mode if necessary */ |
2227 | ata_eh_handle_dev_fail(dev, rc); | 2603 | if (ehc->i.flags & ATA_EHI_SETMODE) { |
2604 | rc = ata_set_mode(link, &dev); | ||
2605 | if (rc) | ||
2606 | goto dev_fail; | ||
2607 | ehc->i.flags &= ~ATA_EHI_SETMODE; | ||
2608 | } | ||
2228 | 2609 | ||
2229 | if (ata_port_nr_enabled(ap)) { | 2610 | /* this link is okay now */ |
2230 | ata_port_printk(ap, KERN_WARNING, "failed to recover some " | 2611 | ehc->i.flags = 0; |
2231 | "devices, retrying in 5 secs\n"); | 2612 | continue; |
2232 | ssleep(5); | 2613 | |
2233 | } else { | 2614 | dev_fail: |
2234 | /* no device left, repeat fast */ | 2615 | nr_failed_devs++; |
2235 | msleep(500); | 2616 | if (ata_eh_handle_dev_fail(dev, rc)) |
2617 | nr_disabled_devs++; | ||
2618 | |||
2619 | if (ap->pflags & ATA_PFLAG_FROZEN) { | ||
2620 | /* PMP reset requires working host port. | ||
2621 | * Can't retry if it's frozen. | ||
2622 | */ | ||
2623 | if (ap->nr_pmp_links) | ||
2624 | goto out; | ||
2625 | break; | ||
2626 | } | ||
2236 | } | 2627 | } |
2237 | 2628 | ||
2238 | goto retry; | 2629 | if (nr_failed_devs) { |
2630 | if (nr_failed_devs != nr_disabled_devs) { | ||
2631 | ata_port_printk(ap, KERN_WARNING, "failed to recover " | ||
2632 | "some devices, retrying in 5 secs\n"); | ||
2633 | ssleep(5); | ||
2634 | } else { | ||
2635 | /* no device left to recover, repeat fast */ | ||
2636 | msleep(500); | ||
2637 | } | ||
2239 | 2638 | ||
2240 | out: | 2639 | goto retry; |
2241 | if (rc) { | ||
2242 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
2243 | ata_dev_disable(&ap->device[i]); | ||
2244 | } | 2640 | } |
2245 | 2641 | ||
2642 | out: | ||
2643 | if (rc && r_failed_link) | ||
2644 | *r_failed_link = link; | ||
2645 | |||
2246 | DPRINTK("EXIT, rc=%d\n", rc); | 2646 | DPRINTK("EXIT, rc=%d\n", rc); |
2247 | return rc; | 2647 | return rc; |
2248 | } | 2648 | } |
@@ -2257,7 +2657,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2257 | * LOCKING: | 2657 | * LOCKING: |
2258 | * None. | 2658 | * None. |
2259 | */ | 2659 | */ |
2260 | static void ata_eh_finish(struct ata_port *ap) | 2660 | void ata_eh_finish(struct ata_port *ap) |
2261 | { | 2661 | { |
2262 | int tag; | 2662 | int tag; |
2263 | 2663 | ||
@@ -2287,6 +2687,10 @@ static void ata_eh_finish(struct ata_port *ap) | |||
2287 | } | 2687 | } |
2288 | } | 2688 | } |
2289 | } | 2689 | } |
2690 | |||
2691 | /* make sure nr_active_links is zero after EH */ | ||
2692 | WARN_ON(ap->nr_active_links); | ||
2693 | ap->nr_active_links = 0; | ||
2290 | } | 2694 | } |
2291 | 2695 | ||
2292 | /** | 2696 | /** |
@@ -2306,9 +2710,19 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2306 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 2710 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
2307 | ata_postreset_fn_t postreset) | 2711 | ata_postreset_fn_t postreset) |
2308 | { | 2712 | { |
2713 | struct ata_device *dev; | ||
2714 | int rc; | ||
2715 | |||
2309 | ata_eh_autopsy(ap); | 2716 | ata_eh_autopsy(ap); |
2310 | ata_eh_report(ap); | 2717 | ata_eh_report(ap); |
2311 | ata_eh_recover(ap, prereset, softreset, hardreset, postreset); | 2718 | |
2719 | rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, | ||
2720 | NULL); | ||
2721 | if (rc) { | ||
2722 | ata_link_for_each_dev(dev, &ap->link) | ||
2723 | ata_dev_disable(dev); | ||
2724 | } | ||
2725 | |||
2312 | ata_eh_finish(ap); | 2726 | ata_eh_finish(ap); |
2313 | } | 2727 | } |
2314 | 2728 | ||
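For orientation: a low-level driver's ->error_handler typically does little more than feed its reset methods to ata_do_eh() above. A minimal sketch, assuming the stock libata reset helpers of this generation (ata_std_prereset, ata_std_softreset, sata_std_hardreset, ata_std_postreset); a real controller driver would pass its own callbacks instead. Not part of this patch.

	/* Sketch only -- helper names are the generic libata reset methods,
	 * not anything introduced by this change.  Assumes <linux/libata.h>.
	 */
	static void demo_error_handler(struct ata_port *ap)
	{
		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
			  sata_std_hardreset, ata_std_postreset);
	}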
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c new file mode 100644 index 000000000000..c0c4dbcde091 --- /dev/null +++ b/drivers/ata/libata-pmp.c | |||
@@ -0,0 +1,1191 @@ | |||
1 | /* | ||
2 | * libata-pmp.c - libata port multiplier support | ||
3 | * | ||
4 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
5 | * Copyright (c) 2007 Tejun Heo <teheo@suse.de> | ||
6 | * | ||
7 | * This file is released under the GPLv2. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/libata.h> | ||
12 | #include "libata.h" | ||
13 | |||
14 | /** | ||
15 | * sata_pmp_read - read PMP register | ||
16 | * @link: link to read PMP register for | ||
17 | * @reg: register to read | ||
18 | * @r_val: resulting value | ||
19 | * | ||
20 | * Read PMP register. | ||
21 | * | ||
22 | * LOCKING: | ||
23 | * Kernel thread context (may sleep). | ||
24 | * | ||
25 | * RETURNS: | ||
26 | * 0 on success, AC_ERR_* mask on failure. | ||
27 | */ | ||
28 | static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val) | ||
29 | { | ||
30 | struct ata_port *ap = link->ap; | ||
31 | struct ata_device *pmp_dev = ap->link.device; | ||
32 | struct ata_taskfile tf; | ||
33 | unsigned int err_mask; | ||
34 | |||
35 | ata_tf_init(pmp_dev, &tf); | ||
36 | tf.command = ATA_CMD_PMP_READ; | ||
37 | tf.protocol = ATA_PROT_NODATA; | ||
38 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
39 | tf.feature = reg; | ||
40 | tf.device = link->pmp; | ||
41 | |||
42 | err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0, | ||
43 | SATA_PMP_SCR_TIMEOUT); | ||
44 | if (err_mask) | ||
45 | return err_mask; | ||
46 | |||
47 | *r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24; | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * sata_pmp_write - write PMP register | ||
53 | * @link: link to write PMP register for | ||
54 | * @reg: register to write | ||
55 | * @val: value to write | ||
56 | * | ||
57 | * Write PMP register. | ||
58 | * | ||
59 | * LOCKING: | ||
60 | * Kernel thread context (may sleep). | ||
61 | * | ||
62 | * RETURNS: | ||
63 | * 0 on success, AC_ERR_* mask on failure. | ||
64 | */ | ||
65 | static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val) | ||
66 | { | ||
67 | struct ata_port *ap = link->ap; | ||
68 | struct ata_device *pmp_dev = ap->link.device; | ||
69 | struct ata_taskfile tf; | ||
70 | |||
71 | ata_tf_init(pmp_dev, &tf); | ||
72 | tf.command = ATA_CMD_PMP_WRITE; | ||
73 | tf.protocol = ATA_PROT_NODATA; | ||
74 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
75 | tf.feature = reg; | ||
76 | tf.device = link->pmp; | ||
77 | tf.nsect = val & 0xff; | ||
78 | tf.lbal = (val >> 8) & 0xff; | ||
79 | tf.lbam = (val >> 16) & 0xff; | ||
80 | tf.lbah = (val >> 24) & 0xff; | ||
81 | |||
82 | return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0, | ||
83 | SATA_PMP_SCR_TIMEOUT); | ||
84 | } | ||
85 | |||
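Both accessors above shuttle the 32-bit GSCR value through the taskfile count/LBA bytes in little-endian order. The mapping, pulled out as a standalone helper purely for illustration (a sketch, not part of the patch; the demo_* name is made up):

	/* Sketch: reassemble the GSCR dword exactly as sata_pmp_read() does --
	 * nsect carries bits 7:0, lbal 15:8, lbam 23:16, lbah 31:24.
	 * Assumes <linux/libata.h>.
	 */
	static u32 demo_gscr_from_tf(const struct ata_taskfile *tf)
	{
		return tf->nsect | tf->lbal << 8 | tf->lbam << 16 | tf->lbah << 24;
	}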
86 | /** | ||
87 | * sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP | ||
88 | * @qc: ATA command in question | ||
89 | * | ||
90 | * A host which has command switching PMP support cannot issue | ||
91 | * commands to multiple links simultaneously. | ||
92 | * | ||
93 | * LOCKING: | ||
94 | * spin_lock_irqsave(host lock) | ||
95 | * | ||
96 | * RETURNS: | ||
97 | * ATA_DEFER_* if deferring is needed, 0 otherwise. | ||
98 | */ | ||
99 | int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc) | ||
100 | { | ||
101 | struct ata_link *link = qc->dev->link; | ||
102 | struct ata_port *ap = link->ap; | ||
103 | |||
104 | if (ap->excl_link == NULL || ap->excl_link == link) { | ||
105 | if (ap->nr_active_links == 0 || ata_link_active(link)) { | ||
106 | qc->flags |= ATA_QCFLAG_CLEAR_EXCL; | ||
107 | return ata_std_qc_defer(qc); | ||
108 | } | ||
109 | |||
110 | ap->excl_link = link; | ||
111 | } | ||
112 | |||
113 | return ATA_DEFER_PORT; | ||
114 | } | ||
115 | |||
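A host with command-based switching would normally point its qc_defer operation at the helper above, assuming the struct ata_port_operations ->qc_defer hook that accompanies this work. A hypothetical sketch (demo_pmp_ops and the omitted members are placeholders, not taken from any real driver):

	/* Sketch: wiring the command-switching defer logic into a driver's
	 * port operations.  Only .qc_defer is shown; everything else would
	 * come from the driver's existing non-PMP operation table.
	 */
	static const struct ata_port_operations demo_pmp_ops = {
		.qc_defer	= sata_pmp_qc_defer_cmd_switch,
		/* ... remaining callbacks unchanged ... */
	};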
116 | /** | ||
117 | * sata_pmp_scr_read - read PSCR | ||
118 | * @link: ATA link to read PSCR for | ||
119 | * @reg: PSCR to read | ||
120 | * @r_val: resulting value | ||
121 | * | ||
122 | * Read PSCR @reg into @r_val for @link, to be called from | ||
123 | * ata_scr_read(). | ||
124 | * | ||
125 | * LOCKING: | ||
126 | * Kernel thread context (may sleep). | ||
127 | * | ||
128 | * RETURNS: | ||
129 | * 0 on success, -errno on failure. | ||
130 | */ | ||
131 | int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val) | ||
132 | { | ||
133 | unsigned int err_mask; | ||
134 | |||
135 | if (reg > SATA_PMP_PSCR_CONTROL) | ||
136 | return -EINVAL; | ||
137 | |||
138 | err_mask = sata_pmp_read(link, reg, r_val); | ||
139 | if (err_mask) { | ||
140 | ata_link_printk(link, KERN_WARNING, "failed to read SCR %d " | ||
141 | "(Emask=0x%x)\n", reg, err_mask); | ||
142 | return -EIO; | ||
143 | } | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * sata_pmp_scr_write - write PSCR | ||
149 | * @link: ATA link to write PSCR for | ||
150 | * @reg: PSCR to write | ||
151 | * @val: value to be written | ||
152 | * | ||
153 | * Write @val to PSCR @reg for @link, to be called from | ||
154 | * ata_scr_write() and ata_scr_write_flush(). | ||
155 | * | ||
156 | * LOCKING: | ||
157 | * Kernel thread context (may sleep). | ||
158 | * | ||
159 | * RETURNS: | ||
160 | * 0 on success, -errno on failure. | ||
161 | */ | ||
162 | int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) | ||
163 | { | ||
164 | unsigned int err_mask; | ||
165 | |||
166 | if (reg > SATA_PMP_PSCR_CONTROL) | ||
167 | return -EINVAL; | ||
168 | |||
169 | err_mask = sata_pmp_write(link, reg, val); | ||
170 | if (err_mask) { | ||
171 | ata_link_printk(link, KERN_WARNING, "failed to write SCR %d " | ||
172 | "(Emask=0x%x)\n", reg, err_mask); | ||
173 | return -EIO; | ||
174 | } | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * sata_pmp_std_prereset - prepare PMP link for reset | ||
180 | * @link: link to be reset | ||
181 | * @deadline: deadline jiffies for the operation | ||
182 | * | ||
183 | * @link is about to be reset. Initialize it. | ||
184 | * | ||
185 | * LOCKING: | ||
186 | * Kernel thread context (may sleep) | ||
187 | * | ||
188 | * RETURNS: | ||
189 | * 0 on success, -errno otherwise. | ||
190 | */ | ||
191 | int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline) | ||
192 | { | ||
193 | struct ata_eh_context *ehc = &link->eh_context; | ||
194 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | ||
195 | int rc; | ||
196 | |||
197 | /* force HRST? */ | ||
198 | if (link->flags & ATA_LFLAG_NO_SRST) | ||
199 | ehc->i.action |= ATA_EH_HARDRESET; | ||
200 | |||
201 | /* handle link resume */ | ||
202 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && | ||
203 | (link->flags & ATA_LFLAG_HRST_TO_RESUME)) | ||
204 | ehc->i.action |= ATA_EH_HARDRESET; | ||
205 | |||
206 | /* if we're about to do hardreset, nothing more to do */ | ||
207 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
208 | return 0; | ||
209 | |||
210 | /* resume link */ | ||
211 | rc = sata_link_resume(link, timing, deadline); | ||
212 | if (rc) { | ||
213 | /* phy resume failed */ | ||
214 | ata_link_printk(link, KERN_WARNING, "failed to resume link " | ||
215 | "for reset (errno=%d)\n", rc); | ||
216 | return rc; | ||
217 | } | ||
218 | |||
219 | /* clear SError bits including .X which blocks the port when set */ | ||
220 | rc = sata_scr_write(link, SCR_ERROR, 0xffffffff); | ||
221 | if (rc) { | ||
222 | ata_link_printk(link, KERN_ERR, | ||
223 | "failed to clear SError (errno=%d)\n", rc); | ||
224 | return rc; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * sata_pmp_std_hardreset - standard hardreset method for PMP link | ||
232 | * @link: link to be reset | ||
233 | * @class: resulting class of attached device | ||
234 | * @deadline: deadline jiffies for the operation | ||
235 | * | ||
236 | * Hardreset PMP port @link. Note that this function doesn't | ||
237 | * wait for BSY clearance. There simply isn't a generic way to | ||
238 | * wait for the event. Instead, this function returns -EAGAIN, thus | ||
239 | * telling libata-EH to follow up with softreset. | ||
240 | * | ||
241 | * LOCKING: | ||
242 | * Kernel thread context (may sleep) | ||
243 | * | ||
244 | * RETURNS: | ||
245 | * 0 on success, -errno otherwise. | ||
246 | */ | ||
247 | int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, | ||
248 | unsigned long deadline) | ||
249 | { | ||
250 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | ||
251 | u32 tmp; | ||
252 | int rc; | ||
253 | |||
254 | DPRINTK("ENTER\n"); | ||
255 | |||
256 | /* do hardreset */ | ||
257 | rc = sata_link_hardreset(link, timing, deadline); | ||
258 | if (rc) { | ||
259 | ata_link_printk(link, KERN_ERR, | ||
260 | "COMRESET failed (errno=%d)\n", rc); | ||
261 | goto out; | ||
262 | } | ||
263 | |||
264 | /* clear SError bits including .X which blocks the port when set */ | ||
265 | rc = sata_scr_write(link, SCR_ERROR, 0xffffffff); | ||
266 | if (rc) { | ||
267 | ata_link_printk(link, KERN_ERR, "failed to clear SError " | ||
268 | "during hardreset (errno=%d)\n", rc); | ||
269 | goto out; | ||
270 | } | ||
271 | |||
272 | /* if device is present, follow up with srst to wait for !BSY */ | ||
273 | if (ata_link_online(link)) | ||
274 | rc = -EAGAIN; | ||
275 | out: | ||
276 | /* if SCR isn't accessible, we need to reset the PMP */ | ||
277 | if (rc && rc != -EAGAIN && sata_scr_read(link, SCR_STATUS, &tmp)) | ||
278 | rc = -ERESTART; | ||
279 | |||
280 | DPRINTK("EXIT, rc=%d\n", rc); | ||
281 | return rc; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * sata_pmp_std_postreset - standard postreset method for PMP link | ||
286 | * @link: the target ata_link | ||
287 | * @class: classes of attached devices | ||
288 | * | ||
289 | * This function is invoked after a successful reset. Note that | ||
290 | * the device might have been reset more than once using | ||
291 | * different reset methods before postreset is invoked. | ||
292 | * | ||
293 | * LOCKING: | ||
294 | * Kernel thread context (may sleep) | ||
295 | */ | ||
296 | void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class) | ||
297 | { | ||
298 | u32 serror; | ||
299 | |||
300 | DPRINTK("ENTER\n"); | ||
301 | |||
302 | /* clear SError */ | ||
303 | if (sata_scr_read(link, SCR_ERROR, &serror) == 0) | ||
304 | sata_scr_write(link, SCR_ERROR, serror); | ||
305 | |||
306 | /* print link status */ | ||
307 | sata_print_link_status(link); | ||
308 | |||
309 | DPRINTK("EXIT\n"); | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * sata_pmp_read_gscr - read GSCR block of SATA PMP | ||
314 | * @dev: PMP device | ||
315 | * @gscr: buffer to read GSCR block into | ||
316 | * | ||
317 | * Read selected PMP GSCRs from the PMP at @dev. This will serve | ||
318 | * as configuration and identification info for the PMP. | ||
319 | * | ||
320 | * LOCKING: | ||
321 | * Kernel thread context (may sleep). | ||
322 | * | ||
323 | * RETURNS: | ||
324 | * 0 on success, -errno on failure. | ||
325 | */ | ||
326 | static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr) | ||
327 | { | ||
328 | static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 }; | ||
329 | int i; | ||
330 | |||
331 | for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) { | ||
332 | int reg = gscr_to_read[i]; | ||
333 | unsigned int err_mask; | ||
334 | |||
335 | err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]); | ||
336 | if (err_mask) { | ||
337 | ata_dev_printk(dev, KERN_ERR, "failed to read PMP " | ||
338 | "GSCR[%d] (Emask=0x%x)\n", reg, err_mask); | ||
339 | return -EIO; | ||
340 | } | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | static const char *sata_pmp_spec_rev_str(const u32 *gscr) | ||
347 | { | ||
348 | u32 rev = gscr[SATA_PMP_GSCR_REV]; | ||
349 | |||
350 | if (rev & (1 << 2)) | ||
351 | return "1.1"; | ||
352 | if (rev & (1 << 1)) | ||
353 | return "1.0"; | ||
354 | return "<unknown>"; | ||
355 | } | ||
356 | |||
357 | static int sata_pmp_configure(struct ata_device *dev, int print_info) | ||
358 | { | ||
359 | struct ata_port *ap = dev->link->ap; | ||
360 | u32 *gscr = dev->gscr; | ||
361 | unsigned int err_mask = 0; | ||
362 | const char *reason; | ||
363 | int nr_ports, rc; | ||
364 | |||
365 | nr_ports = sata_pmp_gscr_ports(gscr); | ||
366 | |||
367 | if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) { | ||
368 | rc = -EINVAL; | ||
369 | reason = "invalid nr_ports"; | ||
370 | goto fail; | ||
371 | } | ||
372 | |||
373 | if ((ap->flags & ATA_FLAG_AN) && | ||
374 | (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY)) | ||
375 | dev->flags |= ATA_DFLAG_AN; | ||
376 | |||
377 | /* monitor SERR_PHYRDY_CHG on fan-out ports */ | ||
378 | err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN, | ||
379 | SERR_PHYRDY_CHG); | ||
380 | if (err_mask) { | ||
381 | rc = -EIO; | ||
382 | reason = "failed to write GSCR_ERROR_EN"; | ||
383 | goto fail; | ||
384 | } | ||
385 | |||
386 | /* turn off notification till fan-out ports are reset and configured */ | ||
387 | if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) { | ||
388 | gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY; | ||
389 | |||
390 | err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_FEAT_EN, | ||
391 | gscr[SATA_PMP_GSCR_FEAT_EN]); | ||
392 | if (err_mask) { | ||
393 | rc = -EIO; | ||
394 | reason = "failed to write GSCR_FEAT_EN"; | ||
395 | goto fail; | ||
396 | } | ||
397 | } | ||
398 | |||
399 | if (print_info) { | ||
400 | ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, " | ||
401 | "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n", | ||
402 | sata_pmp_spec_rev_str(gscr), | ||
403 | sata_pmp_gscr_vendor(gscr), | ||
404 | sata_pmp_gscr_devid(gscr), | ||
405 | sata_pmp_gscr_rev(gscr), | ||
406 | nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN], | ||
407 | gscr[SATA_PMP_GSCR_FEAT]); | ||
408 | |||
409 | if (!(dev->flags & ATA_DFLAG_AN)) | ||
410 | ata_dev_printk(dev, KERN_INFO, | ||
411 | "Asynchronous notification not supported, " | ||
412 | "hotplug won't\n work on fan-out " | ||
413 | "ports. Use warm-plug instead.\n"); | ||
414 | } | ||
415 | |||
416 | return 0; | ||
417 | |||
418 | fail: | ||
419 | ata_dev_printk(dev, KERN_ERR, | ||
420 | "failed to configure Port Multiplier (%s, Emask=0x%x)\n", | ||
421 | reason, err_mask); | ||
422 | return rc; | ||
423 | } | ||
424 | |||
425 | static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) | ||
426 | { | ||
427 | struct ata_link *pmp_link = ap->pmp_link; | ||
428 | int i; | ||
429 | |||
430 | if (!pmp_link) { | ||
431 | pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS, | ||
432 | GFP_NOIO); | ||
433 | if (!pmp_link) | ||
434 | return -ENOMEM; | ||
435 | |||
436 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) | ||
437 | ata_link_init(ap, &pmp_link[i], i); | ||
438 | |||
439 | ap->pmp_link = pmp_link; | ||
440 | } | ||
441 | |||
442 | for (i = 0; i < nr_ports; i++) { | ||
443 | struct ata_link *link = &pmp_link[i]; | ||
444 | struct ata_eh_context *ehc = &link->eh_context; | ||
445 | |||
446 | link->flags = 0; | ||
447 | ehc->i.probe_mask |= 1; | ||
448 | ehc->i.action |= ATA_EH_SOFTRESET; | ||
449 | ehc->i.flags |= ATA_EHI_RESUME_LINK; | ||
450 | } | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static void sata_pmp_quirks(struct ata_port *ap) | ||
456 | { | ||
457 | u32 *gscr = ap->link.device->gscr; | ||
458 | u16 vendor = sata_pmp_gscr_vendor(gscr); | ||
459 | u16 devid = sata_pmp_gscr_devid(gscr); | ||
460 | struct ata_link *link; | ||
461 | |||
462 | if (vendor == 0x1095 && devid == 0x3726) { | ||
463 | /* sil3726 quirks */ | ||
464 | ata_port_for_each_link(link, ap) { | ||
465 | /* SError.N needs a kick in the ass to get working */ | ||
466 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
467 | |||
468 | /* class code report is unreliable */ | ||
469 | if (link->pmp < 5) | ||
470 | link->flags |= ATA_LFLAG_ASSUME_ATA; | ||
471 | |||
472 | /* port 5 is for the SEMB device and it doesn't like SRST */ | ||
473 | if (link->pmp == 5) | ||
474 | link->flags |= ATA_LFLAG_NO_SRST | | ||
475 | ATA_LFLAG_ASSUME_SEMB; | ||
476 | } | ||
477 | } else if (vendor == 0x1095 && devid == 0x4723) { | ||
478 | /* sil4723 quirks */ | ||
479 | ata_port_for_each_link(link, ap) { | ||
480 | /* SError.N needs a kick in the ass to get working */ | ||
481 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
482 | |||
483 | /* class code report is unreliable */ | ||
484 | if (link->pmp < 2) | ||
485 | link->flags |= ATA_LFLAG_ASSUME_ATA; | ||
486 | |||
487 | /* the config device at port 2 locks up on SRST */ | ||
488 | if (link->pmp == 2) | ||
489 | link->flags |= ATA_LFLAG_NO_SRST | | ||
490 | ATA_LFLAG_ASSUME_ATA; | ||
491 | } | ||
492 | } else if (vendor == 0x1095 && devid == 0x4726) { | ||
493 | /* sil4726 quirks */ | ||
494 | ata_port_for_each_link(link, ap) { | ||
495 | /* SError.N needs a kick in the ass to get working */ | ||
496 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
497 | |||
498 | /* class code report is unreliable */ | ||
499 | if (link->pmp < 5) | ||
500 | link->flags |= ATA_LFLAG_ASSUME_ATA; | ||
501 | |||
502 | /* The config device, which can be either at | ||
503 | * port 0 or 5, locks up on SRST. | ||
504 | */ | ||
505 | if (link->pmp == 0 || link->pmp == 5) | ||
506 | link->flags |= ATA_LFLAG_NO_SRST | | ||
507 | ATA_LFLAG_ASSUME_ATA; | ||
508 | |||
509 | /* Port 6 is for the SEMB device, which doesn't | ||
510 | * like SRST either. | ||
511 | */ | ||
512 | if (link->pmp == 6) | ||
513 | link->flags |= ATA_LFLAG_NO_SRST | | ||
514 | ATA_LFLAG_ASSUME_SEMB; | ||
515 | } | ||
516 | } else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 || | ||
517 | devid == 0x5734 || devid == 0x5744)) { | ||
518 | /* sil5723/5744 quirks */ | ||
519 | |||
520 | /* sil5723/5744 has either two or three downstream | ||
521 | * ports depending on operation mode. The last port | ||
522 | * is empty if an actual IO device is available, and | ||
523 | * is occupied by a pseudo configuration device | ||
524 | * otherwise. Don't try hard to recover it. | ||
525 | */ | ||
526 | ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; | ||
527 | } else if (vendor == 0x11ab && devid == 0x4140) { | ||
528 | /* Marvell 88SM4140 quirks. Fan-out ports require PHY | ||
529 | * reset to work; other than that, it behaves very | ||
530 | * nicely. | ||
531 | */ | ||
532 | ata_port_for_each_link(link, ap) | ||
533 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
534 | } | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * sata_pmp_attach - attach a SATA PMP device | ||
539 | * @dev: SATA PMP device to attach | ||
540 | * | ||
541 | * Configure and attach SATA PMP device @dev. This function is | ||
542 | * also responsible for allocating and initializing PMP links. | ||
543 | * | ||
544 | * LOCKING: | ||
545 | * Kernel thread context (may sleep). | ||
546 | * | ||
547 | * RETURNS: | ||
548 | * 0 on success, -errno on failure. | ||
549 | */ | ||
550 | int sata_pmp_attach(struct ata_device *dev) | ||
551 | { | ||
552 | struct ata_link *link = dev->link; | ||
553 | struct ata_port *ap = link->ap; | ||
554 | unsigned long flags; | ||
555 | struct ata_link *tlink; | ||
556 | int rc; | ||
557 | |||
558 | /* is it hanging off the right place? */ | ||
559 | if (!(ap->flags & ATA_FLAG_PMP)) { | ||
560 | ata_dev_printk(dev, KERN_ERR, | ||
561 | "host does not support Port Multiplier\n"); | ||
562 | return -EINVAL; | ||
563 | } | ||
564 | |||
565 | if (!ata_is_host_link(link)) { | ||
566 | ata_dev_printk(dev, KERN_ERR, | ||
567 | "Port Multipliers cannot be nested\n"); | ||
568 | return -EINVAL; | ||
569 | } | ||
570 | |||
571 | if (dev->devno) { | ||
572 | ata_dev_printk(dev, KERN_ERR, | ||
573 | "Port Multiplier must be the first device\n"); | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | |||
577 | WARN_ON(link->pmp != 0); | ||
578 | link->pmp = SATA_PMP_CTRL_PORT; | ||
579 | |||
580 | /* read GSCR block */ | ||
581 | rc = sata_pmp_read_gscr(dev, dev->gscr); | ||
582 | if (rc) | ||
583 | goto fail; | ||
584 | |||
585 | /* config PMP */ | ||
586 | rc = sata_pmp_configure(dev, 1); | ||
587 | if (rc) | ||
588 | goto fail; | ||
589 | |||
590 | rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr)); | ||
591 | if (rc) { | ||
592 | ata_dev_printk(dev, KERN_INFO, | ||
593 | "failed to initialize PMP links\n"); | ||
594 | goto fail; | ||
595 | } | ||
596 | |||
597 | /* attach it */ | ||
598 | spin_lock_irqsave(ap->lock, flags); | ||
599 | WARN_ON(ap->nr_pmp_links); | ||
600 | ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr); | ||
601 | spin_unlock_irqrestore(ap->lock, flags); | ||
602 | |||
603 | sata_pmp_quirks(ap); | ||
604 | |||
605 | if (ap->ops->pmp_attach) | ||
606 | ap->ops->pmp_attach(ap); | ||
607 | |||
608 | ata_port_for_each_link(tlink, ap) | ||
609 | sata_link_init_spd(tlink); | ||
610 | |||
611 | ata_acpi_associate_sata_port(ap); | ||
612 | |||
613 | return 0; | ||
614 | |||
615 | fail: | ||
616 | link->pmp = 0; | ||
617 | return rc; | ||
618 | } | ||
619 | |||
620 | /** | ||
621 | * sata_pmp_detach - detach a SATA PMP device | ||
622 | * @dev: SATA PMP device to detach | ||
623 | * | ||
624 | * Detach SATA PMP device @dev. This function is also | ||
625 | * responsible for deconfiguring PMP links. | ||
626 | * | ||
627 | * LOCKING: | ||
628 | * Kernel thread context (may sleep). | ||
629 | */ | ||
630 | static void sata_pmp_detach(struct ata_device *dev) | ||
631 | { | ||
632 | struct ata_link *link = dev->link; | ||
633 | struct ata_port *ap = link->ap; | ||
634 | struct ata_link *tlink; | ||
635 | unsigned long flags; | ||
636 | |||
637 | ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n"); | ||
638 | |||
639 | WARN_ON(!ata_is_host_link(link) || dev->devno || | ||
640 | link->pmp != SATA_PMP_CTRL_PORT); | ||
641 | |||
642 | if (ap->ops->pmp_detach) | ||
643 | ap->ops->pmp_detach(ap); | ||
644 | |||
645 | ata_port_for_each_link(tlink, ap) | ||
646 | ata_eh_detach_dev(tlink->device); | ||
647 | |||
648 | spin_lock_irqsave(ap->lock, flags); | ||
649 | ap->nr_pmp_links = 0; | ||
650 | link->pmp = 0; | ||
651 | spin_unlock_irqrestore(ap->lock, flags); | ||
652 | |||
653 | ata_acpi_associate_sata_port(ap); | ||
654 | } | ||
655 | |||
656 | /** | ||
657 | * sata_pmp_same_pmp - does the new GSCR match the configured PMP? | ||
658 | * @dev: PMP device to compare against | ||
659 | * @new_gscr: GSCR block of the new device | ||
660 | * | ||
661 | * Compare @new_gscr against @dev and determine whether @dev is | ||
662 | * the PMP described by @new_gscr. | ||
663 | * | ||
664 | * LOCKING: | ||
665 | * None. | ||
666 | * | ||
667 | * RETURNS: | ||
668 | * 1 if @dev matches @new_gscr, 0 otherwise. | ||
669 | */ | ||
670 | static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr) | ||
671 | { | ||
672 | const u32 *old_gscr = dev->gscr; | ||
673 | u16 old_vendor, new_vendor, old_devid, new_devid; | ||
674 | int old_nr_ports, new_nr_ports; | ||
675 | |||
676 | old_vendor = sata_pmp_gscr_vendor(old_gscr); | ||
677 | new_vendor = sata_pmp_gscr_vendor(new_gscr); | ||
678 | old_devid = sata_pmp_gscr_devid(old_gscr); | ||
679 | new_devid = sata_pmp_gscr_devid(new_gscr); | ||
680 | old_nr_ports = sata_pmp_gscr_ports(old_gscr); | ||
681 | new_nr_ports = sata_pmp_gscr_ports(new_gscr); | ||
682 | |||
683 | if (old_vendor != new_vendor) { | ||
684 | ata_dev_printk(dev, KERN_INFO, "Port Multiplier " | ||
685 | "vendor mismatch '0x%x' != '0x%x'\n", | ||
686 | old_vendor, new_vendor); | ||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | if (old_devid != new_devid) { | ||
691 | ata_dev_printk(dev, KERN_INFO, "Port Multiplier " | ||
692 | "device ID mismatch '0x%x' != '0x%x'\n", | ||
693 | old_devid, new_devid); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | if (old_nr_ports != new_nr_ports) { | ||
698 | ata_dev_printk(dev, KERN_INFO, "Port Multiplier " | ||
699 | "nr_ports mismatch '0x%x' != '0x%x'\n", | ||
700 | old_nr_ports, new_nr_ports); | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | return 1; | ||
705 | } | ||
706 | |||
707 | /** | ||
708 | * sata_pmp_revalidate - revalidate SATA PMP | ||
709 | * @dev: PMP device to revalidate | ||
710 | * @new_class: new class code | ||
711 | * | ||
712 | * Re-read GSCR block and make sure @dev is still attached to the | ||
713 | * port and properly configured. | ||
714 | * | ||
715 | * LOCKING: | ||
716 | * Kernel thread context (may sleep). | ||
717 | * | ||
718 | * RETURNS: | ||
719 | * 0 on success, -errno otherwise. | ||
720 | */ | ||
721 | static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class) | ||
722 | { | ||
723 | struct ata_link *link = dev->link; | ||
724 | struct ata_port *ap = link->ap; | ||
725 | u32 *gscr = (void *)ap->sector_buf; | ||
726 | int rc; | ||
727 | |||
728 | DPRINTK("ENTER\n"); | ||
729 | |||
730 | ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE); | ||
731 | |||
732 | if (!ata_dev_enabled(dev)) { | ||
733 | rc = -ENODEV; | ||
734 | goto fail; | ||
735 | } | ||
736 | |||
737 | /* wrong class? */ | ||
738 | if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) { | ||
739 | rc = -ENODEV; | ||
740 | goto fail; | ||
741 | } | ||
742 | |||
743 | /* read GSCR */ | ||
744 | rc = sata_pmp_read_gscr(dev, gscr); | ||
745 | if (rc) | ||
746 | goto fail; | ||
747 | |||
748 | /* is the pmp still there? */ | ||
749 | if (!sata_pmp_same_pmp(dev, gscr)) { | ||
750 | rc = -ENODEV; | ||
751 | goto fail; | ||
752 | } | ||
753 | |||
754 | memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS); | ||
755 | |||
756 | rc = sata_pmp_configure(dev, 0); | ||
757 | if (rc) | ||
758 | goto fail; | ||
759 | |||
760 | ata_eh_done(link, NULL, ATA_EH_REVALIDATE); | ||
761 | |||
762 | DPRINTK("EXIT, rc=0\n"); | ||
763 | return 0; | ||
764 | |||
765 | fail: | ||
766 | ata_dev_printk(dev, KERN_ERR, | ||
767 | "PMP revalidation failed (errno=%d)\n", rc); | ||
768 | DPRINTK("EXIT, rc=%d\n", rc); | ||
769 | return rc; | ||
770 | } | ||
771 | |||
772 | /** | ||
773 | * sata_pmp_revalidate_quick - revalidate SATA PMP quickly | ||
774 | * @dev: PMP device to revalidate | ||
775 | * | ||
776 | * Make sure the attached PMP is accessible. | ||
777 | * | ||
778 | * LOCKING: | ||
779 | * Kernel thread context (may sleep). | ||
780 | * | ||
781 | * RETURNS: | ||
782 | * 0 on success, -errno otherwise. | ||
783 | */ | ||
784 | static int sata_pmp_revalidate_quick(struct ata_device *dev) | ||
785 | { | ||
786 | unsigned int err_mask; | ||
787 | u32 prod_id; | ||
788 | |||
789 | err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id); | ||
790 | if (err_mask) { | ||
791 | ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID " | ||
792 | "(Emask=0x%x)\n", err_mask); | ||
793 | return -EIO; | ||
794 | } | ||
795 | |||
796 | if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) { | ||
797 | ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n"); | ||
798 | /* something weird is going on, request full PMP recovery */ | ||
799 | return -EIO; | ||
800 | } | ||
801 | |||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * sata_pmp_eh_recover_pmp - recover PMP | ||
807 | * @ap: ATA port PMP is attached to | ||
808 | * @prereset: prereset method (can be NULL) | ||
809 | * @softreset: softreset method | ||
810 | * @hardreset: hardreset method | ||
811 | * @postreset: postreset method (can be NULL) | ||
812 | * | ||
813 | * Recover PMP attached to @ap. Recovery procedure is somewhat | ||
814 | * similar to that of ata_eh_recover() except that reset should | ||
815 | * always be performed in hard->soft sequence and recovery | ||
816 | * failure results in PMP detachment. | ||
817 | * | ||
818 | * LOCKING: | ||
819 | * Kernel thread context (may sleep). | ||
820 | * | ||
821 | * RETURNS: | ||
822 | * 0 on success, -errno on failure. | ||
823 | */ | ||
824 | static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | ||
825 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
826 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) | ||
827 | { | ||
828 | struct ata_link *link = &ap->link; | ||
829 | struct ata_eh_context *ehc = &link->eh_context; | ||
830 | struct ata_device *dev = link->device; | ||
831 | int tries = ATA_EH_PMP_TRIES; | ||
832 | int detach = 0, rc = 0; | ||
833 | int reval_failed = 0; | ||
834 | |||
835 | DPRINTK("ENTER\n"); | ||
836 | |||
837 | if (dev->flags & ATA_DFLAG_DETACH) { | ||
838 | detach = 1; | ||
839 | goto fail; | ||
840 | } | ||
841 | |||
842 | retry: | ||
843 | ehc->classes[0] = ATA_DEV_UNKNOWN; | ||
844 | |||
845 | if (ehc->i.action & ATA_EH_RESET_MASK) { | ||
846 | struct ata_link *tlink; | ||
847 | |||
848 | ata_eh_freeze_port(ap); | ||
849 | |||
850 | /* reset */ | ||
851 | ehc->i.action = ATA_EH_HARDRESET; | ||
852 | rc = ata_eh_reset(link, 0, prereset, softreset, hardreset, | ||
853 | postreset); | ||
854 | if (rc) { | ||
855 | ata_link_printk(link, KERN_ERR, | ||
856 | "failed to reset PMP, giving up\n"); | ||
857 | goto fail; | ||
858 | } | ||
859 | |||
860 | ata_eh_thaw_port(ap); | ||
861 | |||
862 | /* PMP is reset, SErrors cannot be trusted, scan all */ | ||
863 | ata_port_for_each_link(tlink, ap) | ||
864 | ata_ehi_schedule_probe(&tlink->eh_context.i); | ||
865 | } | ||
866 | |||
867 | /* If revalidation is requested, revalidate and reconfigure; | ||
868 | * otherwise, do quick revalidation. | ||
869 | */ | ||
870 | if (ehc->i.action & ATA_EH_REVALIDATE) | ||
871 | rc = sata_pmp_revalidate(dev, ehc->classes[0]); | ||
872 | else | ||
873 | rc = sata_pmp_revalidate_quick(dev); | ||
874 | |||
875 | if (rc) { | ||
876 | tries--; | ||
877 | |||
878 | if (rc == -ENODEV) { | ||
879 | ehc->i.probe_mask |= 1; | ||
880 | detach = 1; | ||
881 | /* give it just two more chances */ | ||
882 | tries = min(tries, 2); | ||
883 | } | ||
884 | |||
885 | if (tries) { | ||
886 | int sleep = ehc->i.flags & ATA_EHI_DID_RESET; | ||
887 | |||
888 | /* consecutive revalidation failures? speed down */ | ||
889 | if (reval_failed) | ||
890 | sata_down_spd_limit(link); | ||
891 | else | ||
892 | reval_failed = 1; | ||
893 | |||
894 | ata_dev_printk(dev, KERN_WARNING, | ||
895 | "retrying hardreset%s\n", | ||
896 | sleep ? " in 5 secs" : ""); | ||
897 | if (sleep) | ||
898 | ssleep(5); | ||
899 | ehc->i.action |= ATA_EH_HARDRESET; | ||
900 | goto retry; | ||
901 | } else { | ||
902 | ata_dev_printk(dev, KERN_ERR, "failed to recover PMP " | ||
903 | "after %d tries, giving up\n", | ||
904 | ATA_EH_PMP_TRIES); | ||
905 | goto fail; | ||
906 | } | ||
907 | } | ||
908 | |||
909 | /* okay, PMP resurrected */ | ||
910 | ehc->i.flags = 0; | ||
911 | |||
912 | DPRINTK("EXIT, rc=0\n"); | ||
913 | return 0; | ||
914 | |||
915 | fail: | ||
916 | sata_pmp_detach(dev); | ||
917 | if (detach) | ||
918 | ata_eh_detach_dev(dev); | ||
919 | else | ||
920 | ata_dev_disable(dev); | ||
921 | |||
922 | DPRINTK("EXIT, rc=%d\n", rc); | ||
923 | return rc; | ||
924 | } | ||
925 | |||
926 | static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap) | ||
927 | { | ||
928 | struct ata_link *link; | ||
929 | unsigned long flags; | ||
930 | int rc; | ||
931 | |||
932 | spin_lock_irqsave(ap->lock, flags); | ||
933 | |||
934 | ata_port_for_each_link(link, ap) { | ||
935 | if (!(link->flags & ATA_LFLAG_DISABLED)) | ||
936 | continue; | ||
937 | |||
938 | spin_unlock_irqrestore(ap->lock, flags); | ||
939 | |||
940 | /* Some PMPs require a hardreset sequence to get | ||
941 | * SError.N working. | ||
942 | */ | ||
943 | if ((link->flags & ATA_LFLAG_HRST_TO_RESUME) && | ||
944 | (link->eh_context.i.flags & ATA_EHI_RESUME_LINK)) | ||
945 | sata_link_hardreset(link, sata_deb_timing_normal, | ||
946 | jiffies + ATA_TMOUT_INTERNAL_QUICK); | ||
947 | |||
948 | /* unconditionally clear SError.N */ | ||
949 | rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); | ||
950 | if (rc) { | ||
951 | ata_link_printk(link, KERN_ERR, "failed to clear " | ||
952 | "SError.N (errno=%d)\n", rc); | ||
953 | return rc; | ||
954 | } | ||
955 | |||
956 | spin_lock_irqsave(ap->lock, flags); | ||
957 | } | ||
958 | |||
959 | spin_unlock_irqrestore(ap->lock, flags); | ||
960 | |||
961 | return 0; | ||
962 | } | ||
963 | |||
964 | static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries) | ||
965 | { | ||
966 | struct ata_port *ap = link->ap; | ||
967 | unsigned long flags; | ||
968 | |||
969 | if (link_tries[link->pmp] && --link_tries[link->pmp]) | ||
970 | return 1; | ||
971 | |||
972 | /* disable this link */ | ||
973 | if (!(link->flags & ATA_LFLAG_DISABLED)) { | ||
974 | ata_link_printk(link, KERN_WARNING, | ||
975 | "failed to recover link after %d tries, disabling\n", | ||
976 | ATA_EH_PMP_LINK_TRIES); | ||
977 | |||
978 | spin_lock_irqsave(ap->lock, flags); | ||
979 | link->flags |= ATA_LFLAG_DISABLED; | ||
980 | spin_unlock_irqrestore(ap->lock, flags); | ||
981 | } | ||
982 | |||
983 | ata_dev_disable(link->device); | ||
984 | link->eh_context.i.action = 0; | ||
985 | |||
986 | return 0; | ||
987 | } | ||
988 | |||
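Together with sata_pmp_eh_handle_disabled_links() above, this gives every fan-out link ATA_EH_PMP_LINK_TRIES recovery attempts. Once those run out the link is flagged ATA_LFLAG_DISABLED and its device disabled; from then on the disabled-links pass only keeps SError.N cleared on it (after a hardreset first, for PMPs flagged ATA_LFLAG_HRST_TO_RESUME) so the remaining ports can keep being recovered.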
989 | /** | ||
990 | * sata_pmp_eh_recover - recover PMP-enabled port | ||
991 | * @ap: ATA port to recover | ||
992 | * @prereset: prereset method (can be NULL) | ||
993 | * @softreset: softreset method | ||
994 | * @hardreset: hardreset method | ||
995 | * @postreset: postreset method (can be NULL) | ||
996 | * @pmp_prereset: PMP prereset method (can be NULL) | ||
997 | * @pmp_softreset: PMP softreset method (can be NULL) | ||
998 | * @pmp_hardreset: PMP hardreset method (can be NULL) | ||
999 | * @pmp_postreset: PMP postreset method (can be NULL) | ||
1000 | * | ||
1001 | * Drive EH recovery operation for PMP-enabled port @ap. This | ||
1002 | * function recovers host and PMP ports with proper retries and | ||
1003 | * fallbacks. Actual recovery operations are performed using | ||
1004 | * ata_eh_recover() and sata_pmp_eh_recover_pmp(). | ||
1005 | * | ||
1006 | * LOCKING: | ||
1007 | * Kernel thread context (may sleep). | ||
1008 | * | ||
1009 | * RETURNS: | ||
1010 | * 0 on success, -errno on failure. | ||
1011 | */ | ||
1012 | static int sata_pmp_eh_recover(struct ata_port *ap, | ||
1013 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1014 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1015 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1016 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset) | ||
1017 | { | ||
1018 | int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; | ||
1019 | struct ata_link *pmp_link = &ap->link; | ||
1020 | struct ata_device *pmp_dev = pmp_link->device; | ||
1021 | struct ata_eh_context *pmp_ehc = &pmp_link->eh_context; | ||
1022 | struct ata_link *link; | ||
1023 | struct ata_device *dev; | ||
1024 | unsigned int err_mask; | ||
1025 | u32 gscr_error, sntf; | ||
1026 | int cnt, rc; | ||
1027 | |||
1028 | pmp_tries = ATA_EH_PMP_TRIES; | ||
1029 | ata_port_for_each_link(link, ap) | ||
1030 | link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; | ||
1031 | |||
1032 | retry: | ||
1033 | /* PMP attached? */ | ||
1034 | if (!ap->nr_pmp_links) { | ||
1035 | rc = ata_eh_recover(ap, prereset, softreset, hardreset, | ||
1036 | postreset, NULL); | ||
1037 | if (rc) { | ||
1038 | ata_link_for_each_dev(dev, &ap->link) | ||
1039 | ata_dev_disable(dev); | ||
1040 | return rc; | ||
1041 | } | ||
1042 | |||
1043 | if (pmp_dev->class != ATA_DEV_PMP) | ||
1044 | return 0; | ||
1045 | |||
1046 | /* new PMP online */ | ||
1047 | ata_port_for_each_link(link, ap) | ||
1048 | link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; | ||
1049 | |||
1050 | /* fall through */ | ||
1051 | } | ||
1052 | |||
1053 | /* recover pmp */ | ||
1054 | rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset, | ||
1055 | postreset); | ||
1056 | if (rc) | ||
1057 | goto pmp_fail; | ||
1058 | |||
1059 | /* handle disabled links */ | ||
1060 | rc = sata_pmp_eh_handle_disabled_links(ap); | ||
1061 | if (rc) | ||
1062 | goto pmp_fail; | ||
1063 | |||
1064 | /* recover links */ | ||
1065 | rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset, | ||
1066 | pmp_postreset, &link); | ||
1067 | if (rc) | ||
1068 | goto link_fail; | ||
1069 | |||
1070 | /* Connection status might have changed while resetting other | ||
1071 | * links, check SATA_PMP_GSCR_ERROR before returning. | ||
1072 | */ | ||
1073 | |||
1074 | /* clear SNotification */ | ||
1075 | rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); | ||
1076 | if (rc == 0) | ||
1077 | sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); | ||
1078 | |||
1079 | /* enable notification */ | ||
1080 | if (pmp_dev->flags & ATA_DFLAG_AN) { | ||
1081 | pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY; | ||
1082 | |||
1083 | err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN, | ||
1084 | pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]); | ||
1085 | if (err_mask) { | ||
1086 | ata_dev_printk(pmp_dev, KERN_ERR, "failed to write " | ||
1087 | "PMP_FEAT_EN (Emask=0x%x)\n", err_mask); | ||
1088 | rc = -EIO; | ||
1089 | goto pmp_fail; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /* check GSCR_ERROR */ | ||
1094 | err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error); | ||
1095 | if (err_mask) { | ||
1096 | ata_dev_printk(pmp_dev, KERN_ERR, "failed to read " | ||
1097 | "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask); | ||
1098 | rc = -EIO; | ||
1099 | goto pmp_fail; | ||
1100 | } | ||
1101 | |||
1102 | cnt = 0; | ||
1103 | ata_port_for_each_link(link, ap) { | ||
1104 | if (!(gscr_error & (1 << link->pmp))) | ||
1105 | continue; | ||
1106 | |||
1107 | if (sata_pmp_handle_link_fail(link, link_tries)) { | ||
1108 | ata_ehi_hotplugged(&link->eh_context.i); | ||
1109 | cnt++; | ||
1110 | } else { | ||
1111 | ata_link_printk(link, KERN_WARNING, | ||
1112 | "PHY status changed but maxed out on retries, " | ||
1113 | "giving up\n"); | ||
1114 | ata_link_printk(link, KERN_WARNING, | ||
1115 | "Manully issue scan to resume this link\n"); | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | if (cnt) { | ||
1120 | ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some " | ||
1121 | "ports, repeating recovery\n"); | ||
1122 | goto retry; | ||
1123 | } | ||
1124 | |||
1125 | return 0; | ||
1126 | |||
1127 | link_fail: | ||
1128 | if (sata_pmp_handle_link_fail(link, link_tries)) { | ||
1129 | pmp_ehc->i.action |= ATA_EH_HARDRESET; | ||
1130 | goto retry; | ||
1131 | } | ||
1132 | |||
1133 | /* fall through */ | ||
1134 | pmp_fail: | ||
1135 | /* Control always ends up here after detaching PMP. Shut up | ||
1136 | * and return if we're unloading. | ||
1137 | */ | ||
1138 | if (ap->pflags & ATA_PFLAG_UNLOADING) | ||
1139 | return rc; | ||
1140 | |||
1141 | if (!ap->nr_pmp_links) | ||
1142 | goto retry; | ||
1143 | |||
1144 | if (--pmp_tries) { | ||
1145 | ata_port_printk(ap, KERN_WARNING, | ||
1146 | "failed to recover PMP, retrying in 5 secs\n"); | ||
1147 | pmp_ehc->i.action |= ATA_EH_HARDRESET; | ||
1148 | ssleep(5); | ||
1149 | goto retry; | ||
1150 | } | ||
1151 | |||
1152 | ata_port_printk(ap, KERN_ERR, | ||
1153 | "failed to recover PMP after %d tries, giving up\n", | ||
1154 | ATA_EH_PMP_TRIES); | ||
1155 | sata_pmp_detach(pmp_dev); | ||
1156 | ata_dev_disable(pmp_dev); | ||
1157 | |||
1158 | return rc; | ||
1159 | } | ||
1160 | |||
1161 | /** | ||
1162 | * sata_pmp_do_eh - do standard error handling for PMP-enabled host | ||
1163 | * @ap: host port to handle error for | ||
1164 | * @prereset: prereset method (can be NULL) | ||
1165 | * @softreset: softreset method | ||
1166 | * @hardreset: hardreset method | ||
1167 | * @postreset: postreset method (can be NULL) | ||
1168 | * @pmp_prereset: PMP prereset method (can be NULL) | ||
1169 | * @pmp_softreset: PMP softreset method (can be NULL) | ||
1170 | * @pmp_hardreset: PMP hardreset method (can be NULL) | ||
1171 | * @pmp_postreset: PMP postreset method (can be NULL) | ||
1172 | * | ||
1173 | * Perform standard error handling sequence for PMP-enabled host | ||
1174 | * @ap. | ||
1175 | * | ||
1176 | * LOCKING: | ||
1177 | * Kernel thread context (may sleep). | ||
1178 | */ | ||
1179 | void sata_pmp_do_eh(struct ata_port *ap, | ||
1180 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1181 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1182 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1183 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset) | ||
1184 | { | ||
1185 | ata_eh_autopsy(ap); | ||
1186 | ata_eh_report(ap); | ||
1187 | sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset, | ||
1188 | pmp_prereset, pmp_softreset, pmp_hardreset, | ||
1189 | pmp_postreset); | ||
1190 | ata_eh_finish(ap); | ||
1191 | } | ||
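sata_pmp_do_eh() is the piece a PMP-aware low-level driver plugs into its ->error_handler port operation. A minimal sketch of such a hookup follows; my_error_handler and the my_* reset methods are placeholders for whatever the driver really provides, not functions introduced by this patch:

/* Hedged sketch: hypothetical LLD error handler delegating to sata_pmp_do_eh(). */
static void my_error_handler(struct ata_port *ap)
{
	sata_pmp_do_eh(ap,
		       ata_std_prereset, my_softreset, my_hardreset,
		       ata_std_postreset,			/* host link methods */
		       my_pmp_prereset, my_pmp_softreset, NULL,
		       ata_std_postreset);			/* PMP link methods */
}

All four PMP-side methods are marked "(can be NULL)" in the kernel-doc above, so a driver without a special PMP reset path can omit them and still get the retry and fallback machinery of sata_pmp_eh_recover().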
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e83647651b31..ea53e6a570b4 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -71,11 +71,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
71 | #define ALL_SUB_MPAGES 0xff | 71 | #define ALL_SUB_MPAGES 0xff |
72 | 72 | ||
73 | 73 | ||
74 | static const u8 def_rw_recovery_mpage[] = { | 74 | static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = { |
75 | RW_RECOVERY_MPAGE, | 75 | RW_RECOVERY_MPAGE, |
76 | RW_RECOVERY_MPAGE_LEN - 2, | 76 | RW_RECOVERY_MPAGE_LEN - 2, |
77 | (1 << 7) | /* AWRE, sat-r06 say it shall be 0 */ | 77 | (1 << 7), /* AWRE */ |
78 | (1 << 6), /* ARRE (auto read reallocation) */ | ||
79 | 0, /* read retry count */ | 78 | 0, /* read retry count */ |
80 | 0, 0, 0, 0, | 79 | 0, 0, 0, 0, |
81 | 0, /* write retry count */ | 80 | 0, /* write retry count */ |
@@ -450,13 +449,8 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, | |||
450 | qc->scsicmd = cmd; | 449 | qc->scsicmd = cmd; |
451 | qc->scsidone = done; | 450 | qc->scsidone = done; |
452 | 451 | ||
453 | if (cmd->use_sg) { | 452 | qc->__sg = scsi_sglist(cmd); |
454 | qc->__sg = (struct scatterlist *) cmd->request_buffer; | 453 | qc->n_elem = scsi_sg_count(cmd); |
455 | qc->n_elem = cmd->use_sg; | ||
456 | } else if (cmd->request_bufflen) { | ||
457 | qc->__sg = &qc->sgent; | ||
458 | qc->n_elem = 1; | ||
459 | } | ||
460 | } else { | 454 | } else { |
461 | cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); | 455 | cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); |
462 | done(cmd); | 456 | done(cmd); |
@@ -755,6 +749,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) | |||
755 | { | 749 | { |
756 | sdev->use_10_for_rw = 1; | 750 | sdev->use_10_for_rw = 1; |
757 | sdev->use_10_for_ms = 1; | 751 | sdev->use_10_for_ms = 1; |
752 | |||
753 | /* Schedule policy is determined by ->qc_defer() callback and | ||
754 | * it needs to see every deferred qc. Set dev_blocked to 1 to | ||
755 | * prevent SCSI midlayer from automatically deferring | ||
756 | * requests. | ||
757 | */ | ||
758 | sdev->max_device_blocked = 1; | ||
758 | } | 759 | } |
759 | 760 | ||
760 | static void ata_scsi_dev_config(struct scsi_device *sdev, | 761 | static void ata_scsi_dev_config(struct scsi_device *sdev, |
@@ -943,6 +944,13 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) | |||
943 | goto invalid_fld; /* LOEJ bit set not supported */ | 944 | goto invalid_fld; /* LOEJ bit set not supported */ |
944 | if (((cdb[4] >> 4) & 0xf) != 0) | 945 | if (((cdb[4] >> 4) & 0xf) != 0) |
945 | goto invalid_fld; /* power conditions not supported */ | 946 | goto invalid_fld; /* power conditions not supported */ |
947 | |||
948 | if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) { | ||
949 | /* the device lacks PM support, finish without doing anything */ | ||
950 | scmd->result = SAM_STAT_GOOD; | ||
951 | return 1; | ||
952 | } | ||
953 | |||
946 | if (cdb[4] & 0x1) { | 954 | if (cdb[4] & 0x1) { |
947 | tf->nsect = 1; /* 1 sector, lba=0 */ | 955 | tf->nsect = 1; /* 1 sector, lba=0 */ |
948 | 956 | ||
@@ -1368,14 +1376,14 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1368 | case ATA_CMD_SET_FEATURES: | 1376 | case ATA_CMD_SET_FEATURES: |
1369 | if ((qc->tf.feature == SETFEATURES_WC_ON) || | 1377 | if ((qc->tf.feature == SETFEATURES_WC_ON) || |
1370 | (qc->tf.feature == SETFEATURES_WC_OFF)) { | 1378 | (qc->tf.feature == SETFEATURES_WC_OFF)) { |
1371 | ap->eh_info.action |= ATA_EH_REVALIDATE; | 1379 | ap->link.eh_info.action |= ATA_EH_REVALIDATE; |
1372 | ata_port_schedule_eh(ap); | 1380 | ata_port_schedule_eh(ap); |
1373 | } | 1381 | } |
1374 | break; | 1382 | break; |
1375 | 1383 | ||
1376 | case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ | 1384 | case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ |
1377 | case ATA_CMD_SET_MULTI: /* multi_count changed */ | 1385 | case ATA_CMD_SET_MULTI: /* multi_count changed */ |
1378 | ap->eh_info.action |= ATA_EH_REVALIDATE; | 1386 | ap->link.eh_info.action |= ATA_EH_REVALIDATE; |
1379 | ata_port_schedule_eh(ap); | 1387 | ata_port_schedule_eh(ap); |
1380 | break; | 1388 | break; |
1381 | } | 1389 | } |
@@ -1422,37 +1430,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1422 | } | 1430 | } |
1423 | 1431 | ||
1424 | /** | 1432 | /** |
1425 | * ata_scmd_need_defer - Check whether we need to defer scmd | ||
1426 | * @dev: ATA device to which the command is addressed | ||
1427 | * @is_io: Is the command IO (and thus possibly NCQ)? | ||
1428 | * | ||
1429 | * NCQ and non-NCQ commands cannot run together. As upper layer | ||
1430 | * only knows the queue depth, we are responsible for maintaining | ||
1431 | * exclusion. This function checks whether a new command can be | ||
1432 | * issued to @dev. | ||
1433 | * | ||
1434 | * LOCKING: | ||
1435 | * spin_lock_irqsave(host lock) | ||
1436 | * | ||
1437 | * RETURNS: | ||
1438 | * 1 if deferring is needed, 0 otherwise. | ||
1439 | */ | ||
1440 | static int ata_scmd_need_defer(struct ata_device *dev, int is_io) | ||
1441 | { | ||
1442 | struct ata_port *ap = dev->ap; | ||
1443 | int is_ncq = is_io && ata_ncq_enabled(dev); | ||
1444 | |||
1445 | if (is_ncq) { | ||
1446 | if (!ata_tag_valid(ap->active_tag)) | ||
1447 | return 0; | ||
1448 | } else { | ||
1449 | if (!ata_tag_valid(ap->active_tag) && !ap->sactive) | ||
1450 | return 0; | ||
1451 | } | ||
1452 | return 1; | ||
1453 | } | ||
1454 | |||
1455 | /** | ||
1456 | * ata_scsi_translate - Translate then issue SCSI command to ATA device | 1433 | * ata_scsi_translate - Translate then issue SCSI command to ATA device |
1457 | * @dev: ATA device to which the command is addressed | 1434 | * @dev: ATA device to which the command is addressed |
1458 | * @cmd: SCSI command to execute | 1435 | * @cmd: SCSI command to execute |
@@ -1483,14 +1460,12 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
1483 | void (*done)(struct scsi_cmnd *), | 1460 | void (*done)(struct scsi_cmnd *), |
1484 | ata_xlat_func_t xlat_func) | 1461 | ata_xlat_func_t xlat_func) |
1485 | { | 1462 | { |
1463 | struct ata_port *ap = dev->link->ap; | ||
1486 | struct ata_queued_cmd *qc; | 1464 | struct ata_queued_cmd *qc; |
1487 | int is_io = xlat_func == ata_scsi_rw_xlat; | 1465 | int rc; |
1488 | 1466 | ||
1489 | VPRINTK("ENTER\n"); | 1467 | VPRINTK("ENTER\n"); |
1490 | 1468 | ||
1491 | if (unlikely(ata_scmd_need_defer(dev, is_io))) | ||
1492 | goto defer; | ||
1493 | |||
1494 | qc = ata_scsi_qc_new(dev, cmd, done); | 1469 | qc = ata_scsi_qc_new(dev, cmd, done); |
1495 | if (!qc) | 1470 | if (!qc) |
1496 | goto err_mem; | 1471 | goto err_mem; |
@@ -1498,17 +1473,13 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
1498 | /* data is present; dma-map it */ | 1473 | /* data is present; dma-map it */ |
1499 | if (cmd->sc_data_direction == DMA_FROM_DEVICE || | 1474 | if (cmd->sc_data_direction == DMA_FROM_DEVICE || |
1500 | cmd->sc_data_direction == DMA_TO_DEVICE) { | 1475 | cmd->sc_data_direction == DMA_TO_DEVICE) { |
1501 | if (unlikely(cmd->request_bufflen < 1)) { | 1476 | if (unlikely(scsi_bufflen(cmd) < 1)) { |
1502 | ata_dev_printk(dev, KERN_WARNING, | 1477 | ata_dev_printk(dev, KERN_WARNING, |
1503 | "WARNING: zero len r/w req\n"); | 1478 | "WARNING: zero len r/w req\n"); |
1504 | goto err_did; | 1479 | goto err_did; |
1505 | } | 1480 | } |
1506 | 1481 | ||
1507 | if (cmd->use_sg) | 1482 | ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd)); |
1508 | ata_sg_init(qc, cmd->request_buffer, cmd->use_sg); | ||
1509 | else | ||
1510 | ata_sg_init_one(qc, cmd->request_buffer, | ||
1511 | cmd->request_bufflen); | ||
1512 | 1483 | ||
1513 | qc->dma_dir = cmd->sc_data_direction; | 1484 | qc->dma_dir = cmd->sc_data_direction; |
1514 | } | 1485 | } |
@@ -1518,6 +1489,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
1518 | if (xlat_func(qc)) | 1489 | if (xlat_func(qc)) |
1519 | goto early_finish; | 1490 | goto early_finish; |
1520 | 1491 | ||
1492 | if (ap->ops->qc_defer) { | ||
1493 | if ((rc = ap->ops->qc_defer(qc))) | ||
1494 | goto defer; | ||
1495 | } | ||
1496 | |||
1521 | /* select device, send command to hardware */ | 1497 | /* select device, send command to hardware */ |
1522 | ata_qc_issue(qc); | 1498 | ata_qc_issue(qc); |
1523 | 1499 | ||
@@ -1539,8 +1515,12 @@ err_mem: | |||
1539 | return 0; | 1515 | return 0; |
1540 | 1516 | ||
1541 | defer: | 1517 | defer: |
1518 | ata_qc_free(qc); | ||
1542 | DPRINTK("EXIT - defer\n"); | 1519 | DPRINTK("EXIT - defer\n"); |
1543 | return SCSI_MLQUEUE_DEVICE_BUSY; | 1520 | if (rc == ATA_DEFER_LINK) |
1521 | return SCSI_MLQUEUE_DEVICE_BUSY; | ||
1522 | else | ||
1523 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1544 | } | 1524 | } |
1545 | 1525 | ||
1546 | /** | 1526 | /** |
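The ata_scmd_need_defer() logic removed above does not disappear; it moves behind the new ->qc_defer() port operation, which ata_scsi_translate() now consults once the xlat function has built the taskfile, mapping ATA_DEFER_LINK to SCSI_MLQUEUE_DEVICE_BUSY and anything else to SCSI_MLQUEUE_HOST_BUSY. A qc_defer implementation is expected to look roughly like the sketch below, a per-link restatement of the old NCQ/non-NCQ exclusion check; treat it as an illustration rather than the exact helper added elsewhere in this series:

/* Hedged sketch: NCQ/non-NCQ exclusion expressed as a ->qc_defer() callback. */
static int example_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: fine as long as no non-NCQ command owns the link */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* non-NCQ: fine only if nothing at all is in flight */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;	/* comes back as SCSI_MLQUEUE_DEVICE_BUSY */
}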
@@ -1562,15 +1542,14 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) | |||
1562 | u8 *buf; | 1542 | u8 *buf; |
1563 | unsigned int buflen; | 1543 | unsigned int buflen; |
1564 | 1544 | ||
1565 | if (cmd->use_sg) { | 1545 | struct scatterlist *sg = scsi_sglist(cmd); |
1566 | struct scatterlist *sg; | ||
1567 | 1546 | ||
1568 | sg = (struct scatterlist *) cmd->request_buffer; | 1547 | if (sg) { |
1569 | buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; | 1548 | buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; |
1570 | buflen = sg->length; | 1549 | buflen = sg->length; |
1571 | } else { | 1550 | } else { |
1572 | buf = cmd->request_buffer; | 1551 | buf = NULL; |
1573 | buflen = cmd->request_bufflen; | 1552 | buflen = 0; |
1574 | } | 1553 | } |
1575 | 1554 | ||
1576 | *buf_out = buf; | 1555 | *buf_out = buf; |
@@ -1590,12 +1569,9 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) | |||
1590 | 1569 | ||
1591 | static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) | 1570 | static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) |
1592 | { | 1571 | { |
1593 | if (cmd->use_sg) { | 1572 | struct scatterlist *sg = scsi_sglist(cmd); |
1594 | struct scatterlist *sg; | 1573 | if (sg) |
1595 | |||
1596 | sg = (struct scatterlist *) cmd->request_buffer; | ||
1597 | kunmap_atomic(buf - sg->offset, KM_IRQ0); | 1574 | kunmap_atomic(buf - sg->offset, KM_IRQ0); |
1598 | } | ||
1599 | } | 1575 | } |
1600 | 1576 | ||
1601 | /** | 1577 | /** |
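ata_scsi_rbuf_get()/ata_scsi_rbuf_put() are only ever used as a bracket around a simulator actor, which is why the scsi_sglist() conversion above only has to touch the mapping and unmapping ends. The surrounding helper (ata_scsi_rbuf_fill(), elsewhere in this file) follows roughly the shape sketched below; the body is a paraphrase for orientation, not a line taken from the patch:

/* Hedged sketch of the rbuf_get/rbuf_put bracket around a simulate actor. */
static void example_rbuf_fill(struct ata_scsi_args *args,
			      unsigned int (*actor)(struct ata_scsi_args *args,
						     u8 *rbuf, unsigned int buflen))
{
	struct scsi_cmnd *cmd = args->cmd;
	unsigned int buflen, rc;
	u8 *rbuf;

	buflen = ata_scsi_rbuf_get(cmd, &rbuf);	/* kmap the first s/g entry */
	memset(rbuf, 0, buflen);
	rc = actor(args, rbuf, buflen);		/* e.g. ata_scsiop_inq_89 */
	ata_scsi_rbuf_put(cmd, rbuf);		/* kunmap */

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
	args->done(cmd);
}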
@@ -1817,6 +1793,62 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, | |||
1817 | } | 1793 | } |
1818 | 1794 | ||
1819 | /** | 1795 | /** |
1796 | * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info | ||
1797 | * @args: device IDENTIFY data / SCSI command of interest. | ||
1798 | * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. | ||
1799 | * @buflen: Response buffer length. | ||
1800 | * | ||
1801 | * Yields SAT-specified ATA VPD page. | ||
1802 | * | ||
1803 | * LOCKING: | ||
1804 | * spin_lock_irqsave(host lock) | ||
1805 | */ | ||
1806 | |||
1807 | unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, | ||
1808 | unsigned int buflen) | ||
1809 | { | ||
1810 | u8 pbuf[60]; | ||
1811 | struct ata_taskfile tf; | ||
1812 | unsigned int i; | ||
1813 | |||
1814 | if (!buflen) | ||
1815 | return 0; | ||
1816 | |||
1817 | memset(&pbuf, 0, sizeof(pbuf)); | ||
1818 | memset(&tf, 0, sizeof(tf)); | ||
1819 | |||
1820 | pbuf[1] = 0x89; /* our page code */ | ||
1821 | pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ | ||
1822 | pbuf[3] = (0x238 & 0xff); | ||
1823 | |||
1824 | memcpy(&pbuf[8], "linux ", 8); | ||
1825 | memcpy(&pbuf[16], "libata ", 16); | ||
1826 | memcpy(&pbuf[32], DRV_VERSION, 4); | ||
1827 | ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4); | ||
1828 | |||
1829 | /* we don't store the ATA device signature, so we fake it */ | ||
1830 | |||
1831 | tf.command = ATA_DRDY; /* really, this is Status reg */ | ||
1832 | tf.lbal = 0x1; | ||
1833 | tf.nsect = 0x1; | ||
1834 | |||
1835 | ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */ | ||
1836 | pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ | ||
1837 | |||
1838 | pbuf[56] = ATA_CMD_ID_ATA; | ||
1839 | |||
1840 | i = min(buflen, 60U); | ||
1841 | memcpy(rbuf, &pbuf[0], i); | ||
1842 | buflen -= i; | ||
1843 | |||
1844 | if (!buflen) | ||
1845 | return 0; | ||
1846 | |||
1847 | memcpy(&rbuf[60], &args->id[0], min(buflen, 512U)); | ||
1848 | return 0; | ||
1849 | } | ||
1850 | |||
1851 | /** | ||
1820 | * ata_scsiop_noop - Command handler that simply returns success. | 1852 | * ata_scsiop_noop - Command handler that simply returns success. |
1821 | * @args: device IDENTIFY data / SCSI command of interest. | 1853 | * @args: device IDENTIFY data / SCSI command of interest. |
1822 | * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. | 1854 | * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. |
@@ -2273,8 +2305,8 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) | |||
2273 | qc->tf.feature |= ATAPI_PKT_DMA; | 2305 | qc->tf.feature |= ATAPI_PKT_DMA; |
2274 | } else { | 2306 | } else { |
2275 | qc->tf.protocol = ATA_PROT_ATAPI; | 2307 | qc->tf.protocol = ATA_PROT_ATAPI; |
2276 | qc->tf.lbam = (8 * 1024) & 0xff; | 2308 | qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; |
2277 | qc->tf.lbah = (8 * 1024) >> 8; | 2309 | qc->tf.lbah = 0; |
2278 | } | 2310 | } |
2279 | qc->nbytes = SCSI_SENSE_BUFFERSIZE; | 2311 | qc->nbytes = SCSI_SENSE_BUFFERSIZE; |
2280 | 2312 | ||
@@ -2383,6 +2415,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2383 | struct ata_device *dev = qc->dev; | 2415 | struct ata_device *dev = qc->dev; |
2384 | int using_pio = (dev->flags & ATA_DFLAG_PIO); | 2416 | int using_pio = (dev->flags & ATA_DFLAG_PIO); |
2385 | int nodata = (scmd->sc_data_direction == DMA_NONE); | 2417 | int nodata = (scmd->sc_data_direction == DMA_NONE); |
2418 | unsigned int nbytes; | ||
2386 | 2419 | ||
2387 | memset(qc->cdb, 0, dev->cdb_len); | 2420 | memset(qc->cdb, 0, dev->cdb_len); |
2388 | memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); | 2421 | memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); |
@@ -2396,20 +2429,26 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2396 | } | 2429 | } |
2397 | 2430 | ||
2398 | qc->tf.command = ATA_CMD_PACKET; | 2431 | qc->tf.command = ATA_CMD_PACKET; |
2399 | qc->nbytes = scmd->request_bufflen; | 2432 | qc->nbytes = scsi_bufflen(scmd); |
2400 | 2433 | ||
2401 | /* check whether ATAPI DMA is safe */ | 2434 | /* check whether ATAPI DMA is safe */ |
2402 | if (!using_pio && ata_check_atapi_dma(qc)) | 2435 | if (!using_pio && ata_check_atapi_dma(qc)) |
2403 | using_pio = 1; | 2436 | using_pio = 1; |
2404 | 2437 | ||
2438 | /* Some controller variants snoop this value for Packet transfers | ||
2439 | to do state machine and FIFO management. Thus we want to set it | ||
2440 | properly, even for DMA where it is effectively meaningless */ | ||
2441 | nbytes = min(qc->nbytes, (unsigned int)63 * 1024); | ||
2442 | |||
2443 | qc->tf.lbam = (nbytes & 0xFF); | ||
2444 | qc->tf.lbah = (nbytes >> 8); | ||
2445 | |||
2405 | if (using_pio || nodata) { | 2446 | if (using_pio || nodata) { |
2406 | /* no data, or PIO data xfer */ | 2447 | /* no data, or PIO data xfer */ |
2407 | if (nodata) | 2448 | if (nodata) |
2408 | qc->tf.protocol = ATA_PROT_ATAPI_NODATA; | 2449 | qc->tf.protocol = ATA_PROT_ATAPI_NODATA; |
2409 | else | 2450 | else |
2410 | qc->tf.protocol = ATA_PROT_ATAPI; | 2451 | qc->tf.protocol = ATA_PROT_ATAPI; |
2411 | qc->tf.lbam = (8 * 1024) & 0xff; | ||
2412 | qc->tf.lbah = (8 * 1024) >> 8; | ||
2413 | } else { | 2452 | } else { |
2414 | /* DMA data xfer */ | 2453 | /* DMA data xfer */ |
2415 | qc->tf.protocol = ATA_PROT_ATAPI_DMA; | 2454 | qc->tf.protocol = ATA_PROT_ATAPI_DMA; |
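The byte count limit lives in the lbam/lbah register pair and so tops out just under 64 KiB, hence the cap at 63 * 1024. As a worked example, a 128 KiB ATAPI transfer ends up advertising nbytes = min(131072, 64512) = 64512 = 0xFC00, i.e. lbam = 0x00 and lbah = 0xFC. Per the comment above, the value mainly feeds controller state-machine and FIFO snooping and is effectively meaningless for DMA.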
@@ -2420,24 +2459,42 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2420 | qc->tf.feature |= ATAPI_DMADIR; | 2459 | qc->tf.feature |= ATAPI_DMADIR; |
2421 | } | 2460 | } |
2422 | 2461 | ||
2462 | |||
2463 | /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE | ||
2464 | as ATAPI tape drives don't get this right otherwise */ | ||
2423 | return 0; | 2465 | return 0; |
2424 | } | 2466 | } |
2425 | 2467 | ||
2426 | static struct ata_device * ata_find_dev(struct ata_port *ap, int id) | 2468 | static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) |
2427 | { | 2469 | { |
2428 | if (likely(id < ATA_MAX_DEVICES)) | 2470 | if (ap->nr_pmp_links == 0) { |
2429 | return &ap->device[id]; | 2471 | if (likely(devno < ata_link_max_devices(&ap->link))) |
2472 | return &ap->link.device[devno]; | ||
2473 | } else { | ||
2474 | if (likely(devno < ap->nr_pmp_links)) | ||
2475 | return &ap->pmp_link[devno].device[0]; | ||
2476 | } | ||
2477 | |||
2430 | return NULL; | 2478 | return NULL; |
2431 | } | 2479 | } |
2432 | 2480 | ||
2433 | static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, | 2481 | static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, |
2434 | const struct scsi_device *scsidev) | 2482 | const struct scsi_device *scsidev) |
2435 | { | 2483 | { |
2484 | int devno; | ||
2485 | |||
2436 | /* skip commands not addressed to targets we simulate */ | 2486 | /* skip commands not addressed to targets we simulate */ |
2437 | if (unlikely(scsidev->channel || scsidev->lun)) | 2487 | if (ap->nr_pmp_links == 0) { |
2438 | return NULL; | 2488 | if (unlikely(scsidev->channel || scsidev->lun)) |
2489 | return NULL; | ||
2490 | devno = scsidev->id; | ||
2491 | } else { | ||
2492 | if (unlikely(scsidev->id || scsidev->lun)) | ||
2493 | return NULL; | ||
2494 | devno = scsidev->channel; | ||
2495 | } | ||
2439 | 2496 | ||
2440 | return ata_find_dev(ap, scsidev->id); | 2497 | return ata_find_dev(ap, devno); |
2441 | } | 2498 | } |
2442 | 2499 | ||
2443 | /** | 2500 | /** |
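The addressing split mirrors the topology: with no PMP attached, scsidev->id selects the device on the host link as before, while with a PMP attached scsidev->channel selects the fan-out port and each PMP link carries exactly one device (device[0]). Anything addressed outside that scheme makes __ata_scsi_find_dev() return NULL.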
@@ -2458,7 +2515,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev) | |||
2458 | if (unlikely(!ata_dev_enabled(dev))) | 2515 | if (unlikely(!ata_dev_enabled(dev))) |
2459 | return 0; | 2516 | return 0; |
2460 | 2517 | ||
2461 | if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) { | 2518 | if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) { |
2462 | if (unlikely(dev->class == ATA_DEV_ATAPI)) { | 2519 | if (unlikely(dev->class == ATA_DEV_ATAPI)) { |
2463 | ata_dev_printk(dev, KERN_WARNING, | 2520 | ata_dev_printk(dev, KERN_WARNING, |
2464 | "WARNING: ATAPI is %s, device ignored.\n", | 2521 | "WARNING: ATAPI is %s, device ignored.\n", |
@@ -2631,7 +2688,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2631 | case ATA_CMD_WRITE_LONG_ONCE: | 2688 | case ATA_CMD_WRITE_LONG_ONCE: |
2632 | if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) | 2689 | if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) |
2633 | goto invalid_fld; | 2690 | goto invalid_fld; |
2634 | qc->sect_size = scmd->request_bufflen; | 2691 | qc->sect_size = scsi_bufflen(scmd); |
2635 | } | 2692 | } |
2636 | 2693 | ||
2637 | /* | 2694 | /* |
@@ -2661,7 +2718,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2661 | * TODO: find out if we need to do more here to | 2718 | * TODO: find out if we need to do more here to |
2662 | * cover scatter/gather case. | 2719 | * cover scatter/gather case. |
2663 | */ | 2720 | */ |
2664 | qc->nbytes = scmd->request_bufflen; | 2721 | qc->nbytes = scsi_bufflen(scmd); |
2665 | 2722 | ||
2666 | /* request result TF */ | 2723 | /* request result TF */ |
2667 | qc->flags |= ATA_QCFLAG_RESULT_TF; | 2724 | qc->flags |= ATA_QCFLAG_RESULT_TF; |
@@ -2746,28 +2803,48 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, | |||
2746 | void (*done)(struct scsi_cmnd *), | 2803 | void (*done)(struct scsi_cmnd *), |
2747 | struct ata_device *dev) | 2804 | struct ata_device *dev) |
2748 | { | 2805 | { |
2806 | u8 scsi_op = scmd->cmnd[0]; | ||
2807 | ata_xlat_func_t xlat_func; | ||
2749 | int rc = 0; | 2808 | int rc = 0; |
2750 | 2809 | ||
2751 | if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) { | ||
2752 | DPRINTK("bad CDB len=%u, max=%u\n", | ||
2753 | scmd->cmd_len, dev->cdb_len); | ||
2754 | scmd->result = DID_ERROR << 16; | ||
2755 | done(scmd); | ||
2756 | return 0; | ||
2757 | } | ||
2758 | |||
2759 | if (dev->class == ATA_DEV_ATA) { | 2810 | if (dev->class == ATA_DEV_ATA) { |
2760 | ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, | 2811 | if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) |
2761 | scmd->cmnd[0]); | 2812 | goto bad_cdb_len; |
2762 | 2813 | ||
2763 | if (xlat_func) | 2814 | xlat_func = ata_get_xlat_func(dev, scsi_op); |
2764 | rc = ata_scsi_translate(dev, scmd, done, xlat_func); | 2815 | } else { |
2765 | else | 2816 | if (unlikely(!scmd->cmd_len)) |
2766 | ata_scsi_simulate(dev, scmd, done); | 2817 | goto bad_cdb_len; |
2767 | } else | 2818 | |
2768 | rc = ata_scsi_translate(dev, scmd, done, atapi_xlat); | 2819 | xlat_func = NULL; |
2820 | if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { | ||
2821 | /* relay SCSI command to ATAPI device */ | ||
2822 | if (unlikely(scmd->cmd_len > dev->cdb_len)) | ||
2823 | goto bad_cdb_len; | ||
2824 | |||
2825 | xlat_func = atapi_xlat; | ||
2826 | } else { | ||
2827 | /* ATA_16 passthru, treat as an ATA command */ | ||
2828 | if (unlikely(scmd->cmd_len > 16)) | ||
2829 | goto bad_cdb_len; | ||
2830 | |||
2831 | xlat_func = ata_get_xlat_func(dev, scsi_op); | ||
2832 | } | ||
2833 | } | ||
2834 | |||
2835 | if (xlat_func) | ||
2836 | rc = ata_scsi_translate(dev, scmd, done, xlat_func); | ||
2837 | else | ||
2838 | ata_scsi_simulate(dev, scmd, done); | ||
2769 | 2839 | ||
2770 | return rc; | 2840 | return rc; |
2841 | |||
2842 | bad_cdb_len: | ||
2843 | DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", | ||
2844 | scmd->cmd_len, scsi_op, dev->cdb_len); | ||
2845 | scmd->result = DID_ERROR << 16; | ||
2846 | done(scmd); | ||
2847 | return 0; | ||
2771 | } | 2848 | } |
2772 | 2849 | ||
2773 | /** | 2850 | /** |
@@ -2835,6 +2912,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
2835 | { | 2912 | { |
2836 | struct ata_scsi_args args; | 2913 | struct ata_scsi_args args; |
2837 | const u8 *scsicmd = cmd->cmnd; | 2914 | const u8 *scsicmd = cmd->cmnd; |
2915 | u8 tmp8; | ||
2838 | 2916 | ||
2839 | args.dev = dev; | 2917 | args.dev = dev; |
2840 | args.id = dev->id; | 2918 | args.id = dev->id; |
@@ -2842,15 +2920,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
2842 | args.done = done; | 2920 | args.done = done; |
2843 | 2921 | ||
2844 | switch(scsicmd[0]) { | 2922 | switch(scsicmd[0]) { |
2845 | /* no-op's, complete with success */ | 2923 | /* TODO: worth improving? */ |
2846 | case SYNCHRONIZE_CACHE: | 2924 | case FORMAT_UNIT: |
2847 | case REZERO_UNIT: | 2925 | ata_scsi_invalid_field(cmd, done); |
2848 | case SEEK_6: | ||
2849 | case SEEK_10: | ||
2850 | case TEST_UNIT_READY: | ||
2851 | case FORMAT_UNIT: /* FIXME: correct? */ | ||
2852 | case SEND_DIAGNOSTIC: /* FIXME: correct? */ | ||
2853 | ata_scsi_rbuf_fill(&args, ata_scsiop_noop); | ||
2854 | break; | 2926 | break; |
2855 | 2927 | ||
2856 | case INQUIRY: | 2928 | case INQUIRY: |
@@ -2858,14 +2930,23 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
2858 | ata_scsi_invalid_field(cmd, done); | 2930 | ata_scsi_invalid_field(cmd, done); |
2859 | else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ | 2931 | else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ |
2860 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); | 2932 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); |
2861 | else if (scsicmd[2] == 0x00) | 2933 | else switch (scsicmd[2]) { |
2934 | case 0x00: | ||
2862 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); | 2935 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); |
2863 | else if (scsicmd[2] == 0x80) | 2936 | break; |
2937 | case 0x80: | ||
2864 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); | 2938 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); |
2865 | else if (scsicmd[2] == 0x83) | 2939 | break; |
2940 | case 0x83: | ||
2866 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); | 2941 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); |
2867 | else | 2942 | break; |
2943 | case 0x89: | ||
2944 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); | ||
2945 | break; | ||
2946 | default: | ||
2868 | ata_scsi_invalid_field(cmd, done); | 2947 | ata_scsi_invalid_field(cmd, done); |
2948 | break; | ||
2949 | } | ||
2869 | break; | 2950 | break; |
2870 | 2951 | ||
2871 | case MODE_SENSE: | 2952 | case MODE_SENSE: |
@@ -2893,8 +2974,33 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
2893 | ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); | 2974 | ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); |
2894 | break; | 2975 | break; |
2895 | 2976 | ||
2896 | /* mandatory commands we haven't implemented yet */ | ||
2897 | case REQUEST_SENSE: | 2977 | case REQUEST_SENSE: |
2978 | ata_scsi_set_sense(cmd, 0, 0, 0); | ||
2979 | cmd->result = (DRIVER_SENSE << 24); | ||
2980 | done(cmd); | ||
2981 | break; | ||
2982 | |||
2983 | /* if we reach this, then writeback caching is disabled, | ||
2984 | * turning this into a no-op. | ||
2985 | */ | ||
2986 | case SYNCHRONIZE_CACHE: | ||
2987 | /* fall through */ | ||
2988 | |||
2989 | /* no-op's, complete with success */ | ||
2990 | case REZERO_UNIT: | ||
2991 | case SEEK_6: | ||
2992 | case SEEK_10: | ||
2993 | case TEST_UNIT_READY: | ||
2994 | ata_scsi_rbuf_fill(&args, ata_scsiop_noop); | ||
2995 | break; | ||
2996 | |||
2997 | case SEND_DIAGNOSTIC: | ||
2998 | tmp8 = scsicmd[1] & ~(1 << 3); | ||
2999 | if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) | ||
3000 | ata_scsi_rbuf_fill(&args, ata_scsiop_noop); | ||
3001 | else | ||
3002 | ata_scsi_invalid_field(cmd, done); | ||
3003 | break; | ||
2898 | 3004 | ||
2899 | /* all other commands */ | 3005 | /* all other commands */ |
2900 | default: | 3006 | default: |
@@ -2928,6 +3034,13 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) | |||
2928 | shost->max_channel = 1; | 3034 | shost->max_channel = 1; |
2929 | shost->max_cmd_len = 16; | 3035 | shost->max_cmd_len = 16; |
2930 | 3036 | ||
3037 | /* Schedule policy is determined by ->qc_defer() | ||
3038 | * callback and it needs to see every deferred qc. | ||
3039 | * Set host_blocked to 1 to prevent SCSI midlayer from | ||
3040 | * automatically deferring requests. | ||
3041 | */ | ||
3042 | shost->max_host_blocked = 1; | ||
3043 | |||
2931 | rc = scsi_add_host(ap->scsi_host, ap->host->dev); | 3044 | rc = scsi_add_host(ap->scsi_host, ap->host->dev); |
2932 | if (rc) | 3045 | if (rc) |
2933 | goto err_add; | 3046 | goto err_add; |
@@ -2951,25 +3064,32 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) | |||
2951 | { | 3064 | { |
2952 | int tries = 5; | 3065 | int tries = 5; |
2953 | struct ata_device *last_failed_dev = NULL; | 3066 | struct ata_device *last_failed_dev = NULL; |
3067 | struct ata_link *link; | ||
2954 | struct ata_device *dev; | 3068 | struct ata_device *dev; |
2955 | unsigned int i; | ||
2956 | 3069 | ||
2957 | if (ap->flags & ATA_FLAG_DISABLED) | 3070 | if (ap->flags & ATA_FLAG_DISABLED) |
2958 | return; | 3071 | return; |
2959 | 3072 | ||
2960 | repeat: | 3073 | repeat: |
2961 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 3074 | ata_port_for_each_link(link, ap) { |
2962 | struct scsi_device *sdev; | 3075 | ata_link_for_each_dev(dev, link) { |
3076 | struct scsi_device *sdev; | ||
3077 | int channel = 0, id = 0; | ||
2963 | 3078 | ||
2964 | dev = &ap->device[i]; | 3079 | if (!ata_dev_enabled(dev) || dev->sdev) |
3080 | continue; | ||
2965 | 3081 | ||
2966 | if (!ata_dev_enabled(dev) || dev->sdev) | 3082 | if (ata_is_host_link(link)) |
2967 | continue; | 3083 | id = dev->devno; |
3084 | else | ||
3085 | channel = link->pmp; | ||
2968 | 3086 | ||
2969 | sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL); | 3087 | sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, |
2970 | if (!IS_ERR(sdev)) { | 3088 | NULL); |
2971 | dev->sdev = sdev; | 3089 | if (!IS_ERR(sdev)) { |
2972 | scsi_device_put(sdev); | 3090 | dev->sdev = sdev; |
3091 | scsi_device_put(sdev); | ||
3092 | } | ||
2973 | } | 3093 | } |
2974 | } | 3094 | } |
2975 | 3095 | ||
@@ -2977,12 +3097,14 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) | |||
2977 | * failure occurred, scan would have failed silently. Check | 3097 | * failure occurred, scan would have failed silently. Check |
2978 | * whether all devices are attached. | 3098 | * whether all devices are attached. |
2979 | */ | 3099 | */ |
2980 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 3100 | ata_port_for_each_link(link, ap) { |
2981 | dev = &ap->device[i]; | 3101 | ata_link_for_each_dev(dev, link) { |
2982 | if (ata_dev_enabled(dev) && !dev->sdev) | 3102 | if (ata_dev_enabled(dev) && !dev->sdev) |
2983 | break; | 3103 | goto exit_loop; |
3104 | } | ||
2984 | } | 3105 | } |
2985 | if (i == ATA_MAX_DEVICES) | 3106 | exit_loop: |
3107 | if (!link) | ||
2986 | return; | 3108 | return; |
2987 | 3109 | ||
2988 | /* we're missing some SCSI devices */ | 3110 | /* we're missing some SCSI devices */ |
@@ -3049,7 +3171,7 @@ int ata_scsi_offline_dev(struct ata_device *dev) | |||
3049 | */ | 3171 | */ |
3050 | static void ata_scsi_remove_dev(struct ata_device *dev) | 3172 | static void ata_scsi_remove_dev(struct ata_device *dev) |
3051 | { | 3173 | { |
3052 | struct ata_port *ap = dev->ap; | 3174 | struct ata_port *ap = dev->link->ap; |
3053 | struct scsi_device *sdev; | 3175 | struct scsi_device *sdev; |
3054 | unsigned long flags; | 3176 | unsigned long flags; |
3055 | 3177 | ||
@@ -3096,6 +3218,43 @@ static void ata_scsi_remove_dev(struct ata_device *dev) | |||
3096 | } | 3218 | } |
3097 | } | 3219 | } |
3098 | 3220 | ||
3221 | static void ata_scsi_handle_link_detach(struct ata_link *link) | ||
3222 | { | ||
3223 | struct ata_port *ap = link->ap; | ||
3224 | struct ata_device *dev; | ||
3225 | |||
3226 | ata_link_for_each_dev(dev, link) { | ||
3227 | unsigned long flags; | ||
3228 | |||
3229 | if (!(dev->flags & ATA_DFLAG_DETACHED)) | ||
3230 | continue; | ||
3231 | |||
3232 | spin_lock_irqsave(ap->lock, flags); | ||
3233 | dev->flags &= ~ATA_DFLAG_DETACHED; | ||
3234 | spin_unlock_irqrestore(ap->lock, flags); | ||
3235 | |||
3236 | ata_scsi_remove_dev(dev); | ||
3237 | } | ||
3238 | } | ||
3239 | |||
3240 | /** | ||
3241 | * ata_scsi_media_change_notify - send media change event | ||
3242 | * @dev: Pointer to the disk device with media change event | ||
3243 | * | ||
3244 | * Tell the block layer to send a media change notification | ||
3245 | * event. | ||
3246 | * | ||
3247 | * LOCKING: | ||
3248 | * spin_lock_irqsave(host lock) | ||
3249 | */ | ||
3250 | void ata_scsi_media_change_notify(struct ata_device *dev) | ||
3251 | { | ||
3252 | #ifdef OTHER_AN_PATCHES_HAVE_BEEN_APPLIED | ||
3253 | if (dev->sdev) | ||
3254 | scsi_device_event_notify(dev->sdev, SDEV_MEDIA_CHANGE); | ||
3255 | #endif | ||
3256 | } | ||
3257 | |||
3099 | /** | 3258 | /** |
3100 | * ata_scsi_hotplug - SCSI part of hotplug | 3259 | * ata_scsi_hotplug - SCSI part of hotplug |
3101 | * @work: Pointer to ATA port to perform SCSI hotplug on | 3260 | * @work: Pointer to ATA port to perform SCSI hotplug on |
@@ -3121,20 +3280,14 @@ void ata_scsi_hotplug(struct work_struct *work) | |||
3121 | 3280 | ||
3122 | DPRINTK("ENTER\n"); | 3281 | DPRINTK("ENTER\n"); |
3123 | 3282 | ||
3124 | /* unplug detached devices */ | 3283 | /* Unplug detached devices. We cannot use link iterator here |
3125 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 3284 | * because PMP links have to be scanned even if PMP is |
3126 | struct ata_device *dev = &ap->device[i]; | 3285 | * currently not attached. Iterate manually. |
3127 | unsigned long flags; | 3286 | */ |
3128 | 3287 | ata_scsi_handle_link_detach(&ap->link); | |
3129 | if (!(dev->flags & ATA_DFLAG_DETACHED)) | 3288 | if (ap->pmp_link) |
3130 | continue; | 3289 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) |
3131 | 3290 | ata_scsi_handle_link_detach(&ap->pmp_link[i]); | |
3132 | spin_lock_irqsave(ap->lock, flags); | ||
3133 | dev->flags &= ~ATA_DFLAG_DETACHED; | ||
3134 | spin_unlock_irqrestore(ap->lock, flags); | ||
3135 | |||
3136 | ata_scsi_remove_dev(dev); | ||
3137 | } | ||
3138 | 3291 | ||
3139 | /* scan for new ones */ | 3292 | /* scan for new ones */ |
3140 | ata_scsi_scan_host(ap, 0); | 3293 | ata_scsi_scan_host(ap, 0); |
@@ -3163,27 +3316,42 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3163 | { | 3316 | { |
3164 | struct ata_port *ap = ata_shost_to_port(shost); | 3317 | struct ata_port *ap = ata_shost_to_port(shost); |
3165 | unsigned long flags; | 3318 | unsigned long flags; |
3166 | int rc = 0; | 3319 | int devno, rc = 0; |
3167 | 3320 | ||
3168 | if (!ap->ops->error_handler) | 3321 | if (!ap->ops->error_handler) |
3169 | return -EOPNOTSUPP; | 3322 | return -EOPNOTSUPP; |
3170 | 3323 | ||
3171 | if ((channel != SCAN_WILD_CARD && channel != 0) || | 3324 | if (lun != SCAN_WILD_CARD && lun) |
3172 | (lun != SCAN_WILD_CARD && lun != 0)) | ||
3173 | return -EINVAL; | 3325 | return -EINVAL; |
3174 | 3326 | ||
3327 | if (ap->nr_pmp_links == 0) { | ||
3328 | if (channel != SCAN_WILD_CARD && channel) | ||
3329 | return -EINVAL; | ||
3330 | devno = id; | ||
3331 | } else { | ||
3332 | if (id != SCAN_WILD_CARD && id) | ||
3333 | return -EINVAL; | ||
3334 | devno = channel; | ||
3335 | } | ||
3336 | |||
3175 | spin_lock_irqsave(ap->lock, flags); | 3337 | spin_lock_irqsave(ap->lock, flags); |
3176 | 3338 | ||
3177 | if (id == SCAN_WILD_CARD) { | 3339 | if (devno == SCAN_WILD_CARD) { |
3178 | ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1; | 3340 | struct ata_link *link; |
3179 | ap->eh_info.action |= ATA_EH_SOFTRESET; | 3341 | |
3342 | ata_port_for_each_link(link, ap) { | ||
3343 | struct ata_eh_info *ehi = &link->eh_info; | ||
3344 | ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; | ||
3345 | ehi->action |= ATA_EH_SOFTRESET; | ||
3346 | } | ||
3180 | } else { | 3347 | } else { |
3181 | struct ata_device *dev = ata_find_dev(ap, id); | 3348 | struct ata_device *dev = ata_find_dev(ap, devno); |
3182 | 3349 | ||
3183 | if (dev) { | 3350 | if (dev) { |
3184 | ap->eh_info.probe_mask |= 1 << dev->devno; | 3351 | struct ata_eh_info *ehi = &dev->link->eh_info; |
3185 | ap->eh_info.action |= ATA_EH_SOFTRESET; | 3352 | ehi->probe_mask |= 1 << dev->devno; |
3186 | ap->eh_info.flags |= ATA_EHI_RESUME_LINK; | 3353 | ehi->action |= ATA_EH_SOFTRESET; |
3354 | ehi->flags |= ATA_EHI_RESUME_LINK; | ||
3187 | } else | 3355 | } else |
3188 | rc = -EINVAL; | 3356 | rc = -EINVAL; |
3189 | } | 3357 | } |
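For user-triggered scans this means the numbers handed to the SCSI host change meaning on a PMP-equipped port: the channel value selects the fan-out port and the id must be 0 or the wildcard, the reverse of the non-PMP case. Assuming the usual "channel id lun" ordering of the host's sysfs scan attribute, rescanning fan-out port 2 would amount to writing "2 0 0" to /sys/class/scsi_host/hostN/scan, with hostN a placeholder. Either way the request only sets the probe mask and a softreset action on the matching link and leaves the actual work to EH.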
@@ -3214,24 +3382,26 @@ void ata_scsi_dev_rescan(struct work_struct *work) | |||
3214 | { | 3382 | { |
3215 | struct ata_port *ap = | 3383 | struct ata_port *ap = |
3216 | container_of(work, struct ata_port, scsi_rescan_task); | 3384 | container_of(work, struct ata_port, scsi_rescan_task); |
3385 | struct ata_link *link; | ||
3386 | struct ata_device *dev; | ||
3217 | unsigned long flags; | 3387 | unsigned long flags; |
3218 | unsigned int i; | ||
3219 | 3388 | ||
3220 | spin_lock_irqsave(ap->lock, flags); | 3389 | spin_lock_irqsave(ap->lock, flags); |
3221 | 3390 | ||
3222 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 3391 | ata_port_for_each_link(link, ap) { |
3223 | struct ata_device *dev = &ap->device[i]; | 3392 | ata_link_for_each_dev(dev, link) { |
3224 | struct scsi_device *sdev = dev->sdev; | 3393 | struct scsi_device *sdev = dev->sdev; |
3225 | 3394 | ||
3226 | if (!ata_dev_enabled(dev) || !sdev) | 3395 | if (!ata_dev_enabled(dev) || !sdev) |
3227 | continue; | 3396 | continue; |
3228 | if (scsi_device_get(sdev)) | 3397 | if (scsi_device_get(sdev)) |
3229 | continue; | 3398 | continue; |
3230 | 3399 | ||
3231 | spin_unlock_irqrestore(ap->lock, flags); | 3400 | spin_unlock_irqrestore(ap->lock, flags); |
3232 | scsi_rescan_device(&(sdev->sdev_gendev)); | 3401 | scsi_rescan_device(&(sdev->sdev_gendev)); |
3233 | scsi_device_put(sdev); | 3402 | scsi_device_put(sdev); |
3234 | spin_lock_irqsave(ap->lock, flags); | 3403 | spin_lock_irqsave(ap->lock, flags); |
3404 | } | ||
3235 | } | 3405 | } |
3236 | 3406 | ||
3237 | spin_unlock_irqrestore(ap->lock, flags); | 3407 | spin_unlock_irqrestore(ap->lock, flags); |
@@ -3359,7 +3529,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_destroy); | |||
3359 | int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) | 3529 | int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) |
3360 | { | 3530 | { |
3361 | ata_scsi_sdev_config(sdev); | 3531 | ata_scsi_sdev_config(sdev); |
3362 | ata_scsi_dev_config(sdev, ap->device); | 3532 | ata_scsi_dev_config(sdev, ap->link.device); |
3363 | return 0; | 3533 | return 0; |
3364 | } | 3534 | } |
3365 | EXPORT_SYMBOL_GPL(ata_sas_slave_configure); | 3535 | EXPORT_SYMBOL_GPL(ata_sas_slave_configure); |
@@ -3382,8 +3552,8 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), | |||
3382 | 3552 | ||
3383 | ata_scsi_dump_cdb(ap, cmd); | 3553 | ata_scsi_dump_cdb(ap, cmd); |
3384 | 3554 | ||
3385 | if (likely(ata_scsi_dev_enabled(ap->device))) | 3555 | if (likely(ata_scsi_dev_enabled(ap->link.device))) |
3386 | rc = __ata_scsi_queuecmd(cmd, done, ap->device); | 3556 | rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); |
3387 | else { | 3557 | else { |
3388 | cmd->result = (DID_BAD_TARGET << 16); | 3558 | cmd->result = (DID_BAD_TARGET << 16); |
3389 | done(cmd); | 3559 | done(cmd); |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 8023167bbbeb..026439e05afe 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -64,46 +64,6 @@ u8 ata_irq_on(struct ata_port *ap) | |||
64 | return tmp; | 64 | return tmp; |
65 | } | 65 | } |
66 | 66 | ||
67 | u8 ata_dummy_irq_on (struct ata_port *ap) { return 0; } | ||
68 | |||
69 | /** | ||
70 | * ata_irq_ack - Acknowledge a device interrupt. | ||
71 | * @ap: Port on which interrupts are enabled. | ||
72 | * | ||
73 | * Wait up to 10 ms for legacy IDE device to become idle (BUSY | ||
74 | * or BUSY+DRQ clear). Obtain dma status and port status from | ||
75 | * device. Clear the interrupt. Return port status. | ||
76 | * | ||
77 | * LOCKING: | ||
78 | */ | ||
79 | |||
80 | u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) | ||
81 | { | ||
82 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; | ||
83 | u8 host_stat = 0, post_stat = 0, status; | ||
84 | |||
85 | status = ata_busy_wait(ap, bits, 1000); | ||
86 | if (status & bits) | ||
87 | if (ata_msg_err(ap)) | ||
88 | printk(KERN_ERR "abnormal status 0x%X\n", status); | ||
89 | |||
90 | if (ap->ioaddr.bmdma_addr) { | ||
91 | /* get controller status; clear intr, err bits */ | ||
92 | host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
93 | iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, | ||
94 | ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
95 | |||
96 | post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
97 | } | ||
98 | if (ata_msg_intr(ap)) | ||
99 | printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", | ||
100 | __FUNCTION__, | ||
101 | host_stat, post_stat, status); | ||
102 | return status; | ||
103 | } | ||
104 | |||
105 | u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; } | ||
106 | |||
107 | /** | 67 | /** |
108 | * ata_tf_load - send taskfile registers to host controller | 68 | * ata_tf_load - send taskfile registers to host controller |
109 | * @ap: Port to which output is sent | 69 | * @ap: Port to which output is sent |
@@ -445,7 +405,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
445 | unsigned long flags; | 405 | unsigned long flags; |
446 | int thaw = 0; | 406 | int thaw = 0; |
447 | 407 | ||
448 | qc = __ata_qc_from_tag(ap, ap->active_tag); | 408 | qc = __ata_qc_from_tag(ap, ap->link.active_tag); |
449 | if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) | 409 | if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) |
450 | qc = NULL; | 410 | qc = NULL; |
451 | 411 | ||
@@ -500,7 +460,7 @@ void ata_bmdma_error_handler(struct ata_port *ap) | |||
500 | ata_reset_fn_t hardreset; | 460 | ata_reset_fn_t hardreset; |
501 | 461 | ||
502 | hardreset = NULL; | 462 | hardreset = NULL; |
503 | if (sata_scr_valid(ap)) | 463 | if (sata_scr_valid(&ap->link)) |
504 | hardreset = sata_std_hardreset; | 464 | hardreset = sata_std_hardreset; |
505 | 465 | ||
506 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, | 466 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, |
@@ -607,6 +567,9 @@ int ata_pci_init_bmdma(struct ata_host *host) | |||
607 | if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && | 567 | if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && |
608 | (ioread8(bmdma + 2) & 0x80)) | 568 | (ioread8(bmdma + 2) & 0x80)) |
609 | host->flags |= ATA_HOST_SIMPLEX; | 569 | host->flags |= ATA_HOST_SIMPLEX; |
570 | |||
571 | ata_port_desc(ap, "bmdma 0x%llx", | ||
572 | (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); | ||
610 | } | 573 | } |
611 | 574 | ||
612 | return 0; | 575 | return 0; |
@@ -674,6 +637,10 @@ int ata_pci_init_sff_host(struct ata_host *host) | |||
674 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); | 637 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); |
675 | ata_std_ports(&ap->ioaddr); | 638 | ata_std_ports(&ap->ioaddr); |
676 | 639 | ||
640 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | ||
641 | (unsigned long long)pci_resource_start(pdev, base), | ||
642 | (unsigned long long)pci_resource_start(pdev, base + 1)); | ||
643 | |||
677 | mask |= 1 << i; | 644 | mask |= 1 << i; |
678 | } | 645 | } |
679 | 646 | ||
@@ -844,24 +811,30 @@ int ata_pci_init_one(struct pci_dev *pdev, | |||
844 | IRQF_SHARED, DRV_NAME, host); | 811 | IRQF_SHARED, DRV_NAME, host); |
845 | if (rc) | 812 | if (rc) |
846 | goto err_out; | 813 | goto err_out; |
847 | host->irq = pdev->irq; | 814 | |
815 | ata_port_desc(host->ports[0], "irq %d", pdev->irq); | ||
816 | ata_port_desc(host->ports[1], "irq %d", pdev->irq); | ||
848 | } else { | 817 | } else { |
849 | if (!ata_port_is_dummy(host->ports[0])) { | 818 | if (!ata_port_is_dummy(host->ports[0])) { |
850 | host->irq = ATA_PRIMARY_IRQ(pdev); | 819 | rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), |
851 | rc = devm_request_irq(dev, host->irq, | ||
852 | pi->port_ops->irq_handler, | 820 | pi->port_ops->irq_handler, |
853 | IRQF_SHARED, DRV_NAME, host); | 821 | IRQF_SHARED, DRV_NAME, host); |
854 | if (rc) | 822 | if (rc) |
855 | goto err_out; | 823 | goto err_out; |
824 | |||
825 | ata_port_desc(host->ports[0], "irq %d", | ||
826 | ATA_PRIMARY_IRQ(pdev)); | ||
856 | } | 827 | } |
857 | 828 | ||
858 | if (!ata_port_is_dummy(host->ports[1])) { | 829 | if (!ata_port_is_dummy(host->ports[1])) { |
859 | host->irq2 = ATA_SECONDARY_IRQ(pdev); | 830 | rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), |
860 | rc = devm_request_irq(dev, host->irq2, | ||
861 | pi->port_ops->irq_handler, | 831 | pi->port_ops->irq_handler, |
862 | IRQF_SHARED, DRV_NAME, host); | 832 | IRQF_SHARED, DRV_NAME, host); |
863 | if (rc) | 833 | if (rc) |
864 | goto err_out; | 834 | goto err_out; |
835 | |||
836 | ata_port_desc(host->ports[1], "irq %d", | ||
837 | ATA_SECONDARY_IRQ(pdev)); | ||
865 | } | 838 | } |
866 | } | 839 | } |
867 | 840 | ||
@@ -909,7 +882,7 @@ unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer | |||
909 | /* Filter out DMA modes if the device has been configured by | 882 | /* Filter out DMA modes if the device has been configured by |
910 | the BIOS as PIO only */ | 883 | the BIOS as PIO only */ |
911 | 884 | ||
912 | if (adev->ap->ioaddr.bmdma_addr == 0) | 885 | if (adev->link->ap->ioaddr.bmdma_addr == 0) |
913 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 886 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
914 | return xfer_mask; | 887 | return xfer_mask; |
915 | } | 888 | } |
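The ata_irq_ack() helper removed earlier in this file performs the standard SFF-8038i BMDMA status handshake, in which the Interrupt and Error bits are write-one-to-clear. A minimal sketch of just that step, assuming the usual ATA_DMA_* constants from <linux/ata.h> and an ioread8()/iowrite8()-mapped status register; the helper name is illustrative and not part of the patch:

static u8 bmdma_ack_sketch(void __iomem *bmdma_base)
{
	/* Read the Bus Master IDE status byte, then write it back with the
	 * Interrupt (bit 2) and Error (bit 1) bits set; on SFF-8038i
	 * hardware those bits are write-one-to-clear.
	 */
	u8 host_stat = ioread8(bmdma_base + ATA_DMA_STATUS);

	iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		 bmdma_base + ATA_DMA_STATUS);
	return host_stat;	/* pre-clear value for the caller to inspect */
}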
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 564cd234c805..90df58a3edc9 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define __LIBATA_H__ | 29 | #define __LIBATA_H__ |
30 | 30 | ||
31 | #define DRV_NAME "libata" | 31 | #define DRV_NAME "libata" |
32 | #define DRV_VERSION "3.00" /* must be exactly four chars */ | ||
32 | 33 | ||
33 | struct ata_scsi_args { | 34 | struct ata_scsi_args { |
34 | struct ata_device *dev; | 35 | struct ata_device *dev; |
@@ -56,6 +57,7 @@ extern unsigned int ata_print_id; | |||
56 | extern struct workqueue_struct *ata_aux_wq; | 57 | extern struct workqueue_struct *ata_aux_wq; |
57 | extern int atapi_enabled; | 58 | extern int atapi_enabled; |
58 | extern int atapi_dmadir; | 59 | extern int atapi_dmadir; |
60 | extern int atapi_passthru16; | ||
59 | extern int libata_fua; | 61 | extern int libata_fua; |
60 | extern int libata_noacpi; | 62 | extern int libata_noacpi; |
61 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); | 63 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); |
@@ -67,21 +69,23 @@ extern void ata_dev_disable(struct ata_device *dev); | |||
67 | extern void ata_port_flush_task(struct ata_port *ap); | 69 | extern void ata_port_flush_task(struct ata_port *ap); |
68 | extern unsigned ata_exec_internal(struct ata_device *dev, | 70 | extern unsigned ata_exec_internal(struct ata_device *dev, |
69 | struct ata_taskfile *tf, const u8 *cdb, | 71 | struct ata_taskfile *tf, const u8 *cdb, |
70 | int dma_dir, void *buf, unsigned int buflen); | 72 | int dma_dir, void *buf, unsigned int buflen, |
73 | unsigned long timeout); | ||
71 | extern unsigned ata_exec_internal_sg(struct ata_device *dev, | 74 | extern unsigned ata_exec_internal_sg(struct ata_device *dev, |
72 | struct ata_taskfile *tf, const u8 *cdb, | 75 | struct ata_taskfile *tf, const u8 *cdb, |
73 | int dma_dir, struct scatterlist *sg, | 76 | int dma_dir, struct scatterlist *sg, |
74 | unsigned int n_elem); | 77 | unsigned int n_elem, unsigned long timeout); |
75 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); | 78 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); |
76 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | 79 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
77 | unsigned int flags, u16 *id); | 80 | unsigned int flags, u16 *id); |
78 | extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); | 81 | extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); |
79 | extern int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags); | 82 | extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, |
83 | unsigned int readid_flags); | ||
80 | extern int ata_dev_configure(struct ata_device *dev); | 84 | extern int ata_dev_configure(struct ata_device *dev); |
81 | extern int sata_down_spd_limit(struct ata_port *ap); | 85 | extern int sata_down_spd_limit(struct ata_link *link); |
82 | extern int sata_set_spd_needed(struct ata_port *ap); | 86 | extern int sata_set_spd_needed(struct ata_link *link); |
83 | extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); | 87 | extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); |
84 | extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); | 88 | extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
85 | extern void ata_sg_clean(struct ata_queued_cmd *qc); | 89 | extern void ata_sg_clean(struct ata_queued_cmd *qc); |
86 | extern void ata_qc_free(struct ata_queued_cmd *qc); | 90 | extern void ata_qc_free(struct ata_queued_cmd *qc); |
87 | extern void ata_qc_issue(struct ata_queued_cmd *qc); | 91 | extern void ata_qc_issue(struct ata_queued_cmd *qc); |
@@ -92,17 +96,21 @@ extern void ata_dev_select(struct ata_port *ap, unsigned int device, | |||
92 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); | 96 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); |
93 | extern int ata_flush_cache(struct ata_device *dev); | 97 | extern int ata_flush_cache(struct ata_device *dev); |
94 | extern void ata_dev_init(struct ata_device *dev); | 98 | extern void ata_dev_init(struct ata_device *dev); |
99 | extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); | ||
100 | extern int sata_link_init_spd(struct ata_link *link); | ||
95 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); | 101 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); |
96 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); | 102 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); |
97 | extern struct ata_port *ata_port_alloc(struct ata_host *host); | 103 | extern struct ata_port *ata_port_alloc(struct ata_host *host); |
98 | 104 | ||
99 | /* libata-acpi.c */ | 105 | /* libata-acpi.c */ |
100 | #ifdef CONFIG_ATA_ACPI | 106 | #ifdef CONFIG_ATA_ACPI |
107 | extern void ata_acpi_associate_sata_port(struct ata_port *ap); | ||
101 | extern void ata_acpi_associate(struct ata_host *host); | 108 | extern void ata_acpi_associate(struct ata_host *host); |
102 | extern int ata_acpi_on_suspend(struct ata_port *ap); | 109 | extern int ata_acpi_on_suspend(struct ata_port *ap); |
103 | extern void ata_acpi_on_resume(struct ata_port *ap); | 110 | extern void ata_acpi_on_resume(struct ata_port *ap); |
104 | extern int ata_acpi_on_devcfg(struct ata_device *adev); | 111 | extern int ata_acpi_on_devcfg(struct ata_device *adev); |
105 | #else | 112 | #else |
113 | static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { } | ||
106 | static inline void ata_acpi_associate(struct ata_host *host) { } | 114 | static inline void ata_acpi_associate(struct ata_host *host) { } |
107 | static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } | 115 | static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } |
108 | static inline void ata_acpi_on_resume(struct ata_port *ap) { } | 116 | static inline void ata_acpi_on_resume(struct ata_port *ap) { } |
@@ -114,6 +122,7 @@ extern int ata_scsi_add_hosts(struct ata_host *host, | |||
114 | struct scsi_host_template *sht); | 122 | struct scsi_host_template *sht); |
115 | extern void ata_scsi_scan_host(struct ata_port *ap, int sync); | 123 | extern void ata_scsi_scan_host(struct ata_port *ap, int sync); |
116 | extern int ata_scsi_offline_dev(struct ata_device *dev); | 124 | extern int ata_scsi_offline_dev(struct ata_device *dev); |
125 | extern void ata_scsi_media_change_notify(struct ata_device *dev); | ||
117 | extern void ata_scsi_hotplug(struct work_struct *work); | 126 | extern void ata_scsi_hotplug(struct work_struct *work); |
118 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, | 127 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, |
119 | unsigned int buflen); | 128 | unsigned int buflen); |
@@ -147,12 +156,32 @@ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); | |||
147 | extern void ata_scsi_dev_rescan(struct work_struct *work); | 156 | extern void ata_scsi_dev_rescan(struct work_struct *work); |
148 | extern int ata_bus_probe(struct ata_port *ap); | 157 | extern int ata_bus_probe(struct ata_port *ap); |
149 | 158 | ||
159 | /* libata-pmp.c */ | ||
160 | extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); | ||
161 | extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); | ||
162 | extern int sata_pmp_attach(struct ata_device *dev); | ||
163 | |||
150 | /* libata-eh.c */ | 164 | /* libata-eh.c */ |
151 | extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); | 165 | extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); |
152 | extern void ata_scsi_error(struct Scsi_Host *host); | 166 | extern void ata_scsi_error(struct Scsi_Host *host); |
153 | extern void ata_port_wait_eh(struct ata_port *ap); | 167 | extern void ata_port_wait_eh(struct ata_port *ap); |
154 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); | 168 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); |
155 | extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); | 169 | extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); |
170 | extern void ata_eh_detach_dev(struct ata_device *dev); | ||
171 | extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, | ||
172 | unsigned int action); | ||
173 | extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, | ||
174 | unsigned int action); | ||
175 | extern void ata_eh_autopsy(struct ata_port *ap); | ||
176 | extern void ata_eh_report(struct ata_port *ap); | ||
177 | extern int ata_eh_reset(struct ata_link *link, int classify, | ||
178 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
179 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset); | ||
180 | extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | ||
181 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | ||
182 | ata_postreset_fn_t postreset, | ||
183 | struct ata_link **r_failed_disk); | ||
184 | extern void ata_eh_finish(struct ata_port *ap); | ||
156 | 185 | ||
157 | /* libata-sff.c */ | 186 | /* libata-sff.c */ |
158 | extern u8 ata_irq_on(struct ata_port *ap); | 187 | extern u8 ata_irq_on(struct ata_port *ap); |
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c new file mode 100644 index 000000000000..5d3920f6fd69 --- /dev/null +++ b/drivers/ata/pata_acpi.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /* | ||
2 | * ACPI PATA driver | ||
3 | * | ||
4 | * (c) 2007 Red Hat <alan@redhat.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/blkdev.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <scsi/scsi_host.h> | ||
15 | #include <acpi/acpi_bus.h> | ||
16 | #include <acpi/acnames.h> | ||
17 | #include <acpi/acnamesp.h> | ||
18 | #include <acpi/acparser.h> | ||
19 | #include <acpi/acexcep.h> | ||
20 | #include <acpi/acmacros.h> | ||
21 | #include <acpi/actypes.h> | ||
22 | |||
23 | #include <linux/libata.h> | ||
24 | #include <linux/ata.h> | ||
25 | |||
26 | #define DRV_NAME "pata_acpi" | ||
27 | #define DRV_VERSION "0.2.3" | ||
28 | |||
29 | struct pata_acpi { | ||
30 | struct ata_acpi_gtm gtm; | ||
31 | void *last; | ||
32 | unsigned long mask[2]; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * pacpi_pre_reset - check for 40/80 pin | ||
37 | * @link: ATA link | ||
38 | * @deadline: deadline jiffies for the operation | ||
39 | * | ||
40 | * Perform the PATA port setup we need. | ||
41 | */ | ||
42 | |||
43 | static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline) | ||
44 | { | ||
45 | struct ata_port *ap = link->ap; | ||
46 | struct pata_acpi *acpi = ap->private_data; | ||
47 | if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0) | ||
48 | return -ENODEV; | ||
49 | |||
50 | return ata_std_prereset(link, deadline); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * pacpi_cable_detect - cable type detection | ||
55 | * @ap: port to detect | ||
56 | * | ||
57 | * Perform device specific cable detection | ||
58 | */ | ||
59 | |||
60 | static int pacpi_cable_detect(struct ata_port *ap) | ||
61 | { | ||
62 | struct pata_acpi *acpi = ap->private_data; | ||
63 | |||
64 | if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA)) | ||
65 | return ATA_CBL_PATA80; | ||
66 | else | ||
67 | return ATA_CBL_PATA40; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * pacpi_error_handler - Setup an error handler | ||
72 | * @ap: Port to handle | ||
73 | * | ||
74 | * LOCKING: | ||
75 | * None (inherited from caller). | ||
76 | */ | ||
77 | |||
78 | static void pacpi_error_handler(struct ata_port *ap) | ||
79 | { | ||
80 | return ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, | ||
81 | NULL, ata_std_postreset); | ||
82 | } | ||
83 | |||
84 | /* Welcome to ACPI, bring a bucket */ | ||
85 | static const unsigned int pio_cycle[7] = { | ||
86 | 600, 383, 240, 180, 120, 100, 80 | ||
87 | }; | ||
88 | static const unsigned int mwdma_cycle[5] = { | ||
89 | 480, 150, 120, 100, 80 | ||
90 | }; | ||
91 | static const unsigned int udma_cycle[7] = { | ||
92 | 120, 80, 60, 45, 30, 20, 15 | ||
93 | }; | ||
94 | |||
95 | /** | ||
96 | * pacpi_discover_modes - filter non ACPI modes | ||
97 | * @ap: ATA port | ||
98 | * @adev: ATA device | ||
99 | * | ||
100 | * Try the modes available and see which ones the ACPI method will | ||
101 | * set up sensibly. From this we get a mask of ACPI modes we can use | ||
102 | */ | ||
103 | |||
104 | static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev) | ||
105 | { | ||
106 | int unit = adev->devno; | ||
107 | struct pata_acpi *acpi = ap->private_data; | ||
108 | int i; | ||
109 | u32 t; | ||
110 | unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO); | ||
111 | |||
112 | struct ata_acpi_gtm probe; | ||
113 | |||
114 | probe = acpi->gtm; | ||
115 | |||
116 | /* We always use the 0 slot for crap hardware */ | ||
117 | if (!(probe.flags & 0x10)) | ||
118 | unit = 0; | ||
119 | |||
120 | ata_acpi_gtm(ap, &probe); | ||
121 | |||
122 | /* Start by scanning for PIO modes */ | ||
123 | for (i = 0; i < 7; i++) { | ||
124 | t = probe.drive[unit].pio; | ||
125 | if (t <= pio_cycle[i]) { | ||
126 | mask |= (2 << (ATA_SHIFT_PIO + i)) - 1; | ||
127 | break; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* See if we have MWDMA or UDMA data. We don't bother with MWDMA | ||
132 | if UDMA is available, as that means the BIOS set UDMA and our | ||
133 | error change-down path drops from UDMA to PIO anyway */ | ||
134 | if (probe.flags & (1 << (2 * unit))) { | ||
135 | /* MWDMA */ | ||
136 | for (i = 0; i < 5; i++) { | ||
137 | t = probe.drive[unit].dma; | ||
138 | if (t <= mwdma_cycle[i]) { | ||
139 | mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1; | ||
140 | break; | ||
141 | } | ||
142 | } | ||
143 | } else { | ||
144 | /* UDMA */ | ||
145 | for (i = 0; i < 7; i++) { | ||
146 | t = probe.drive[unit].dma; | ||
147 | if (t <= udma_cycle[i]) { | ||
148 | mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1; | ||
149 | break; | ||
150 | } | ||
151 | } | ||
152 | } | ||
153 | if (mask & (0xF8 << ATA_SHIFT_UDMA)) | ||
154 | ap->cbl = ATA_CBL_PATA80; | ||
155 | return mask; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * pacpi_mode_filter - mode filter for ACPI | ||
160 | * @adev: device | ||
161 | * @mask: mask of valid modes | ||
162 | * | ||
163 | * Filter the valid mode list according to our own specific rules, in | ||
164 | * this case the list of discovered valid modes obtained by ACPI probing | ||
165 | */ | ||
166 | |||
167 | static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) | ||
168 | { | ||
169 | struct pata_acpi *acpi = adev->link->ap->private_data; | ||
170 | return ata_pci_default_filter(adev, mask & acpi->mask[adev->devno]); | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * pacpi_set_piomode - set initial PIO mode data | ||
175 | * @ap: ATA interface | ||
176 | * @adev: ATA device | ||
177 | */ | ||
178 | |||
179 | static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
180 | { | ||
181 | int unit = adev->devno; | ||
182 | struct pata_acpi *acpi = ap->private_data; | ||
183 | |||
184 | if (!(acpi->gtm.flags & 0x10)) | ||
185 | unit = 0; | ||
186 | |||
187 | /* Now stuff the nS values into the structure */ | ||
188 | acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0]; | ||
189 | ata_acpi_stm(ap, &acpi->gtm); | ||
190 | /* See what mode we actually got */ | ||
191 | ata_acpi_gtm(ap, &acpi->gtm); | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * pacpi_set_dmamode - set initial DMA mode data | ||
196 | * @ap: ATA interface | ||
197 | * @adev: ATA device | ||
198 | */ | ||
199 | |||
200 | static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
201 | { | ||
202 | int unit = adev->devno; | ||
203 | struct pata_acpi *acpi = ap->private_data; | ||
204 | |||
205 | if (!(acpi->gtm.flags & 0x10)) | ||
206 | unit = 0; | ||
207 | |||
208 | /* Now stuff the nS values into the structure */ | ||
209 | if (adev->dma_mode >= XFER_UDMA_0) { | ||
210 | acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0]; | ||
211 | acpi->gtm.flags |= (1 << (2 * unit)); | ||
212 | } else { | ||
213 | acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0]; | ||
214 | acpi->gtm.flags &= ~(1 << (2 * unit)); | ||
215 | } | ||
216 | ata_acpi_stm(ap, &acpi->gtm); | ||
217 | /* See what mode we actually got */ | ||
218 | ata_acpi_gtm(ap, &acpi->gtm); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * pacpi_qc_issue_prot - command issue | ||
223 | * @qc: command pending | ||
224 | * | ||
225 | * Called when the libata layer is about to issue a command. We wrap | ||
226 | * this interface so that we can load the correct ATA timings if | ||
227 | * necessary. | ||
228 | */ | ||
229 | |||
230 | static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc) | ||
231 | { | ||
232 | struct ata_port *ap = qc->ap; | ||
233 | struct ata_device *adev = qc->dev; | ||
234 | struct pata_acpi *acpi = ap->private_data; | ||
235 | |||
236 | if (acpi->gtm.flags & 0x10) | ||
237 | return ata_qc_issue_prot(qc); | ||
238 | |||
239 | if (adev != acpi->last) { | ||
240 | pacpi_set_piomode(ap, adev); | ||
241 | if (adev->dma_mode) | ||
242 | pacpi_set_dmamode(ap, adev); | ||
243 | acpi->last = adev; | ||
244 | } | ||
245 | return ata_qc_issue_prot(qc); | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * pacpi_port_start - port setup | ||
250 | * @ap: ATA port being set up | ||
251 | * | ||
252 | * Use the port_start hook to maintain private control structures | ||
253 | */ | ||
254 | |||
255 | static int pacpi_port_start(struct ata_port *ap) | ||
256 | { | ||
257 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
258 | struct pata_acpi *acpi; | ||
259 | |||
260 | int ret; | ||
261 | |||
262 | if (ap->acpi_handle == NULL) | ||
263 | return -ENODEV; | ||
264 | |||
265 | acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL); | ||
266 | if (ap->private_data == NULL) | ||
267 | return -ENOMEM; | ||
268 | acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]); | ||
269 | acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]); | ||
270 | ret = ata_sff_port_start(ap); | ||
271 | if (ret < 0) | ||
272 | return ret; | ||
273 | |||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | static struct scsi_host_template pacpi_sht = { | ||
278 | .module = THIS_MODULE, | ||
279 | .name = DRV_NAME, | ||
280 | .ioctl = ata_scsi_ioctl, | ||
281 | .queuecommand = ata_scsi_queuecmd, | ||
282 | .can_queue = ATA_DEF_QUEUE, | ||
283 | .this_id = ATA_SHT_THIS_ID, | ||
284 | .sg_tablesize = LIBATA_MAX_PRD, | ||
285 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
286 | .emulated = ATA_SHT_EMULATED, | ||
287 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
288 | .proc_name = DRV_NAME, | ||
289 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
290 | .slave_configure = ata_scsi_slave_config, | ||
291 | .slave_destroy = ata_scsi_slave_destroy, | ||
292 | /* Use standard CHS mapping rules */ | ||
293 | .bios_param = ata_std_bios_param, | ||
294 | }; | ||
295 | |||
296 | static const struct ata_port_operations pacpi_ops = { | ||
297 | .set_piomode = pacpi_set_piomode, | ||
298 | .set_dmamode = pacpi_set_dmamode, | ||
299 | .mode_filter = pacpi_mode_filter, | ||
300 | |||
301 | /* Task file is PCI ATA format, use helpers */ | ||
302 | .tf_load = ata_tf_load, | ||
303 | .tf_read = ata_tf_read, | ||
304 | .check_status = ata_check_status, | ||
305 | .exec_command = ata_exec_command, | ||
306 | .dev_select = ata_std_dev_select, | ||
307 | |||
308 | .freeze = ata_bmdma_freeze, | ||
309 | .thaw = ata_bmdma_thaw, | ||
310 | .error_handler = pacpi_error_handler, | ||
311 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
312 | .cable_detect = pacpi_cable_detect, | ||
313 | |||
314 | /* BMDMA handling is PCI ATA format, use helpers */ | ||
315 | .bmdma_setup = ata_bmdma_setup, | ||
316 | .bmdma_start = ata_bmdma_start, | ||
317 | .bmdma_stop = ata_bmdma_stop, | ||
318 | .bmdma_status = ata_bmdma_status, | ||
319 | .qc_prep = ata_qc_prep, | ||
320 | .qc_issue = pacpi_qc_issue_prot, | ||
321 | .data_xfer = ata_data_xfer, | ||
322 | |||
323 | /* Timeout handling */ | ||
324 | .irq_handler = ata_interrupt, | ||
325 | .irq_clear = ata_bmdma_irq_clear, | ||
326 | .irq_on = ata_irq_on, | ||
327 | |||
328 | /* Generic PATA PCI ATA helpers */ | ||
329 | .port_start = pacpi_port_start, | ||
330 | }; | ||
331 | |||
332 | |||
333 | /** | ||
334 | * pacpi_init_one - Register ACPI ATA PCI device with kernel services | ||
335 | * @pdev: PCI device to register | ||
336 | * @id: Entry in pacpi_pci_tbl matching with @pdev | ||
337 | * | ||
338 | * Called from kernel PCI layer. | ||
339 | * | ||
340 | * LOCKING: | ||
341 | * Inherited from PCI layer (may sleep). | ||
342 | * | ||
343 | * RETURNS: | ||
344 | * Zero on success, or -ERRNO value. | ||
345 | */ | ||
346 | |||
347 | static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | ||
348 | { | ||
349 | static const struct ata_port_info info = { | ||
350 | .sht = &pacpi_sht, | ||
351 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, | ||
352 | |||
353 | .pio_mask = 0x1f, | ||
354 | .mwdma_mask = 0x07, | ||
355 | .udma_mask = 0x7f, | ||
356 | |||
357 | .port_ops = &pacpi_ops, | ||
358 | }; | ||
359 | const struct ata_port_info *ppi[] = { &info, NULL }; | ||
360 | return ata_pci_init_one(pdev, ppi); | ||
361 | } | ||
362 | |||
363 | static const struct pci_device_id pacpi_pci_tbl[] = { | ||
364 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, | ||
365 | { } /* terminate list */ | ||
366 | }; | ||
367 | |||
368 | static struct pci_driver pacpi_pci_driver = { | ||
369 | .name = DRV_NAME, | ||
370 | .id_table = pacpi_pci_tbl, | ||
371 | .probe = pacpi_init_one, | ||
372 | .remove = ata_pci_remove_one, | ||
373 | .suspend = ata_pci_device_suspend, | ||
374 | .resume = ata_pci_device_resume, | ||
375 | }; | ||
376 | |||
377 | static int __init pacpi_init(void) | ||
378 | { | ||
379 | return pci_register_driver(&pacpi_pci_driver); | ||
380 | } | ||
381 | |||
382 | static void __exit pacpi_exit(void) | ||
383 | { | ||
384 | pci_unregister_driver(&pacpi_pci_driver); | ||
385 | } | ||
386 | |||
387 | module_init(pacpi_init); | ||
388 | module_exit(pacpi_exit); | ||
389 | |||
390 | MODULE_AUTHOR("Alan Cox"); | ||
391 | MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode"); | ||
392 | MODULE_LICENSE("GPL"); | ||
393 | MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl); | ||
394 | MODULE_VERSION(DRV_VERSION); | ||
395 | |||
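The mode masks used throughout pata_acpi.c (pio_mask 0x1f, mwdma_mask 0x07, udma_mask 0x7f, and the (0x7f << ATA_SHIFT_UDMA)-style composite in pacpi_discover_modes()) are one packed bitmap with one bit per transfer mode, grouped into PIO, MWDMA and UDMA ranges. A small self-contained sketch of that packing, using illustrative shift values only (the real constants come from <linux/ata.h>):

#include <stdio.h>

/* Illustrative shifts; the kernel's actual values live in <linux/ata.h>. */
#define SHIFT_PIO	0
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8

int main(void)
{
	/* 0x1f => PIO0-4, 0x07 => MWDMA0-2, 0x7f => UDMA0-6 */
	unsigned long mask = (0x1fUL << SHIFT_PIO) |
			     (0x07UL << SHIFT_MWDMA) |
			     (0x7fUL << SHIFT_UDMA);
	int i;

	for (i = 0; i < 7; i++)
		if (mask & (1UL << (SHIFT_UDMA + i)))
			printf("UDMA%d enabled\n", i);
	return 0;
}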
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 32a10c99c06f..364534e7aff4 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c | |||
@@ -305,7 +305,6 @@ static struct scsi_host_template ali_sht = { | |||
305 | */ | 305 | */ |
306 | 306 | ||
307 | static struct ata_port_operations ali_early_port_ops = { | 307 | static struct ata_port_operations ali_early_port_ops = { |
308 | .port_disable = ata_port_disable, | ||
309 | .set_piomode = ali_set_piomode, | 308 | .set_piomode = ali_set_piomode, |
310 | .tf_load = ata_tf_load, | 309 | .tf_load = ata_tf_load, |
311 | .tf_read = ata_tf_read, | 310 | .tf_read = ata_tf_read, |
@@ -327,9 +326,8 @@ static struct ata_port_operations ali_early_port_ops = { | |||
327 | .irq_handler = ata_interrupt, | 326 | .irq_handler = ata_interrupt, |
328 | .irq_clear = ata_bmdma_irq_clear, | 327 | .irq_clear = ata_bmdma_irq_clear, |
329 | .irq_on = ata_irq_on, | 328 | .irq_on = ata_irq_on, |
330 | .irq_ack = ata_irq_ack, | ||
331 | 329 | ||
332 | .port_start = ata_port_start, | 330 | .port_start = ata_sff_port_start, |
333 | }; | 331 | }; |
334 | 332 | ||
335 | /* | 333 | /* |
@@ -337,8 +335,6 @@ static struct ata_port_operations ali_early_port_ops = { | |||
337 | * detect | 335 | * detect |
338 | */ | 336 | */ |
339 | static struct ata_port_operations ali_20_port_ops = { | 337 | static struct ata_port_operations ali_20_port_ops = { |
340 | .port_disable = ata_port_disable, | ||
341 | |||
342 | .set_piomode = ali_set_piomode, | 338 | .set_piomode = ali_set_piomode, |
343 | .set_dmamode = ali_set_dmamode, | 339 | .set_dmamode = ali_set_dmamode, |
344 | .mode_filter = ali_20_filter, | 340 | .mode_filter = ali_20_filter, |
@@ -369,16 +365,14 @@ static struct ata_port_operations ali_20_port_ops = { | |||
369 | .irq_handler = ata_interrupt, | 365 | .irq_handler = ata_interrupt, |
370 | .irq_clear = ata_bmdma_irq_clear, | 366 | .irq_clear = ata_bmdma_irq_clear, |
371 | .irq_on = ata_irq_on, | 367 | .irq_on = ata_irq_on, |
372 | .irq_ack = ata_irq_ack, | ||
373 | 368 | ||
374 | .port_start = ata_port_start, | 369 | .port_start = ata_sff_port_start, |
375 | }; | 370 | }; |
376 | 371 | ||
377 | /* | 372 | /* |
378 | * Port operations for DMA capable ALi with cable detect | 373 | * Port operations for DMA capable ALi with cable detect |
379 | */ | 374 | */ |
380 | static struct ata_port_operations ali_c2_port_ops = { | 375 | static struct ata_port_operations ali_c2_port_ops = { |
381 | .port_disable = ata_port_disable, | ||
382 | .set_piomode = ali_set_piomode, | 376 | .set_piomode = ali_set_piomode, |
383 | .set_dmamode = ali_set_dmamode, | 377 | .set_dmamode = ali_set_dmamode, |
384 | .mode_filter = ata_pci_default_filter, | 378 | .mode_filter = ata_pci_default_filter, |
@@ -408,16 +402,14 @@ static struct ata_port_operations ali_c2_port_ops = { | |||
408 | .irq_handler = ata_interrupt, | 402 | .irq_handler = ata_interrupt, |
409 | .irq_clear = ata_bmdma_irq_clear, | 403 | .irq_clear = ata_bmdma_irq_clear, |
410 | .irq_on = ata_irq_on, | 404 | .irq_on = ata_irq_on, |
411 | .irq_ack = ata_irq_ack, | ||
412 | 405 | ||
413 | .port_start = ata_port_start, | 406 | .port_start = ata_sff_port_start, |
414 | }; | 407 | }; |
415 | 408 | ||
416 | /* | 409 | /* |
417 | * Port operations for DMA capable ALi with cable detect and LBA48 | 410 | * Port operations for DMA capable ALi with cable detect and LBA48 |
418 | */ | 411 | */ |
419 | static struct ata_port_operations ali_c5_port_ops = { | 412 | static struct ata_port_operations ali_c5_port_ops = { |
420 | .port_disable = ata_port_disable, | ||
421 | .set_piomode = ali_set_piomode, | 413 | .set_piomode = ali_set_piomode, |
422 | .set_dmamode = ali_set_dmamode, | 414 | .set_dmamode = ali_set_dmamode, |
423 | .mode_filter = ata_pci_default_filter, | 415 | .mode_filter = ata_pci_default_filter, |
@@ -446,9 +438,8 @@ static struct ata_port_operations ali_c5_port_ops = { | |||
446 | .irq_handler = ata_interrupt, | 438 | .irq_handler = ata_interrupt, |
447 | .irq_clear = ata_bmdma_irq_clear, | 439 | .irq_clear = ata_bmdma_irq_clear, |
448 | .irq_on = ata_irq_on, | 440 | .irq_on = ata_irq_on, |
449 | .irq_ack = ata_irq_ack, | ||
450 | 441 | ||
451 | .port_start = ata_port_start, | 442 | .port_start = ata_sff_port_start, |
452 | }; | 443 | }; |
453 | 444 | ||
454 | 445 | ||
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 04048fcf6305..c5779ad4abca 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c | |||
@@ -119,27 +119,28 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse | |||
119 | } | 119 | } |
120 | 120 | ||
121 | /** | 121 | /** |
122 | * amd_probe_init - perform reset handling | 122 | * amd_pre_reset - perform reset handling |
123 | * @ap: ATA port | 123 | * @link: ATA link |
124 | * @deadline: deadline jiffies for the operation | 124 | * @deadline: deadline jiffies for the operation |
125 | * | 125 | * |
126 | * Reset sequence checking enable bits to see which ports are | 126 | * Reset sequence checking enable bits to see which ports are |
127 | * active. | 127 | * active. |
128 | */ | 128 | */ |
129 | 129 | ||
130 | static int amd_pre_reset(struct ata_port *ap, unsigned long deadline) | 130 | static int amd_pre_reset(struct ata_link *link, unsigned long deadline) |
131 | { | 131 | { |
132 | static const struct pci_bits amd_enable_bits[] = { | 132 | static const struct pci_bits amd_enable_bits[] = { |
133 | { 0x40, 1, 0x02, 0x02 }, | 133 | { 0x40, 1, 0x02, 0x02 }, |
134 | { 0x40, 1, 0x01, 0x01 } | 134 | { 0x40, 1, 0x01, 0x01 } |
135 | }; | 135 | }; |
136 | 136 | ||
137 | struct ata_port *ap = link->ap; | ||
137 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 138 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
138 | 139 | ||
139 | if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) | 140 | if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) |
140 | return -ENOENT; | 141 | return -ENOENT; |
141 | 142 | ||
142 | return ata_std_prereset(ap, deadline); | 143 | return ata_std_prereset(link, deadline); |
143 | } | 144 | } |
144 | 145 | ||
145 | static void amd_error_handler(struct ata_port *ap) | 146 | static void amd_error_handler(struct ata_port *ap) |
@@ -221,25 +222,26 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
221 | 222 | ||
222 | /** | 223 | /** |
223 | * nv_probe_init - cable detection | 224 | * nv_probe_init - cable detection |
224 | * @ap: ATA port | 225 | * @link: ATA link |
225 | * | 226 | * |
226 | * Perform cable detection. The BIOS stores this in PCI config | 227 | * Perform cable detection. The BIOS stores this in PCI config |
227 | * space for us. | 228 | * space for us. |
228 | */ | 229 | */ |
229 | 230 | ||
230 | static int nv_pre_reset(struct ata_port *ap, unsigned long deadline) | 231 | static int nv_pre_reset(struct ata_link *link, unsigned long deadline) |
231 | { | 232 | { |
232 | static const struct pci_bits nv_enable_bits[] = { | 233 | static const struct pci_bits nv_enable_bits[] = { |
233 | { 0x50, 1, 0x02, 0x02 }, | 234 | { 0x50, 1, 0x02, 0x02 }, |
234 | { 0x50, 1, 0x01, 0x01 } | 235 | { 0x50, 1, 0x01, 0x01 } |
235 | }; | 236 | }; |
236 | 237 | ||
238 | struct ata_port *ap = link->ap; | ||
237 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 239 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
238 | 240 | ||
239 | if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) | 241 | if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) |
240 | return -ENOENT; | 242 | return -ENOENT; |
241 | 243 | ||
242 | return ata_std_prereset(ap, deadline); | 244 | return ata_std_prereset(link, deadline); |
243 | } | 245 | } |
244 | 246 | ||
245 | static void nv_error_handler(struct ata_port *ap) | 247 | static void nv_error_handler(struct ata_port *ap) |
@@ -268,6 +270,9 @@ static int nv_cable_detect(struct ata_port *ap) | |||
268 | pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma); | 270 | pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma); |
269 | if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400) | 271 | if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400) |
270 | cbl = ATA_CBL_PATA80; | 272 | cbl = ATA_CBL_PATA80; |
273 | /* And a triple check across suspend/resume with ACPI around */ | ||
274 | if (ata_acpi_cbl_80wire(ap)) | ||
275 | cbl = ATA_CBL_PATA80; | ||
271 | return cbl; | 276 | return cbl; |
272 | } | 277 | } |
273 | 278 | ||
@@ -327,7 +332,6 @@ static struct scsi_host_template amd_sht = { | |||
327 | }; | 332 | }; |
328 | 333 | ||
329 | static struct ata_port_operations amd33_port_ops = { | 334 | static struct ata_port_operations amd33_port_ops = { |
330 | .port_disable = ata_port_disable, | ||
331 | .set_piomode = amd33_set_piomode, | 335 | .set_piomode = amd33_set_piomode, |
332 | .set_dmamode = amd33_set_dmamode, | 336 | .set_dmamode = amd33_set_dmamode, |
333 | .mode_filter = ata_pci_default_filter, | 337 | .mode_filter = ata_pci_default_filter, |
@@ -356,13 +360,11 @@ static struct ata_port_operations amd33_port_ops = { | |||
356 | .irq_handler = ata_interrupt, | 360 | .irq_handler = ata_interrupt, |
357 | .irq_clear = ata_bmdma_irq_clear, | 361 | .irq_clear = ata_bmdma_irq_clear, |
358 | .irq_on = ata_irq_on, | 362 | .irq_on = ata_irq_on, |
359 | .irq_ack = ata_irq_ack, | ||
360 | 363 | ||
361 | .port_start = ata_port_start, | 364 | .port_start = ata_sff_port_start, |
362 | }; | 365 | }; |
363 | 366 | ||
364 | static struct ata_port_operations amd66_port_ops = { | 367 | static struct ata_port_operations amd66_port_ops = { |
365 | .port_disable = ata_port_disable, | ||
366 | .set_piomode = amd66_set_piomode, | 368 | .set_piomode = amd66_set_piomode, |
367 | .set_dmamode = amd66_set_dmamode, | 369 | .set_dmamode = amd66_set_dmamode, |
368 | .mode_filter = ata_pci_default_filter, | 370 | .mode_filter = ata_pci_default_filter, |
@@ -391,13 +393,11 @@ static struct ata_port_operations amd66_port_ops = { | |||
391 | .irq_handler = ata_interrupt, | 393 | .irq_handler = ata_interrupt, |
392 | .irq_clear = ata_bmdma_irq_clear, | 394 | .irq_clear = ata_bmdma_irq_clear, |
393 | .irq_on = ata_irq_on, | 395 | .irq_on = ata_irq_on, |
394 | .irq_ack = ata_irq_ack, | ||
395 | 396 | ||
396 | .port_start = ata_port_start, | 397 | .port_start = ata_sff_port_start, |
397 | }; | 398 | }; |
398 | 399 | ||
399 | static struct ata_port_operations amd100_port_ops = { | 400 | static struct ata_port_operations amd100_port_ops = { |
400 | .port_disable = ata_port_disable, | ||
401 | .set_piomode = amd100_set_piomode, | 401 | .set_piomode = amd100_set_piomode, |
402 | .set_dmamode = amd100_set_dmamode, | 402 | .set_dmamode = amd100_set_dmamode, |
403 | .mode_filter = ata_pci_default_filter, | 403 | .mode_filter = ata_pci_default_filter, |
@@ -426,13 +426,11 @@ static struct ata_port_operations amd100_port_ops = { | |||
426 | .irq_handler = ata_interrupt, | 426 | .irq_handler = ata_interrupt, |
427 | .irq_clear = ata_bmdma_irq_clear, | 427 | .irq_clear = ata_bmdma_irq_clear, |
428 | .irq_on = ata_irq_on, | 428 | .irq_on = ata_irq_on, |
429 | .irq_ack = ata_irq_ack, | ||
430 | 429 | ||
431 | .port_start = ata_port_start, | 430 | .port_start = ata_sff_port_start, |
432 | }; | 431 | }; |
433 | 432 | ||
434 | static struct ata_port_operations amd133_port_ops = { | 433 | static struct ata_port_operations amd133_port_ops = { |
435 | .port_disable = ata_port_disable, | ||
436 | .set_piomode = amd133_set_piomode, | 434 | .set_piomode = amd133_set_piomode, |
437 | .set_dmamode = amd133_set_dmamode, | 435 | .set_dmamode = amd133_set_dmamode, |
438 | .mode_filter = ata_pci_default_filter, | 436 | .mode_filter = ata_pci_default_filter, |
@@ -461,13 +459,11 @@ static struct ata_port_operations amd133_port_ops = { | |||
461 | .irq_handler = ata_interrupt, | 459 | .irq_handler = ata_interrupt, |
462 | .irq_clear = ata_bmdma_irq_clear, | 460 | .irq_clear = ata_bmdma_irq_clear, |
463 | .irq_on = ata_irq_on, | 461 | .irq_on = ata_irq_on, |
464 | .irq_ack = ata_irq_ack, | ||
465 | 462 | ||
466 | .port_start = ata_port_start, | 463 | .port_start = ata_sff_port_start, |
467 | }; | 464 | }; |
468 | 465 | ||
469 | static struct ata_port_operations nv100_port_ops = { | 466 | static struct ata_port_operations nv100_port_ops = { |
470 | .port_disable = ata_port_disable, | ||
471 | .set_piomode = nv100_set_piomode, | 467 | .set_piomode = nv100_set_piomode, |
472 | .set_dmamode = nv100_set_dmamode, | 468 | .set_dmamode = nv100_set_dmamode, |
473 | .mode_filter = ata_pci_default_filter, | 469 | .mode_filter = ata_pci_default_filter, |
@@ -496,13 +492,11 @@ static struct ata_port_operations nv100_port_ops = { | |||
496 | .irq_handler = ata_interrupt, | 492 | .irq_handler = ata_interrupt, |
497 | .irq_clear = ata_bmdma_irq_clear, | 493 | .irq_clear = ata_bmdma_irq_clear, |
498 | .irq_on = ata_irq_on, | 494 | .irq_on = ata_irq_on, |
499 | .irq_ack = ata_irq_ack, | ||
500 | 495 | ||
501 | .port_start = ata_port_start, | 496 | .port_start = ata_sff_port_start, |
502 | }; | 497 | }; |
503 | 498 | ||
504 | static struct ata_port_operations nv133_port_ops = { | 499 | static struct ata_port_operations nv133_port_ops = { |
505 | .port_disable = ata_port_disable, | ||
506 | .set_piomode = nv133_set_piomode, | 500 | .set_piomode = nv133_set_piomode, |
507 | .set_dmamode = nv133_set_dmamode, | 501 | .set_dmamode = nv133_set_dmamode, |
508 | .mode_filter = ata_pci_default_filter, | 502 | .mode_filter = ata_pci_default_filter, |
@@ -531,9 +525,8 @@ static struct ata_port_operations nv133_port_ops = { | |||
531 | .irq_handler = ata_interrupt, | 525 | .irq_handler = ata_interrupt, |
532 | .irq_clear = ata_bmdma_irq_clear, | 526 | .irq_clear = ata_bmdma_irq_clear, |
533 | .irq_on = ata_irq_on, | 527 | .irq_on = ata_irq_on, |
534 | .irq_ack = ata_irq_ack, | ||
535 | 528 | ||
536 | .port_start = ata_port_start, | 529 | .port_start = ata_sff_port_start, |
537 | }; | 530 | }; |
538 | 531 | ||
539 | static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 532 | static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index b5352ebecef9..d4218310327b 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c | |||
@@ -40,8 +40,9 @@ | |||
40 | 40 | ||
41 | static int clock = 0; | 41 | static int clock = 0; |
42 | 42 | ||
43 | static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline) | 43 | static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline) |
44 | { | 44 | { |
45 | struct ata_port *ap = link->ap; | ||
45 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 46 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
46 | const struct pci_bits artop_enable_bits[] = { | 47 | const struct pci_bits artop_enable_bits[] = { |
47 | { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ | 48 | { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ |
@@ -51,7 +52,7 @@ static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
51 | if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 52 | if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) |
52 | return -ENOENT; | 53 | return -ENOENT; |
53 | 54 | ||
54 | return ata_std_prereset(ap, deadline); | 55 | return ata_std_prereset(link, deadline); |
55 | } | 56 | } |
56 | 57 | ||
57 | /** | 58 | /** |
@@ -71,27 +72,28 @@ static void artop6210_error_handler(struct ata_port *ap) | |||
71 | 72 | ||
72 | /** | 73 | /** |
73 | * artop6260_pre_reset - check for 40/80 pin | 74 | * artop6260_pre_reset - check for 40/80 pin |
74 | * @ap: Port | 75 | * @link: link |
75 | * @deadline: deadline jiffies for the operation | 76 | * @deadline: deadline jiffies for the operation |
76 | * | 77 | * |
77 | * The ARTOP hardware reports the cable detect bits in register 0x49. | 78 | * The ARTOP hardware reports the cable detect bits in register 0x49. |
78 | * Nothing complicated needed here. | 79 | * Nothing complicated needed here. |
79 | */ | 80 | */ |
80 | 81 | ||
81 | static int artop6260_pre_reset(struct ata_port *ap, unsigned long deadline) | 82 | static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) |
82 | { | 83 | { |
83 | static const struct pci_bits artop_enable_bits[] = { | 84 | static const struct pci_bits artop_enable_bits[] = { |
84 | { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ | 85 | { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ |
85 | { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ | 86 | { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ |
86 | }; | 87 | }; |
87 | 88 | ||
89 | struct ata_port *ap = link->ap; | ||
88 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 90 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
89 | 91 | ||
90 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ | 92 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ |
91 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 93 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) |
92 | return -ENOENT; | 94 | return -ENOENT; |
93 | 95 | ||
94 | return ata_std_prereset(ap, deadline); | 96 | return ata_std_prereset(link, deadline); |
95 | } | 97 | } |
96 | 98 | ||
97 | /** | 99 | /** |
@@ -330,7 +332,6 @@ static struct scsi_host_template artop_sht = { | |||
330 | }; | 332 | }; |
331 | 333 | ||
332 | static const struct ata_port_operations artop6210_ops = { | 334 | static const struct ata_port_operations artop6210_ops = { |
333 | .port_disable = ata_port_disable, | ||
334 | .set_piomode = artop6210_set_piomode, | 335 | .set_piomode = artop6210_set_piomode, |
335 | .set_dmamode = artop6210_set_dmamode, | 336 | .set_dmamode = artop6210_set_dmamode, |
336 | .mode_filter = ata_pci_default_filter, | 337 | .mode_filter = ata_pci_default_filter, |
@@ -359,13 +360,11 @@ static const struct ata_port_operations artop6210_ops = { | |||
359 | .irq_handler = ata_interrupt, | 360 | .irq_handler = ata_interrupt, |
360 | .irq_clear = ata_bmdma_irq_clear, | 361 | .irq_clear = ata_bmdma_irq_clear, |
361 | .irq_on = ata_irq_on, | 362 | .irq_on = ata_irq_on, |
362 | .irq_ack = ata_irq_ack, | ||
363 | 363 | ||
364 | .port_start = ata_port_start, | 364 | .port_start = ata_sff_port_start, |
365 | }; | 365 | }; |
366 | 366 | ||
367 | static const struct ata_port_operations artop6260_ops = { | 367 | static const struct ata_port_operations artop6260_ops = { |
368 | .port_disable = ata_port_disable, | ||
369 | .set_piomode = artop6260_set_piomode, | 368 | .set_piomode = artop6260_set_piomode, |
370 | .set_dmamode = artop6260_set_dmamode, | 369 | .set_dmamode = artop6260_set_dmamode, |
371 | 370 | ||
@@ -392,9 +391,8 @@ static const struct ata_port_operations artop6260_ops = { | |||
392 | .irq_handler = ata_interrupt, | 391 | .irq_handler = ata_interrupt, |
393 | .irq_clear = ata_bmdma_irq_clear, | 392 | .irq_clear = ata_bmdma_irq_clear, |
394 | .irq_on = ata_irq_on, | 393 | .irq_on = ata_irq_on, |
395 | .irq_ack = ata_irq_ack, | ||
396 | 394 | ||
397 | .port_start = ata_port_start, | 395 | .port_start = ata_sff_port_start, |
398 | }; | 396 | }; |
399 | 397 | ||
400 | 398 | ||
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c new file mode 100644 index 000000000000..bb250a48e27c --- /dev/null +++ b/drivers/ata/pata_at32.c | |||
@@ -0,0 +1,441 @@ | |||
1 | /* | ||
2 | * AVR32 SMC/CFC PATA Driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Atmel Norway | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #define DEBUG | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <scsi/scsi_host.h> | ||
22 | #include <linux/ata.h> | ||
23 | #include <linux/libata.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/io.h> | ||
26 | |||
27 | #include <asm/arch/board.h> | ||
28 | #include <asm/arch/smc.h> | ||
29 | |||
30 | #define DRV_NAME "pata_at32" | ||
31 | #define DRV_VERSION "0.0.2" | ||
32 | |||
33 | /* | ||
34 | * CompactFlash controller memory layout relative to the base address: | ||
35 | * | ||
36 | * Attribute memory: 0000 0000 -> 003f ffff | ||
37 | * Common memory: 0040 0000 -> 007f ffff | ||
38 | * I/O memory: 0080 0000 -> 00bf ffff | ||
39 | * True IDE Mode: 00c0 0000 -> 00df ffff | ||
40 | * Alt IDE Mode: 00e0 0000 -> 00ff ffff | ||
41 | * | ||
42 | * Only True IDE and Alt True IDE mode are needed for this driver. | ||
43 | * | ||
44 | * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc) | ||
45 | * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat) | ||
46 | */ | ||
47 | #define CF_IDE_OFFSET 0x00c00000 | ||
48 | #define CF_ALT_IDE_OFFSET 0x00e00000 | ||
49 | #define CF_RES_SIZE 2048 | ||
50 | |||
51 | /* | ||
52 | * Define DEBUG_BUS if you are debugging your own EBI -> PATA | ||
53 | * adaptor with a logic analyzer or similar. | ||
54 | */ | ||
55 | #undef DEBUG_BUS | ||
56 | |||
57 | /* | ||
58 | * ATA PIO modes | ||
59 | * | ||
60 | * Name | Mb/s | Min cycle time | Mask | ||
61 | * --------+-------+----------------+-------- | ||
62 | * Mode 0 | 3.3 | 600 ns | 0x01 | ||
63 | * Mode 1 | 5.2 | 383 ns | 0x03 | ||
64 | * Mode 2 | 8.3 | 240 ns | 0x07 | ||
65 | * Mode 3 | 11.1 | 180 ns | 0x0f | ||
66 | * Mode 4 | 16.7 | 120 ns | 0x1f | ||
67 | */ | ||
68 | #define PIO_MASK (0x1f) | ||
69 | |||
70 | /* | ||
71 | * Struct containing private information about device. | ||
72 | */ | ||
73 | struct at32_ide_info { | ||
74 | unsigned int irq; | ||
75 | struct resource res_ide; | ||
76 | struct resource res_alt; | ||
77 | void __iomem *ide_addr; | ||
78 | void __iomem *alt_addr; | ||
79 | unsigned int cs; | ||
80 | struct smc_config smc; | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * Setup SMC for the given ATA timing. | ||
85 | */ | ||
86 | static int pata_at32_setup_timing(struct device *dev, | ||
87 | struct at32_ide_info *info, | ||
88 | const struct ata_timing *timing) | ||
89 | { | ||
90 | /* These two values are found through testing */ | ||
91 | const int min_recover = 25; | ||
92 | const int ncs_hold = 15; | ||
93 | |||
94 | struct smc_config *smc = &info->smc; | ||
95 | |||
96 | int active; | ||
97 | int recover; | ||
98 | |||
99 | /* Total cycle time */ | ||
100 | smc->read_cycle = timing->cyc8b; | ||
101 | |||
102 | /* DIOR <= CFIOR timings */ | ||
103 | smc->nrd_setup = timing->setup; | ||
104 | smc->nrd_pulse = timing->act8b; | ||
105 | |||
106 | /* Compute recover, extend total cycle if needed */ | ||
107 | active = smc->nrd_setup + smc->nrd_pulse; | ||
108 | recover = smc->read_cycle - active; | ||
109 | |||
110 | if (recover < min_recover) { | ||
111 | smc->read_cycle = active + min_recover; | ||
112 | recover = min_recover; | ||
113 | } | ||
114 | |||
115 | /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ | ||
116 | smc->ncs_read_setup = 0; | ||
117 | smc->ncs_read_pulse = active + ncs_hold; | ||
118 | |||
119 | /* Write timings same as read timings */ | ||
120 | smc->write_cycle = smc->read_cycle; | ||
121 | smc->nwe_setup = smc->nrd_setup; | ||
122 | smc->nwe_pulse = smc->nrd_pulse; | ||
123 | smc->ncs_write_setup = smc->ncs_read_setup; | ||
124 | smc->ncs_write_pulse = smc->ncs_read_pulse; | ||
125 | |||
126 | /* Do some debugging output */ | ||
127 | dev_dbg(dev, "SMC: C=%d S=%d P=%d R=%d NCSS=%d NCSP=%d NCSR=%d\n", | ||
128 | smc->read_cycle, smc->nrd_setup, smc->nrd_pulse, | ||
129 | recover, smc->ncs_read_setup, smc->ncs_read_pulse, | ||
130 | smc->read_cycle - smc->ncs_read_pulse); | ||
131 | |||
132 | /* Finally, configure the SMC */ | ||
133 | return smc_set_configuration(info->cs, smc); | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Procedures for libATA. | ||
138 | */ | ||
139 | static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
140 | { | ||
141 | struct ata_timing timing; | ||
142 | struct at32_ide_info *info = ap->host->private_data; | ||
143 | |||
144 | int ret; | ||
145 | |||
146 | /* Compute ATA timing */ | ||
147 | ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0); | ||
148 | if (ret) { | ||
149 | dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | /* Setup SMC to ATA timing */ | ||
154 | ret = pata_at32_setup_timing(ap->dev, info, &timing); | ||
155 | if (ret) { | ||
156 | dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret); | ||
157 | return; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static void pata_at32_irq_clear(struct ata_port *ap) | ||
162 | { | ||
163 | /* No DMA controller yet */ | ||
164 | } | ||
165 | |||
166 | static struct scsi_host_template at32_sht = { | ||
167 | .module = THIS_MODULE, | ||
168 | .name = DRV_NAME, | ||
169 | .ioctl = ata_scsi_ioctl, | ||
170 | .queuecommand = ata_scsi_queuecmd, | ||
171 | .can_queue = ATA_DEF_QUEUE, | ||
172 | .this_id = ATA_SHT_THIS_ID, | ||
173 | .sg_tablesize = LIBATA_MAX_PRD, | ||
174 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
175 | .emulated = ATA_SHT_EMULATED, | ||
176 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
177 | .proc_name = DRV_NAME, | ||
178 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
179 | .slave_configure = ata_scsi_slave_config, | ||
180 | .slave_destroy = ata_scsi_slave_destroy, | ||
181 | .bios_param = ata_std_bios_param, | ||
182 | }; | ||
183 | |||
184 | static struct ata_port_operations at32_port_ops = { | ||
185 | .port_disable = ata_port_disable, | ||
186 | .set_piomode = pata_at32_set_piomode, | ||
187 | .tf_load = ata_tf_load, | ||
188 | .tf_read = ata_tf_read, | ||
189 | .exec_command = ata_exec_command, | ||
190 | .check_status = ata_check_status, | ||
191 | .dev_select = ata_std_dev_select, | ||
192 | |||
193 | .freeze = ata_bmdma_freeze, | ||
194 | .thaw = ata_bmdma_thaw, | ||
195 | .error_handler = ata_bmdma_error_handler, | ||
196 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
197 | .cable_detect = ata_cable_40wire, | ||
198 | |||
199 | .qc_prep = ata_qc_prep, | ||
200 | .qc_issue = ata_qc_issue_prot, | ||
201 | |||
202 | .data_xfer = ata_data_xfer, | ||
203 | |||
204 | .irq_clear = pata_at32_irq_clear, | ||
205 | .irq_on = ata_irq_on, | ||
206 | .irq_ack = ata_irq_ack, | ||
207 | |||
208 | .port_start = ata_sff_port_start, | ||
209 | }; | ||
210 | |||
211 | static int __init pata_at32_init_one(struct device *dev, | ||
212 | struct at32_ide_info *info) | ||
213 | { | ||
214 | struct ata_host *host; | ||
215 | struct ata_port *ap; | ||
216 | |||
217 | host = ata_host_alloc(dev, 1); | ||
218 | if (!host) | ||
219 | return -ENOMEM; | ||
220 | |||
221 | ap = host->ports[0]; | ||
222 | |||
223 | /* Setup ATA bindings */ | ||
224 | ap->ops = &at32_port_ops; | ||
225 | ap->pio_mask = PIO_MASK; | ||
226 | ap->flags = ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS | ||
227 | | ATA_FLAG_PIO_POLLING; | ||
228 | |||
229 | /* | ||
230 | * Since all 8-bit taskfile transfers have to go on the lower | ||
231 | * byte of the data bus and there is a bug in the SMC that | ||
232 | * makes it impossible to alter the bus width during runtime, | ||
233 | * we need to hardwire the address signals as follows: | ||
234 | * | ||
235 | * A_IDE(2:0) <= A_EBI(3:1) | ||
236 | * | ||
237 | * This makes all addresses on the EBI even, thus all data | ||
238 | * will be on the lower byte of the data bus. All addresses | ||
239 | * used by libATA need to be altered according to this. | ||
240 | */ | ||
241 | ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1); | ||
242 | ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1); | ||
243 | |||
244 | ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1); | ||
245 | ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1); | ||
246 | ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1); | ||
247 | ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1); | ||
248 | ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1); | ||
249 | ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1); | ||
250 | ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1); | ||
251 | ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1); | ||
252 | ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1); | ||
253 | ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1); | ||
254 | |||
255 | /* Set info as private data of ATA host */ | ||
256 | host->private_data = info; | ||
257 | |||
258 | /* Register ATA device and return */ | ||
259 | return ata_host_activate(host, info->irq, ata_interrupt, | ||
260 | IRQF_SHARED | IRQF_TRIGGER_RISING, | ||
261 | &at32_sht); | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * This function may come in handy for people analyzing their own | ||
266 | * EBI -> PATA adaptors. | ||
267 | */ | ||
268 | #ifdef DEBUG_BUS | ||
269 | |||
270 | static void __init pata_at32_debug_bus(struct device *dev, | ||
271 | struct at32_ide_info *info) | ||
272 | { | ||
273 | const int d1 = 0xff; | ||
274 | const int d2 = 0x00; | ||
275 | |||
276 | int i; | ||
277 | |||
278 | /* Write 8-bit values (registers) */ | ||
279 | iowrite8(d1, info->alt_addr + (0x06 << 1)); | ||
280 | iowrite8(d2, info->alt_addr + (0x06 << 1)); | ||
281 | |||
282 | for (i = 0; i < 8; i++) { | ||
283 | iowrite8(d1, info->ide_addr + (i << 1)); | ||
284 | iowrite8(d2, info->ide_addr + (i << 1)); | ||
285 | } | ||
286 | |||
287 | /* Write 16 bit values (data) */ | ||
288 | iowrite16(d1, info->ide_addr); | ||
289 | iowrite16(d1 << 8, info->ide_addr); | ||
290 | |||
291 | iowrite16(d1, info->ide_addr); | ||
292 | iowrite16(d1 << 8, info->ide_addr); | ||
293 | } | ||
294 | |||
295 | #endif | ||
296 | |||
297 | static int __init pata_at32_probe(struct platform_device *pdev) | ||
298 | { | ||
299 | const struct ata_timing initial_timing = | ||
300 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; | ||
301 | |||
302 | struct device *dev = &pdev->dev; | ||
303 | struct at32_ide_info *info; | ||
304 | struct ide_platform_data *board = pdev->dev.platform_data; | ||
305 | struct resource *res; | ||
306 | |||
307 | int irq; | ||
308 | int ret; | ||
309 | |||
310 | if (!board) | ||
311 | return -ENXIO; | ||
312 | |||
313 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
314 | if (!res) | ||
315 | return -ENXIO; | ||
316 | |||
317 | /* Retrieve IRQ */ | ||
318 | irq = platform_get_irq(pdev, 0); | ||
319 | if (irq < 0) | ||
320 | return irq; | ||
321 | |||
322 | /* Setup struct containing private information */ | ||
323 | info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL); | ||
324 | if (!info) | ||
325 | return -ENOMEM; | ||
326 | |||
327 | memset(info, 0, sizeof(struct at32_ide_info)); | ||
328 | |||
329 | info->irq = irq; | ||
330 | info->cs = board->cs; | ||
331 | |||
332 | /* Request memory resources */ | ||
333 | info->res_ide.start = res->start + CF_IDE_OFFSET; | ||
334 | info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1; | ||
335 | info->res_ide.name = "ide"; | ||
336 | info->res_ide.flags = IORESOURCE_MEM; | ||
337 | |||
338 | ret = request_resource(res, &info->res_ide); | ||
339 | if (ret) | ||
340 | goto err_req_res_ide; | ||
341 | |||
342 | info->res_alt.start = res->start + CF_ALT_IDE_OFFSET; | ||
343 | info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1; | ||
344 | info->res_alt.name = "alt"; | ||
345 | info->res_alt.flags = IORESOURCE_MEM; | ||
346 | |||
347 | ret = request_resource(res, &info->res_alt); | ||
348 | if (ret) | ||
349 | goto err_req_res_alt; | ||
350 | |||
351 | /* Setup non-timing elements of SMC */ | ||
352 | info->smc.bus_width = 2; /* 16 bit data bus */ | ||
353 | info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */ | ||
354 | info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */ | ||
355 | info->smc.nwait_mode = 3; /* NWAIT is in READY mode */ | ||
356 | info->smc.byte_write = 0; /* Byte select access type */ | ||
357 | info->smc.tdf_mode = 0; /* TDF optimization disabled */ | ||
358 | info->smc.tdf_cycles = 0; /* No TDF wait cycles */ | ||
359 | |||
360 | /* Setup ATA timing */ | ||
361 | ret = pata_at32_setup_timing(dev, info, &initial_timing); | ||
362 | if (ret) | ||
363 | goto err_setup_timing; | ||
364 | |||
365 | /* Setup ATA addresses */ | ||
366 | ret = -ENOMEM; | ||
367 | info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16); | ||
368 | info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16); | ||
369 | if (!info->ide_addr || !info->alt_addr) | ||
370 | goto err_ioremap; | ||
371 | |||
372 | #ifdef DEBUG_BUS | ||
373 | pata_at32_debug_bus(dev, info); | ||
374 | #endif | ||
375 | |||
376 | /* Register ATA device */ | ||
377 | ret = pata_at32_init_one(dev, info); | ||
378 | if (ret) | ||
379 | goto err_ata_device; | ||
380 | |||
381 | return 0; | ||
382 | |||
383 | err_ata_device: | ||
384 | err_ioremap: | ||
385 | err_setup_timing: | ||
386 | release_resource(&info->res_alt); | ||
387 | err_req_res_alt: | ||
388 | release_resource(&info->res_ide); | ||
389 | err_req_res_ide: | ||
390 | kfree(info); | ||
391 | |||
392 | return ret; | ||
393 | } | ||
394 | |||
395 | static int __exit pata_at32_remove(struct platform_device *pdev) | ||
396 | { | ||
397 | struct ata_host *host = platform_get_drvdata(pdev); | ||
398 | struct at32_ide_info *info; | ||
399 | |||
400 | if (!host) | ||
401 | return 0; | ||
402 | |||
403 | info = host->private_data; | ||
404 | ata_host_detach(host); | ||
405 | |||
406 | if (!info) | ||
407 | return 0; | ||
408 | |||
409 | release_resource(&info->res_ide); | ||
410 | release_resource(&info->res_alt); | ||
411 | |||
412 | kfree(info); | ||
413 | |||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | static struct platform_driver pata_at32_driver = { | ||
418 | .remove = __exit_p(pata_at32_remove), | ||
419 | .driver = { | ||
420 | .name = "at32_ide", | ||
421 | .owner = THIS_MODULE, | ||
422 | }, | ||
423 | }; | ||
424 | |||
425 | static int __init pata_at32_init(void) | ||
426 | { | ||
427 | return platform_driver_probe(&pata_at32_driver, pata_at32_probe); | ||
428 | } | ||
429 | |||
430 | static void __exit pata_at32_exit(void) | ||
431 | { | ||
432 | platform_driver_unregister(&pata_at32_driver); | ||
433 | } | ||
434 | |||
435 | module_init(pata_at32_init); | ||
436 | module_exit(pata_at32_exit); | ||
437 | |||
438 | MODULE_LICENSE("GPL"); | ||
439 | MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver"); | ||
440 | MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>"); | ||
441 | MODULE_VERSION(DRV_VERSION); | ||
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 86f85a2cab7e..9623f5295530 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c | |||
@@ -33,8 +33,9 @@ enum { | |||
33 | ATIIXP_IDE_UDMA_MODE = 0x56 | 33 | ATIIXP_IDE_UDMA_MODE = 0x56 |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline) | 36 | static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline) |
37 | { | 37 | { |
38 | struct ata_port *ap = link->ap; | ||
38 | static const struct pci_bits atiixp_enable_bits[] = { | 39 | static const struct pci_bits atiixp_enable_bits[] = { |
39 | { 0x48, 1, 0x01, 0x00 }, | 40 | { 0x48, 1, 0x01, 0x00 }, |
40 | { 0x48, 1, 0x08, 0x00 } | 41 | { 0x48, 1, 0x08, 0x00 } |
@@ -44,7 +45,7 @@ static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
44 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) | 45 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) |
45 | return -ENOENT; | 46 | return -ENOENT; |
46 | 47 | ||
47 | return ata_std_prereset(ap, deadline); | 48 | return ata_std_prereset(link, deadline); |
48 | } | 49 | } |
49 | 50 | ||
50 | static void atiixp_error_handler(struct ata_port *ap) | 51 | static void atiixp_error_handler(struct ata_port *ap) |
@@ -172,6 +173,9 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
172 | * | 173 | * |
173 | * When DMA begins we need to ensure that the UDMA control | 174 | * When DMA begins we need to ensure that the UDMA control |
174 | * register for the channel is correctly set. | 175 | * register for the channel is correctly set. |
176 | * | ||
177 | * Note: The host lock held by the libata layer protects | ||
178 | * us from two channels both trying to set DMA bits at once | ||
175 | */ | 179 | */ |
176 | 180 | ||
177 | static void atiixp_bmdma_start(struct ata_queued_cmd *qc) | 181 | static void atiixp_bmdma_start(struct ata_queued_cmd *qc) |
@@ -198,6 +202,9 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc) | |||
198 | * | 202 | * |
199 | * DMA has completed. Clear the UDMA flag as the next operations will | 203 | * DMA has completed. Clear the UDMA flag as the next operations will |
200 | * be PIO ones not UDMA data transfer. | 204 | * be PIO ones not UDMA data transfer. |
205 | * | ||
206 | * Note: The host lock held by the libata layer protects | ||
207 | * us from two channels both trying to set DMA bits at once | ||
201 | */ | 208 | */ |
202 | 209 | ||
203 | static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) | 210 | static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) |
@@ -232,7 +239,6 @@ static struct scsi_host_template atiixp_sht = { | |||
232 | }; | 239 | }; |
233 | 240 | ||
234 | static struct ata_port_operations atiixp_port_ops = { | 241 | static struct ata_port_operations atiixp_port_ops = { |
235 | .port_disable = ata_port_disable, | ||
236 | .set_piomode = atiixp_set_piomode, | 242 | .set_piomode = atiixp_set_piomode, |
237 | .set_dmamode = atiixp_set_dmamode, | 243 | .set_dmamode = atiixp_set_dmamode, |
238 | .mode_filter = ata_pci_default_filter, | 244 | .mode_filter = ata_pci_default_filter, |
@@ -261,9 +267,8 @@ static struct ata_port_operations atiixp_port_ops = { | |||
261 | .irq_handler = ata_interrupt, | 267 | .irq_handler = ata_interrupt, |
262 | .irq_clear = ata_bmdma_irq_clear, | 268 | .irq_clear = ata_bmdma_irq_clear, |
263 | .irq_on = ata_irq_on, | 269 | .irq_on = ata_irq_on, |
264 | .irq_ack = ata_irq_ack, | ||
265 | 270 | ||
266 | .port_start = ata_port_start, | 271 | .port_start = ata_sff_port_start, |
267 | }; | 272 | }; |
268 | 273 | ||
269 | static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 274 | static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c new file mode 100644 index 000000000000..747549e4563a --- /dev/null +++ b/drivers/ata/pata_bf54x.c | |||
@@ -0,0 +1,1627 @@ | |||
1 | /* | ||
2 | * File: drivers/ata/pata_bf54x.c | ||
3 | * Author: Sonic Zhang <sonic.zhang@analog.com> | ||
4 | * | ||
5 | * Created: | ||
6 | * Description: PATA Driver for blackfin 54x | ||
7 | * | ||
8 | * Modified: | ||
9 | * Copyright 2007 Analog Devices Inc. | ||
10 | * | ||
11 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, see the file COPYING, or write | ||
25 | * to the Free Software Foundation, Inc., | ||
26 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
27 | */ | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/blkdev.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <scsi/scsi_host.h> | ||
37 | #include <linux/libata.h> | ||
38 | #include <linux/platform_device.h> | ||
39 | #include <asm/dma.h> | ||
40 | #include <asm/gpio.h> | ||
41 | #include <asm/portmux.h> | ||
42 | |||
43 | #define DRV_NAME "pata-bf54x" | ||
44 | #define DRV_VERSION "0.9" | ||
45 | |||
46 | #define ATA_REG_CTRL 0x0E | ||
47 | #define ATA_REG_ALTSTATUS ATA_REG_CTRL | ||
48 | |||
49 | /* These are the offset of the controller's registers */ | ||
50 | #define ATAPI_OFFSET_CONTROL 0x00 | ||
51 | #define ATAPI_OFFSET_STATUS 0x04 | ||
52 | #define ATAPI_OFFSET_DEV_ADDR 0x08 | ||
53 | #define ATAPI_OFFSET_DEV_TXBUF 0x0c | ||
54 | #define ATAPI_OFFSET_DEV_RXBUF 0x10 | ||
55 | #define ATAPI_OFFSET_INT_MASK 0x14 | ||
56 | #define ATAPI_OFFSET_INT_STATUS 0x18 | ||
57 | #define ATAPI_OFFSET_XFER_LEN 0x1c | ||
58 | #define ATAPI_OFFSET_LINE_STATUS 0x20 | ||
59 | #define ATAPI_OFFSET_SM_STATE 0x24 | ||
60 | #define ATAPI_OFFSET_TERMINATE 0x28 | ||
61 | #define ATAPI_OFFSET_PIO_TFRCNT 0x2c | ||
62 | #define ATAPI_OFFSET_DMA_TFRCNT 0x30 | ||
63 | #define ATAPI_OFFSET_UMAIN_TFRCNT 0x34 | ||
64 | #define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38 | ||
65 | #define ATAPI_OFFSET_REG_TIM_0 0x40 | ||
66 | #define ATAPI_OFFSET_PIO_TIM_0 0x44 | ||
67 | #define ATAPI_OFFSET_PIO_TIM_1 0x48 | ||
68 | #define ATAPI_OFFSET_MULTI_TIM_0 0x50 | ||
69 | #define ATAPI_OFFSET_MULTI_TIM_1 0x54 | ||
70 | #define ATAPI_OFFSET_MULTI_TIM_2 0x58 | ||
71 | #define ATAPI_OFFSET_ULTRA_TIM_0 0x60 | ||
72 | #define ATAPI_OFFSET_ULTRA_TIM_1 0x64 | ||
73 | #define ATAPI_OFFSET_ULTRA_TIM_2 0x68 | ||
74 | #define ATAPI_OFFSET_ULTRA_TIM_3 0x6c | ||
75 | |||
76 | |||
77 | #define ATAPI_GET_CONTROL(base)\ | ||
78 | bfin_read16(base + ATAPI_OFFSET_CONTROL) | ||
79 | #define ATAPI_SET_CONTROL(base, val)\ | ||
80 | bfin_write16(base + ATAPI_OFFSET_CONTROL, val) | ||
81 | #define ATAPI_GET_STATUS(base)\ | ||
82 | bfin_read16(base + ATAPI_OFFSET_STATUS) | ||
83 | #define ATAPI_GET_DEV_ADDR(base)\ | ||
84 | bfin_read16(base + ATAPI_OFFSET_DEV_ADDR) | ||
85 | #define ATAPI_SET_DEV_ADDR(base, val)\ | ||
86 | bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val) | ||
87 | #define ATAPI_GET_DEV_TXBUF(base)\ | ||
88 | bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF) | ||
89 | #define ATAPI_SET_DEV_TXBUF(base, val)\ | ||
90 | bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val) | ||
91 | #define ATAPI_GET_DEV_RXBUF(base)\ | ||
92 | bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF) | ||
93 | #define ATAPI_SET_DEV_RXBUF(base, val)\ | ||
94 | bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val) | ||
95 | #define ATAPI_GET_INT_MASK(base)\ | ||
96 | bfin_read16(base + ATAPI_OFFSET_INT_MASK) | ||
97 | #define ATAPI_SET_INT_MASK(base, val)\ | ||
98 | bfin_write16(base + ATAPI_OFFSET_INT_MASK, val) | ||
99 | #define ATAPI_GET_INT_STATUS(base)\ | ||
100 | bfin_read16(base + ATAPI_OFFSET_INT_STATUS) | ||
101 | #define ATAPI_SET_INT_STATUS(base, val)\ | ||
102 | bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val) | ||
103 | #define ATAPI_GET_XFER_LEN(base)\ | ||
104 | bfin_read16(base + ATAPI_OFFSET_XFER_LEN) | ||
105 | #define ATAPI_SET_XFER_LEN(base, val)\ | ||
106 | bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val) | ||
107 | #define ATAPI_GET_LINE_STATUS(base)\ | ||
108 | bfin_read16(base + ATAPI_OFFSET_LINE_STATUS) | ||
109 | #define ATAPI_GET_SM_STATE(base)\ | ||
110 | bfin_read16(base + ATAPI_OFFSET_SM_STATE) | ||
111 | #define ATAPI_GET_TERMINATE(base)\ | ||
112 | bfin_read16(base + ATAPI_OFFSET_TERMINATE) | ||
113 | #define ATAPI_SET_TERMINATE(base, val)\ | ||
114 | bfin_write16(base + ATAPI_OFFSET_TERMINATE, val) | ||
115 | #define ATAPI_GET_PIO_TFRCNT(base)\ | ||
116 | bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT) | ||
117 | #define ATAPI_GET_DMA_TFRCNT(base)\ | ||
118 | bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT) | ||
119 | #define ATAPI_GET_UMAIN_TFRCNT(base)\ | ||
120 | bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT) | ||
121 | #define ATAPI_GET_UDMAOUT_TFRCNT(base)\ | ||
122 | bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT) | ||
123 | #define ATAPI_GET_REG_TIM_0(base)\ | ||
124 | bfin_read16(base + ATAPI_OFFSET_REG_TIM_0) | ||
125 | #define ATAPI_SET_REG_TIM_0(base, val)\ | ||
126 | bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val) | ||
127 | #define ATAPI_GET_PIO_TIM_0(base)\ | ||
128 | bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0) | ||
129 | #define ATAPI_SET_PIO_TIM_0(base, val)\ | ||
130 | bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val) | ||
131 | #define ATAPI_GET_PIO_TIM_1(base)\ | ||
132 | bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1) | ||
133 | #define ATAPI_SET_PIO_TIM_1(base, val)\ | ||
134 | bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val) | ||
135 | #define ATAPI_GET_MULTI_TIM_0(base)\ | ||
136 | bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0) | ||
137 | #define ATAPI_SET_MULTI_TIM_0(base, val)\ | ||
138 | bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val) | ||
139 | #define ATAPI_GET_MULTI_TIM_1(base)\ | ||
140 | bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1) | ||
141 | #define ATAPI_SET_MULTI_TIM_1(base, val)\ | ||
142 | bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val) | ||
143 | #define ATAPI_GET_MULTI_TIM_2(base)\ | ||
144 | bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2) | ||
145 | #define ATAPI_SET_MULTI_TIM_2(base, val)\ | ||
146 | bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val) | ||
147 | #define ATAPI_GET_ULTRA_TIM_0(base)\ | ||
148 | bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0) | ||
149 | #define ATAPI_SET_ULTRA_TIM_0(base, val)\ | ||
150 | bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val) | ||
151 | #define ATAPI_GET_ULTRA_TIM_1(base)\ | ||
152 | bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1) | ||
153 | #define ATAPI_SET_ULTRA_TIM_1(base, val)\ | ||
154 | bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val) | ||
155 | #define ATAPI_GET_ULTRA_TIM_2(base)\ | ||
156 | bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2) | ||
157 | #define ATAPI_SET_ULTRA_TIM_2(base, val)\ | ||
158 | bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val) | ||
159 | #define ATAPI_GET_ULTRA_TIM_3(base)\ | ||
160 | bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3) | ||
161 | #define ATAPI_SET_ULTRA_TIM_3(base, val)\ | ||
162 | bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val) | ||
163 | |||
164 | /** | ||
165 | * PIO Mode - Frequency compatibility | ||
166 | */ | ||
167 | /* mode: 0 1 2 3 4 */ | ||
168 | static const u32 pio_fsclk[] = | ||
169 | { 33333333, 33333333, 33333333, 33333333, 33333333 }; | ||
170 | |||
171 | /** | ||
172 | * MDMA Mode - Frequency compatibility | ||
173 | */ | ||
174 | /* mode: 0 1 2 */ | ||
175 | static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 }; | ||
176 | |||
177 | /** | ||
178 | * UDMA Mode - Frequency compatibility | ||
179 | * | ||
180 | * UDMA5 - 100 MB/s - SCLK = 133 MHz | ||
181 | * UDMA4 - 66 MB/s - SCLK >= 80 MHz | ||
182 | * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz | ||
183 | * UDMA2 - 33 MB/s - SCLK >= 40 MHz | ||
184 | */ | ||
185 | /* mode: 0 1 2 3 4 5 */ | ||
186 | static const u32 udma_fsclk[] = | ||
187 | { 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 }; | ||
188 | |||
189 | /** | ||
190 | * Register transfer timing table | ||
191 | */ | ||
192 | /* mode: 0 1 2 3 4 */ | ||
193 | /* Cycle Time */ | ||
194 | static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 }; | ||
195 | /* DIOR/DIOW to end cycle */ | ||
196 | static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 }; | ||
197 | /* DIOR/DIOW asserted pulse width */ | ||
198 | static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 }; | ||
199 | |||
200 | /** | ||
201 | * PIO timing table | ||
202 | */ | ||
203 | /* mode: 0 1 2 3 4 */ | ||
204 | /* Cycle Time */ | ||
205 | static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 }; | ||
206 | /* Address valid to DIOR/DIORW */ | ||
207 | static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 }; | ||
208 | /* DIOR/DIOW to end cycle */ | ||
209 | static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 }; | ||
210 | /* DIOR/DIOW asserted pulse width */ | ||
211 | static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 }; | ||
212 | /* DIOW data hold */ | ||
213 | static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 }; | ||
214 | |||
215 | /* ****************************************************************** | ||
216 | * Multiword DMA timing table | ||
217 | * ****************************************************************** | ||
218 | */ | ||
219 | /* mode: 0 1 2 */ | ||
220 | /* Cycle Time */ | ||
221 | static const u32 mdma_t0min[] = { 480, 150, 120 }; | ||
222 | /* DIOR/DIOW asserted pulse width */ | ||
223 | static const u32 mdma_tdmin[] = { 215, 80, 70 }; | ||
224 | /* DMACK to read data released */ | ||
225 | static const u32 mdma_thmin[] = { 20, 15, 10 }; | ||
226 | /* DIOR/DIOW to DMACK hold */ | ||
227 | static const u32 mdma_tjmin[] = { 20, 5, 5 }; | ||
228 | /* DIOR negated pulse width */ | ||
229 | static const u32 mdma_tkrmin[] = { 50, 50, 25 }; | ||
230 | /* DIOR negated pulse width */ | ||
231 | static const u32 mdma_tkwmin[] = { 215, 50, 25 }; | ||
232 | /* CS[1:0] valid to DIOR/DIOW */ | ||
233 | static const u32 mdma_tmmin[] = { 50, 30, 25 }; | ||
234 | /* DMACK to read data released */ | ||
235 | static const u32 mdma_tzmax[] = { 20, 25, 25 }; | ||
236 | |||
237 | /** | ||
238 | * Ultra DMA timing table | ||
239 | */ | ||
240 | /* mode: 0 1 2 3 4 5 */ | ||
241 | static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 }; | ||
242 | static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 }; | ||
243 | static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 }; | ||
244 | static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 }; | ||
245 | static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 }; | ||
246 | |||
247 | |||
248 | static const u32 udma_tmlimin = 20; | ||
249 | static const u32 udma_tzahmin = 20; | ||
250 | static const u32 udma_tenvmin = 20; | ||
251 | static const u32 udma_tackmin = 20; | ||
252 | static const u32 udma_tssmin = 50; | ||
253 | |||
254 | /** | ||
255 | * | ||
256 | * Function: num_clocks_min | ||
257 | * | ||
258 | * Description: | ||
259 | * calculate number of SCLK cycles to meet minimum timing | ||
260 | */ | ||
261 | static unsigned short num_clocks_min(unsigned long tmin, | ||
262 | unsigned long fsclk) | ||
263 | { | ||
264 | unsigned long tmp; | ||
265 | unsigned short result; | ||
266 | |||
267 | tmp = tmin * (fsclk/1000/1000) / 1000; | ||
268 | result = (unsigned short)tmp; | ||
269 | if ((tmp*1000*1000) < (tmin*(fsclk/1000))) { | ||
270 | result++; | ||
271 | } | ||
272 | |||
273 | return result; | ||
274 | } | ||
275 | |||
276 | /** | ||
277 | * bfin_set_piomode - Initialize host controller PATA PIO timings | ||
278 | * @ap: Port whose timings we are configuring | ||
279 | * @adev: Device whose PIO timings we are configuring | ||
280 | * | ||
281 | * Set PIO mode for device. | ||
282 | * | ||
283 | * LOCKING: | ||
284 | * None (inherited from caller). | ||
285 | */ | ||
286 | |||
287 | static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
288 | { | ||
289 | int mode = adev->pio_mode - XFER_PIO_0; | ||
290 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
291 | unsigned int fsclk = get_sclk(); | ||
292 | unsigned short teoc_reg, t2_reg, teoc_pio; | ||
293 | unsigned short t4_reg, t2_pio, t1_reg; | ||
294 | unsigned short n0, n6, t6min = 5; | ||
295 | |||
296 | /* the most restrictive timing values are t6 and tc, the DIOW data hold. | ||
297 | * If one SCLK pulse is longer than this minimum value then register | ||
298 | * transfers cannot be supported at this frequency. | ||
299 | */ | ||
300 | n6 = num_clocks_min(t6min, fsclk); | ||
301 | if (mode >= 0 && mode <= 4 && n6 >= 1) { | ||
302 | pr_debug("set piomode: mode=%d, fsclk=%u\n", mode, fsclk); | ||
303 | /* calculate the timing values for register transfers. */ | ||
304 | while (mode > 0 && pio_fsclk[mode] > fsclk) | ||
305 | mode--; | ||
306 | |||
307 | /* DIOR/DIOW to end cycle time */ | ||
308 | t2_reg = num_clocks_min(reg_t2min[mode], fsclk); | ||
309 | /* DIOR/DIOW asserted pulse width */ | ||
310 | teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk); | ||
311 | /* Cycle Time */ | ||
312 | n0 = num_clocks_min(reg_t0min[mode], fsclk); | ||
313 | |||
314 | /* increase t2 until we meet the minimum cycle length */ | ||
315 | if (t2_reg + teoc_reg < n0) | ||
316 | t2_reg = n0 - teoc_reg; | ||
317 | |||
318 | /* calculate the timing values for pio transfers. */ | ||
319 | |||
320 | /* DIOR/DIOW to end cycle time */ | ||
321 | t2_pio = num_clocks_min(pio_t2min[mode], fsclk); | ||
322 | /* DIOR/DIOW asserted pulse width */ | ||
323 | teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk); | ||
324 | /* Cycle Time */ | ||
325 | n0 = num_clocks_min(pio_t0min[mode], fsclk); | ||
326 | |||
327 | /* increase t2 until we meet the minimum cycle length */ | ||
328 | if (t2_pio + teoc_pio < n0) | ||
329 | t2_pio = n0 - teoc_pio; | ||
330 | |||
331 | /* Address valid to DIOR/DIORW */ | ||
332 | t1_reg = num_clocks_min(pio_t1min[mode], fsclk); | ||
333 | |||
334 | /* DIOW data hold */ | ||
335 | t4_reg = num_clocks_min(pio_t4min[mode], fsclk); | ||
336 | |||
337 | ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg)); | ||
338 | ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg)); | ||
339 | ATAPI_SET_PIO_TIM_1(base, teoc_pio); | ||
340 | if (mode > 2) { | ||
341 | ATAPI_SET_CONTROL(base, | ||
342 | ATAPI_GET_CONTROL(base) | IORDY_EN); | ||
343 | } else { | ||
344 | ATAPI_SET_CONTROL(base, | ||
345 | ATAPI_GET_CONTROL(base) & ~IORDY_EN); | ||
346 | } | ||
347 | |||
348 | /* Disable host ATAPI PIO interrupts */ | ||
349 | ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base) | ||
350 | & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK)); | ||
351 | SSYNC(); | ||
352 | } | ||
353 | } | ||
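As a hedged worked example of the packing above (assuming PIO mode 4 and an SCLK of exactly 133333333 Hz; the figures are derived by hand from the tables earlier in this file, not from hardware documentation): reg_teocmin = 70 ns gives 10 clocks, reg_t2min = 25 ns gives 4 clocks and reg_t0min = 120 ns gives 16 clocks, so t2_reg is stretched to 6 and REG_TIM_0 is written as (10 << 8) | 6 = 0x0a06. On the PIO side t1_reg = 4, t2_pio stretches to 12, t4_reg = 2 and teoc_pio = 4, giving PIO_TIM_0 = (2 << 12) | (12 << 4) | 4 = 0x20c4 and PIO_TIM_1 = 4, with IORDY_EN set because the mode is above 2.

/* Illustrative PIO-4 / 133 MHz register values worked out from the
 * tables above (hypothetical, for analysis only). */
#define EXAMPLE_REG_TIM_0	((10 << 8) | 6)			/* 0x0a06 */
#define EXAMPLE_PIO_TIM_0	((2 << 12) | (12 << 4) | 4)	/* 0x20c4 */
#define EXAMPLE_PIO_TIM_1	(4)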
354 | |||
355 | /** | ||
356 | * bfin_set_dmamode - Initialize host controller PATA DMA timings | ||
357 | * @ap: Port whose timings we are configuring | ||
358 | * @adev: Device whose DMA timings we are configuring | ||
359 | * (UDMA modes 0-5 and MWDMA modes 0-2 are taken from adev->dma_mode) | ||
360 | * | ||
361 | * Set UDMA or MWDMA mode for device. | ||
362 | * | ||
363 | * LOCKING: | ||
364 | * None (inherited from caller). | ||
365 | */ | ||
366 | |||
367 | static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
368 | { | ||
369 | int mode; | ||
370 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
371 | unsigned long fsclk = get_sclk(); | ||
372 | unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah; | ||
373 | unsigned short tm, td, tkr, tkw, teoc, th; | ||
374 | unsigned short n0, nf, tfmin = 5; | ||
375 | unsigned short nmin, tcyc; | ||
376 | |||
377 | mode = adev->dma_mode - XFER_UDMA_0; | ||
378 | if (mode >= 0 && mode <= 5) { | ||
379 | pr_debug("set udmamode: mode=%d\n", mode); | ||
380 | /* the most restrictive timing value is tmin, | ||
381 | * the minimum cycle timing. If one SCLK pulse is longer | ||
382 | * than this minimum value then UDMA | ||
383 | * transfers cannot be supported at this frequency. | ||
384 | */ | ||
385 | while (mode > 0 && udma_fsclk[mode] > fsclk) | ||
386 | mode--; | ||
387 | |||
388 | nmin = num_clocks_min(udma_tmin[mode], fsclk); | ||
389 | if (nmin >= 1) { | ||
390 | /* calculate the timing values for Ultra DMA. */ | ||
391 | tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk); | ||
392 | tcyc = num_clocks_min(udma_tcycmin[mode], fsclk); | ||
393 | tcyc_tdvs = 2; | ||
394 | |||
395 | /* increase tcyc - tdvs (tcyc_tdvs) until we meet | ||
396 | * the minimum cycle length | ||
397 | */ | ||
398 | if (tdvs + tcyc_tdvs < tcyc) | ||
399 | tcyc_tdvs = tcyc - tdvs; | ||
400 | |||
401 | /* Now assign the values required for the timing | ||
402 | * registers | ||
403 | */ | ||
404 | if (tcyc_tdvs < 2) | ||
405 | tcyc_tdvs = 2; | ||
406 | |||
407 | if (tdvs < 2) | ||
408 | tdvs = 2; | ||
409 | |||
410 | tack = num_clocks_min(udma_tackmin, fsclk); | ||
411 | tss = num_clocks_min(udma_tssmin, fsclk); | ||
412 | tmli = num_clocks_min(udma_tmlimin, fsclk); | ||
413 | tzah = num_clocks_min(udma_tzahmin, fsclk); | ||
414 | trp = num_clocks_min(udma_trpmin[mode], fsclk); | ||
415 | tenv = num_clocks_min(udma_tenvmin, fsclk); | ||
416 | if (tenv <= udma_tenvmax[mode]) { | ||
417 | ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack)); | ||
418 | ATAPI_SET_ULTRA_TIM_1(base, | ||
419 | (tcyc_tdvs<<8 | tdvs)); | ||
420 | ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss)); | ||
421 | ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah)); | ||
422 | |||
423 | /* Enable host ATAPI Ultra DMA interrupts */ | ||
424 | ATAPI_SET_INT_MASK(base, | ||
425 | ATAPI_GET_INT_MASK(base) | ||
426 | | UDMAIN_DONE_MASK | ||
427 | | UDMAOUT_DONE_MASK | ||
428 | | UDMAIN_TERM_MASK | ||
429 | | UDMAOUT_TERM_MASK); | ||
430 | } | ||
431 | } | ||
432 | } | ||
433 | |||
434 | mode = adev->dma_mode - XFER_MW_DMA_0; | ||
435 | if (mode >= 0 && mode <= 2) { | ||
436 | pr_debug("set mdmamode: mode=%d\n", mode); | ||
437 | /* the most restrictive timing value is tf, the DMACK to | ||
438 | * read data released. If one SCLK pulse is longer than | ||
439 | * this maximum value then the MDMA mode | ||
440 | * cannot be supported at this frequency. | ||
441 | */ | ||
442 | while (mode > 0 && mdma_fsclk[mode] > fsclk) | ||
443 | mode--; | ||
444 | |||
445 | nf = num_clocks_min(tfmin, fsclk); | ||
446 | if (nf >= 1) { | ||
447 | /* calculate the timing values for Multi-word DMA. */ | ||
448 | |||
449 | /* DIOR/DIOW asserted pulse width */ | ||
450 | td = num_clocks_min(mdma_tdmin[mode], fsclk); | ||
451 | |||
452 | /* DIOR negated pulse width */ | ||
453 | tkw = num_clocks_min(mdma_tkwmin[mode], fsclk); | ||
454 | |||
455 | /* Cycle Time */ | ||
456 | n0 = num_clocks_min(mdma_t0min[mode], fsclk); | ||
457 | |||
458 | /* increase tk until we meet the minimum cycle length */ | ||
459 | if (tkw + td < n0) | ||
460 | tkw = n0 - td; | ||
461 | |||
462 | /* DIOR negated pulse width - read */ | ||
463 | tkr = num_clocks_min(mdma_tkrmin[mode], fsclk); | ||
464 | /* CS[1:0] valid to DIOR/DIOW */ | ||
465 | tm = num_clocks_min(mdma_tmmin[mode], fsclk); | ||
466 | /* DIOR/DIOW to DMACK hold */ | ||
467 | teoc = num_clocks_min(mdma_tjmin[mode], fsclk); | ||
468 | /* DIOW Data hold */ | ||
469 | th = num_clocks_min(mdma_thmin[mode], fsclk); | ||
470 | |||
471 | ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td)); | ||
472 | ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw)); | ||
473 | ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th)); | ||
474 | |||
475 | /* Enable host ATAPI Multi DMA interrupts */ | ||
476 | ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base) | ||
477 | | MULTI_DONE_MASK | MULTI_TERM_MASK); | ||
478 | SSYNC(); | ||
479 | } | ||
480 | } | ||
481 | return; | ||
482 | } | ||
483 | |||
484 | /** | ||
485 | * | ||
486 | * Function: wait_complete | ||
487 | * | ||
488 | * Description: Waits for the interrupt from the device | ||
489 | * | ||
490 | */ | ||
491 | static inline void wait_complete(void __iomem *base, unsigned short mask) | ||
492 | { | ||
493 | unsigned short status; | ||
494 | unsigned int i = 0; | ||
495 | |||
496 | #define PATA_BF54X_WAIT_TIMEOUT 10000 | ||
497 | |||
498 | for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) { | ||
499 | status = ATAPI_GET_INT_STATUS(base) & mask; | ||
500 | if (status) | ||
501 | break; | ||
502 | } | ||
503 | |||
504 | ATAPI_SET_INT_STATUS(base, mask); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * | ||
509 | * Function: write_atapi_register | ||
510 | * | ||
511 | * Description: Writes to ATA Device Register | ||
512 | * | ||
513 | */ | ||
514 | |||
515 | static void write_atapi_register(void __iomem *base, | ||
516 | unsigned long ata_reg, unsigned short value) | ||
517 | { | ||
518 | /* Program the ATA_DEV_TXBUF register with write data (to be | ||
519 | * written into the device). | ||
520 | */ | ||
521 | ATAPI_SET_DEV_TXBUF(base, value); | ||
522 | |||
523 | /* Program the ATA_DEV_ADDR register with address of the | ||
524 | * device register (0x01 to 0x0F). | ||
525 | */ | ||
526 | ATAPI_SET_DEV_ADDR(base, ata_reg); | ||
527 | |||
528 | /* Program the ATA_CTRL register with dir set to write (1) | ||
529 | */ | ||
530 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); | ||
531 | |||
532 | /* ensure PIO DMA is not set */ | ||
533 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); | ||
534 | |||
535 | /* and start the transfer */ | ||
536 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); | ||
537 | |||
538 | /* Wait for the interrupt to indicate the end of the transfer. | ||
539 | * (We need to wait on and clear the ATA_DEV_INT interrupt status) | ||
540 | */ | ||
541 | wait_complete(base, PIO_DONE_INT); | ||
542 | } | ||
543 | |||
544 | /** | ||
545 | * | ||
546 | * Function: read_atapi_register | ||
547 | * | ||
548 | * Description: Reads from ATA Device Register | ||
549 | * | ||
550 | */ | ||
551 | |||
552 | static unsigned short read_atapi_register(void __iomem *base, | ||
553 | unsigned long ata_reg) | ||
554 | { | ||
555 | /* Program the ATA_DEV_ADDR register with address of the | ||
556 | * device register (0x01 to 0x0F). | ||
557 | */ | ||
558 | ATAPI_SET_DEV_ADDR(base, ata_reg); | ||
559 | |||
560 | /* Program the ATA_CTRL register with dir set to read (0) | ||
561 | */ | ||
562 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); | ||
563 | |||
564 | /* ensure PIO DMA is not set */ | ||
565 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); | ||
566 | |||
567 | /* and start the transfer */ | ||
568 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); | ||
569 | |||
570 | /* Wait for the interrupt to indicate the end of the transfer. | ||
571 | * (PIO_DONE interrupt is set and it doesn't seem to matter | ||
572 | * that we don't clear it) | ||
573 | */ | ||
574 | wait_complete(base, PIO_DONE_INT); | ||
575 | |||
576 | /* Read the ATA_DEV_RXBUF register to get the data | ||
577 | * read back from the device. | ||
578 | */ | ||
579 | return ATAPI_GET_DEV_RXBUF(base); | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * | ||
584 | * Function: write_atapi_data | ||
585 | * | ||
586 | * Description: Writes a block of data to the ATA device data register | ||
587 | * | ||
588 | */ | ||
589 | |||
590 | static void write_atapi_data(void __iomem *base, | ||
591 | int len, unsigned short *buf) | ||
592 | { | ||
593 | int i; | ||
594 | |||
595 | /* Set transfer length to 1 */ | ||
596 | ATAPI_SET_XFER_LEN(base, 1); | ||
597 | |||
598 | /* Program the ATA_DEV_ADDR register with address of the | ||
599 | * ATA_REG_DATA | ||
600 | */ | ||
601 | ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); | ||
602 | |||
603 | /* Program the ATA_CTRL register with dir set to write (1) | ||
604 | */ | ||
605 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); | ||
606 | |||
607 | /* ensure PIO DMA is not set */ | ||
608 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); | ||
609 | |||
610 | for (i = 0; i < len; i++) { | ||
611 | /* Program the ATA_DEV_TXBUF register with write data (to be | ||
612 | * written into the device). | ||
613 | */ | ||
614 | ATAPI_SET_DEV_TXBUF(base, buf[i]); | ||
615 | |||
616 | /* and start the transfer */ | ||
617 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); | ||
618 | |||
619 | /* Wait for the interrupt to indicate the end of the transfer. | ||
620 | * (We need to wait on and clear the ATA_DEV_INT | ||
621 | * interrupt status) | ||
622 | */ | ||
623 | wait_complete(base, PIO_DONE_INT); | ||
624 | } | ||
625 | } | ||
626 | |||
627 | /** | ||
628 | * | ||
629 | * Function: read_atapi_data | ||
630 | * | ||
631 | * Description: Reads a block of data from the ATA device data register | ||
632 | * | ||
633 | */ | ||
634 | |||
635 | static void read_atapi_data(void __iomem *base, | ||
636 | int len, unsigned short *buf) | ||
637 | { | ||
638 | int i; | ||
639 | |||
640 | /* Set transfer length to 1 */ | ||
641 | ATAPI_SET_XFER_LEN(base, 1); | ||
642 | |||
643 | /* Program the ATA_DEV_ADDR register with address of the | ||
644 | * ATA_REG_DATA | ||
645 | */ | ||
646 | ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); | ||
647 | |||
648 | /* Program the ATA_CTRL register with dir set to read (0) | ||
649 | */ | ||
650 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); | ||
651 | |||
652 | /* ensure PIO DMA is not set */ | ||
653 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); | ||
654 | |||
655 | for (i = 0; i < len; i++) { | ||
656 | /* and start the transfer */ | ||
657 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); | ||
658 | |||
659 | /* Wait for the interrupt to indicate the end of the transfer. | ||
660 | * (PIO_DONE interrupt is set and it doesn't seem to matter | ||
661 | * that we don't clear it) | ||
662 | */ | ||
663 | wait_complete(base, PIO_DONE_INT); | ||
664 | |||
665 | /* Read the ATA_DEV_RXBUF register to get the data | ||
666 | * read back from the device. | ||
667 | */ | ||
668 | buf[i] = ATAPI_GET_DEV_RXBUF(base); | ||
669 | } | ||
670 | } | ||
671 | |||
672 | /** | ||
673 | * bfin_tf_load - send taskfile registers to host controller | ||
674 | * @ap: Port to which output is sent | ||
675 | * @tf: ATA taskfile register set | ||
676 | * | ||
677 | * Note: Original code is ata_tf_load(). | ||
678 | */ | ||
679 | |||
680 | static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | ||
681 | { | ||
682 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
683 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | ||
684 | |||
685 | if (tf->ctl != ap->last_ctl) { | ||
686 | write_atapi_register(base, ATA_REG_CTRL, tf->ctl); | ||
687 | ap->last_ctl = tf->ctl; | ||
688 | ata_wait_idle(ap); | ||
689 | } | ||
690 | |||
691 | if (is_addr) { | ||
692 | if (tf->flags & ATA_TFLAG_LBA48) { | ||
693 | write_atapi_register(base, ATA_REG_FEATURE, | ||
694 | tf->hob_feature); | ||
695 | write_atapi_register(base, ATA_REG_NSECT, | ||
696 | tf->hob_nsect); | ||
697 | write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal); | ||
698 | write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam); | ||
699 | write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah); | ||
700 | pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X " | ||
701 | "0x%X 0x%X\n", | ||
702 | tf->hob_feature, | ||
703 | tf->hob_nsect, | ||
704 | tf->hob_lbal, | ||
705 | tf->hob_lbam, | ||
706 | tf->hob_lbah); | ||
707 | } | ||
708 | |||
709 | write_atapi_register(base, ATA_REG_FEATURE, tf->feature); | ||
710 | write_atapi_register(base, ATA_REG_NSECT, tf->nsect); | ||
711 | write_atapi_register(base, ATA_REG_LBAL, tf->lbal); | ||
712 | write_atapi_register(base, ATA_REG_LBAM, tf->lbam); | ||
713 | write_atapi_register(base, ATA_REG_LBAH, tf->lbah); | ||
714 | pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | ||
715 | tf->feature, | ||
716 | tf->nsect, | ||
717 | tf->lbal, | ||
718 | tf->lbam, | ||
719 | tf->lbah); | ||
720 | } | ||
721 | |||
722 | if (tf->flags & ATA_TFLAG_DEVICE) { | ||
723 | write_atapi_register(base, ATA_REG_DEVICE, tf->device); | ||
724 | pr_debug("device 0x%X\n", tf->device); | ||
725 | } | ||
726 | |||
727 | ata_wait_idle(ap); | ||
728 | } | ||
729 | |||
730 | /** | ||
731 | * bfin_check_status - Read device status reg & clear interrupt | ||
732 | * @ap: port where the device is | ||
733 | * | ||
734 | * Note: Original code is ata_check_status(). | ||
735 | */ | ||
736 | |||
737 | static u8 bfin_check_status(struct ata_port *ap) | ||
738 | { | ||
739 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
740 | return read_atapi_register(base, ATA_REG_STATUS); | ||
741 | } | ||
742 | |||
743 | /** | ||
744 | * bfin_tf_read - input device's ATA taskfile shadow registers | ||
745 | * @ap: Port from which input is read | ||
746 | * @tf: ATA taskfile register set for storing input | ||
747 | * | ||
748 | * Note: Original code is ata_tf_read(). | ||
749 | */ | ||
750 | |||
751 | static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
752 | { | ||
753 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
754 | |||
755 | tf->command = bfin_check_status(ap); | ||
756 | tf->feature = read_atapi_register(base, ATA_REG_ERR); | ||
757 | tf->nsect = read_atapi_register(base, ATA_REG_NSECT); | ||
758 | tf->lbal = read_atapi_register(base, ATA_REG_LBAL); | ||
759 | tf->lbam = read_atapi_register(base, ATA_REG_LBAM); | ||
760 | tf->lbah = read_atapi_register(base, ATA_REG_LBAH); | ||
761 | tf->device = read_atapi_register(base, ATA_REG_DEVICE); | ||
762 | |||
763 | if (tf->flags & ATA_TFLAG_LBA48) { | ||
764 | write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB); | ||
765 | tf->hob_feature = read_atapi_register(base, ATA_REG_ERR); | ||
766 | tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT); | ||
767 | tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL); | ||
768 | tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM); | ||
769 | tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | /** | ||
774 | * bfin_exec_command - issue ATA command to host controller | ||
775 | * @ap: port to which command is being issued | ||
776 | * @tf: ATA taskfile register set | ||
777 | * | ||
778 | * Note: Original code is ata_exec_command(). | ||
779 | */ | ||
780 | |||
781 | static void bfin_exec_command(struct ata_port *ap, | ||
782 | const struct ata_taskfile *tf) | ||
783 | { | ||
784 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
785 | pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command); | ||
786 | |||
787 | write_atapi_register(base, ATA_REG_CMD, tf->command); | ||
788 | ata_pause(ap); | ||
789 | } | ||
790 | |||
791 | /** | ||
792 | * bfin_check_altstatus - Read device alternate status reg | ||
793 | * @ap: port where the device is | ||
794 | */ | ||
795 | |||
796 | static u8 bfin_check_altstatus(struct ata_port *ap) | ||
797 | { | ||
798 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
799 | return read_atapi_register(base, ATA_REG_ALTSTATUS); | ||
800 | } | ||
801 | |||
802 | /** | ||
803 | * bfin_std_dev_select - Select device 0/1 on ATA bus | ||
804 | * @ap: ATA channel to manipulate | ||
805 | * @device: ATA device (numbered from zero) to select | ||
806 | * | ||
807 | * Note: Original code is ata_std_dev_select(). | ||
808 | */ | ||
809 | |||
810 | static void bfin_std_dev_select(struct ata_port *ap, unsigned int device) | ||
811 | { | ||
812 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
813 | u8 tmp; | ||
814 | |||
815 | if (device == 0) | ||
816 | tmp = ATA_DEVICE_OBS; | ||
817 | else | ||
818 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | ||
819 | |||
820 | write_atapi_register(base, ATA_REG_DEVICE, tmp); | ||
821 | ata_pause(ap); | ||
822 | } | ||
823 | |||
824 | /** | ||
825 | * bfin_bmdma_setup - Set up IDE DMA transaction | ||
826 | * @qc: Info associated with this ATA transaction. | ||
827 | * | ||
828 | * Note: Original code is ata_bmdma_setup(). | ||
829 | */ | ||
830 | |||
831 | static void bfin_bmdma_setup(struct ata_queued_cmd *qc) | ||
832 | { | ||
833 | unsigned short config = WDSIZE_16; | ||
834 | struct scatterlist *sg; | ||
835 | |||
836 | pr_debug("in atapi dma setup\n"); | ||
837 | /* Program the ATA_CTRL register with dir */ | ||
838 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
839 | /* fill the ATAPI DMA controller */ | ||
840 | set_dma_config(CH_ATAPI_TX, config); | ||
841 | set_dma_x_modify(CH_ATAPI_TX, 2); | ||
842 | ata_for_each_sg(sg, qc) { | ||
843 | set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg)); | ||
844 | set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1); | ||
845 | } | ||
846 | } else { | ||
847 | config |= WNR; | ||
848 | /* fill the ATAPI DMA controller */ | ||
849 | set_dma_config(CH_ATAPI_RX, config); | ||
850 | set_dma_x_modify(CH_ATAPI_RX, 2); | ||
851 | ata_for_each_sg(sg, qc) { | ||
852 | set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg)); | ||
853 | set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1); | ||
854 | } | ||
855 | } | ||
856 | } | ||
857 | |||
858 | /** | ||
859 | * bfin_bmdma_start - Start an IDE DMA transaction | ||
860 | * @qc: Info associated with this ATA transaction. | ||
861 | * | ||
862 | * Note: Original code is ata_bmdma_start(). | ||
863 | */ | ||
864 | |||
865 | static void bfin_bmdma_start(struct ata_queued_cmd *qc) | ||
866 | { | ||
867 | struct ata_port *ap = qc->ap; | ||
868 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
869 | struct scatterlist *sg; | ||
870 | |||
871 | pr_debug("in atapi dma start\n"); | ||
872 | if (!(ap->udma_mask || ap->mwdma_mask)) | ||
873 | return; | ||
874 | |||
875 | /* start ATAPI DMA controller*/ | ||
876 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
877 | /* | ||
878 | * On blackfin arch, uncacheable memory is not | ||
879 | * allocated with flag GFP_DMA. DMA buffer from | ||
880 | * common kernel code should be flushed if WB | ||
881 | * data cache is enabled. Otherwise, this loop | ||
882 | * is an empty loop and optimized out. | ||
883 | */ | ||
884 | ata_for_each_sg(sg, qc) { | ||
885 | flush_dcache_range(sg_dma_address(sg), | ||
886 | sg_dma_address(sg) + sg_dma_len(sg)); | ||
887 | } | ||
888 | enable_dma(CH_ATAPI_TX); | ||
889 | pr_debug("enable udma write\n"); | ||
890 | |||
891 | /* Send ATA DMA write command */ | ||
892 | bfin_exec_command(ap, &qc->tf); | ||
893 | |||
894 | /* set ATA DMA write direction */ | ||
895 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | ||
896 | | XFER_DIR)); | ||
897 | } else { | ||
898 | enable_dma(CH_ATAPI_RX); | ||
899 | pr_debug("enable udma read\n"); | ||
900 | |||
901 | /* Send ATA DMA read command */ | ||
902 | bfin_exec_command(ap, &qc->tf); | ||
903 | |||
904 | /* set ATA DMA read direction */ | ||
905 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | ||
906 | & ~XFER_DIR)); | ||
907 | } | ||
908 | |||
909 | /* Reset all transfer count */ | ||
910 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST); | ||
911 | |||
912 | /* Set transfer length to buffer len */ | ||
913 | ata_for_each_sg(sg, qc) { | ||
914 | ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); | ||
915 | } | ||
916 | |||
917 | /* Enable ATA DMA operation*/ | ||
918 | if (ap->udma_mask) | ||
919 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | ||
920 | | ULTRA_START); | ||
921 | else | ||
922 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | ||
923 | | MULTI_START); | ||
924 | } | ||
925 | |||
926 | /** | ||
927 | * bfin_bmdma_stop - Stop IDE DMA transfer | ||
928 | * @qc: Command we are ending DMA for | ||
929 | */ | ||
930 | |||
931 | static void bfin_bmdma_stop(struct ata_queued_cmd *qc) | ||
932 | { | ||
933 | struct ata_port *ap = qc->ap; | ||
934 | struct scatterlist *sg; | ||
935 | |||
936 | pr_debug("in atapi dma stop\n"); | ||
937 | if (!(ap->udma_mask || ap->mwdma_mask)) | ||
938 | return; | ||
939 | |||
940 | /* stop ATAPI DMA controller*/ | ||
941 | if (qc->tf.flags & ATA_TFLAG_WRITE) | ||
942 | disable_dma(CH_ATAPI_TX); | ||
943 | else { | ||
944 | disable_dma(CH_ATAPI_RX); | ||
945 | if (ap->hsm_task_state & HSM_ST_LAST) { | ||
946 | /* | ||
947 | * On blackfin arch, uncacheable memory is not | ||
948 | * allocated with flag GFP_DMA. DMA buffer from | ||
949 | * common kernel code should be invalidated if | ||
950 | * data cache is enabled. Otherwise, this loop | ||
951 | * is an empty loop and optimized out. | ||
952 | */ | ||
953 | ata_for_each_sg(sg, qc) { | ||
954 | invalidate_dcache_range( | ||
955 | sg_dma_address(sg), | ||
956 | sg_dma_address(sg) | ||
957 | + sg_dma_len(sg)); | ||
958 | } | ||
959 | } | ||
960 | } | ||
961 | } | ||
962 | |||
963 | /** | ||
964 | * bfin_devchk - PATA device presence detection | ||
965 | * @ap: ATA channel to examine | ||
966 | * @device: Device to examine (starting at zero) | ||
967 | * | ||
968 | * Note: Original code is ata_devchk(). | ||
969 | */ | ||
970 | |||
971 | static unsigned int bfin_devchk(struct ata_port *ap, | ||
972 | unsigned int device) | ||
973 | { | ||
974 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
975 | u8 nsect, lbal; | ||
976 | |||
977 | bfin_std_dev_select(ap, device); | ||
978 | |||
979 | write_atapi_register(base, ATA_REG_NSECT, 0x55); | ||
980 | write_atapi_register(base, ATA_REG_LBAL, 0xaa); | ||
981 | |||
982 | write_atapi_register(base, ATA_REG_NSECT, 0xaa); | ||
983 | write_atapi_register(base, ATA_REG_LBAL, 0x55); | ||
984 | |||
985 | write_atapi_register(base, ATA_REG_NSECT, 0x55); | ||
986 | write_atapi_register(base, ATA_REG_LBAL, 0xaa); | ||
987 | |||
988 | nsect = read_atapi_register(base, ATA_REG_NSECT); | ||
989 | lbal = read_atapi_register(base, ATA_REG_LBAL); | ||
990 | |||
991 | if ((nsect == 0x55) && (lbal == 0xaa)) | ||
992 | return 1; /* we found a device */ | ||
993 | |||
994 | return 0; /* nothing found */ | ||
995 | } | ||
996 | |||
997 | /** | ||
998 | * bfin_bus_post_reset - PATA device post reset | ||
999 | * | ||
1000 | * Note: Original code is ata_bus_post_reset(). | ||
1001 | */ | ||
1002 | |||
1003 | static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) | ||
1004 | { | ||
1005 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1006 | unsigned int dev0 = devmask & (1 << 0); | ||
1007 | unsigned int dev1 = devmask & (1 << 1); | ||
1008 | unsigned long timeout; | ||
1009 | |||
1010 | /* if device 0 was found in ata_devchk, wait for its | ||
1011 | * BSY bit to clear | ||
1012 | */ | ||
1013 | if (dev0) | ||
1014 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | ||
1015 | |||
1016 | /* if device 1 was found in ata_devchk, wait for | ||
1017 | * register access, then wait for BSY to clear | ||
1018 | */ | ||
1019 | timeout = jiffies + ATA_TMOUT_BOOT; | ||
1020 | while (dev1) { | ||
1021 | u8 nsect, lbal; | ||
1022 | |||
1023 | bfin_std_dev_select(ap, 1); | ||
1024 | nsect = read_atapi_register(base, ATA_REG_NSECT); | ||
1025 | lbal = read_atapi_register(base, ATA_REG_LBAL); | ||
1026 | if ((nsect == 1) && (lbal == 1)) | ||
1027 | break; | ||
1028 | if (time_after(jiffies, timeout)) { | ||
1029 | dev1 = 0; | ||
1030 | break; | ||
1031 | } | ||
1032 | msleep(50); /* give drive a breather */ | ||
1033 | } | ||
1034 | if (dev1) | ||
1035 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | ||
1036 | |||
1037 | /* is all this really necessary? */ | ||
1038 | bfin_std_dev_select(ap, 0); | ||
1039 | if (dev1) | ||
1040 | bfin_std_dev_select(ap, 1); | ||
1041 | if (dev0) | ||
1042 | bfin_std_dev_select(ap, 0); | ||
1043 | } | ||
1044 | |||
1045 | /** | ||
1046 | * bfin_bus_softreset - PATA device software reset | ||
1047 | * | ||
1048 | * Note: Original code is ata_bus_softreset(). | ||
1049 | */ | ||
1050 | |||
1051 | static unsigned int bfin_bus_softreset(struct ata_port *ap, | ||
1052 | unsigned int devmask) | ||
1053 | { | ||
1054 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1055 | |||
1056 | /* software reset. causes dev0 to be selected */ | ||
1057 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | ||
1058 | udelay(20); | ||
1059 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST); | ||
1060 | udelay(20); | ||
1061 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | ||
1062 | |||
1063 | /* spec mandates ">= 2ms" before checking status. | ||
1064 | * We wait 150ms, because that was the magic delay used for | ||
1065 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time | ||
1066 | * between when the ATA command register is written, and then | ||
1067 | * status is checked. Because waiting for "a while" before | ||
1068 | * checking status is fine, post SRST, we perform this magic | ||
1069 | * delay here as well. | ||
1070 | * | ||
1071 | * Old drivers/ide uses the 2mS rule and then waits for ready | ||
1072 | */ | ||
1073 | msleep(150); | ||
1074 | |||
1075 | /* Before we perform post reset processing we want to see if | ||
1076 | * the bus shows 0xFF because the odd clown forgets the D7 | ||
1077 | * pulldown resistor. | ||
1078 | */ | ||
1079 | if (bfin_check_status(ap) == 0xFF) | ||
1080 | return 0; | ||
1081 | |||
1082 | bfin_bus_post_reset(ap, devmask); | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | /** | ||
1088 | * bfin_std_softreset - reset host port via ATA SRST | ||
1089 | * @ap: port to reset | ||
1090 | * @classes: resulting classes of attached devices | ||
1091 | * | ||
1092 | * Note: Original code is ata_std_softreset(). | ||
1093 | */ | ||
1094 | |||
1095 | static int bfin_std_softreset(struct ata_port *ap, unsigned int *classes, | ||
1096 | unsigned long deadline) | ||
1097 | { | ||
1098 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
1099 | unsigned int devmask = 0, err_mask; | ||
1100 | u8 err; | ||
1101 | |||
1102 | if (ata_port_offline(ap)) { | ||
1103 | classes[0] = ATA_DEV_NONE; | ||
1104 | goto out; | ||
1105 | } | ||
1106 | |||
1107 | /* determine if device 0/1 are present */ | ||
1108 | if (bfin_devchk(ap, 0)) | ||
1109 | devmask |= (1 << 0); | ||
1110 | if (slave_possible && bfin_devchk(ap, 1)) | ||
1111 | devmask |= (1 << 1); | ||
1112 | |||
1113 | /* select device 0 again */ | ||
1114 | bfin_std_dev_select(ap, 0); | ||
1115 | |||
1116 | /* issue bus reset */ | ||
1117 | err_mask = bfin_bus_softreset(ap, devmask); | ||
1118 | if (err_mask) { | ||
1119 | ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n", | ||
1120 | err_mask); | ||
1121 | return -EIO; | ||
1122 | } | ||
1123 | |||
1124 | /* determine by signature whether we have ATA or ATAPI devices */ | ||
1125 | classes[0] = ata_dev_try_classify(ap, 0, &err); | ||
1126 | if (slave_possible && err != 0x81) | ||
1127 | classes[1] = ata_dev_try_classify(ap, 1, &err); | ||
1128 | |||
1129 | out: | ||
1130 | return 0; | ||
1131 | } | ||
1132 | |||
1133 | /** | ||
1134 | * bfin_bmdma_status - Read IDE DMA status | ||
1135 | * @ap: Port associated with this ATA transaction. | ||
1136 | */ | ||
1137 | |||
1138 | static unsigned char bfin_bmdma_status(struct ata_port *ap) | ||
1139 | { | ||
1140 | unsigned char host_stat = 0; | ||
1141 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1142 | unsigned short int_status = ATAPI_GET_INT_STATUS(base); | ||
1143 | |||
1144 | if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) { | ||
1145 | host_stat = ATA_DMA_ACTIVE; | ||
1146 | } | ||
1147 | if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) { | ||
1148 | host_stat = ATA_DMA_INTR; | ||
1149 | } | ||
1150 | if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) { | ||
1151 | host_stat = ATA_DMA_ERR; | ||
1152 | } | ||
1153 | |||
1154 | return host_stat; | ||
1155 | } | ||
1156 | |||
1157 | /** | ||
1158 | * bfin_data_xfer - Transfer data by PIO | ||
1159 | * @adev: device for this I/O | ||
1160 | * @buf: data buffer | ||
1161 | * @buflen: buffer length | ||
1162 | * @write_data: read/write | ||
1163 | * | ||
1164 | * Note: Original code is ata_data_xfer(). | ||
1165 | */ | ||
1166 | |||
1167 | static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf, | ||
1168 | unsigned int buflen, int write_data) | ||
1169 | { | ||
1170 | struct ata_port *ap = adev->ap; | ||
1171 | unsigned int words = buflen >> 1; | ||
1172 | unsigned short *buf16 = (u16 *) buf; | ||
1173 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1174 | |||
1175 | /* Transfer multiple of 2 bytes */ | ||
1176 | if (write_data) { | ||
1177 | write_atapi_data(base, words, buf16); | ||
1178 | } else { | ||
1179 | read_atapi_data(base, words, buf16); | ||
1180 | } | ||
1181 | |||
1182 | /* Transfer trailing 1 byte, if any. */ | ||
1183 | if (unlikely(buflen & 0x01)) { | ||
1184 | unsigned short align_buf[1] = { 0 }; | ||
1185 | unsigned char *trailing_buf = buf + buflen - 1; | ||
1186 | |||
1187 | if (write_data) { | ||
1188 | memcpy(align_buf, trailing_buf, 1); | ||
1189 | write_atapi_data(base, 1, align_buf); | ||
1190 | } else { | ||
1191 | read_atapi_data(base, 1, align_buf); | ||
1192 | memcpy(trailing_buf, align_buf, 1); | ||
1193 | } | ||
1194 | } | ||
1195 | } | ||
1196 | |||
1197 | /** | ||
1198 | * bfin_irq_clear - Clear ATAPI interrupt. | ||
1199 | * @ap: Port associated with this ATA transaction. | ||
1200 | * | ||
1201 | * Note: Original code is ata_bmdma_irq_clear(). | ||
1202 | */ | ||
1203 | |||
1204 | static void bfin_irq_clear(struct ata_port *ap) | ||
1205 | { | ||
1206 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1207 | |||
1208 | pr_debug("in atapi irq clear\n"); | ||
1209 | ATAPI_SET_INT_STATUS(base, 0x1FF); | ||
1210 | } | ||
1211 | |||
1212 | /** | ||
1213 | * bfin_irq_on - Enable interrupts on a port. | ||
1214 | * @ap: Port on which interrupts are enabled. | ||
1215 | * | ||
1216 | * Note: Original code is ata_irq_on(). | ||
1217 | */ | ||
1218 | |||
1219 | static unsigned char bfin_irq_on(struct ata_port *ap) | ||
1220 | { | ||
1221 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1222 | u8 tmp; | ||
1223 | |||
1224 | pr_debug("in atapi irq on\n"); | ||
1225 | ap->ctl &= ~ATA_NIEN; | ||
1226 | ap->last_ctl = ap->ctl; | ||
1227 | |||
1228 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | ||
1229 | tmp = ata_wait_idle(ap); | ||
1230 | |||
1231 | bfin_irq_clear(ap); | ||
1232 | |||
1233 | return tmp; | ||
1234 | } | ||
1235 | |||
1236 | /** | ||
1237 | * bfin_irq_ack - Acknowledge a device interrupt. | ||
1238 | * @ap: Port on which interrupts are enabled. | ||
1239 | * | ||
1240 | * Note: Original code is ata_irq_ack(). | ||
1241 | */ | ||
1242 | |||
1243 | static unsigned char bfin_irq_ack(struct ata_port *ap, unsigned int chk_drq) | ||
1244 | { | ||
1245 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1246 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; | ||
1247 | unsigned char status; | ||
1248 | |||
1249 | pr_debug("in atapi irq ack\n"); | ||
1250 | status = ata_busy_wait(ap, bits, 1000); | ||
1251 | if (status & bits) | ||
1252 | if (ata_msg_err(ap)) | ||
1253 | dev_err(ap->dev, "abnormal status 0x%X\n", status); | ||
1254 | |||
1255 | /* get controller status; clear intr, err bits */ | ||
1256 | ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT | ||
1257 | | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT | ||
1258 | | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT); | ||
1259 | |||
1260 | return bfin_bmdma_status(ap); | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1264 | * bfin_bmdma_freeze - Freeze DMA controller port | ||
1265 | * @ap: port to freeze | ||
1266 | * | ||
1267 | * Note: Original code is ata_bmdma_freeze(). | ||
1268 | */ | ||
1269 | |||
1270 | static void bfin_bmdma_freeze(struct ata_port *ap) | ||
1271 | { | ||
1272 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1273 | |||
1274 | pr_debug("in atapi dma freeze\n"); | ||
1275 | ap->ctl |= ATA_NIEN; | ||
1276 | ap->last_ctl = ap->ctl; | ||
1277 | |||
1278 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | ||
1279 | |||
1280 | /* Under certain circumstances, some controllers raise IRQ on | ||
1281 | * ATA_NIEN manipulation. Also, many controllers fail to mask | ||
1282 | * previously pending IRQ on ATA_NIEN assertion. Clear it. | ||
1283 | */ | ||
1284 | ata_chk_status(ap); | ||
1285 | |||
1286 | bfin_irq_clear(ap); | ||
1287 | } | ||
1288 | |||
1289 | /** | ||
1290 | * bfin_bmdma_thaw - Thaw DMA controller port | ||
1291 | * @ap: port to thaw | ||
1292 | * | ||
1293 | * Note: Original code is ata_bmdma_thaw(). | ||
1294 | */ | ||
1295 | |||
1296 | void bfin_bmdma_thaw(struct ata_port *ap) | ||
1297 | { | ||
1298 | bfin_check_status(ap); | ||
1299 | bfin_irq_clear(ap); | ||
1300 | bfin_irq_on(ap); | ||
1301 | } | ||
1302 | |||
1303 | /** | ||
1304 | * bfin_std_postreset - standard postreset callback | ||
1305 | * @ap: the target ata_port | ||
1306 | * @classes: classes of attached devices | ||
1307 | * | ||
1308 | * Note: Original code is ata_std_postreset(). | ||
1309 | */ | ||
1310 | |||
1311 | static void bfin_std_postreset(struct ata_port *ap, unsigned int *classes) | ||
1312 | { | ||
1313 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
1314 | |||
1315 | /* re-enable interrupts */ | ||
1316 | bfin_irq_on(ap); | ||
1317 | |||
1318 | /* is double-select really necessary? */ | ||
1319 | if (classes[0] != ATA_DEV_NONE) | ||
1320 | bfin_std_dev_select(ap, 1); | ||
1321 | if (classes[1] != ATA_DEV_NONE) | ||
1322 | bfin_std_dev_select(ap, 0); | ||
1323 | |||
1324 | /* bail out if no device is present */ | ||
1325 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | ||
1326 | return; | ||
1327 | } | ||
1328 | |||
1329 | /* set up device control */ | ||
1330 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | ||
1331 | } | ||
1332 | |||
1333 | /** | ||
1334 | * bfin_error_handler - Stock error handler for DMA controller | ||
1335 | * @ap: port to handle error for | ||
1336 | */ | ||
1337 | |||
1338 | static void bfin_error_handler(struct ata_port *ap) | ||
1339 | { | ||
1340 | ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL, | ||
1341 | bfin_std_postreset); | ||
1342 | } | ||
1343 | |||
1344 | static void bfin_port_stop(struct ata_port *ap) | ||
1345 | { | ||
1346 | pr_debug("in atapi port stop\n"); | ||
1347 | if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { | ||
1348 | free_dma(CH_ATAPI_RX); | ||
1349 | free_dma(CH_ATAPI_TX); | ||
1350 | } | ||
1351 | } | ||
1352 | |||
1353 | static int bfin_port_start(struct ata_port *ap) | ||
1354 | { | ||
1355 | pr_debug("in atapi port start\n"); | ||
1356 | if (!(ap->udma_mask || ap->mwdma_mask)) | ||
1357 | return 0; | ||
1358 | |||
1359 | if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { | ||
1360 | if (request_dma(CH_ATAPI_TX, | ||
1361 | "BFIN ATAPI TX DMA") >= 0) | ||
1362 | return 0; | ||
1363 | |||
1364 | free_dma(CH_ATAPI_RX); | ||
1365 | } | ||
1366 | |||
1367 | ap->udma_mask = 0; | ||
1368 | ap->mwdma_mask = 0; | ||
1369 | dev_err(ap->dev, "Unable to request ATAPI DMA!" | ||
1370 | " Continue in PIO mode.\n"); | ||
1371 | |||
1372 | return 0; | ||
1373 | } | ||
1374 | |||
1375 | static struct scsi_host_template bfin_sht = { | ||
1376 | .module = THIS_MODULE, | ||
1377 | .name = DRV_NAME, | ||
1378 | .ioctl = ata_scsi_ioctl, | ||
1379 | .queuecommand = ata_scsi_queuecmd, | ||
1380 | .can_queue = ATA_DEF_QUEUE, | ||
1381 | .this_id = ATA_SHT_THIS_ID, | ||
1382 | .sg_tablesize = SG_NONE, | ||
1383 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
1384 | .emulated = ATA_SHT_EMULATED, | ||
1385 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
1386 | .proc_name = DRV_NAME, | ||
1387 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
1388 | .slave_configure = ata_scsi_slave_config, | ||
1389 | .slave_destroy = ata_scsi_slave_destroy, | ||
1390 | .bios_param = ata_std_bios_param, | ||
1391 | #ifdef CONFIG_PM | ||
1392 | .resume = ata_scsi_device_resume, | ||
1393 | .suspend = ata_scsi_device_suspend, | ||
1394 | #endif | ||
1395 | }; | ||
1396 | |||
1397 | static const struct ata_port_operations bfin_pata_ops = { | ||
1398 | .port_disable = ata_port_disable, | ||
1399 | .set_piomode = bfin_set_piomode, | ||
1400 | .set_dmamode = bfin_set_dmamode, | ||
1401 | |||
1402 | .tf_load = bfin_tf_load, | ||
1403 | .tf_read = bfin_tf_read, | ||
1404 | .exec_command = bfin_exec_command, | ||
1405 | .check_status = bfin_check_status, | ||
1406 | .check_altstatus = bfin_check_altstatus, | ||
1407 | .dev_select = bfin_std_dev_select, | ||
1408 | |||
1409 | .bmdma_setup = bfin_bmdma_setup, | ||
1410 | .bmdma_start = bfin_bmdma_start, | ||
1411 | .bmdma_stop = bfin_bmdma_stop, | ||
1412 | .bmdma_status = bfin_bmdma_status, | ||
1413 | .data_xfer = bfin_data_xfer, | ||
1414 | |||
1415 | .qc_prep = ata_noop_qc_prep, | ||
1416 | .qc_issue = ata_qc_issue_prot, | ||
1417 | |||
1418 | .freeze = bfin_bmdma_freeze, | ||
1419 | .thaw = bfin_bmdma_thaw, | ||
1420 | .error_handler = bfin_error_handler, | ||
1421 | .post_internal_cmd = bfin_bmdma_stop, | ||
1422 | |||
1423 | .irq_handler = ata_interrupt, | ||
1424 | .irq_clear = bfin_irq_clear, | ||
1425 | .irq_on = bfin_irq_on, | ||
1426 | .irq_ack = bfin_irq_ack, | ||
1427 | |||
1428 | .port_start = bfin_port_start, | ||
1429 | .port_stop = bfin_port_stop, | ||
1430 | }; | ||
1431 | |||
1432 | static struct ata_port_info bfin_port_info[] = { | ||
1433 | { | ||
1434 | .sht = &bfin_sht, | ||
1435 | .flags = ATA_FLAG_SLAVE_POSS | ||
1436 | | ATA_FLAG_MMIO | ||
1437 | | ATA_FLAG_NO_LEGACY, | ||
1438 | .pio_mask = 0x1f, /* pio0-4 */ | ||
1439 | .mwdma_mask = 0, | ||
1440 | #ifdef CONFIG_PATA_BF54X_DMA | ||
1441 | .udma_mask = ATA_UDMA5, | ||
1442 | #else | ||
1443 | .udma_mask = 0, | ||
1444 | #endif | ||
1445 | .port_ops = &bfin_pata_ops, | ||
1446 | }, | ||
1447 | }; | ||
1448 | |||
1449 | /** | ||
1450 | * bfin_reset_controller - initialize BF54x ATAPI controller. | ||
1451 | */ | ||
1452 | |||
1453 | static int bfin_reset_controller(struct ata_host *host) | ||
1454 | { | ||
1455 | void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr; | ||
1456 | int count; | ||
1457 | unsigned short status; | ||
1458 | |||
1459 | /* Disable all ATAPI interrupts */ | ||
1460 | ATAPI_SET_INT_MASK(base, 0); | ||
1461 | SSYNC(); | ||
1462 | |||
1463 | /* Assert the RESET signal for 25us */ | ||
1464 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST); | ||
1465 | udelay(30); | ||
1466 | |||
1467 | /* Negate the RESET signal for 2ms */ | ||
1468 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST); | ||
1469 | msleep(2); | ||
1470 | |||
1471 | /* Wait on Busy flag to clear */ | ||
1472 | count = 10000000; | ||
1473 | do { | ||
1474 | status = read_atapi_register(base, ATA_REG_STATUS); | ||
1475 | } while (count-- && (status & ATA_BUSY)); | ||
1476 | |||
1477 | /* Enable only ATAPI Device interrupt */ | ||
1478 | ATAPI_SET_INT_MASK(base, 1); | ||
1479 | SSYNC(); | ||
1480 | |||
1481 | return (status & ATA_BUSY) ? 1 : 0; | ||
1482 | } | ||
1483 | |||
1484 | /** | ||
1485 | * atapi_io_port - define atapi peripheral port pins. | ||
1486 | */ | ||
1487 | static unsigned short atapi_io_port[] = { | ||
1488 | P_ATAPI_RESET, | ||
1489 | P_ATAPI_DIOR, | ||
1490 | P_ATAPI_DIOW, | ||
1491 | P_ATAPI_CS0, | ||
1492 | P_ATAPI_CS1, | ||
1493 | P_ATAPI_DMACK, | ||
1494 | P_ATAPI_DMARQ, | ||
1495 | P_ATAPI_INTRQ, | ||
1496 | P_ATAPI_IORDY, | ||
1497 | 0 | ||
1498 | }; | ||
1499 | |||
1500 | /** | ||
1501 | * bfin_atapi_probe - attach a bfin atapi interface | ||
1502 | * @pdev: platform device | ||
1503 | * | ||
1504 | * Register a bfin atapi interface. | ||
1505 | * | ||
1506 | * | ||
1507 | * Platform devices are expected to contain 2 resources per port: | ||
1508 | * | ||
1509 | * - Register base (IORESOURCE_MEM) | ||
1510 | * - IRQ (IORESOURCE_IRQ) | ||
1511 | * | ||
1512 | */ | ||
1513 | static int __devinit bfin_atapi_probe(struct platform_device *pdev) | ||
1514 | { | ||
1515 | int board_idx = 0; | ||
1516 | struct resource *res; | ||
1517 | struct ata_host *host; | ||
1518 | const struct ata_port_info *ppi[] = | ||
1519 | { &bfin_port_info[board_idx], NULL }; | ||
1520 | |||
1521 | /* | ||
1522 | * Simple resource validation .. | ||
1523 | */ | ||
1524 | if (unlikely(pdev->num_resources != 2)) { | ||
1525 | dev_err(&pdev->dev, "invalid number of resources\n"); | ||
1526 | return -EINVAL; | ||
1527 | } | ||
1528 | |||
1529 | /* | ||
1530 | * Get the register base first | ||
1531 | */ | ||
1532 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1533 | if (res == NULL) | ||
1534 | return -EINVAL; | ||
1535 | |||
1536 | /* | ||
1537 | * Now that that's out of the way, wire up the port.. | ||
1538 | */ | ||
1539 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); | ||
1540 | if (!host) | ||
1541 | return -ENOMEM; | ||
1542 | |||
1543 | host->ports[0]->ioaddr.ctl_addr = (void *)res->start; | ||
1544 | |||
1545 | if (peripheral_request_list(atapi_io_port, "atapi-io-port")) { | ||
1546 | dev_err(&pdev->dev, "Requesting Peripherals faild\n"); | ||
1547 | return -EFAULT; | ||
1548 | } | ||
1549 | |||
1550 | if (bfin_reset_controller(host)) { | ||
1551 | peripheral_free_list(atapi_io_port); | ||
1552 | dev_err(&pdev->dev, "Fail to reset ATAPI device\n"); | ||
1553 | return -EFAULT; | ||
1554 | } | ||
1555 | |||
1556 | if (ata_host_activate(host, platform_get_irq(pdev, 0), | ||
1557 | ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) { | ||
1558 | peripheral_free_list(atapi_io_port); | ||
1559 | dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); | ||
1560 | return -ENODEV; | ||
1561 | } | ||
1562 | |||
1563 | return 0; | ||
1564 | } | ||
1565 | |||
1566 | /** | ||
1567 | * bfin_atapi_remove - unplug a bfin atapi interface | ||
1568 | * @pdev: platform device | ||
1569 | * | ||
1570 | * A bfin atapi device has been unplugged. Perform the needed | ||
1571 | * cleanup. Also called on module unload for any active devices. | ||
1572 | */ | ||
1573 | static int __devexit bfin_atapi_remove(struct platform_device *pdev) | ||
1574 | { | ||
1575 | struct device *dev = &pdev->dev; | ||
1576 | struct ata_host *host = dev_get_drvdata(dev); | ||
1577 | |||
1578 | ata_host_detach(host); | ||
1579 | |||
1580 | peripheral_free_list(atapi_io_port); | ||
1581 | |||
1582 | return 0; | ||
1583 | } | ||
1584 | |||
1585 | #ifdef CONFIG_PM | ||
1586 | int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state) | ||
1587 | { | ||
1588 | return 0; | ||
1589 | } | ||
1590 | |||
1591 | int bfin_atapi_resume(struct platform_device *pdev) | ||
1592 | { | ||
1593 | return 0; | ||
1594 | } | ||
1595 | #endif | ||
1596 | |||
1597 | static struct platform_driver bfin_atapi_driver = { | ||
1598 | .probe = bfin_atapi_probe, | ||
1599 | .remove = __devexit_p(bfin_atapi_remove), | ||
1600 | .driver = { | ||
1601 | .name = DRV_NAME, | ||
1602 | .owner = THIS_MODULE, | ||
1603 | #ifdef CONFIG_PM | ||
1604 | .suspend = bfin_atapi_suspend, | ||
1605 | .resume = bfin_atapi_resume, | ||
1606 | #endif | ||
1607 | }, | ||
1608 | }; | ||
1609 | |||
1610 | static int __init bfin_atapi_init(void) | ||
1611 | { | ||
1612 | pr_info("register bfin atapi driver\n"); | ||
1613 | return platform_driver_register(&bfin_atapi_driver); | ||
1614 | } | ||
1615 | |||
1616 | static void __exit bfin_atapi_exit(void) | ||
1617 | { | ||
1618 | platform_driver_unregister(&bfin_atapi_driver); | ||
1619 | } | ||
1620 | |||
1621 | module_init(bfin_atapi_init); | ||
1622 | module_exit(bfin_atapi_exit); | ||
1623 | |||
1624 | MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); | ||
1625 | MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller"); | ||
1626 | MODULE_LICENSE("GPL"); | ||
1627 | MODULE_VERSION(DRV_VERSION); | ||
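
The probe routine above assumes the board code registers a platform device carrying exactly two resources: the memory-mapped ATAPI register block and the controller interrupt. A minimal board-file sketch of that contract might look like the following; the register base, IRQ number and device name string are illustrative placeholders, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Sketch only: the base address, IRQ number and name string below are
 * placeholders; a real board file must use the SoC's ATAPI MMR base,
 * the ATAPI interrupt line, and the driver's DRV_NAME.
 */
static struct resource bfin_atapi_resources[] = {
	[0] = {
		.start	= 0xffc03800,			/* ATAPI register block (placeholder) */
		.end	= 0xffc03800 + 0x200 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 68,				/* ATAPI interrupt line (placeholder) */
		.end	= 68,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_atapi_device = {
	.name		= "pata-bf54x",			/* assumed to match the driver's DRV_NAME */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(bfin_atapi_resources),
	.resource	= bfin_atapi_resources,
};

Registering this device with platform_device_register(&bfin_atapi_device) in the board init code is what triggers bfin_atapi_probe(); the probe then uses the first resource's start as ioaddr.ctl_addr and passes platform_get_irq(pdev, 0) to ata_host_activate(), exactly as shown above.
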
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index 0feb5ae8c486..43d198f90968 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c | |||
@@ -153,7 +153,7 @@ static int cmd640_port_start(struct ata_port *ap) | |||
153 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 153 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
154 | struct cmd640_reg *timing; | 154 | struct cmd640_reg *timing; |
155 | 155 | ||
156 | int ret = ata_port_start(ap); | 156 | int ret = ata_sff_port_start(ap); |
157 | if (ret < 0) | 157 | if (ret < 0) |
158 | return ret; | 158 | return ret; |
159 | 159 | ||
@@ -184,7 +184,6 @@ static struct scsi_host_template cmd640_sht = { | |||
184 | }; | 184 | }; |
185 | 185 | ||
186 | static struct ata_port_operations cmd640_port_ops = { | 186 | static struct ata_port_operations cmd640_port_ops = { |
187 | .port_disable = ata_port_disable, | ||
188 | .set_piomode = cmd640_set_piomode, | 187 | .set_piomode = cmd640_set_piomode, |
189 | .mode_filter = ata_pci_default_filter, | 188 | .mode_filter = ata_pci_default_filter, |
190 | .tf_load = ata_tf_load, | 189 | .tf_load = ata_tf_load, |
@@ -213,7 +212,6 @@ static struct ata_port_operations cmd640_port_ops = { | |||
213 | .irq_handler = ata_interrupt, | 212 | .irq_handler = ata_interrupt, |
214 | .irq_clear = ata_bmdma_irq_clear, | 213 | .irq_clear = ata_bmdma_irq_clear, |
215 | .irq_on = ata_irq_on, | 214 | .irq_on = ata_irq_on, |
216 | .irq_ack = ata_irq_ack, | ||
217 | 215 | ||
218 | .port_start = cmd640_port_start, | 216 | .port_start = cmd640_port_start, |
219 | }; | 217 | }; |
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index e34b632487d7..9e412c26b2a3 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/libata.h> | 31 | #include <linux/libata.h> |
32 | 32 | ||
33 | #define DRV_NAME "pata_cmd64x" | 33 | #define DRV_NAME "pata_cmd64x" |
34 | #define DRV_VERSION "0.2.4" | 34 | #define DRV_VERSION "0.2.5" |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * CMD64x specific registers definition. | 37 | * CMD64x specific registers definition. |
@@ -88,14 +88,15 @@ static int cmd648_cable_detect(struct ata_port *ap) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | /** | 90 | /** |
91 | * cmd64x_set_piomode - set initial PIO mode data | 91 | * cmd64x_set_timing - set PIO and MWDMA timing |
92 | * @ap: ATA interface | 92 | * @ap: ATA interface |
93 | * @adev: ATA device | 93 | * @adev: ATA device |
94 | * @mode: mode | ||
94 | * | 95 | * |
95 | * Called to do the PIO mode setup. | 96 | * Called to do the PIO and MWDMA mode setup. |
96 | */ | 97 | */ |
97 | 98 | ||
98 | static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) | 99 | static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode) |
99 | { | 100 | { |
100 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 101 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
101 | struct ata_timing t; | 102 | struct ata_timing t; |
@@ -117,8 +118,9 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
117 | int arttim = arttim_port[ap->port_no][adev->devno]; | 118 | int arttim = arttim_port[ap->port_no][adev->devno]; |
118 | int drwtim = drwtim_port[ap->port_no][adev->devno]; | 119 | int drwtim = drwtim_port[ap->port_no][adev->devno]; |
119 | 120 | ||
120 | 121 | /* ata_timing_compute is smart and will produce timings for MWDMA | |
121 | if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { | 122 that don't violate the drive's PIO capabilities. */
123 | if (ata_timing_compute(adev, mode, &t, T, 0) < 0) { | ||
122 | printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); | 124 | printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); |
123 | return; | 125 | return; |
124 | } | 126 | } |
@@ -168,6 +170,20 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
168 | } | 170 | } |
169 | 171 | ||
170 | /** | 172 | /** |
173 | * cmd64x_set_piomode - set initial PIO mode data | ||
174 | * @ap: ATA interface | ||
175 | * @adev: ATA device | ||
176 | * | ||
177 | * Used when configuring the devices to set the PIO timings. All the | ||
178 | * actual work is done by the PIO/MWDMA setting helper. | ||
179 | */ | ||
180 | |||
181 | static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
182 | { | ||
183 | cmd64x_set_timing(ap, adev, adev->pio_mode); | ||
184 | } | ||
185 | |||
186 | /** | ||
171 | * cmd64x_set_dmamode - set initial DMA mode data | 187 | * cmd64x_set_dmamode - set initial DMA mode data |
172 | * @ap: ATA interface | 188 | * @ap: ATA interface |
173 | * @adev: ATA device | 189 | * @adev: ATA device |
@@ -180,9 +196,6 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
180 | static const u8 udma_data[] = { | 196 | static const u8 udma_data[] = { |
181 | 0x30, 0x20, 0x10, 0x20, 0x10, 0x00 | 197 | 0x30, 0x20, 0x10, 0x20, 0x10, 0x00 |
182 | }; | 198 | }; |
183 | static const u8 mwdma_data[] = { | ||
184 | 0x30, 0x20, 0x10 | ||
185 | }; | ||
186 | 199 | ||
187 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 200 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
188 | u8 regU, regD; | 201 | u8 regU, regD; |
@@ -208,8 +221,10 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
208 | regU |= 1 << adev->devno; /* UDMA on */ | 221 | regU |= 1 << adev->devno; /* UDMA on */ |
209 | if (adev->dma_mode > 2) /* 15nS timing */ | 222 | if (adev->dma_mode > 2) /* 15nS timing */ |
210 | regU |= 4 << adev->devno; | 223 | regU |= 4 << adev->devno; |
211 | } else | 224 | } else { |
212 | regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift; | 225 | regU &= ~ (1 << adev->devno); /* UDMA off */ |
226 | cmd64x_set_timing(ap, adev, adev->dma_mode); | ||
227 | } | ||
213 | 228 | ||
214 | regD |= 0x20 << adev->devno; | 229 | regD |= 0x20 << adev->devno; |
215 | 230 | ||
@@ -269,7 +284,6 @@ static struct scsi_host_template cmd64x_sht = { | |||
269 | }; | 284 | }; |
270 | 285 | ||
271 | static struct ata_port_operations cmd64x_port_ops = { | 286 | static struct ata_port_operations cmd64x_port_ops = { |
272 | .port_disable = ata_port_disable, | ||
273 | .set_piomode = cmd64x_set_piomode, | 287 | .set_piomode = cmd64x_set_piomode, |
274 | .set_dmamode = cmd64x_set_dmamode, | 288 | .set_dmamode = cmd64x_set_dmamode, |
275 | .mode_filter = ata_pci_default_filter, | 289 | .mode_filter = ata_pci_default_filter, |
@@ -298,13 +312,11 @@ static struct ata_port_operations cmd64x_port_ops = { | |||
298 | .irq_handler = ata_interrupt, | 312 | .irq_handler = ata_interrupt, |
299 | .irq_clear = ata_bmdma_irq_clear, | 313 | .irq_clear = ata_bmdma_irq_clear, |
300 | .irq_on = ata_irq_on, | 314 | .irq_on = ata_irq_on, |
301 | .irq_ack = ata_irq_ack, | ||
302 | 315 | ||
303 | .port_start = ata_port_start, | 316 | .port_start = ata_port_start, |
304 | }; | 317 | }; |
305 | 318 | ||
306 | static struct ata_port_operations cmd646r1_port_ops = { | 319 | static struct ata_port_operations cmd646r1_port_ops = { |
307 | .port_disable = ata_port_disable, | ||
308 | .set_piomode = cmd64x_set_piomode, | 320 | .set_piomode = cmd64x_set_piomode, |
309 | .set_dmamode = cmd64x_set_dmamode, | 321 | .set_dmamode = cmd64x_set_dmamode, |
310 | .mode_filter = ata_pci_default_filter, | 322 | .mode_filter = ata_pci_default_filter, |
@@ -333,13 +345,11 @@ static struct ata_port_operations cmd646r1_port_ops = { | |||
333 | .irq_handler = ata_interrupt, | 345 | .irq_handler = ata_interrupt, |
334 | .irq_clear = ata_bmdma_irq_clear, | 346 | .irq_clear = ata_bmdma_irq_clear, |
335 | .irq_on = ata_irq_on, | 347 | .irq_on = ata_irq_on, |
336 | .irq_ack = ata_irq_ack, | ||
337 | 348 | ||
338 | .port_start = ata_port_start, | 349 | .port_start = ata_port_start, |
339 | }; | 350 | }; |
340 | 351 | ||
341 | static struct ata_port_operations cmd648_port_ops = { | 352 | static struct ata_port_operations cmd648_port_ops = { |
342 | .port_disable = ata_port_disable, | ||
343 | .set_piomode = cmd64x_set_piomode, | 353 | .set_piomode = cmd64x_set_piomode, |
344 | .set_dmamode = cmd64x_set_dmamode, | 354 | .set_dmamode = cmd64x_set_dmamode, |
345 | .mode_filter = ata_pci_default_filter, | 355 | .mode_filter = ata_pci_default_filter, |
@@ -368,7 +378,6 @@ static struct ata_port_operations cmd648_port_ops = { | |||
368 | .irq_handler = ata_interrupt, | 378 | .irq_handler = ata_interrupt, |
369 | .irq_clear = ata_bmdma_irq_clear, | 379 | .irq_clear = ata_bmdma_irq_clear, |
370 | .irq_on = ata_irq_on, | 380 | .irq_on = ata_irq_on, |
371 | .irq_ack = ata_irq_ack, | ||
372 | 381 | ||
373 | .port_start = ata_port_start, | 382 | .port_start = ata_port_start, |
374 | }; | 383 | }; |
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index e2459088cdcd..33f7f0843f4f 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c | |||
@@ -158,7 +158,6 @@ static struct scsi_host_template cs5520_sht = { | |||
158 | }; | 158 | }; |
159 | 159 | ||
160 | static struct ata_port_operations cs5520_port_ops = { | 160 | static struct ata_port_operations cs5520_port_ops = { |
161 | .port_disable = ata_port_disable, | ||
162 | .set_piomode = cs5520_set_piomode, | 161 | .set_piomode = cs5520_set_piomode, |
163 | .set_dmamode = cs5520_set_dmamode, | 162 | .set_dmamode = cs5520_set_dmamode, |
164 | 163 | ||
@@ -184,13 +183,14 @@ static struct ata_port_operations cs5520_port_ops = { | |||
184 | 183 | ||
185 | .irq_clear = ata_bmdma_irq_clear, | 184 | .irq_clear = ata_bmdma_irq_clear, |
186 | .irq_on = ata_irq_on, | 185 | .irq_on = ata_irq_on, |
187 | .irq_ack = ata_irq_ack, | ||
188 | 186 | ||
189 | .port_start = ata_port_start, | 187 | .port_start = ata_sff_port_start, |
190 | }; | 188 | }; |
191 | 189 | ||
192 | static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 190 | static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
193 | { | 191 | { |
192 | static const unsigned int cmd_port[] = { 0x1F0, 0x170 }; | ||
193 | static const unsigned int ctl_port[] = { 0x3F6, 0x376 }; | ||
194 | struct ata_port_info pi = { | 194 | struct ata_port_info pi = { |
195 | .flags = ATA_FLAG_SLAVE_POSS, | 195 | .flags = ATA_FLAG_SLAVE_POSS, |
196 | .pio_mask = 0x1f, | 196 | .pio_mask = 0x1f, |
@@ -244,10 +244,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
244 | } | 244 | } |
245 | 245 | ||
246 | /* Map IO ports and initialize host accordingly */ | 246 | /* Map IO ports and initialize host accordingly */ |
247 | iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8); | 247 | iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8); |
248 | iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1); | 248 | iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1); |
249 | iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8); | 249 | iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8); |
250 | iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1); | 250 | iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1); |
251 | iomap[4] = pcim_iomap(pdev, 2, 0); | 251 | iomap[4] = pcim_iomap(pdev, 2, 0); |
252 | 252 | ||
253 | if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) | 253 | if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) |
@@ -260,6 +260,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
260 | ioaddr->bmdma_addr = iomap[4]; | 260 | ioaddr->bmdma_addr = iomap[4]; |
261 | ata_std_ports(ioaddr); | 261 | ata_std_ports(ioaddr); |
262 | 262 | ||
263 | ata_port_desc(host->ports[0], | ||
264 | "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); | ||
265 | ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); | ||
266 | |||
263 | ioaddr = &host->ports[1]->ioaddr; | 267 | ioaddr = &host->ports[1]->ioaddr; |
264 | ioaddr->cmd_addr = iomap[2]; | 268 | ioaddr->cmd_addr = iomap[2]; |
265 | ioaddr->ctl_addr = iomap[3]; | 269 | ioaddr->ctl_addr = iomap[3]; |
@@ -267,6 +271,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
267 | ioaddr->bmdma_addr = iomap[4] + 8; | 271 | ioaddr->bmdma_addr = iomap[4] + 8; |
268 | ata_std_ports(ioaddr); | 272 | ata_std_ports(ioaddr); |
269 | 273 | ||
274 | ata_port_desc(host->ports[1], | ||
275 | "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); | ||
276 | ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); | ||
277 | |||
270 | /* activate the host */ | 278 | /* activate the host */ |
271 | pci_set_master(pdev); | 279 | pci_set_master(pdev); |
272 | rc = ata_host_start(host); | 280 | rc = ata_host_start(host); |
@@ -285,33 +293,12 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
285 | if (rc) | 293 | if (rc) |
286 | return rc; | 294 | return rc; |
287 | 295 | ||
288 | if (i == 0) | 296 | ata_port_desc(ap, "irq %d", irq[i]); |
289 | host->irq = irq[0]; | ||
290 | else | ||
291 | host->irq2 = irq[1]; | ||
292 | } | 297 | } |
293 | 298 | ||
294 | return ata_host_register(host, &cs5520_sht); | 299 | return ata_host_register(host, &cs5520_sht); |
295 | } | 300 | } |
296 | 301 | ||
297 | /** | ||
298 | * cs5520_remove_one - device unload | ||
299 | * @pdev: PCI device being removed | ||
300 | * | ||
301 | * Handle an unplug/unload event for a PCI device. Unload the | ||
302 | * PCI driver but do not use the default handler as we manage | ||
303 | * resources ourself and *MUST NOT* disable the device as it has | ||
304 | * other functions. | ||
305 | */ | ||
306 | |||
307 | static void __devexit cs5520_remove_one(struct pci_dev *pdev) | ||
308 | { | ||
309 | struct device *dev = pci_dev_to_dev(pdev); | ||
310 | struct ata_host *host = dev_get_drvdata(dev); | ||
311 | |||
312 | ata_host_detach(host); | ||
313 | } | ||
314 | |||
315 | #ifdef CONFIG_PM | 302 | #ifdef CONFIG_PM |
316 | /** | 303 | /** |
317 | * cs5520_reinit_one - device resume | 304 | * cs5520_reinit_one - device resume |
@@ -368,7 +355,7 @@ static struct pci_driver cs5520_pci_driver = { | |||
368 | .name = DRV_NAME, | 355 | .name = DRV_NAME, |
369 | .id_table = pata_cs5520, | 356 | .id_table = pata_cs5520, |
370 | .probe = cs5520_init_one, | 357 | .probe = cs5520_init_one, |
371 | .remove = cs5520_remove_one, | 358 | .remove = ata_pci_remove_one, |
372 | #ifdef CONFIG_PM | 359 | #ifdef CONFIG_PM |
373 | .suspend = cs5520_pci_device_suspend, | 360 | .suspend = cs5520_pci_device_suspend, |
374 | .resume = cs5520_reinit_one, | 361 | .resume = cs5520_reinit_one, |
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index eaaea848b649..57e827e4109e 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c | |||
@@ -179,7 +179,6 @@ static struct scsi_host_template cs5530_sht = { | |||
179 | }; | 179 | }; |
180 | 180 | ||
181 | static struct ata_port_operations cs5530_port_ops = { | 181 | static struct ata_port_operations cs5530_port_ops = { |
182 | .port_disable = ata_port_disable, | ||
183 | .set_piomode = cs5530_set_piomode, | 182 | .set_piomode = cs5530_set_piomode, |
184 | .set_dmamode = cs5530_set_dmamode, | 183 | .set_dmamode = cs5530_set_dmamode, |
185 | .mode_filter = ata_pci_default_filter, | 184 | .mode_filter = ata_pci_default_filter, |
@@ -209,9 +208,8 @@ static struct ata_port_operations cs5530_port_ops = { | |||
209 | .irq_handler = ata_interrupt, | 208 | .irq_handler = ata_interrupt, |
210 | .irq_clear = ata_bmdma_irq_clear, | 209 | .irq_clear = ata_bmdma_irq_clear, |
211 | .irq_on = ata_irq_on, | 210 | .irq_on = ata_irq_on, |
212 | .irq_ack = ata_irq_ack, | ||
213 | 211 | ||
214 | .port_start = ata_port_start, | 212 | .port_start = ata_sff_port_start, |
215 | }; | 213 | }; |
216 | 214 | ||
217 | static const struct dmi_system_id palmax_dmi_table[] = { | 215 | static const struct dmi_system_id palmax_dmi_table[] = { |
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index 360b6f32e17e..3578593a882b 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c | |||
@@ -176,7 +176,6 @@ static struct scsi_host_template cs5535_sht = { | |||
176 | }; | 176 | }; |
177 | 177 | ||
178 | static struct ata_port_operations cs5535_port_ops = { | 178 | static struct ata_port_operations cs5535_port_ops = { |
179 | .port_disable = ata_port_disable, | ||
180 | .set_piomode = cs5535_set_piomode, | 179 | .set_piomode = cs5535_set_piomode, |
181 | .set_dmamode = cs5535_set_dmamode, | 180 | .set_dmamode = cs5535_set_dmamode, |
182 | .mode_filter = ata_pci_default_filter, | 181 | .mode_filter = ata_pci_default_filter, |
@@ -206,9 +205,8 @@ static struct ata_port_operations cs5535_port_ops = { | |||
206 | .irq_handler = ata_interrupt, | 205 | .irq_handler = ata_interrupt, |
207 | .irq_clear = ata_bmdma_irq_clear, | 206 | .irq_clear = ata_bmdma_irq_clear, |
208 | .irq_on = ata_irq_on, | 207 | .irq_on = ata_irq_on, |
209 | .irq_ack = ata_irq_ack, | ||
210 | 208 | ||
211 | .port_start = ata_port_start, | 209 | .port_start = ata_sff_port_start, |
212 | }; | 210 | }; |
213 | 211 | ||
214 | /** | 212 | /** |
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 6cbc8778bf4f..fc5f9c4e5d87 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c | |||
@@ -128,7 +128,6 @@ static struct scsi_host_template cy82c693_sht = { | |||
128 | }; | 128 | }; |
129 | 129 | ||
130 | static struct ata_port_operations cy82c693_port_ops = { | 130 | static struct ata_port_operations cy82c693_port_ops = { |
131 | .port_disable = ata_port_disable, | ||
132 | .set_piomode = cy82c693_set_piomode, | 131 | .set_piomode = cy82c693_set_piomode, |
133 | .set_dmamode = cy82c693_set_dmamode, | 132 | .set_dmamode = cy82c693_set_dmamode, |
134 | .mode_filter = ata_pci_default_filter, | 133 | .mode_filter = ata_pci_default_filter, |
@@ -158,9 +157,8 @@ static struct ata_port_operations cy82c693_port_ops = { | |||
158 | .irq_handler = ata_interrupt, | 157 | .irq_handler = ata_interrupt, |
159 | .irq_clear = ata_bmdma_irq_clear, | 158 | .irq_clear = ata_bmdma_irq_clear, |
160 | .irq_on = ata_irq_on, | 159 | .irq_on = ata_irq_on, |
161 | .irq_ack = ata_irq_ack, | ||
162 | 160 | ||
163 | .port_start = ata_port_start, | 161 | .port_start = ata_sff_port_start, |
164 | }; | 162 | }; |
165 | 163 | ||
166 | static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 164 | static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index c8ba59c56114..043dcd35106c 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c | |||
@@ -26,25 +26,26 @@ | |||
26 | 26 | ||
27 | /** | 27 | /** |
28 | * efar_pre_reset - Enable bits | 28 | * efar_pre_reset - Enable bits |
29 | * @ap: Port | 29 | * @link: ATA link |
30 | * @deadline: deadline jiffies for the operation | 30 | * @deadline: deadline jiffies for the operation |
31 | * | 31 | * |
32 | * Perform cable detection for the EFAR ATA interface. This is | 32 | * Perform cable detection for the EFAR ATA interface. This is |
33 | * different to the PIIX arrangement | 33 | * different to the PIIX arrangement |
34 | */ | 34 | */ |
35 | 35 | ||
36 | static int efar_pre_reset(struct ata_port *ap, unsigned long deadline) | 36 | static int efar_pre_reset(struct ata_link *link, unsigned long deadline) |
37 | { | 37 | { |
38 | static const struct pci_bits efar_enable_bits[] = { | 38 | static const struct pci_bits efar_enable_bits[] = { |
39 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ | 39 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ |
40 | { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ | 40 | { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ |
41 | }; | 41 | }; |
42 | struct ata_port *ap = link->ap; | ||
42 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 43 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
43 | 44 | ||
44 | if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) | 45 | if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) |
45 | return -ENOENT; | 46 | return -ENOENT; |
46 | 47 | ||
47 | return ata_std_prereset(ap, deadline); | 48 | return ata_std_prereset(link, deadline); |
48 | } | 49 | } |
49 | 50 | ||
50 | /** | 51 | /** |
@@ -250,7 +251,6 @@ static struct scsi_host_template efar_sht = { | |||
250 | }; | 251 | }; |
251 | 252 | ||
252 | static const struct ata_port_operations efar_ops = { | 253 | static const struct ata_port_operations efar_ops = { |
253 | .port_disable = ata_port_disable, | ||
254 | .set_piomode = efar_set_piomode, | 254 | .set_piomode = efar_set_piomode, |
255 | .set_dmamode = efar_set_dmamode, | 255 | .set_dmamode = efar_set_dmamode, |
256 | .mode_filter = ata_pci_default_filter, | 256 | .mode_filter = ata_pci_default_filter, |
@@ -278,9 +278,8 @@ static const struct ata_port_operations efar_ops = { | |||
278 | .irq_handler = ata_interrupt, | 278 | .irq_handler = ata_interrupt, |
279 | .irq_clear = ata_bmdma_irq_clear, | 279 | .irq_clear = ata_bmdma_irq_clear, |
280 | .irq_on = ata_irq_on, | 280 | .irq_on = ata_irq_on, |
281 | .irq_ack = ata_irq_ack, | ||
282 | 281 | ||
283 | .port_start = ata_port_start, | 282 | .port_start = ata_sff_port_start, |
284 | }; | 283 | }; |
285 | 284 | ||
286 | 285 | ||
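
The prereset change at the top of this file is one instance of a conversion that recurs in the hpt37x, hpt3x2n and it8213 diffs below: prereset callbacks now receive a struct ata_link, recover the port through link->ap, and forward the link (not the port) to ata_std_prereset(). A hedged, generic sketch of the converted shape, with a hypothetical driver name and enable-bit table, looks like this:

/* Sketch of the ata_port -> ata_link prereset conversion; "foo" and the
 * enable-bit values are illustrative, not taken from any driver in this patch. */
static int foo_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits foo_enable_bits[] = {
		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
	};
	struct ata_port *ap = link->ap;		/* port is now reached through the link */
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
		return -ENOENT;			/* channel disabled by firmware */

	return ata_std_prereset(link, deadline);	/* pass the link, not the port */
}
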
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 6f7d34ad19ef..0713872cf65c 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
@@ -312,7 +312,6 @@ static struct scsi_host_template hpt36x_sht = { | |||
312 | */ | 312 | */ |
313 | 313 | ||
314 | static struct ata_port_operations hpt366_port_ops = { | 314 | static struct ata_port_operations hpt366_port_ops = { |
315 | .port_disable = ata_port_disable, | ||
316 | .set_piomode = hpt366_set_piomode, | 315 | .set_piomode = hpt366_set_piomode, |
317 | .set_dmamode = hpt366_set_dmamode, | 316 | .set_dmamode = hpt366_set_dmamode, |
318 | .mode_filter = hpt366_filter, | 317 | .mode_filter = hpt366_filter, |
@@ -342,9 +341,8 @@ static struct ata_port_operations hpt366_port_ops = { | |||
342 | .irq_handler = ata_interrupt, | 341 | .irq_handler = ata_interrupt, |
343 | .irq_clear = ata_bmdma_irq_clear, | 342 | .irq_clear = ata_bmdma_irq_clear, |
344 | .irq_on = ata_irq_on, | 343 | .irq_on = ata_irq_on, |
345 | .irq_ack = ata_irq_ack, | ||
346 | 344 | ||
347 | .port_start = ata_port_start, | 345 | .port_start = ata_sff_port_start, |
348 | }; | 346 | }; |
349 | 347 | ||
350 | /** | 348 | /** |
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index c5ddd937dbf2..e61cb1fd57b2 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
@@ -304,15 +304,16 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) | |||
304 | 304 | ||
305 | /** | 305 | /** |
306 | * hpt37x_pre_reset - reset the hpt37x bus | 306 | * hpt37x_pre_reset - reset the hpt37x bus |
307 | * @ap: ATA port to reset | 307 | * @link: ATA link to reset |
308 | * @deadline: deadline jiffies for the operation | 308 | * @deadline: deadline jiffies for the operation |
309 | * | 309 | * |
310 | * Perform the initial reset handling for the 370/372 and 374 func 0 | 310 | * Perform the initial reset handling for the 370/372 and 374 func 0 |
311 | */ | 311 | */ |
312 | 312 | ||
313 | static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline) | 313 | static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline) |
314 | { | 314 | { |
315 | u8 scr2, ata66; | 315 | u8 scr2, ata66; |
316 | struct ata_port *ap = link->ap; | ||
316 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 317 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
317 | static const struct pci_bits hpt37x_enable_bits[] = { | 318 | static const struct pci_bits hpt37x_enable_bits[] = { |
318 | { 0x50, 1, 0x04, 0x04 }, | 319 | { 0x50, 1, 0x04, 0x04 }, |
@@ -337,7 +338,7 @@ static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
337 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 338 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
338 | udelay(100); | 339 | udelay(100); |
339 | 340 | ||
340 | return ata_std_prereset(ap, deadline); | 341 | return ata_std_prereset(link, deadline); |
341 | } | 342 | } |
342 | 343 | ||
343 | /** | 344 | /** |
@@ -352,7 +353,7 @@ static void hpt37x_error_handler(struct ata_port *ap) | |||
352 | ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | 353 | ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); |
353 | } | 354 | } |
354 | 355 | ||
355 | static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline) | 356 | static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline) |
356 | { | 357 | { |
357 | static const struct pci_bits hpt37x_enable_bits[] = { | 358 | static const struct pci_bits hpt37x_enable_bits[] = { |
358 | { 0x50, 1, 0x04, 0x04 }, | 359 | { 0x50, 1, 0x04, 0x04 }, |
@@ -360,6 +361,7 @@ static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
360 | }; | 361 | }; |
361 | u16 mcr3, mcr6; | 362 | u16 mcr3, mcr6; |
362 | u8 ata66; | 363 | u8 ata66; |
364 | struct ata_port *ap = link->ap; | ||
363 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 365 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
364 | 366 | ||
365 | if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) | 367 | if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) |
@@ -387,7 +389,7 @@ static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
387 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 389 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
388 | udelay(100); | 390 | udelay(100); |
389 | 391 | ||
390 | return ata_std_prereset(ap, deadline); | 392 | return ata_std_prereset(link, deadline); |
391 | } | 393 | } |
392 | 394 | ||
393 | /** | 395 | /** |
@@ -642,7 +644,6 @@ static struct scsi_host_template hpt37x_sht = { | |||
642 | */ | 644 | */ |
643 | 645 | ||
644 | static struct ata_port_operations hpt370_port_ops = { | 646 | static struct ata_port_operations hpt370_port_ops = { |
645 | .port_disable = ata_port_disable, | ||
646 | .set_piomode = hpt370_set_piomode, | 647 | .set_piomode = hpt370_set_piomode, |
647 | .set_dmamode = hpt370_set_dmamode, | 648 | .set_dmamode = hpt370_set_dmamode, |
648 | .mode_filter = hpt370_filter, | 649 | .mode_filter = hpt370_filter, |
@@ -671,9 +672,8 @@ static struct ata_port_operations hpt370_port_ops = { | |||
671 | .irq_handler = ata_interrupt, | 672 | .irq_handler = ata_interrupt, |
672 | .irq_clear = ata_bmdma_irq_clear, | 673 | .irq_clear = ata_bmdma_irq_clear, |
673 | .irq_on = ata_irq_on, | 674 | .irq_on = ata_irq_on, |
674 | .irq_ack = ata_irq_ack, | ||
675 | 675 | ||
676 | .port_start = ata_port_start, | 676 | .port_start = ata_sff_port_start, |
677 | }; | 677 | }; |
678 | 678 | ||
679 | /* | 679 | /* |
@@ -681,7 +681,6 @@ static struct ata_port_operations hpt370_port_ops = { | |||
681 | */ | 681 | */ |
682 | 682 | ||
683 | static struct ata_port_operations hpt370a_port_ops = { | 683 | static struct ata_port_operations hpt370a_port_ops = { |
684 | .port_disable = ata_port_disable, | ||
685 | .set_piomode = hpt370_set_piomode, | 684 | .set_piomode = hpt370_set_piomode, |
686 | .set_dmamode = hpt370_set_dmamode, | 685 | .set_dmamode = hpt370_set_dmamode, |
687 | .mode_filter = hpt370a_filter, | 686 | .mode_filter = hpt370a_filter, |
@@ -710,9 +709,8 @@ static struct ata_port_operations hpt370a_port_ops = { | |||
710 | .irq_handler = ata_interrupt, | 709 | .irq_handler = ata_interrupt, |
711 | .irq_clear = ata_bmdma_irq_clear, | 710 | .irq_clear = ata_bmdma_irq_clear, |
712 | .irq_on = ata_irq_on, | 711 | .irq_on = ata_irq_on, |
713 | .irq_ack = ata_irq_ack, | ||
714 | 712 | ||
715 | .port_start = ata_port_start, | 713 | .port_start = ata_sff_port_start, |
716 | }; | 714 | }; |
717 | 715 | ||
718 | /* | 716 | /* |
@@ -721,7 +719,6 @@ static struct ata_port_operations hpt370a_port_ops = { | |||
721 | */ | 719 | */ |
722 | 720 | ||
723 | static struct ata_port_operations hpt372_port_ops = { | 721 | static struct ata_port_operations hpt372_port_ops = { |
724 | .port_disable = ata_port_disable, | ||
725 | .set_piomode = hpt372_set_piomode, | 722 | .set_piomode = hpt372_set_piomode, |
726 | .set_dmamode = hpt372_set_dmamode, | 723 | .set_dmamode = hpt372_set_dmamode, |
727 | .mode_filter = ata_pci_default_filter, | 724 | .mode_filter = ata_pci_default_filter, |
@@ -750,9 +747,8 @@ static struct ata_port_operations hpt372_port_ops = { | |||
750 | .irq_handler = ata_interrupt, | 747 | .irq_handler = ata_interrupt, |
751 | .irq_clear = ata_bmdma_irq_clear, | 748 | .irq_clear = ata_bmdma_irq_clear, |
752 | .irq_on = ata_irq_on, | 749 | .irq_on = ata_irq_on, |
753 | .irq_ack = ata_irq_ack, | ||
754 | 750 | ||
755 | .port_start = ata_port_start, | 751 | .port_start = ata_sff_port_start, |
756 | }; | 752 | }; |
757 | 753 | ||
758 | /* | 754 | /* |
@@ -761,7 +757,6 @@ static struct ata_port_operations hpt372_port_ops = { | |||
761 | */ | 757 | */ |
762 | 758 | ||
763 | static struct ata_port_operations hpt374_port_ops = { | 759 | static struct ata_port_operations hpt374_port_ops = { |
764 | .port_disable = ata_port_disable, | ||
765 | .set_piomode = hpt372_set_piomode, | 760 | .set_piomode = hpt372_set_piomode, |
766 | .set_dmamode = hpt372_set_dmamode, | 761 | .set_dmamode = hpt372_set_dmamode, |
767 | .mode_filter = ata_pci_default_filter, | 762 | .mode_filter = ata_pci_default_filter, |
@@ -790,9 +785,8 @@ static struct ata_port_operations hpt374_port_ops = { | |||
790 | .irq_handler = ata_interrupt, | 785 | .irq_handler = ata_interrupt, |
791 | .irq_clear = ata_bmdma_irq_clear, | 786 | .irq_clear = ata_bmdma_irq_clear, |
792 | .irq_on = ata_irq_on, | 787 | .irq_on = ata_irq_on, |
793 | .irq_ack = ata_irq_ack, | ||
794 | 788 | ||
795 | .port_start = ata_port_start, | 789 | .port_start = ata_sff_port_start, |
796 | }; | 790 | }; |
797 | 791 | ||
798 | /** | 792 | /** |
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index f8f234bfc8ce..9f1c084f846f 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c | |||
@@ -141,21 +141,22 @@ static int hpt3x2n_cable_detect(struct ata_port *ap) | |||
141 | 141 | ||
142 | /** | 142 | /** |
143 | * hpt3x2n_pre_reset - reset the hpt3x2n bus | 143 | * hpt3x2n_pre_reset - reset the hpt3x2n bus |
144 | * @ap: ATA port to reset | 144 | * @link: ATA link to reset |
145 | * @deadline: deadline jiffies for the operation | 145 | * @deadline: deadline jiffies for the operation |
146 | * | 146 | * |
147 | * Perform the initial reset handling for the 3x2n series controllers. | 147 | * Perform the initial reset handling for the 3x2n series controllers. |
148 | * Reset the hardware and state machine, | 148 | * Reset the hardware and state machine, |
149 | */ | 149 | */ |
150 | 150 | ||
151 | static int hpt3xn_pre_reset(struct ata_port *ap, unsigned long deadline) | 151 | static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline) |
152 | { | 152 | { |
153 | struct ata_port *ap = link->ap; | ||
153 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 154 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
154 | /* Reset the state machine */ | 155 | /* Reset the state machine */ |
155 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 156 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
156 | udelay(100); | 157 | udelay(100); |
157 | 158 | ||
158 | return ata_std_prereset(ap, deadline); | 159 | return ata_std_prereset(link, deadline); |
159 | } | 160 | } |
160 | 161 | ||
161 | /** | 162 | /** |
@@ -360,7 +361,6 @@ static struct scsi_host_template hpt3x2n_sht = { | |||
360 | */ | 361 | */ |
361 | 362 | ||
362 | static struct ata_port_operations hpt3x2n_port_ops = { | 363 | static struct ata_port_operations hpt3x2n_port_ops = { |
363 | .port_disable = ata_port_disable, | ||
364 | .set_piomode = hpt3x2n_set_piomode, | 364 | .set_piomode = hpt3x2n_set_piomode, |
365 | .set_dmamode = hpt3x2n_set_dmamode, | 365 | .set_dmamode = hpt3x2n_set_dmamode, |
366 | .mode_filter = ata_pci_default_filter, | 366 | .mode_filter = ata_pci_default_filter, |
@@ -390,9 +390,8 @@ static struct ata_port_operations hpt3x2n_port_ops = { | |||
390 | .irq_handler = ata_interrupt, | 390 | .irq_handler = ata_interrupt, |
391 | .irq_clear = ata_bmdma_irq_clear, | 391 | .irq_clear = ata_bmdma_irq_clear, |
392 | .irq_on = ata_irq_on, | 392 | .irq_on = ata_irq_on, |
393 | .irq_ack = ata_irq_ack, | ||
394 | 393 | ||
395 | .port_start = ata_port_start, | 394 | .port_start = ata_sff_port_start, |
396 | }; | 395 | }; |
397 | 396 | ||
398 | /** | 397 | /** |
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index be0f05efac6d..cb8bdb6887de 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c | |||
@@ -120,7 +120,6 @@ static struct scsi_host_template hpt3x3_sht = { | |||
120 | }; | 120 | }; |
121 | 121 | ||
122 | static struct ata_port_operations hpt3x3_port_ops = { | 122 | static struct ata_port_operations hpt3x3_port_ops = { |
123 | .port_disable = ata_port_disable, | ||
124 | .set_piomode = hpt3x3_set_piomode, | 123 | .set_piomode = hpt3x3_set_piomode, |
125 | #if defined(CONFIG_PATA_HPT3X3_DMA) | 124 | #if defined(CONFIG_PATA_HPT3X3_DMA) |
126 | .set_dmamode = hpt3x3_set_dmamode, | 125 | .set_dmamode = hpt3x3_set_dmamode, |
@@ -153,9 +152,8 @@ static struct ata_port_operations hpt3x3_port_ops = { | |||
153 | .irq_handler = ata_interrupt, | 152 | .irq_handler = ata_interrupt, |
154 | .irq_clear = ata_bmdma_irq_clear, | 153 | .irq_clear = ata_bmdma_irq_clear, |
155 | .irq_on = ata_irq_on, | 154 | .irq_on = ata_irq_on, |
156 | .irq_ack = ata_irq_ack, | ||
157 | 155 | ||
158 | .port_start = ata_port_start, | 156 | .port_start = ata_sff_port_start, |
159 | }; | 157 | }; |
160 | 158 | ||
161 | /** | 159 | /** |
@@ -239,7 +237,8 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
239 | base = host->iomap[4]; /* Bus mastering base */ | 237 | base = host->iomap[4]; /* Bus mastering base */ |
240 | 238 | ||
241 | for (i = 0; i < host->n_ports; i++) { | 239 | for (i = 0; i < host->n_ports; i++) { |
242 | struct ata_ioports *ioaddr = &host->ports[i]->ioaddr; | 240 | struct ata_port *ap = host->ports[i]; |
241 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
243 | 242 | ||
244 | ioaddr->cmd_addr = base + offset_cmd[i]; | 243 | ioaddr->cmd_addr = base + offset_cmd[i]; |
245 | ioaddr->altstatus_addr = | 244 | ioaddr->altstatus_addr = |
@@ -247,6 +246,9 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
247 | ioaddr->scr_addr = NULL; | 246 | ioaddr->scr_addr = NULL; |
248 | ata_std_ports(ioaddr); | 247 | ata_std_ports(ioaddr); |
249 | ioaddr->bmdma_addr = base + 8 * i; | 248 | ioaddr->bmdma_addr = base + 8 * i; |
249 | |||
250 | ata_port_pbar_desc(ap, 4, -1, "ioport"); | ||
251 | ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); | ||
250 | } | 252 | } |
251 | pci_set_master(pdev); | 253 | pci_set_master(pdev); |
252 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 254 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, |
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index 64a711776c45..be30923566c5 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c | |||
@@ -70,6 +70,8 @@ struct pata_icside_info { | |||
70 | unsigned int mwdma_mask; | 70 | unsigned int mwdma_mask; |
71 | unsigned int nr_ports; | 71 | unsigned int nr_ports; |
72 | const struct portinfo *port[2]; | 72 | const struct portinfo *port[2]; |
73 | unsigned long raw_base; | ||
74 | unsigned long raw_ioc_base; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #define ICS_TYPE_A3IN 0 | 77 | #define ICS_TYPE_A3IN 0 |
@@ -357,26 +359,7 @@ static void pata_icside_error_handler(struct ata_port *ap) | |||
357 | pata_icside_postreset); | 359 | pata_icside_postreset); |
358 | } | 360 | } |
359 | 361 | ||
360 | static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq) | ||
361 | { | ||
362 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; | ||
363 | u8 status; | ||
364 | |||
365 | status = ata_busy_wait(ap, bits, 1000); | ||
366 | if (status & bits) | ||
367 | if (ata_msg_err(ap)) | ||
368 | printk(KERN_ERR "abnormal status 0x%X\n", status); | ||
369 | |||
370 | if (ata_msg_intr(ap)) | ||
371 | printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n", | ||
372 | __FUNCTION__, status); | ||
373 | |||
374 | return status; | ||
375 | } | ||
376 | |||
377 | static struct ata_port_operations pata_icside_port_ops = { | 362 | static struct ata_port_operations pata_icside_port_ops = { |
378 | .port_disable = ata_port_disable, | ||
379 | |||
380 | .set_dmamode = pata_icside_set_dmamode, | 363 | .set_dmamode = pata_icside_set_dmamode, |
381 | 364 | ||
382 | .tf_load = ata_tf_load, | 365 | .tf_load = ata_tf_load, |
@@ -403,7 +386,6 @@ static struct ata_port_operations pata_icside_port_ops = { | |||
403 | 386 | ||
404 | .irq_clear = ata_dummy_noret, | 387 | .irq_clear = ata_dummy_noret, |
405 | .irq_on = ata_irq_on, | 388 | .irq_on = ata_irq_on, |
406 | .irq_ack = pata_icside_irq_ack, | ||
407 | 389 | ||
408 | .port_start = pata_icside_port_start, | 390 | .port_start = pata_icside_port_start, |
409 | 391 | ||
@@ -412,9 +394,10 @@ static struct ata_port_operations pata_icside_port_ops = { | |||
412 | }; | 394 | }; |
413 | 395 | ||
414 | static void __devinit | 396 | static void __devinit |
415 | pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base, | 397 | pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, |
416 | const struct portinfo *info) | 398 | const struct portinfo *info) |
417 | { | 399 | { |
400 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
418 | void __iomem *cmd = base + info->dataoffset; | 401 | void __iomem *cmd = base + info->dataoffset; |
419 | 402 | ||
420 | ioaddr->cmd_addr = cmd; | 403 | ioaddr->cmd_addr = cmd; |
@@ -431,6 +414,13 @@ pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base, | |||
431 | 414 | ||
432 | ioaddr->ctl_addr = base + info->ctrloffset; | 415 | ioaddr->ctl_addr = base + info->ctrloffset; |
433 | ioaddr->altstatus_addr = ioaddr->ctl_addr; | 416 | ioaddr->altstatus_addr = ioaddr->ctl_addr; |
417 | |||
418 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", | ||
419 | info->raw_base + info->dataoffset, | ||
420 | info->raw_base + info->ctrloffset); | ||
421 | |||
422 | if (info->raw_ioc_base) | ||
423 | ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); | ||
434 | } | 424 | } |
435 | 425 | ||
436 | static int __devinit pata_icside_register_v5(struct pata_icside_info *info) | 426 | static int __devinit pata_icside_register_v5(struct pata_icside_info *info) |
@@ -451,6 +441,8 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info) | |||
451 | info->nr_ports = 1; | 441 | info->nr_ports = 1; |
452 | info->port[0] = &pata_icside_portinfo_v5; | 442 | info->port[0] = &pata_icside_portinfo_v5; |
453 | 443 | ||
444 | info->raw_base = ecard_resource_start(ec, ECARD_RES_MEMC); | ||
445 | |||
454 | return 0; | 446 | return 0; |
455 | } | 447 | } |
456 | 448 | ||
@@ -491,6 +483,9 @@ static int __devinit pata_icside_register_v6(struct pata_icside_info *info) | |||
491 | info->port[0] = &pata_icside_portinfo_v6_1; | 483 | info->port[0] = &pata_icside_portinfo_v6_1; |
492 | info->port[1] = &pata_icside_portinfo_v6_2; | 484 | info->port[1] = &pata_icside_portinfo_v6_2; |
493 | 485 | ||
486 | info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI); | ||
487 | info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST); | ||
488 | |||
494 | return icside_dma_init(info); | 489 | return icside_dma_init(info); |
495 | } | 490 | } |
496 | 491 | ||
@@ -527,7 +522,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) | |||
527 | ap->flags |= ATA_FLAG_SLAVE_POSS; | 522 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
528 | ap->ops = &pata_icside_port_ops; | 523 | ap->ops = &pata_icside_port_ops; |
529 | 524 | ||
530 | pata_icside_setup_ioaddr(&ap->ioaddr, info->base, info->port[i]); | 525 | pata_icside_setup_ioaddr(ap, info->base, info->port[i]); |
531 | } | 526 | } |
532 | 527 | ||
533 | return ata_host_activate(host, ec->irq, ata_interrupt, 0, | 528 | return ata_host_activate(host, ec->irq, ata_interrupt, 0, |
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c index 9e553c54203a..88ab0e1d353f 100644 --- a/drivers/ata/pata_isapnp.c +++ b/drivers/ata/pata_isapnp.c | |||
@@ -38,7 +38,6 @@ static struct scsi_host_template isapnp_sht = { | |||
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct ata_port_operations isapnp_port_ops = { | 40 | static struct ata_port_operations isapnp_port_ops = { |
41 | .port_disable = ata_port_disable, | ||
42 | .tf_load = ata_tf_load, | 41 | .tf_load = ata_tf_load, |
43 | .tf_read = ata_tf_read, | 42 | .tf_read = ata_tf_read, |
44 | .check_status = ata_check_status, | 43 | .check_status = ata_check_status, |
@@ -58,9 +57,8 @@ static struct ata_port_operations isapnp_port_ops = { | |||
58 | 57 | ||
59 | .irq_clear = ata_bmdma_irq_clear, | 58 | .irq_clear = ata_bmdma_irq_clear, |
60 | .irq_on = ata_irq_on, | 59 | .irq_on = ata_irq_on, |
61 | .irq_ack = ata_irq_ack, | ||
62 | 60 | ||
63 | .port_start = ata_port_start, | 61 | .port_start = ata_sff_port_start, |
64 | }; | 62 | }; |
65 | 63 | ||
66 | /** | 64 | /** |
@@ -112,6 +110,10 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev | |||
112 | 110 | ||
113 | ata_std_ports(&ap->ioaddr); | 111 | ata_std_ports(&ap->ioaddr); |
114 | 112 | ||
113 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | ||
114 | (unsigned long long)pnp_port_start(idev, 0), | ||
115 | (unsigned long long)pnp_port_start(idev, 1)); | ||
116 | |||
115 | /* activate */ | 117 | /* activate */ |
116 | return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0, | 118 | return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0, |
117 | &isapnp_sht); | 119 | &isapnp_sht); |
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index b8af55e89156..1eda821e5e39 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c | |||
@@ -23,23 +23,24 @@ | |||
23 | 23 | ||
24 | /** | 24 | /** |
25 | * it8213_pre_reset - check for 40/80 pin | 25 | * it8213_pre_reset - check for 40/80 pin |
26 | * @ap: Port | 26 | * @link: link |
27 | * @deadline: deadline jiffies for the operation | 27 | * @deadline: deadline jiffies for the operation |
28 | * | 28 | * |
29 | * Filter out ports by the enable bits before doing the normal reset | 29 | * Filter out ports by the enable bits before doing the normal reset |
30 | * and probe. | 30 | * and probe. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | static int it8213_pre_reset(struct ata_port *ap, unsigned long deadline) | 33 | static int it8213_pre_reset(struct ata_link *link, unsigned long deadline) |
34 | { | 34 | { |
35 | static const struct pci_bits it8213_enable_bits[] = { | 35 | static const struct pci_bits it8213_enable_bits[] = { |
36 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ | 36 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ |
37 | }; | 37 | }; |
38 | struct ata_port *ap = link->ap; | ||
38 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 39 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
39 | if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) | 40 | if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) |
40 | return -ENOENT; | 41 | return -ENOENT; |
41 | 42 | ||
42 | return ata_std_prereset(ap, deadline); | 43 | return ata_std_prereset(link, deadline); |
43 | } | 44 | } |
44 | 45 | ||
45 | /** | 46 | /** |
@@ -260,7 +261,6 @@ static struct scsi_host_template it8213_sht = { | |||
260 | }; | 261 | }; |
261 | 262 | ||
262 | static const struct ata_port_operations it8213_ops = { | 263 | static const struct ata_port_operations it8213_ops = { |
263 | .port_disable = ata_port_disable, | ||
264 | .set_piomode = it8213_set_piomode, | 264 | .set_piomode = it8213_set_piomode, |
265 | .set_dmamode = it8213_set_dmamode, | 265 | .set_dmamode = it8213_set_dmamode, |
266 | .mode_filter = ata_pci_default_filter, | 266 | .mode_filter = ata_pci_default_filter, |
@@ -288,9 +288,8 @@ static const struct ata_port_operations it8213_ops = { | |||
288 | .irq_handler = ata_interrupt, | 288 | .irq_handler = ata_interrupt, |
289 | .irq_clear = ata_bmdma_irq_clear, | 289 | .irq_clear = ata_bmdma_irq_clear, |
290 | .irq_on = ata_irq_on, | 290 | .irq_on = ata_irq_on, |
291 | .irq_ack = ata_irq_ack, | ||
292 | 291 | ||
293 | .port_start = ata_port_start, | 292 | .port_start = ata_sff_port_start, |
294 | }; | 293 | }; |
295 | 294 | ||
296 | 295 | ||
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 5d8b91e70ecd..988ef736b936 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c | |||
@@ -391,7 +391,7 @@ static void it821x_passthru_dev_select(struct ata_port *ap, | |||
391 | { | 391 | { |
392 | struct it821x_dev *itdev = ap->private_data; | 392 | struct it821x_dev *itdev = ap->private_data; |
393 | if (itdev && device != itdev->last_device) { | 393 | if (itdev && device != itdev->last_device) { |
394 | struct ata_device *adev = &ap->device[device]; | 394 | struct ata_device *adev = &ap->link.device[device]; |
395 | it821x_program(ap, adev, itdev->pio[adev->devno]); | 395 | it821x_program(ap, adev, itdev->pio[adev->devno]); |
396 | itdev->last_device = device; | 396 | itdev->last_device = device; |
397 | } | 397 | } |
@@ -450,7 +450,7 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) | |||
450 | 450 | ||
451 | /** | 451 | /** |
452 | * it821x_smart_set_mode - mode setting | 452 | * it821x_smart_set_mode - mode setting |
453 | * @ap: interface to set up | 453 | * @link: interface to set up |
454 | * @unused: device that failed (error only) | 454 | * @unused: device that failed (error only) |
455 | * | 455 | * |
456 | * Use a non standard set_mode function. We don't want to be tuned. | 456 | * Use a non standard set_mode function. We don't want to be tuned. |
@@ -459,12 +459,11 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) | |||
459 | * and respect them. | 459 | * and respect them. |
460 | */ | 460 | */ |
461 | 461 | ||
462 | static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused) | 462 | static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused) |
463 | { | 463 | { |
464 | int i; | 464 | struct ata_device *dev; |
465 | 465 | ||
466 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 466 | ata_link_for_each_dev(dev, link) { |
467 | struct ata_device *dev = &ap->device[i]; | ||
468 | if (ata_dev_enabled(dev)) { | 467 | if (ata_dev_enabled(dev)) { |
469 | /* We don't really care */ | 468 | /* We don't really care */ |
470 | dev->pio_mode = XFER_PIO_0; | 469 | dev->pio_mode = XFER_PIO_0; |
@@ -564,7 +563,7 @@ static int it821x_port_start(struct ata_port *ap) | |||
564 | struct it821x_dev *itdev; | 563 | struct it821x_dev *itdev; |
565 | u8 conf; | 564 | u8 conf; |
566 | 565 | ||
567 | int ret = ata_port_start(ap); | 566 | int ret = ata_sff_port_start(ap); |
568 | if (ret < 0) | 567 | if (ret < 0) |
569 | return ret; | 568 | return ret; |
570 | 569 | ||
@@ -621,7 +620,6 @@ static struct scsi_host_template it821x_sht = { | |||
621 | 620 | ||
622 | static struct ata_port_operations it821x_smart_port_ops = { | 621 | static struct ata_port_operations it821x_smart_port_ops = { |
623 | .set_mode = it821x_smart_set_mode, | 622 | .set_mode = it821x_smart_set_mode, |
624 | .port_disable = ata_port_disable, | ||
625 | .tf_load = ata_tf_load, | 623 | .tf_load = ata_tf_load, |
626 | .tf_read = ata_tf_read, | 624 | .tf_read = ata_tf_read, |
627 | .mode_filter = ata_pci_default_filter, | 625 | .mode_filter = ata_pci_default_filter, |
@@ -651,13 +649,11 @@ static struct ata_port_operations it821x_smart_port_ops = { | |||
651 | .irq_handler = ata_interrupt, | 649 | .irq_handler = ata_interrupt, |
652 | .irq_clear = ata_bmdma_irq_clear, | 650 | .irq_clear = ata_bmdma_irq_clear, |
653 | .irq_on = ata_irq_on, | 651 | .irq_on = ata_irq_on, |
654 | .irq_ack = ata_irq_ack, | ||
655 | 652 | ||
656 | .port_start = it821x_port_start, | 653 | .port_start = it821x_port_start, |
657 | }; | 654 | }; |
658 | 655 | ||
659 | static struct ata_port_operations it821x_passthru_port_ops = { | 656 | static struct ata_port_operations it821x_passthru_port_ops = { |
660 | .port_disable = ata_port_disable, | ||
661 | .set_piomode = it821x_passthru_set_piomode, | 657 | .set_piomode = it821x_passthru_set_piomode, |
662 | .set_dmamode = it821x_passthru_set_dmamode, | 658 | .set_dmamode = it821x_passthru_set_dmamode, |
663 | .mode_filter = ata_pci_default_filter, | 659 | .mode_filter = ata_pci_default_filter, |
@@ -688,7 +684,6 @@ static struct ata_port_operations it821x_passthru_port_ops = { | |||
688 | .irq_clear = ata_bmdma_irq_clear, | 684 | .irq_clear = ata_bmdma_irq_clear, |
689 | .irq_handler = ata_interrupt, | 685 | .irq_handler = ata_interrupt, |
690 | .irq_on = ata_irq_on, | 686 | .irq_on = ata_irq_on, |
691 | .irq_ack = ata_irq_ack, | ||
692 | 687 | ||
693 | .port_start = it821x_port_start, | 688 | .port_start = it821x_port_start, |
694 | }; | 689 | }; |
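The it821x hunk above also shows the second recurring change: per-device loops stop indexing ap->device[ATA_MAX_DEVICES] and instead walk the devices attached to the link. A hedged sketch of a converted ->set_mode hook; only what the hunks themselves show is reproduced here, and real drivers typically go on to fill in xfer_mode, xfer_shift and the PIO flag as well:

static int demo_set_mode(struct ata_link *link, struct ata_device **unused)
{
	struct ata_device *dev;

	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;
		ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
		dev->pio_mode = XFER_PIO_0;	/* pin the device at PIO0 */
	}
	return 0;
}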
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 5dea3584c6c2..fcd532afbf2e 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c | |||
@@ -26,12 +26,11 @@ | |||
26 | #define DRV_NAME "pata_ixp4xx_cf" | 26 | #define DRV_NAME "pata_ixp4xx_cf" |
27 | #define DRV_VERSION "0.2" | 27 | #define DRV_VERSION "0.2" |
28 | 28 | ||
29 | static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error) | 29 | static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error) |
30 | { | 30 | { |
31 | int i; | 31 | struct ata_device *dev; |
32 | 32 | ||
33 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 33 | ata_link_for_each_dev(dev, link) { |
34 | struct ata_device *dev = &ap->device[i]; | ||
35 | if (ata_dev_enabled(dev)) { | 34 | if (ata_dev_enabled(dev)) { |
36 | ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n"); | 35 | ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n"); |
37 | dev->pio_mode = XFER_PIO_0; | 36 | dev->pio_mode = XFER_PIO_0; |
@@ -49,7 +48,7 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, | |||
49 | unsigned int i; | 48 | unsigned int i; |
50 | unsigned int words = buflen >> 1; | 49 | unsigned int words = buflen >> 1; |
51 | u16 *buf16 = (u16 *) buf; | 50 | u16 *buf16 = (u16 *) buf; |
52 | struct ata_port *ap = adev->ap; | 51 | struct ata_port *ap = adev->link->ap; |
53 | void __iomem *mmio = ap->ioaddr.data_addr; | 52 | void __iomem *mmio = ap->ioaddr.data_addr; |
54 | struct ixp4xx_pata_data *data = ap->host->dev->platform_data; | 53 | struct ixp4xx_pata_data *data = ap->host->dev->platform_data; |
55 | 54 | ||
@@ -108,7 +107,6 @@ static struct ata_port_operations ixp4xx_port_ops = { | |||
108 | .set_mode = ixp4xx_set_mode, | 107 | .set_mode = ixp4xx_set_mode, |
109 | .mode_filter = ata_pci_default_filter, | 108 | .mode_filter = ata_pci_default_filter, |
110 | 109 | ||
111 | .port_disable = ata_port_disable, | ||
112 | .tf_load = ata_tf_load, | 110 | .tf_load = ata_tf_load, |
113 | .tf_read = ata_tf_read, | 111 | .tf_read = ata_tf_read, |
114 | .exec_command = ata_exec_command, | 112 | .exec_command = ata_exec_command, |
@@ -128,14 +126,17 @@ static struct ata_port_operations ixp4xx_port_ops = { | |||
128 | .irq_handler = ata_interrupt, | 126 | .irq_handler = ata_interrupt, |
129 | .irq_clear = ata_bmdma_irq_clear, | 127 | .irq_clear = ata_bmdma_irq_clear, |
130 | .irq_on = ata_irq_on, | 128 | .irq_on = ata_irq_on, |
131 | .irq_ack = ata_dummy_irq_ack, | ||
132 | 129 | ||
133 | .port_start = ata_port_start, | 130 | .port_start = ata_port_start, |
134 | }; | 131 | }; |
135 | 132 | ||
136 | static void ixp4xx_setup_port(struct ata_ioports *ioaddr, | 133 | static void ixp4xx_setup_port(struct ata_ioports *ioaddr, |
137 | struct ixp4xx_pata_data *data) | 134 | struct ixp4xx_pata_data *data, |
135 | unsigned long raw_cs0, unsigned long raw_cs1) | ||
138 | { | 136 | { |
137 | unsigned long raw_cmd = raw_cs0; | ||
138 | unsigned long raw_ctl = raw_cs1 + 0x06; | ||
139 | |||
139 | ioaddr->cmd_addr = data->cs0; | 140 | ioaddr->cmd_addr = data->cs0; |
140 | ioaddr->altstatus_addr = data->cs1 + 0x06; | 141 | ioaddr->altstatus_addr = data->cs1 + 0x06; |
141 | ioaddr->ctl_addr = data->cs1 + 0x06; | 142 | ioaddr->ctl_addr = data->cs1 + 0x06; |
@@ -161,7 +162,12 @@ static void ixp4xx_setup_port(struct ata_ioports *ioaddr, | |||
161 | *(unsigned long *)&ioaddr->device_addr ^= 0x03; | 162 | *(unsigned long *)&ioaddr->device_addr ^= 0x03; |
162 | *(unsigned long *)&ioaddr->status_addr ^= 0x03; | 163 | *(unsigned long *)&ioaddr->status_addr ^= 0x03; |
163 | *(unsigned long *)&ioaddr->command_addr ^= 0x03; | 164 | *(unsigned long *)&ioaddr->command_addr ^= 0x03; |
165 | |||
166 | raw_cmd ^= 0x03; | ||
167 | raw_ctl ^= 0x03; | ||
164 | #endif | 168 | #endif |
169 | |||
170 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl); | ||
165 | } | 171 | } |
166 | 172 | ||
167 | static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) | 173 | static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) |
@@ -206,7 +212,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) | |||
206 | ap->pio_mask = 0x1f; /* PIO4 */ | 212 | ap->pio_mask = 0x1f; /* PIO4 */ |
207 | ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI; | 213 | ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI; |
208 | 214 | ||
209 | ixp4xx_setup_port(&ap->ioaddr, data); | 215 | ixp4xx_setup_port(ap, data, cs0->start, cs1->start); |
210 | 216 | ||
211 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | 217 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); |
212 | 218 | ||
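A related detail visible in the ixp4xx and pata_legacy data-transfer hunks: an ata_device no longer points straight at its port, so adev->ap becomes adev->link->ap. A one-line sketch of the new navigation:

	/* devices hang off a link, and the link hangs off the port */
	struct ata_port *ap = adev->link->ap;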
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 4d67f238eee2..225a7223a726 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c | |||
@@ -29,7 +29,7 @@ typedef enum { | |||
29 | 29 | ||
30 | /** | 30 | /** |
31 | * jmicron_pre_reset - check for 40/80 pin | 31 | * jmicron_pre_reset - check for 40/80 pin |
32 | * @ap: Port | 32 | * @link: ATA link |
33 | * @deadline: deadline jiffies for the operation | 33 | * @deadline: deadline jiffies for the operation |
34 | * | 34 | * |
35 | * Perform the PATA port setup we need. | 35 | * Perform the PATA port setup we need. |
@@ -39,9 +39,9 @@ typedef enum { | |||
39 | * and setup here. We assume that has been done by init_one and the | 39 | * and setup here. We assume that has been done by init_one and the |
40 | * BIOS. | 40 | * BIOS. |
41 | */ | 41 | */ |
42 | 42 | static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline) | |
43 | static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline) | ||
44 | { | 43 | { |
44 | struct ata_port *ap = link->ap; | ||
45 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 45 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
46 | u32 control; | 46 | u32 control; |
47 | u32 control5; | 47 | u32 control5; |
@@ -103,7 +103,7 @@ static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
103 | ap->cbl = ATA_CBL_SATA; | 103 | ap->cbl = ATA_CBL_SATA; |
104 | break; | 104 | break; |
105 | } | 105 | } |
106 | return ata_std_prereset(ap, deadline); | 106 | return ata_std_prereset(link, deadline); |
107 | } | 107 | } |
108 | 108 | ||
109 | /** | 109 | /** |
@@ -141,8 +141,6 @@ static struct scsi_host_template jmicron_sht = { | |||
141 | }; | 141 | }; |
142 | 142 | ||
143 | static const struct ata_port_operations jmicron_ops = { | 143 | static const struct ata_port_operations jmicron_ops = { |
144 | .port_disable = ata_port_disable, | ||
145 | |||
146 | /* Task file is PCI ATA format, use helpers */ | 144 | /* Task file is PCI ATA format, use helpers */ |
147 | .tf_load = ata_tf_load, | 145 | .tf_load = ata_tf_load, |
148 | .tf_read = ata_tf_read, | 146 | .tf_read = ata_tf_read, |
@@ -168,7 +166,6 @@ static const struct ata_port_operations jmicron_ops = { | |||
168 | .irq_handler = ata_interrupt, | 166 | .irq_handler = ata_interrupt, |
169 | .irq_clear = ata_bmdma_irq_clear, | 167 | .irq_clear = ata_bmdma_irq_clear, |
170 | .irq_on = ata_irq_on, | 168 | .irq_on = ata_irq_on, |
171 | .irq_ack = ata_irq_ack, | ||
172 | 169 | ||
173 | /* Generic PATA PCI ATA helpers */ | 170 | /* Generic PATA PCI ATA helpers */ |
174 | .port_start = ata_port_start, | 171 | .port_start = ata_port_start, |
@@ -207,17 +204,8 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
207 | } | 204 | } |
208 | 205 | ||
209 | static const struct pci_device_id jmicron_pci_tbl[] = { | 206 | static const struct pci_device_id jmicron_pci_tbl[] = { |
210 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, | 207 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
211 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 }, | 208 | PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 }, |
212 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, | ||
213 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 }, | ||
214 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, | ||
215 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 }, | ||
216 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, | ||
217 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 }, | ||
218 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, | ||
219 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 }, | ||
220 | |||
221 | { } /* terminate list */ | 209 | { } /* terminate list */ |
222 | }; | 210 | }; |
223 | 211 | ||
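The jmicron table above collapses the per-chip PCI IDs into one class-based match. In a pci_device_id entry the class field is the 24-bit class code (base class, subclass, programming interface), so PCI_CLASS_STORAGE_IDE << 8 with a mask of 0xffff00 matches any JMicron function whose base class and subclass say IDE, whatever its programming-interface byte. A sketch of such a table (the demo_ name is only illustrative):

static const struct pci_device_id demo_pci_tbl[] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, data */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);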
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index edffc25d2d3f..7bed8d806381 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c | |||
@@ -96,7 +96,7 @@ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ | |||
96 | 96 | ||
97 | /** | 97 | /** |
98 | * legacy_set_mode - mode setting | 98 | * legacy_set_mode - mode setting |
99 | * @ap: IDE interface | 99 | * @link: IDE link |
100 | * @unused: Device that failed when error is returned | 100 | * @unused: Device that failed when error is returned |
101 | * | 101 | * |
102 | * Use a non standard set_mode function. We don't want to be tuned. | 102 | * Use a non standard set_mode function. We don't want to be tuned. |
@@ -107,12 +107,11 @@ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ | |||
107 | * expand on this as per hdparm in the base kernel. | 107 | * expand on this as per hdparm in the base kernel. |
108 | */ | 108 | */ |
109 | 109 | ||
110 | static int legacy_set_mode(struct ata_port *ap, struct ata_device **unused) | 110 | static int legacy_set_mode(struct ata_link *link, struct ata_device **unused) |
111 | { | 111 | { |
112 | int i; | 112 | struct ata_device *dev; |
113 | 113 | ||
114 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 114 | ata_link_for_each_dev(dev, link) { |
115 | struct ata_device *dev = &ap->device[i]; | ||
116 | if (ata_dev_enabled(dev)) { | 115 | if (ata_dev_enabled(dev)) { |
117 | ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); | 116 | ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); |
118 | dev->pio_mode = XFER_PIO_0; | 117 | dev->pio_mode = XFER_PIO_0; |
@@ -151,7 +150,6 @@ static struct scsi_host_template legacy_sht = { | |||
151 | */ | 150 | */ |
152 | 151 | ||
153 | static struct ata_port_operations simple_port_ops = { | 152 | static struct ata_port_operations simple_port_ops = { |
154 | .port_disable = ata_port_disable, | ||
155 | .tf_load = ata_tf_load, | 153 | .tf_load = ata_tf_load, |
156 | .tf_read = ata_tf_read, | 154 | .tf_read = ata_tf_read, |
157 | .check_status = ata_check_status, | 155 | .check_status = ata_check_status, |
@@ -172,7 +170,6 @@ static struct ata_port_operations simple_port_ops = { | |||
172 | .irq_handler = ata_interrupt, | 170 | .irq_handler = ata_interrupt, |
173 | .irq_clear = ata_bmdma_irq_clear, | 171 | .irq_clear = ata_bmdma_irq_clear, |
174 | .irq_on = ata_irq_on, | 172 | .irq_on = ata_irq_on, |
175 | .irq_ack = ata_irq_ack, | ||
176 | 173 | ||
177 | .port_start = ata_port_start, | 174 | .port_start = ata_port_start, |
178 | }; | 175 | }; |
@@ -180,7 +177,6 @@ static struct ata_port_operations simple_port_ops = { | |||
180 | static struct ata_port_operations legacy_port_ops = { | 177 | static struct ata_port_operations legacy_port_ops = { |
181 | .set_mode = legacy_set_mode, | 178 | .set_mode = legacy_set_mode, |
182 | 179 | ||
183 | .port_disable = ata_port_disable, | ||
184 | .tf_load = ata_tf_load, | 180 | .tf_load = ata_tf_load, |
185 | .tf_read = ata_tf_read, | 181 | .tf_read = ata_tf_read, |
186 | .check_status = ata_check_status, | 182 | .check_status = ata_check_status, |
@@ -201,7 +197,6 @@ static struct ata_port_operations legacy_port_ops = { | |||
201 | .irq_handler = ata_interrupt, | 197 | .irq_handler = ata_interrupt, |
202 | .irq_clear = ata_bmdma_irq_clear, | 198 | .irq_clear = ata_bmdma_irq_clear, |
203 | .irq_on = ata_irq_on, | 199 | .irq_on = ata_irq_on, |
204 | .irq_ack = ata_irq_ack, | ||
205 | 200 | ||
206 | .port_start = ata_port_start, | 201 | .port_start = ata_port_start, |
207 | }; | 202 | }; |
@@ -256,7 +251,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
256 | 251 | ||
257 | static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) | 252 | static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) |
258 | { | 253 | { |
259 | struct ata_port *ap = adev->ap; | 254 | struct ata_port *ap = adev->link->ap; |
260 | int slop = buflen & 3; | 255 | int slop = buflen & 3; |
261 | unsigned long flags; | 256 | unsigned long flags; |
262 | 257 | ||
@@ -296,7 +291,6 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig | |||
296 | static struct ata_port_operations pdc20230_port_ops = { | 291 | static struct ata_port_operations pdc20230_port_ops = { |
297 | .set_piomode = pdc20230_set_piomode, | 292 | .set_piomode = pdc20230_set_piomode, |
298 | 293 | ||
299 | .port_disable = ata_port_disable, | ||
300 | .tf_load = ata_tf_load, | 294 | .tf_load = ata_tf_load, |
301 | .tf_read = ata_tf_read, | 295 | .tf_read = ata_tf_read, |
302 | .check_status = ata_check_status, | 296 | .check_status = ata_check_status, |
@@ -317,7 +311,6 @@ static struct ata_port_operations pdc20230_port_ops = { | |||
317 | .irq_handler = ata_interrupt, | 311 | .irq_handler = ata_interrupt, |
318 | .irq_clear = ata_bmdma_irq_clear, | 312 | .irq_clear = ata_bmdma_irq_clear, |
319 | .irq_on = ata_irq_on, | 313 | .irq_on = ata_irq_on, |
320 | .irq_ack = ata_irq_ack, | ||
321 | 314 | ||
322 | .port_start = ata_port_start, | 315 | .port_start = ata_port_start, |
323 | }; | 316 | }; |
@@ -352,7 +345,6 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
352 | static struct ata_port_operations ht6560a_port_ops = { | 345 | static struct ata_port_operations ht6560a_port_ops = { |
353 | .set_piomode = ht6560a_set_piomode, | 346 | .set_piomode = ht6560a_set_piomode, |
354 | 347 | ||
355 | .port_disable = ata_port_disable, | ||
356 | .tf_load = ata_tf_load, | 348 | .tf_load = ata_tf_load, |
357 | .tf_read = ata_tf_read, | 349 | .tf_read = ata_tf_read, |
358 | .check_status = ata_check_status, | 350 | .check_status = ata_check_status, |
@@ -373,7 +365,6 @@ static struct ata_port_operations ht6560a_port_ops = { | |||
373 | .irq_handler = ata_interrupt, | 365 | .irq_handler = ata_interrupt, |
374 | .irq_clear = ata_bmdma_irq_clear, | 366 | .irq_clear = ata_bmdma_irq_clear, |
375 | .irq_on = ata_irq_on, | 367 | .irq_on = ata_irq_on, |
376 | .irq_ack = ata_irq_ack, | ||
377 | 368 | ||
378 | .port_start = ata_port_start, | 369 | .port_start = ata_port_start, |
379 | }; | 370 | }; |
@@ -419,7 +410,6 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
419 | static struct ata_port_operations ht6560b_port_ops = { | 410 | static struct ata_port_operations ht6560b_port_ops = { |
420 | .set_piomode = ht6560b_set_piomode, | 411 | .set_piomode = ht6560b_set_piomode, |
421 | 412 | ||
422 | .port_disable = ata_port_disable, | ||
423 | .tf_load = ata_tf_load, | 413 | .tf_load = ata_tf_load, |
424 | .tf_read = ata_tf_read, | 414 | .tf_read = ata_tf_read, |
425 | .check_status = ata_check_status, | 415 | .check_status = ata_check_status, |
@@ -440,7 +430,6 @@ static struct ata_port_operations ht6560b_port_ops = { | |||
440 | .irq_handler = ata_interrupt, | 430 | .irq_handler = ata_interrupt, |
441 | .irq_clear = ata_bmdma_irq_clear, | 431 | .irq_clear = ata_bmdma_irq_clear, |
442 | .irq_on = ata_irq_on, | 432 | .irq_on = ata_irq_on, |
443 | .irq_ack = ata_irq_ack, | ||
444 | 433 | ||
445 | .port_start = ata_port_start, | 434 | .port_start = ata_port_start, |
446 | }; | 435 | }; |
@@ -541,7 +530,6 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev | |||
541 | static struct ata_port_operations opti82c611a_port_ops = { | 530 | static struct ata_port_operations opti82c611a_port_ops = { |
542 | .set_piomode = opti82c611a_set_piomode, | 531 | .set_piomode = opti82c611a_set_piomode, |
543 | 532 | ||
544 | .port_disable = ata_port_disable, | ||
545 | .tf_load = ata_tf_load, | 533 | .tf_load = ata_tf_load, |
546 | .tf_read = ata_tf_read, | 534 | .tf_read = ata_tf_read, |
547 | .check_status = ata_check_status, | 535 | .check_status = ata_check_status, |
@@ -562,7 +550,6 @@ static struct ata_port_operations opti82c611a_port_ops = { | |||
562 | .irq_handler = ata_interrupt, | 550 | .irq_handler = ata_interrupt, |
563 | .irq_clear = ata_bmdma_irq_clear, | 551 | .irq_clear = ata_bmdma_irq_clear, |
564 | .irq_on = ata_irq_on, | 552 | .irq_on = ata_irq_on, |
565 | .irq_ack = ata_irq_ack, | ||
566 | 553 | ||
567 | .port_start = ata_port_start, | 554 | .port_start = ata_port_start, |
568 | }; | 555 | }; |
@@ -675,7 +662,6 @@ static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc) | |||
675 | static struct ata_port_operations opti82c46x_port_ops = { | 662 | static struct ata_port_operations opti82c46x_port_ops = { |
676 | .set_piomode = opti82c46x_set_piomode, | 663 | .set_piomode = opti82c46x_set_piomode, |
677 | 664 | ||
678 | .port_disable = ata_port_disable, | ||
679 | .tf_load = ata_tf_load, | 665 | .tf_load = ata_tf_load, |
680 | .tf_read = ata_tf_read, | 666 | .tf_read = ata_tf_read, |
681 | .check_status = ata_check_status, | 667 | .check_status = ata_check_status, |
@@ -696,7 +682,6 @@ static struct ata_port_operations opti82c46x_port_ops = { | |||
696 | .irq_handler = ata_interrupt, | 682 | .irq_handler = ata_interrupt, |
697 | .irq_clear = ata_bmdma_irq_clear, | 683 | .irq_clear = ata_bmdma_irq_clear, |
698 | .irq_on = ata_irq_on, | 684 | .irq_on = ata_irq_on, |
699 | .irq_ack = ata_irq_ack, | ||
700 | 685 | ||
701 | .port_start = ata_port_start, | 686 | .port_start = ata_port_start, |
702 | }; | 687 | }; |
@@ -814,6 +799,8 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl | |||
814 | ata_std_ports(&ap->ioaddr); | 799 | ata_std_ports(&ap->ioaddr); |
815 | ap->private_data = ld; | 800 | ap->private_data = ld; |
816 | 801 | ||
802 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl); | ||
803 | |||
817 | ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht); | 804 | ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht); |
818 | if (ret) | 805 | if (ret) |
819 | goto fail; | 806 | goto fail; |
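The pata_legacy hunk above, like the mpc52xx and mpiix hunks below, adds an ata_port_desc() call once the taskfile addresses are known, so the raw resources show up in the probe log rather than being printed ad hoc by each driver. Both usage forms here are lifted from this diff; io, ctrl and raw_ata_regs are the raw resource values the respective probe functions already hold:

	/* PCI/ISA style: raw command and control port numbers */
	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl);

	/* platform/MMIO style: whatever physical address is meaningful */
	ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);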
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index b45506f1ef73..9afc8a32b226 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c | |||
@@ -24,14 +24,15 @@ | |||
24 | 24 | ||
25 | /** | 25 | /** |
26 | * marvell_pre_reset - check for 40/80 pin | 26 | * marvell_pre_reset - check for 40/80 pin |
27 | * @ap: Port | 27 | * @link: link |
28 | * @deadline: deadline jiffies for the operation | 28 | * @deadline: deadline jiffies for the operation |
29 | * | 29 | * |
30 | * Perform the PATA port setup we need. | 30 | * Perform the PATA port setup we need. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline) | 33 | static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) |
34 | { | 34 | { |
35 | struct ata_port *ap = link->ap; | ||
35 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 36 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
36 | u32 devices; | 37 | u32 devices; |
37 | void __iomem *barp; | 38 | void __iomem *barp; |
@@ -54,7 +55,7 @@ static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
54 | (!(devices & 0x10))) /* PATA enable ? */ | 55 | (!(devices & 0x10))) /* PATA enable ? */ |
55 | return -ENOENT; | 56 | return -ENOENT; |
56 | 57 | ||
57 | return ata_std_prereset(ap, deadline); | 58 | return ata_std_prereset(link, deadline); |
58 | } | 59 | } |
59 | 60 | ||
60 | static int marvell_cable_detect(struct ata_port *ap) | 61 | static int marvell_cable_detect(struct ata_port *ap) |
@@ -110,8 +111,6 @@ static struct scsi_host_template marvell_sht = { | |||
110 | }; | 111 | }; |
111 | 112 | ||
112 | static const struct ata_port_operations marvell_ops = { | 113 | static const struct ata_port_operations marvell_ops = { |
113 | .port_disable = ata_port_disable, | ||
114 | |||
115 | /* Task file is PCI ATA format, use helpers */ | 114 | /* Task file is PCI ATA format, use helpers */ |
116 | .tf_load = ata_tf_load, | 115 | .tf_load = ata_tf_load, |
117 | .tf_read = ata_tf_read, | 116 | .tf_read = ata_tf_read, |
@@ -138,10 +137,9 @@ static const struct ata_port_operations marvell_ops = { | |||
138 | .irq_handler = ata_interrupt, | 137 | .irq_handler = ata_interrupt, |
139 | .irq_clear = ata_bmdma_irq_clear, | 138 | .irq_clear = ata_bmdma_irq_clear, |
140 | .irq_on = ata_irq_on, | 139 | .irq_on = ata_irq_on, |
141 | .irq_ack = ata_irq_ack, | ||
142 | 140 | ||
143 | /* Generic PATA PCI ATA helpers */ | 141 | /* Generic PATA PCI ATA helpers */ |
144 | .port_start = ata_port_start, | 142 | .port_start = ata_sff_port_start, |
145 | }; | 143 | }; |
146 | 144 | ||
147 | 145 | ||
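Several drivers above and below (it8213, marvell, mpiix, netcell, ns87410, oldpiix, opti) also move .port_start from ata_port_start to ata_sff_port_start. The helper itself is not part of this excerpt; the apparent intent is that the DMA PRD table only needs allocating when the port really has a bus-master engine. A sketch of that assumed behaviour, not the real libata source:

static int sketch_sff_port_start(struct ata_port *ap)
{
	/* assumption: allocate DMA bookkeeping only when a BMDMA
	   register block was mapped for this port */
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}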
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 099f4cdc4cd9..412140f02853 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c | |||
@@ -283,7 +283,6 @@ static struct scsi_host_template mpc52xx_ata_sht = { | |||
283 | }; | 283 | }; |
284 | 284 | ||
285 | static struct ata_port_operations mpc52xx_ata_port_ops = { | 285 | static struct ata_port_operations mpc52xx_ata_port_ops = { |
286 | .port_disable = ata_port_disable, | ||
287 | .set_piomode = mpc52xx_ata_set_piomode, | 286 | .set_piomode = mpc52xx_ata_set_piomode, |
288 | .dev_select = mpc52xx_ata_dev_select, | 287 | .dev_select = mpc52xx_ata_dev_select, |
289 | .tf_load = ata_tf_load, | 288 | .tf_load = ata_tf_load, |
@@ -299,12 +298,12 @@ static struct ata_port_operations mpc52xx_ata_port_ops = { | |||
299 | .data_xfer = ata_data_xfer, | 298 | .data_xfer = ata_data_xfer, |
300 | .irq_clear = ata_bmdma_irq_clear, | 299 | .irq_clear = ata_bmdma_irq_clear, |
301 | .irq_on = ata_irq_on, | 300 | .irq_on = ata_irq_on, |
302 | .irq_ack = ata_irq_ack, | ||
303 | .port_start = ata_port_start, | 301 | .port_start = ata_port_start, |
304 | }; | 302 | }; |
305 | 303 | ||
306 | static int __devinit | 304 | static int __devinit |
307 | mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv) | 305 | mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, |
306 | unsigned long raw_ata_regs) | ||
308 | { | 307 | { |
309 | struct ata_host *host; | 308 | struct ata_host *host; |
310 | struct ata_port *ap; | 309 | struct ata_port *ap; |
@@ -338,6 +337,8 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv) | |||
338 | aio->status_addr = &priv->ata_regs->tf_command; | 337 | aio->status_addr = &priv->ata_regs->tf_command; |
339 | aio->command_addr = &priv->ata_regs->tf_command; | 338 | aio->command_addr = &priv->ata_regs->tf_command; |
340 | 339 | ||
340 | ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); | ||
341 | |||
341 | /* activate host */ | 342 | /* activate host */ |
342 | return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0, | 343 | return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0, |
343 | &mpc52xx_ata_sht); | 344 | &mpc52xx_ata_sht); |
@@ -434,7 +435,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) | |||
434 | } | 435 | } |
435 | 436 | ||
436 | /* Register ourselves to libata */ | 437 | /* Register ourselves to libata */ |
437 | rv = mpc52xx_ata_init_one(&op->dev, priv); | 438 | rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start); |
438 | if (rv) { | 439 | if (rv) { |
439 | printk(KERN_ERR DRV_NAME ": " | 440 | printk(KERN_ERR DRV_NAME ": " |
440 | "Error while registering to ATA layer\n"); | 441 | "Error while registering to ATA layer\n"); |
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 4ea42838297e..d5483087a3fa 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c | |||
@@ -46,15 +46,16 @@ enum { | |||
46 | SECONDARY = (1 << 14) | 46 | SECONDARY = (1 << 14) |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static int mpiix_pre_reset(struct ata_port *ap, unsigned long deadline) | 49 | static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline) |
50 | { | 50 | { |
51 | struct ata_port *ap = link->ap; | ||
51 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 52 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
52 | static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; | 53 | static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; |
53 | 54 | ||
54 | if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) | 55 | if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) |
55 | return -ENOENT; | 56 | return -ENOENT; |
56 | 57 | ||
57 | return ata_std_prereset(ap, deadline); | 58 | return ata_std_prereset(link, deadline); |
58 | } | 59 | } |
59 | 60 | ||
60 | /** | 61 | /** |
@@ -168,7 +169,6 @@ static struct scsi_host_template mpiix_sht = { | |||
168 | }; | 169 | }; |
169 | 170 | ||
170 | static struct ata_port_operations mpiix_port_ops = { | 171 | static struct ata_port_operations mpiix_port_ops = { |
171 | .port_disable = ata_port_disable, | ||
172 | .set_piomode = mpiix_set_piomode, | 172 | .set_piomode = mpiix_set_piomode, |
173 | 173 | ||
174 | .tf_load = ata_tf_load, | 174 | .tf_load = ata_tf_load, |
@@ -189,9 +189,8 @@ static struct ata_port_operations mpiix_port_ops = { | |||
189 | 189 | ||
190 | .irq_clear = ata_bmdma_irq_clear, | 190 | .irq_clear = ata_bmdma_irq_clear, |
191 | .irq_on = ata_irq_on, | 191 | .irq_on = ata_irq_on, |
192 | .irq_ack = ata_irq_ack, | ||
193 | 192 | ||
194 | .port_start = ata_port_start, | 193 | .port_start = ata_sff_port_start, |
195 | }; | 194 | }; |
196 | 195 | ||
197 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 196 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
@@ -202,7 +201,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
202 | struct ata_port *ap; | 201 | struct ata_port *ap; |
203 | void __iomem *cmd_addr, *ctl_addr; | 202 | void __iomem *cmd_addr, *ctl_addr; |
204 | u16 idetim; | 203 | u16 idetim; |
205 | int irq; | 204 | int cmd, ctl, irq; |
206 | 205 | ||
207 | if (!printed_version++) | 206 | if (!printed_version++) |
208 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); | 207 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); |
@@ -210,6 +209,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
210 | host = ata_host_alloc(&dev->dev, 1); | 209 | host = ata_host_alloc(&dev->dev, 1); |
211 | if (!host) | 210 | if (!host) |
212 | return -ENOMEM; | 211 | return -ENOMEM; |
212 | ap = host->ports[0]; | ||
213 | 213 | ||
214 | /* MPIIX has many functions which can be turned on or off according | 214 | /* MPIIX has many functions which can be turned on or off according |
215 | to other devices present. Make sure IDE is enabled before we try | 215 | to other devices present. Make sure IDE is enabled before we try |
@@ -221,25 +221,28 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
221 | 221 | ||
222 | /* See if it's primary or secondary channel... */ | 222 | /* See if it's primary or secondary channel... */ |
223 | if (!(idetim & SECONDARY)) { | 223 | if (!(idetim & SECONDARY)) { |
224 | cmd = 0x1F0; | ||
225 | ctl = 0x3F6; | ||
224 | irq = 14; | 226 | irq = 14; |
225 | cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8); | ||
226 | ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1); | ||
227 | } else { | 227 | } else { |
228 | cmd = 0x170; | ||
229 | ctl = 0x376; | ||
228 | irq = 15; | 230 | irq = 15; |
229 | cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8); | ||
230 | ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1); | ||
231 | } | 231 | } |
232 | 232 | ||
233 | cmd_addr = devm_ioport_map(&dev->dev, cmd, 8); | ||
234 | ctl_addr = devm_ioport_map(&dev->dev, ctl, 1); | ||
233 | if (!cmd_addr || !ctl_addr) | 235 | if (!cmd_addr || !ctl_addr) |
234 | return -ENOMEM; | 236 | return -ENOMEM; |
235 | 237 | ||
238 | ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl); | ||
239 | |||
236 | /* We do our own plumbing to avoid leaking special cases for whacko | 240 | /* We do our own plumbing to avoid leaking special cases for whacko |
237 | ancient hardware into the core code. There are two issues to | 241 | ancient hardware into the core code. There are two issues to |
238 | worry about. #1 The chip is a bridge so if in legacy mode and | 242 | worry about. #1 The chip is a bridge so if in legacy mode and |
239 | without BARs set fools the setup. #2 If you pci_disable_device | 243 | without BARs set fools the setup. #2 If you pci_disable_device |
240 | the MPIIX your box goes castors up */ | 244 | the MPIIX your box goes castors up */ |
241 | 245 | ||
242 | ap = host->ports[0]; | ||
243 | ap->ops = &mpiix_port_ops; | 246 | ap->ops = &mpiix_port_ops; |
244 | ap->pio_mask = 0x1F; | 247 | ap->pio_mask = 0x1F; |
245 | ap->flags |= ATA_FLAG_SLAVE_POSS; | 248 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 40eb574828bf..25c922abd554 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c | |||
@@ -40,8 +40,6 @@ static struct scsi_host_template netcell_sht = { | |||
40 | }; | 40 | }; |
41 | 41 | ||
42 | static const struct ata_port_operations netcell_ops = { | 42 | static const struct ata_port_operations netcell_ops = { |
43 | .port_disable = ata_port_disable, | ||
44 | |||
45 | /* Task file is PCI ATA format, use helpers */ | 43 | /* Task file is PCI ATA format, use helpers */ |
46 | .tf_load = ata_tf_load, | 44 | .tf_load = ata_tf_load, |
47 | .tf_read = ata_tf_read, | 45 | .tf_read = ata_tf_read, |
@@ -68,10 +66,9 @@ static const struct ata_port_operations netcell_ops = { | |||
68 | .irq_handler = ata_interrupt, | 66 | .irq_handler = ata_interrupt, |
69 | .irq_clear = ata_bmdma_irq_clear, | 67 | .irq_clear = ata_bmdma_irq_clear, |
70 | .irq_on = ata_irq_on, | 68 | .irq_on = ata_irq_on, |
71 | .irq_ack = ata_irq_ack, | ||
72 | 69 | ||
73 | /* Generic PATA PCI ATA helpers */ | 70 | /* Generic PATA PCI ATA helpers */ |
74 | .port_start = ata_port_start, | 71 | .port_start = ata_sff_port_start, |
75 | }; | 72 | }; |
76 | 73 | ||
77 | 74 | ||
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 2f5d714ebfc4..6e8e55745b7b 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c | |||
@@ -32,14 +32,15 @@ | |||
32 | 32 | ||
33 | /** | 33 | /** |
34 | * ns87410_pre_reset - probe begin | 34 | * ns87410_pre_reset - probe begin |
35 | * @ap: ATA port | 35 | * @link: ATA link |
36 | * @deadline: deadline jiffies for the operation | 36 | * @deadline: deadline jiffies for the operation |
37 | * | 37 | * |
38 | * Check enabled ports | 38 | * Check enabled ports |
39 | */ | 39 | */ |
40 | 40 | ||
41 | static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline) | 41 | static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline) |
42 | { | 42 | { |
43 | struct ata_port *ap = link->ap; | ||
43 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 44 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
44 | static const struct pci_bits ns87410_enable_bits[] = { | 45 | static const struct pci_bits ns87410_enable_bits[] = { |
45 | { 0x43, 1, 0x08, 0x08 }, | 46 | { 0x43, 1, 0x08, 0x08 }, |
@@ -49,7 +50,7 @@ static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
49 | if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) | 50 | if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) |
50 | return -ENOENT; | 51 | return -ENOENT; |
51 | 52 | ||
52 | return ata_std_prereset(ap, deadline); | 53 | return ata_std_prereset(link, deadline); |
53 | } | 54 | } |
54 | 55 | ||
55 | /** | 56 | /** |
@@ -161,7 +162,6 @@ static struct scsi_host_template ns87410_sht = { | |||
161 | }; | 162 | }; |
162 | 163 | ||
163 | static struct ata_port_operations ns87410_port_ops = { | 164 | static struct ata_port_operations ns87410_port_ops = { |
164 | .port_disable = ata_port_disable, | ||
165 | .set_piomode = ns87410_set_piomode, | 165 | .set_piomode = ns87410_set_piomode, |
166 | 166 | ||
167 | .tf_load = ata_tf_load, | 167 | .tf_load = ata_tf_load, |
@@ -184,9 +184,8 @@ static struct ata_port_operations ns87410_port_ops = { | |||
184 | .irq_handler = ata_interrupt, | 184 | .irq_handler = ata_interrupt, |
185 | .irq_clear = ata_bmdma_irq_clear, | 185 | .irq_clear = ata_bmdma_irq_clear, |
186 | .irq_on = ata_irq_on, | 186 | .irq_on = ata_irq_on, |
187 | .irq_ack = ata_irq_ack, | ||
188 | 187 | ||
189 | .port_start = ata_port_start, | 188 | .port_start = ata_sff_port_start, |
190 | }; | 189 | }; |
191 | 190 | ||
192 | static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 191 | static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c new file mode 100644 index 000000000000..bb97ef583f9b --- /dev/null +++ b/drivers/ata/pata_ns87415.c | |||
@@ -0,0 +1,467 @@ | |||
1 | /* | ||
2 | * pata_ns87415.c - NS87415 (non PARISC) PATA | ||
3 | * | ||
4 | * (C) 2005 Red Hat <alan@redhat.com> | ||
5 | * | ||
6 | * This is a fairly generic MWDMA controller. It has some limitations | ||
7 | * as it requires timing reloads on PIO/DMA transitions but it is otherwise | ||
8 | * fairly well designed. | ||
9 | * | ||
10 | * This driver assumes the firmware has left the chip in a valid ST506 | ||
11 | * compliant state, either legacy IRQ 14/15 or native INTA shared. You | ||
12 | * may need to add platform code if your system fails to do this. | ||
13 | * | ||
14 | * The same cell appears in the 87560 controller used by some PARISC | ||
15 | * systems. This has its own special mountain of errata. | ||
16 | * | ||
17 | * TODO: | ||
18 | * Test PARISC SuperIO | ||
19 | * Get someone to test on SPARC | ||
20 | * Implement lazy pio/dma switching for better performance | ||
21 | * 8bit shared timing. | ||
22 | * See if we need to kill the FIFO for ATAPI | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/blkdev.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <scsi/scsi_host.h> | ||
33 | #include <linux/libata.h> | ||
34 | #include <linux/ata.h> | ||
35 | |||
36 | #define DRV_NAME "pata_ns87415" | ||
37 | #define DRV_VERSION "0.0.1" | ||
38 | |||
39 | /** | ||
40 | * ns87415_set_mode - Initialize host controller mode timings | ||
41 | * @ap: Port whose timings we are configuring | ||
42 | * @adev: Device whose timings we are configuring | ||
43 | * @mode: Mode to set | ||
44 | * | ||
45 | * Program the mode registers for this controller, channel and | ||
46 | * device. Because the chip is quite an old design we have to do this | ||
47 | * for PIO/DMA switches. | ||
48 | * | ||
49 | * LOCKING: | ||
50 | * None (inherited from caller). | ||
51 | */ | ||
52 | |||
53 | static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) | ||
54 | { | ||
55 | struct pci_dev *dev = to_pci_dev(ap->host->dev); | ||
56 | int unit = 2 * ap->port_no + adev->devno; | ||
57 | int timing = 0x44 + 2 * unit; | ||
58 | unsigned long T = 1000000000 / 33333; /* PCI clocks */ | ||
59 | struct ata_timing t; | ||
60 | u16 clocking; | ||
61 | u8 iordy; | ||
62 | u8 status; | ||
63 | |||
64 | /* Timing register format: the low nybble holds 17 - active (read) | ||
65 | time and the high nybble holds 16 - recovery time, in PCI clocks */ | ||
66 | |||
67 | ata_timing_compute(adev, adev->pio_mode, &t, T, 0); | ||
68 | |||
69 | clocking = 17 - FIT(t.active, 2, 17); | ||
70 | clocking |= (16 - FIT(t.recover, 1, 16)) << 4; | ||
71 | /* Use the same timing for read and write bytes */ | ||
72 | clocking |= (clocking << 8); | ||
73 | pci_write_config_word(dev, timing, clocking); | ||
74 | |||
75 | /* Set the IORDY enable versus DMA enable on or off properly */ | ||
76 | pci_read_config_byte(dev, 0x42, &iordy); | ||
77 | iordy &= ~(1 << (4 + unit)); | ||
78 | if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev)) | ||
79 | iordy |= (1 << (4 + unit)); | ||
80 | |||
81 | /* Paranoia: We shouldn't ever get here with busy write buffers | ||
82 | but if so wait */ | ||
83 | |||
84 | pci_read_config_byte(dev, 0x43, &status); | ||
85 | while (status & 0x03) { | ||
86 | udelay(1); | ||
87 | pci_read_config_byte(dev, 0x43, &status); | ||
88 | } | ||
89 | /* Flip the IORDY/DMA bits now we are sure the write buffers are | ||
90 | clear */ | ||
91 | pci_write_config_byte(dev, 0x42, iordy); | ||
92 | |||
93 | /* TODO: Set byte 54 command timing to the best 8bit | ||
94 | mode shared by all four devices */ | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * ns87415_set_piomode - Initialize host controller PATA PIO timings | ||
99 | * @ap: Port whose timings we are configuring | ||
100 | * @adev: Device to program | ||
101 | * | ||
102 | * Set PIO mode for device, in host controller PCI config space. | ||
103 | * | ||
104 | * LOCKING: | ||
105 | * None (inherited from caller). | ||
106 | */ | ||
107 | |||
108 | static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
109 | { | ||
110 | ns87415_set_mode(ap, adev, adev->pio_mode); | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * ns87415_bmdma_setup - Set up DMA | ||
115 | * @qc: Command block | ||
116 | * | ||
117 | * Set up for bus mastering DMA. We have to do this ourselves | ||
118 | * rather than use the helper due to a chip erratum | ||
119 | */ | ||
120 | |||
121 | static void ns87415_bmdma_setup(struct ata_queued_cmd *qc) | ||
122 | { | ||
123 | struct ata_port *ap = qc->ap; | ||
124 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
125 | u8 dmactl; | ||
126 | |||
127 | /* load PRD table addr. */ | ||
128 | mb(); /* make sure PRD table writes are visible to controller */ | ||
129 | iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
130 | |||
131 | /* specify data direction, triple-check start bit is clear */ | ||
132 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
133 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
134 | /* Due to an erratum we need to write these bits to the wrong | ||
135 | place - which does save us an I/O bizarrely */ | ||
136 | dmactl |= ATA_DMA_INTR | ATA_DMA_ERR; | ||
137 | if (!rw) | ||
138 | dmactl |= ATA_DMA_WR; | ||
139 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
140 | /* issue r/w command */ | ||
141 | ap->ops->exec_command(ap, &qc->tf); | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * ns87415_bmdma_start - Begin DMA transfer | ||
146 | * @qc: Command block | ||
147 | * | ||
148 | * Switch the timings for the chip and set up for a DMA transfer | ||
149 | * before the DMA burst begins. | ||
150 | * | ||
151 | * FIXME: We should do lazy switching on bmdma_start versus | ||
152 | * ata_pio_data_xfer for better performance. | ||
153 | */ | ||
154 | |||
155 | static void ns87415_bmdma_start(struct ata_queued_cmd *qc) | ||
156 | { | ||
157 | ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode); | ||
158 | ata_bmdma_start(qc); | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * ns87415_bmdma_stop - End DMA transfer | ||
163 | * @qc: Command block | ||
164 | * | ||
165 | * End DMA mode and switch the controller back into PIO mode | ||
166 | */ | ||
167 | |||
168 | static void ns87415_bmdma_stop(struct ata_queued_cmd *qc) | ||
169 | { | ||
170 | ata_bmdma_stop(qc); | ||
171 | ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode); | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * ns87415_bmdma_irq_clear - Clear interrupt | ||
176 | * @ap: Channel to clear | ||
177 | * | ||
178 | * Erratum: Due to a chip bug, bits 1 and 2 of registers 02 and 0A | ||
179 | * (the error bits) are reset by writing to register 00 or 08. | ||
180 | */ | ||
181 | |||
182 | static void ns87415_bmdma_irq_clear(struct ata_port *ap) | ||
183 | { | ||
184 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
185 | |||
186 | if (!mmio) | ||
187 | return; | ||
188 | iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), | ||
189 | mmio + ATA_DMA_CMD); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * ns87415_check_atapi_dma - ATAPI DMA filter | ||
194 | * @qc: Command block | ||
195 | * | ||
196 | * Disable ATAPI DMA (for now). We may be able to do DMA if we | ||
197 | * kill the prefetching. This isn't clear. | ||
198 | */ | ||
199 | |||
200 | static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc) | ||
201 | { | ||
202 | return -EOPNOTSUPP; | ||
203 | } | ||
204 | |||
205 | #if defined(CONFIG_SUPERIO) | ||
206 | |||
207 | /* SUPERIO 87560 is a PoS chip that NatSem denies exists. | ||
208 | * Unfortunately, it's built-in on all Astro-based PA-RISC workstations | ||
209 | * which use the integrated NS87514 cell for CD-ROM support. | ||
210 | * i.e. we have to support it for CD-ROM installs. | ||
211 | * See drivers/parisc/superio.c for more gory details. | ||
212 | * | ||
213 | * Workarounds taken from drivers/ide/pci/ns87415.c | ||
214 | */ | ||
215 | |||
216 | #include <asm/superio.h> | ||
217 | |||
218 | /** | ||
219 | * ns87560_read_buggy - workaround buggy Super I/O chip | ||
220 | * @port: Port to read | ||
221 | * | ||
222 | * Work around chipset problems in the 87560 SuperIO chip | ||
223 | */ | ||
224 | |||
225 | static u8 ns87560_read_buggy(void __iomem *port) | ||
226 | { | ||
227 | u8 tmp; | ||
228 | int retries = SUPERIO_IDE_MAX_RETRIES; | ||
229 | do { | ||
230 | tmp = ioread8(port); | ||
231 | if (tmp != 0) | ||
232 | return tmp; | ||
233 | udelay(50); | ||
234 | } while(retries-- > 0); | ||
235 | return tmp; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * ns87560_check_status | ||
240 | * @ap: channel to check | ||
241 | * | ||
242 | * Return the status of the channel working around the | ||
243 | * 87560 flaws. | ||
244 | */ | ||
245 | |||
246 | static u8 ns87560_check_status(struct ata_port *ap) | ||
247 | { | ||
248 | return ns87560_read_buggy(ap->ioaddr.status_addr); | ||
249 | } | ||
250 | |||
251 | /** | ||
252 | * ns87560_tf_read - input device's ATA taskfile shadow registers | ||
253 | * @ap: Port from which input is read | ||
254 | * @tf: ATA taskfile register set for storing input | ||
255 | * | ||
256 | * Reads ATA taskfile registers for currently-selected device | ||
257 | * into @tf. Work around the 87560 bugs. | ||
258 | * | ||
259 | * LOCKING: | ||
260 | * Inherited from caller. | ||
261 | */ | ||
262 | void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
263 | { | ||
264 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
265 | |||
266 | tf->command = ns87560_check_status(ap); | ||
267 | tf->feature = ioread8(ioaddr->error_addr); | ||
268 | tf->nsect = ioread8(ioaddr->nsect_addr); | ||
269 | tf->lbal = ioread8(ioaddr->lbal_addr); | ||
270 | tf->lbam = ioread8(ioaddr->lbam_addr); | ||
271 | tf->lbah = ioread8(ioaddr->lbah_addr); | ||
272 | tf->device = ns87560_read_buggy(ioaddr->device_addr); | ||
273 | |||
274 | if (tf->flags & ATA_TFLAG_LBA48) { | ||
275 | iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); | ||
276 | tf->hob_feature = ioread8(ioaddr->error_addr); | ||
277 | tf->hob_nsect = ioread8(ioaddr->nsect_addr); | ||
278 | tf->hob_lbal = ioread8(ioaddr->lbal_addr); | ||
279 | tf->hob_lbam = ioread8(ioaddr->lbam_addr); | ||
280 | tf->hob_lbah = ioread8(ioaddr->lbah_addr); | ||
281 | iowrite8(tf->ctl, ioaddr->ctl_addr); | ||
282 | ap->last_ctl = tf->ctl; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * ns87560_bmdma_status | ||
288 | * @ap: channel to check | ||
289 | * | ||
290 | * Return the DMA status of the channel working around the | ||
291 | * 87560 flaws. | ||
292 | */ | ||
293 | |||
294 | static u8 ns87560_bmdma_status(struct ata_port *ap) | ||
295 | { | ||
296 | return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
297 | } | ||
298 | |||
299 | static const struct ata_port_operations ns87560_pata_ops = { | ||
300 | .set_piomode = ns87415_set_piomode, | ||
301 | .mode_filter = ata_pci_default_filter, | ||
302 | |||
303 | .tf_load = ata_tf_load, | ||
304 | .tf_read = ns87560_tf_read, | ||
305 | .check_status = ns87560_check_status, | ||
306 | .check_atapi_dma = ns87415_check_atapi_dma, | ||
307 | .exec_command = ata_exec_command, | ||
308 | .dev_select = ata_std_dev_select, | ||
309 | |||
310 | .freeze = ata_bmdma_freeze, | ||
311 | .thaw = ata_bmdma_thaw, | ||
312 | .error_handler = ata_bmdma_error_handler, | ||
313 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
314 | .cable_detect = ata_cable_40wire, | ||
315 | |||
316 | .bmdma_setup = ns87415_bmdma_setup, | ||
317 | .bmdma_start = ns87415_bmdma_start, | ||
318 | .bmdma_stop = ns87415_bmdma_stop, | ||
319 | .bmdma_status = ns87560_bmdma_status, | ||
320 | .qc_prep = ata_qc_prep, | ||
321 | .qc_issue = ata_qc_issue_prot, | ||
322 | .data_xfer = ata_data_xfer, | ||
323 | |||
324 | .irq_handler = ata_interrupt, | ||
325 | .irq_clear = ns87415_bmdma_irq_clear, | ||
326 | .irq_on = ata_irq_on, | ||
327 | |||
328 | .port_start = ata_sff_port_start, | ||
329 | }; | ||
330 | |||
331 | #endif /* 87560 SuperIO Support */ | ||
332 | |||
333 | |||
334 | static const struct ata_port_operations ns87415_pata_ops = { | ||
335 | .set_piomode = ns87415_set_piomode, | ||
336 | .mode_filter = ata_pci_default_filter, | ||
337 | |||
338 | .tf_load = ata_tf_load, | ||
339 | .tf_read = ata_tf_read, | ||
340 | .check_status = ata_check_status, | ||
341 | .check_atapi_dma = ns87415_check_atapi_dma, | ||
342 | .exec_command = ata_exec_command, | ||
343 | .dev_select = ata_std_dev_select, | ||
344 | |||
345 | .freeze = ata_bmdma_freeze, | ||
346 | .thaw = ata_bmdma_thaw, | ||
347 | .error_handler = ata_bmdma_error_handler, | ||
348 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
349 | .cable_detect = ata_cable_40wire, | ||
350 | |||
351 | .bmdma_setup = ns87415_bmdma_setup, | ||
352 | .bmdma_start = ns87415_bmdma_start, | ||
353 | .bmdma_stop = ns87415_bmdma_stop, | ||
354 | .bmdma_status = ata_bmdma_status, | ||
355 | .qc_prep = ata_qc_prep, | ||
356 | .qc_issue = ata_qc_issue_prot, | ||
357 | .data_xfer = ata_data_xfer, | ||
358 | |||
359 | .irq_handler = ata_interrupt, | ||
360 | .irq_clear = ns87415_bmdma_irq_clear, | ||
361 | .irq_on = ata_irq_on, | ||
362 | |||
363 | .port_start = ata_sff_port_start, | ||
364 | }; | ||
365 | |||
366 | static struct scsi_host_template ns87415_sht = { | ||
367 | .module = THIS_MODULE, | ||
368 | .name = DRV_NAME, | ||
369 | .ioctl = ata_scsi_ioctl, | ||
370 | .queuecommand = ata_scsi_queuecmd, | ||
371 | .can_queue = ATA_DEF_QUEUE, | ||
372 | .this_id = ATA_SHT_THIS_ID, | ||
373 | .sg_tablesize = LIBATA_MAX_PRD, | ||
374 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
375 | .emulated = ATA_SHT_EMULATED, | ||
376 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
377 | .proc_name = DRV_NAME, | ||
378 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
379 | .slave_configure = ata_scsi_slave_config, | ||
380 | .slave_destroy = ata_scsi_slave_destroy, | ||
381 | .bios_param = ata_std_bios_param, | ||
382 | }; | ||
383 | |||
384 | |||
385 | /** | ||
386 | * ns87415_init_one - Register 87415 ATA PCI device with kernel services | ||
387 | * @pdev: PCI device to register | ||
388 | * @ent: Entry in ns87415_pci_tbl matching with @pdev | ||
389 | * | ||
390 | * Called from kernel PCI layer. We probe for combined mode (sigh), | ||
391 | * and then hand over control to libata, for it to do the rest. | ||
392 | * | ||
393 | * LOCKING: | ||
394 | * Inherited from PCI layer (may sleep). | ||
395 | * | ||
396 | * RETURNS: | ||
397 | * Zero on success, or -ERRNO value. | ||
398 | */ | ||
399 | |||
400 | static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | ||
401 | { | ||
402 | static int printed_version; | ||
403 | static const struct ata_port_info info = { | ||
404 | .sht = &ns87415_sht, | ||
405 | .flags = ATA_FLAG_SLAVE_POSS, | ||
406 | .pio_mask = 0x1f, /* pio0-4 */ | ||
407 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
408 | .port_ops = &ns87415_pata_ops, | ||
409 | }; | ||
410 | const struct ata_port_info *ppi[] = { &info, NULL }; | ||
411 | #if defined(CONFIG_SUPERIO) | ||
412 | static const struct ata_port_info info87560 = { | ||
413 | .sht = &ns87415_sht, | ||
414 | .flags = ATA_FLAG_SLAVE_POSS, | ||
415 | .pio_mask = 0x1f, /* pio0-4 */ | ||
416 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
417 | .port_ops = &ns87560_pata_ops, | ||
418 | }; | ||
419 | |||
420 | if (PCI_SLOT(pdev->devfn) == 0x0E) | ||
421 | ppi[0] = &info87560; | ||
422 | #endif | ||
423 | if (!printed_version++) | ||
424 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
425 | "version " DRV_VERSION "\n"); | ||
426 | /* Select 512 byte sectors */ | ||
427 | pci_write_config_byte(pdev, 0x55, 0xEE); | ||
428 | /* Select PIO0 8bit clocking */ | ||
429 | pci_write_config_byte(pdev, 0x54, 0xB7); | ||
430 | return ata_pci_init_one(pdev, ppi); | ||
431 | } | ||
432 | |||
433 | static const struct pci_device_id ns87415_pci_tbl[] = { | ||
434 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), }, | ||
435 | |||
436 | { } /* terminate list */ | ||
437 | }; | ||
438 | |||
439 | static struct pci_driver ns87415_pci_driver = { | ||
440 | .name = DRV_NAME, | ||
441 | .id_table = ns87415_pci_tbl, | ||
442 | .probe = ns87415_init_one, | ||
443 | .remove = ata_pci_remove_one, | ||
444 | #ifdef CONFIG_PM | ||
445 | .suspend = ata_pci_device_suspend, | ||
446 | .resume = ata_pci_device_resume, | ||
447 | #endif | ||
448 | }; | ||
449 | |||
450 | static int __init ns87415_init(void) | ||
451 | { | ||
452 | return pci_register_driver(&ns87415_pci_driver); | ||
453 | } | ||
454 | |||
455 | static void __exit ns87415_exit(void) | ||
456 | { | ||
457 | pci_unregister_driver(&ns87415_pci_driver); | ||
458 | } | ||
459 | |||
460 | module_init(ns87415_init); | ||
461 | module_exit(ns87415_exit); | ||
462 | |||
463 | MODULE_AUTHOR("Alan Cox"); | ||
464 | MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers"); | ||
465 | MODULE_LICENSE("GPL"); | ||
466 | MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl); | ||
467 | MODULE_VERSION(DRV_VERSION); | ||
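ns87415_set_mode in the new file packs the active/recovery timing into one byte: the low nybble holds 17 minus the active clocks, the high nybble 16 minus the recovery clocks (FIT() clamps the computed values into the usable ranges), and the byte is duplicated so read and write timing match. A worked example of that arithmetic with illustrative clock counts, not a claim about any particular transfer mode; dev, timing and unit are as in the function above:

	/* suppose ata_timing_compute() yielded t.active = 3 and
	   t.recover = 1 PCI clocks */
	u16 clocking = 17 - 3;			/* low nybble:  0x0E */
	clocking |= (16 - 1) << 4;		/* high nybble: 0xF0 -> 0xFE */
	clocking |= clocking << 8;		/* mirror for writes: 0xFEFE */
	pci_write_config_word(dev, timing, clocking);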
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 091a70a0ef1c..3cd5eb2b6c91 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c | |||
@@ -29,14 +29,15 @@ | |||
29 | 29 | ||
30 | /** | 30 | /** |
31 | * oldpiix_pre_reset - probe begin | 31 | * oldpiix_pre_reset - probe begin |
32 | * @ap: ATA port | 32 | * @link: ATA link |
33 | * @deadline: deadline jiffies for the operation | 33 | * @deadline: deadline jiffies for the operation |
34 | * | 34 | * |
35 | * Set up cable type and use generic probe init | 35 | * Set up cable type and use generic probe init |
36 | */ | 36 | */ |
37 | 37 | ||
38 | static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline) | 38 | static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline) |
39 | { | 39 | { |
40 | struct ata_port *ap = link->ap; | ||
40 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 41 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
41 | static const struct pci_bits oldpiix_enable_bits[] = { | 42 | static const struct pci_bits oldpiix_enable_bits[] = { |
42 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ | 43 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ |
@@ -46,7 +47,7 @@ static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
46 | if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) | 47 | if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) |
47 | return -ENOENT; | 48 | return -ENOENT; |
48 | 49 | ||
49 | return ata_std_prereset(ap, deadline); | 50 | return ata_std_prereset(link, deadline); |
50 | } | 51 | } |
51 | 52 | ||
52 | /** | 53 | /** |
@@ -237,7 +238,6 @@ static struct scsi_host_template oldpiix_sht = { | |||
237 | }; | 238 | }; |
238 | 239 | ||
239 | static const struct ata_port_operations oldpiix_pata_ops = { | 240 | static const struct ata_port_operations oldpiix_pata_ops = { |
240 | .port_disable = ata_port_disable, | ||
241 | .set_piomode = oldpiix_set_piomode, | 241 | .set_piomode = oldpiix_set_piomode, |
242 | .set_dmamode = oldpiix_set_dmamode, | 242 | .set_dmamode = oldpiix_set_dmamode, |
243 | .mode_filter = ata_pci_default_filter, | 243 | .mode_filter = ata_pci_default_filter, |
@@ -265,9 +265,8 @@ static const struct ata_port_operations oldpiix_pata_ops = { | |||
265 | .irq_handler = ata_interrupt, | 265 | .irq_handler = ata_interrupt, |
266 | .irq_clear = ata_bmdma_irq_clear, | 266 | .irq_clear = ata_bmdma_irq_clear, |
267 | .irq_on = ata_irq_on, | 267 | .irq_on = ata_irq_on, |
268 | .irq_ack = ata_irq_ack, | ||
269 | 268 | ||
270 | .port_start = ata_port_start, | 269 | .port_start = ata_sff_port_start, |
271 | }; | 270 | }; |
272 | 271 | ||
273 | 272 | ||
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 458bf67f766f..8f79447b6151 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c | |||
@@ -46,14 +46,15 @@ enum { | |||
46 | 46 | ||
47 | /** | 47 | /** |
48 | * opti_pre_reset - probe begin | 48 | * opti_pre_reset - probe begin |
49 | * @ap: ATA port | 49 | * @link: ATA link |
50 | * @deadline: deadline jiffies for the operation | 50 | * @deadline: deadline jiffies for the operation |
51 | * | 51 | * |
52 | * Set up cable type and use generic probe init | 52 | * Set up cable type and use generic probe init |
53 | */ | 53 | */ |
54 | 54 | ||
55 | static int opti_pre_reset(struct ata_port *ap, unsigned long deadline) | 55 | static int opti_pre_reset(struct ata_link *link, unsigned long deadline) |
56 | { | 56 | { |
57 | struct ata_port *ap = link->ap; | ||
57 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 58 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
58 | static const struct pci_bits opti_enable_bits[] = { | 59 | static const struct pci_bits opti_enable_bits[] = { |
59 | { 0x45, 1, 0x80, 0x00 }, | 60 | { 0x45, 1, 0x80, 0x00 }, |
@@ -63,7 +64,7 @@ static int opti_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
63 | if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) | 64 | if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) |
64 | return -ENOENT; | 65 | return -ENOENT; |
65 | 66 | ||
66 | return ata_std_prereset(ap, deadline); | 67 | return ata_std_prereset(link, deadline); |
67 | } | 68 | } |
68 | 69 | ||
69 | /** | 70 | /** |
@@ -182,7 +183,6 @@ static struct scsi_host_template opti_sht = { | |||
182 | }; | 183 | }; |
183 | 184 | ||
184 | static struct ata_port_operations opti_port_ops = { | 185 | static struct ata_port_operations opti_port_ops = { |
185 | .port_disable = ata_port_disable, | ||
186 | .set_piomode = opti_set_piomode, | 186 | .set_piomode = opti_set_piomode, |
187 | .tf_load = ata_tf_load, | 187 | .tf_load = ata_tf_load, |
188 | .tf_read = ata_tf_read, | 188 | .tf_read = ata_tf_read, |
@@ -209,9 +209,8 @@ static struct ata_port_operations opti_port_ops = { | |||
209 | .irq_handler = ata_interrupt, | 209 | .irq_handler = ata_interrupt, |
210 | .irq_clear = ata_bmdma_irq_clear, | 210 | .irq_clear = ata_bmdma_irq_clear, |
211 | .irq_on = ata_irq_on, | 211 | .irq_on = ata_irq_on, |
212 | .irq_ack = ata_irq_ack, | ||
213 | 212 | ||
214 | .port_start = ata_port_start, | 213 | .port_start = ata_sff_port_start, |
215 | }; | 214 | }; |
216 | 215 | ||
217 | static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 216 | static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index f89bdfde16d0..6b07b5b48532 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c | |||
@@ -47,14 +47,15 @@ static int pci_clock; /* 0 = 33 1 = 25 */ | |||
47 | 47 | ||
48 | /** | 48 | /** |
49 | * optidma_pre_reset - probe begin | 49 | * optidma_pre_reset - probe begin |
50 | * @ap: ATA port | 50 | * @link: ATA link |
51 | * @deadline: deadline jiffies for the operation | 51 | * @deadline: deadline jiffies for the operation |
52 | * | 52 | * |
53 | * Set up cable type and use generic probe init | 53 | * Set up cable type and use generic probe init |
54 | */ | 54 | */ |
55 | 55 | ||
56 | static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline) | 56 | static int optidma_pre_reset(struct ata_link *link, unsigned long deadline) |
57 | { | 57 | { |
58 | struct ata_port *ap = link->ap; | ||
58 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 59 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
59 | static const struct pci_bits optidma_enable_bits = { | 60 | static const struct pci_bits optidma_enable_bits = { |
60 | 0x40, 1, 0x08, 0x00 | 61 | 0x40, 1, 0x08, 0x00 |
@@ -63,7 +64,7 @@ static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
63 | if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) | 64 | if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) |
64 | return -ENOENT; | 65 | return -ENOENT; |
65 | 66 | ||
66 | return ata_std_prereset(ap, deadline); | 67 | return ata_std_prereset(link, deadline); |
67 | } | 68 | } |
68 | 69 | ||
69 | /** | 70 | /** |
@@ -323,25 +324,26 @@ static u8 optidma_make_bits43(struct ata_device *adev) | |||
323 | 324 | ||
324 | /** | 325 | /** |
325 | * optidma_set_mode - mode setup | 326 | * optidma_set_mode - mode setup |
326 | * @ap: port to set up | 327 | * @link: link to set up |
327 | * | 328 | * |
328 | * Use the standard setup to tune the chipset and then finalise the | 329 | * Use the standard setup to tune the chipset and then finalise the |
329 | * configuration by writing the nibble of extra bits of data into | 330 | * configuration by writing the nibble of extra bits of data into |
330 | * the chip. | 331 | * the chip. |
331 | */ | 332 | */ |
332 | 333 | ||
333 | static int optidma_set_mode(struct ata_port *ap, struct ata_device **r_failed) | 334 | static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed) |
334 | { | 335 | { |
336 | struct ata_port *ap = link->ap; | ||
335 | u8 r; | 337 | u8 r; |
336 | int nybble = 4 * ap->port_no; | 338 | int nybble = 4 * ap->port_no; |
337 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 339 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
338 | int rc = ata_do_set_mode(ap, r_failed); | 340 | int rc = ata_do_set_mode(link, r_failed); |
339 | if (rc == 0) { | 341 | if (rc == 0) { |
340 | pci_read_config_byte(pdev, 0x43, &r); | 342 | pci_read_config_byte(pdev, 0x43, &r); |
341 | 343 | ||
342 | r &= (0x0F << nybble); | 344 | r &= (0x0F << nybble); |
343 | r |= (optidma_make_bits43(&ap->device[0]) + | 345 | r |= (optidma_make_bits43(&link->device[0]) + |
344 | (optidma_make_bits43(&ap->device[0]) << 2)) << nybble; | 346 | (optidma_make_bits43(&link->device[0]) << 2)) << nybble; |
345 | pci_write_config_byte(pdev, 0x43, r); | 347 | pci_write_config_byte(pdev, 0x43, r); |
346 | } | 348 | } |
347 | return rc; | 349 | return rc; |
@@ -366,7 +368,6 @@ static struct scsi_host_template optidma_sht = { | |||
366 | }; | 368 | }; |
367 | 369 | ||
368 | static struct ata_port_operations optidma_port_ops = { | 370 | static struct ata_port_operations optidma_port_ops = { |
369 | .port_disable = ata_port_disable, | ||
370 | .set_piomode = optidma_set_pio_mode, | 371 | .set_piomode = optidma_set_pio_mode, |
371 | .set_dmamode = optidma_set_dma_mode, | 372 | .set_dmamode = optidma_set_dma_mode, |
372 | 373 | ||
@@ -396,13 +397,11 @@ static struct ata_port_operations optidma_port_ops = { | |||
396 | .irq_handler = ata_interrupt, | 397 | .irq_handler = ata_interrupt, |
397 | .irq_clear = ata_bmdma_irq_clear, | 398 | .irq_clear = ata_bmdma_irq_clear, |
398 | .irq_on = ata_irq_on, | 399 | .irq_on = ata_irq_on, |
399 | .irq_ack = ata_irq_ack, | ||
400 | 400 | ||
401 | .port_start = ata_port_start, | 401 | .port_start = ata_sff_port_start, |
402 | }; | 402 | }; |
403 | 403 | ||
404 | static struct ata_port_operations optiplus_port_ops = { | 404 | static struct ata_port_operations optiplus_port_ops = { |
405 | .port_disable = ata_port_disable, | ||
406 | .set_piomode = optiplus_set_pio_mode, | 405 | .set_piomode = optiplus_set_pio_mode, |
407 | .set_dmamode = optiplus_set_dma_mode, | 406 | .set_dmamode = optiplus_set_dma_mode, |
408 | 407 | ||
@@ -432,9 +431,8 @@ static struct ata_port_operations optiplus_port_ops = { | |||
432 | .irq_handler = ata_interrupt, | 431 | .irq_handler = ata_interrupt, |
433 | .irq_clear = ata_bmdma_irq_clear, | 432 | .irq_clear = ata_bmdma_irq_clear, |
434 | .irq_on = ata_irq_on, | 433 | .irq_on = ata_irq_on, |
435 | .irq_ack = ata_irq_ack, | ||
436 | 434 | ||
437 | .port_start = ata_port_start, | 435 | .port_start = ata_sff_port_start, |
438 | }; | 436 | }; |
439 | 437 | ||
440 | /** | 438 | /** |
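The optidma ->set_mode conversion above follows the same shape as the prereset one: the hook now receives the link, runs ata_do_set_mode() on it first, and only then applies the chip-specific timing fixups using link->device[] instead of ap->device[]. A hedged sketch of that structure, with the register programming abstracted behind a hypothetical finish_timing() helper:

#include <linux/libata.h>

/* Hypothetical chip-specific fixup, standing in for the register 0x43 work. */
static void finish_timing(struct ata_port *ap, struct ata_device *dev);

static int example_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	int rc = ata_do_set_mode(link, r_failed);	/* generic mode setup first */

	if (rc == 0)
		finish_timing(ap, &link->device[0]);	/* then chip-specific fixups */
	return rc;
}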
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 0f2b027624d6..782ff4ada9d1 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -56,7 +56,7 @@ struct ata_pcmcia_info { | |||
56 | 56 | ||
57 | /** | 57 | /** |
58 | * pcmcia_set_mode - PCMCIA specific mode setup | 58 | * pcmcia_set_mode - PCMCIA specific mode setup |
59 | * @ap: Port | 59 | * @link: link |
60 | * @r_failed_dev: Return pointer for failed device | 60 | * @r_failed_dev: Return pointer for failed device |
61 | * | 61 | * |
62 | * Perform the tuning and setup of the devices and timings, which | 62 | * Perform the tuning and setup of the devices and timings, which |
@@ -65,13 +65,13 @@ struct ata_pcmcia_info { | |||
65 | * decode, which alas is embarrassingly common in the PC world | 65 | * decode, which alas is embarrassingly common in the PC world |
66 | */ | 66 | */ |
67 | 67 | ||
68 | static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | 68 | static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) |
69 | { | 69 | { |
70 | struct ata_device *master = &ap->device[0]; | 70 | struct ata_device *master = &link->device[0]; |
71 | struct ata_device *slave = &ap->device[1]; | 71 | struct ata_device *slave = &link->device[1]; |
72 | 72 | ||
73 | if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) | 73 | if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) |
74 | return ata_do_set_mode(ap, r_failed_dev); | 74 | return ata_do_set_mode(link, r_failed_dev); |
75 | 75 | ||
76 | if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, | 76 | if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, |
77 | ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) | 77 | ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) |
@@ -84,7 +84,7 @@ static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev | |||
84 | ata_dev_disable(slave); | 84 | ata_dev_disable(slave); |
85 | } | 85 | } |
86 | } | 86 | } |
87 | return ata_do_set_mode(ap, r_failed_dev); | 87 | return ata_do_set_mode(link, r_failed_dev); |
88 | } | 88 | } |
89 | 89 | ||
90 | static struct scsi_host_template pcmcia_sht = { | 90 | static struct scsi_host_template pcmcia_sht = { |
@@ -107,7 +107,6 @@ static struct scsi_host_template pcmcia_sht = { | |||
107 | 107 | ||
108 | static struct ata_port_operations pcmcia_port_ops = { | 108 | static struct ata_port_operations pcmcia_port_ops = { |
109 | .set_mode = pcmcia_set_mode, | 109 | .set_mode = pcmcia_set_mode, |
110 | .port_disable = ata_port_disable, | ||
111 | .tf_load = ata_tf_load, | 110 | .tf_load = ata_tf_load, |
112 | .tf_read = ata_tf_read, | 111 | .tf_read = ata_tf_read, |
113 | .check_status = ata_check_status, | 112 | .check_status = ata_check_status, |
@@ -127,7 +126,6 @@ static struct ata_port_operations pcmcia_port_ops = { | |||
127 | 126 | ||
128 | .irq_clear = ata_bmdma_irq_clear, | 127 | .irq_clear = ata_bmdma_irq_clear, |
129 | .irq_on = ata_irq_on, | 128 | .irq_on = ata_irq_on, |
130 | .irq_ack = ata_irq_ack, | ||
131 | 129 | ||
132 | .port_start = ata_sff_port_start, | 130 | .port_start = ata_sff_port_start, |
133 | }; | 131 | }; |
@@ -304,6 +302,8 @@ next_entry: | |||
304 | ap->ioaddr.ctl_addr = ctl_addr; | 302 | ap->ioaddr.ctl_addr = ctl_addr; |
305 | ata_std_ports(&ap->ioaddr); | 303 | ata_std_ports(&ap->ioaddr); |
306 | 304 | ||
305 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); | ||
306 | |||
307 | /* activate */ | 307 | /* activate */ |
308 | ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt, | 308 | ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt, |
309 | IRQF_SHARED, &pcmcia_sht); | 309 | IRQF_SHARED, &pcmcia_sht); |
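The pcmcia_set_mode() change above keeps its special case for cards that decode both master and slave onto one device; the detection is simply an identity compare of the firmware revision and model strings in the two IDENTIFY buffers. Pulled out as a hedged helper (the function name is illustrative, not part of the driver):

#include <linux/string.h>
#include <linux/libata.h>

/* True when master and slave report identical firmware and model strings,
 * i.e. both positions decode to the same single device. */
static bool example_same_device(struct ata_device *master,
				struct ata_device *slave)
{
	return memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
		      ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0;
}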
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index bb64a986e8f5..3d3f1558cdee 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -69,7 +69,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); | |||
69 | static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); | 69 | static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); |
70 | static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask); | 70 | static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask); |
71 | static int pdc2027x_cable_detect(struct ata_port *ap); | 71 | static int pdc2027x_cable_detect(struct ata_port *ap); |
72 | static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed); | 72 | static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed); |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * ATA Timing Tables based on 133MHz controller clock. | 75 | * ATA Timing Tables based on 133MHz controller clock. |
@@ -147,7 +147,6 @@ static struct scsi_host_template pdc2027x_sht = { | |||
147 | }; | 147 | }; |
148 | 148 | ||
149 | static struct ata_port_operations pdc2027x_pata100_ops = { | 149 | static struct ata_port_operations pdc2027x_pata100_ops = { |
150 | .port_disable = ata_port_disable, | ||
151 | .mode_filter = ata_pci_default_filter, | 150 | .mode_filter = ata_pci_default_filter, |
152 | 151 | ||
153 | .tf_load = ata_tf_load, | 152 | .tf_load = ata_tf_load, |
@@ -173,13 +172,11 @@ static struct ata_port_operations pdc2027x_pata100_ops = { | |||
173 | 172 | ||
174 | .irq_clear = ata_bmdma_irq_clear, | 173 | .irq_clear = ata_bmdma_irq_clear, |
175 | .irq_on = ata_irq_on, | 174 | .irq_on = ata_irq_on, |
176 | .irq_ack = ata_irq_ack, | ||
177 | 175 | ||
178 | .port_start = ata_port_start, | 176 | .port_start = ata_sff_port_start, |
179 | }; | 177 | }; |
180 | 178 | ||
181 | static struct ata_port_operations pdc2027x_pata133_ops = { | 179 | static struct ata_port_operations pdc2027x_pata133_ops = { |
182 | .port_disable = ata_port_disable, | ||
183 | .set_piomode = pdc2027x_set_piomode, | 180 | .set_piomode = pdc2027x_set_piomode, |
184 | .set_dmamode = pdc2027x_set_dmamode, | 181 | .set_dmamode = pdc2027x_set_dmamode, |
185 | .set_mode = pdc2027x_set_mode, | 182 | .set_mode = pdc2027x_set_mode, |
@@ -208,9 +205,8 @@ static struct ata_port_operations pdc2027x_pata133_ops = { | |||
208 | 205 | ||
209 | .irq_clear = ata_bmdma_irq_clear, | 206 | .irq_clear = ata_bmdma_irq_clear, |
210 | .irq_on = ata_irq_on, | 207 | .irq_on = ata_irq_on, |
211 | .irq_ack = ata_irq_ack, | ||
212 | 208 | ||
213 | .port_start = ata_port_start, | 209 | .port_start = ata_sff_port_start, |
214 | }; | 210 | }; |
215 | 211 | ||
216 | static struct ata_port_info pdc2027x_port_info[] = { | 212 | static struct ata_port_info pdc2027x_port_info[] = { |
@@ -277,7 +273,7 @@ static int pdc2027x_cable_detect(struct ata_port *ap) | |||
277 | u32 cgcr; | 273 | u32 cgcr; |
278 | 274 | ||
279 | /* check cable detect results */ | 275 | /* check cable detect results */ |
280 | cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL)); | 276 | cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL)); |
281 | if (cgcr & (1 << 26)) | 277 | if (cgcr & (1 << 26)) |
282 | goto cbl40; | 278 | goto cbl40; |
283 | 279 | ||
@@ -295,12 +291,12 @@ cbl40: | |||
295 | */ | 291 | */ |
296 | static inline int pdc2027x_port_enabled(struct ata_port *ap) | 292 | static inline int pdc2027x_port_enabled(struct ata_port *ap) |
297 | { | 293 | { |
298 | return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02; | 294 | return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02; |
299 | } | 295 | } |
300 | 296 | ||
301 | /** | 297 | /** |
302 | * pdc2027x_prereset - prereset for PATA host controller | 298 | * pdc2027x_prereset - prereset for PATA host controller |
303 | * @ap: Target port | 299 | * @link: Target link |
304 | * @deadline: deadline jiffies for the operation | 300 | * @deadline: deadline jiffies for the operation |
305 | * | 301 | * |
306 | * Probeinit including cable detection. | 302 | * Probeinit including cable detection. |
@@ -309,12 +305,12 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap) | |||
309 | * None (inherited from caller). | 305 | * None (inherited from caller). |
310 | */ | 306 | */ |
311 | 307 | ||
312 | static int pdc2027x_prereset(struct ata_port *ap, unsigned long deadline) | 308 | static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline) |
313 | { | 309 | { |
314 | /* Check whether port enabled */ | 310 | /* Check whether port enabled */ |
315 | if (!pdc2027x_port_enabled(ap)) | 311 | if (!pdc2027x_port_enabled(link->ap)) |
316 | return -ENOENT; | 312 | return -ENOENT; |
317 | return ata_std_prereset(ap, deadline); | 313 | return ata_std_prereset(link, deadline); |
318 | } | 314 | } |
319 | 315 | ||
320 | /** | 316 | /** |
@@ -387,16 +383,16 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
387 | /* Set the PIO timing registers using value table for 133MHz */ | 383 | /* Set the PIO timing registers using value table for 133MHz */ |
388 | PDPRINTK("Set pio regs... \n"); | 384 | PDPRINTK("Set pio regs... \n"); |
389 | 385 | ||
390 | ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0)); | 386 | ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); |
391 | ctcr0 &= 0xffff0000; | 387 | ctcr0 &= 0xffff0000; |
392 | ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 | | 388 | ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 | |
393 | (pdc2027x_pio_timing_tbl[pio].value1 << 8); | 389 | (pdc2027x_pio_timing_tbl[pio].value1 << 8); |
394 | writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); | 390 | iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); |
395 | 391 | ||
396 | ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); | 392 | ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); |
397 | ctcr1 &= 0x00ffffff; | 393 | ctcr1 &= 0x00ffffff; |
398 | ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24); | 394 | ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24); |
399 | writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); | 395 | iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); |
400 | 396 | ||
401 | PDPRINTK("Set pio regs done\n"); | 397 | PDPRINTK("Set pio regs done\n"); |
402 | 398 | ||
@@ -430,18 +426,18 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
430 | * If tHOLD is '1', the hardware will add half clock for data hold time. | 426 | * If tHOLD is '1', the hardware will add half clock for data hold time. |
431 | * This code segment seems to be no effect. tHOLD will be overwritten below. | 427 | * This code segment seems to be no effect. tHOLD will be overwritten below. |
432 | */ | 428 | */ |
433 | ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); | 429 | ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); |
434 | writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1)); | 430 | iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1)); |
435 | } | 431 | } |
436 | 432 | ||
437 | PDPRINTK("Set udma regs... \n"); | 433 | PDPRINTK("Set udma regs... \n"); |
438 | 434 | ||
439 | ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); | 435 | ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); |
440 | ctcr1 &= 0xff000000; | 436 | ctcr1 &= 0xff000000; |
441 | ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 | | 437 | ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 | |
442 | (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) | | 438 | (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) | |
443 | (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16); | 439 | (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16); |
444 | writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); | 440 | iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); |
445 | 441 | ||
446 | PDPRINTK("Set udma regs done\n"); | 442 | PDPRINTK("Set udma regs done\n"); |
447 | 443 | ||
@@ -453,13 +449,13 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
453 | unsigned int mdma_mode = dma_mode & 0x07; | 449 | unsigned int mdma_mode = dma_mode & 0x07; |
454 | 450 | ||
455 | PDPRINTK("Set mdma regs... \n"); | 451 | PDPRINTK("Set mdma regs... \n"); |
456 | ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0)); | 452 | ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); |
457 | 453 | ||
458 | ctcr0 &= 0x0000ffff; | 454 | ctcr0 &= 0x0000ffff; |
459 | ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) | | 455 | ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) | |
460 | (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24); | 456 | (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24); |
461 | 457 | ||
462 | writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); | 458 | iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); |
463 | PDPRINTK("Set mdma regs done\n"); | 459 | PDPRINTK("Set mdma regs done\n"); |
464 | 460 | ||
465 | PDPRINTK("Set to mdma mode[%u] \n", mdma_mode); | 461 | PDPRINTK("Set to mdma mode[%u] \n", mdma_mode); |
@@ -470,24 +466,24 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
470 | 466 | ||
471 | /** | 467 | /** |
472 | * pdc2027x_set_mode - Set the timing registers back to correct values. | 468 | * pdc2027x_set_mode - Set the timing registers back to correct values. |
473 | * @ap: Port to configure | 469 | * @link: link to configure |
474 | * @r_failed: Returned device for failure | 470 | * @r_failed: Returned device for failure |
475 | * | 471 | * |
476 | * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers | 472 | * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers |
477 | * automatically. The values set by the hardware might be incorrect, under 133Mhz PLL. | 473 | * automatically. The values set by the hardware might be incorrect, under 133Mhz PLL. |
478 | * This function overwrites the possibly incorrect values set by the hardware to be correct. | 474 | * This function overwrites the possibly incorrect values set by the hardware to be correct. |
479 | */ | 475 | */ |
480 | static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed) | 476 | static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed) |
481 | { | 477 | { |
482 | int i; | 478 | struct ata_port *ap = link->ap; |
483 | 479 | struct ata_device *dev; | |
484 | i = ata_do_set_mode(ap, r_failed); | 480 | int rc; |
485 | if (i < 0) | ||
486 | return i; | ||
487 | 481 | ||
488 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 482 | rc = ata_do_set_mode(link, r_failed); |
489 | struct ata_device *dev = &ap->device[i]; | 483 | if (rc < 0) |
484 | return rc; | ||
490 | 485 | ||
486 | ata_link_for_each_dev(dev, link) { | ||
491 | if (ata_dev_enabled(dev)) { | 487 | if (ata_dev_enabled(dev)) { |
492 | 488 | ||
493 | pdc2027x_set_piomode(ap, dev); | 489 | pdc2027x_set_piomode(ap, dev); |
@@ -496,9 +492,9 @@ static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed) | |||
496 | * Enable prefetch if the device support PIO only. | 492 | * Enable prefetch if the device support PIO only. |
497 | */ | 493 | */ |
498 | if (dev->xfer_shift == ATA_SHIFT_PIO) { | 494 | if (dev->xfer_shift == ATA_SHIFT_PIO) { |
499 | u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1)); | 495 | u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1)); |
500 | ctcr1 |= (1 << 25); | 496 | ctcr1 |= (1 << 25); |
501 | writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); | 497 | iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); |
502 | 498 | ||
503 | PDPRINTK("Turn on prefetch\n"); | 499 | PDPRINTK("Turn on prefetch\n"); |
504 | } else { | 500 | } else { |
@@ -563,14 +559,12 @@ static long pdc_read_counter(struct ata_host *host) | |||
563 | u32 bccrl, bccrh, bccrlv, bccrhv; | 559 | u32 bccrl, bccrh, bccrlv, bccrhv; |
564 | 560 | ||
565 | retry: | 561 | retry: |
566 | bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0x7fff; | 562 | bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff; |
567 | bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; | 563 | bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; |
568 | rmb(); | ||
569 | 564 | ||
570 | /* Read the counter values again for verification */ | 565 | /* Read the counter values again for verification */ |
571 | bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0x7fff; | 566 | bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff; |
572 | bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; | 567 | bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; |
573 | rmb(); | ||
574 | 568 | ||
575 | counter = (bccrh << 15) | bccrl; | 569 | counter = (bccrh << 15) | bccrl; |
576 | 570 | ||
@@ -619,7 +613,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b | |||
619 | /* Show the current clock value of PLL control register | 613 | /* Show the current clock value of PLL control register |
620 | * (maybe already configured by the firmware) | 614 | * (maybe already configured by the firmware) |
621 | */ | 615 | */ |
622 | pll_ctl = readw(mmio_base + PDC_PLL_CTL); | 616 | pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); |
623 | 617 | ||
624 | PDPRINTK("pll_ctl[%X]\n", pll_ctl); | 618 | PDPRINTK("pll_ctl[%X]\n", pll_ctl); |
625 | #endif | 619 | #endif |
@@ -659,8 +653,8 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b | |||
659 | 653 | ||
660 | PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); | 654 | PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); |
661 | 655 | ||
662 | writew(pll_ctl, mmio_base + PDC_PLL_CTL); | 656 | iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL); |
663 | readw(mmio_base + PDC_PLL_CTL); /* flush */ | 657 | ioread16(mmio_base + PDC_PLL_CTL); /* flush */ |
664 | 658 | ||
665 | /* Wait the PLL circuit to be stable */ | 659 | /* Wait the PLL circuit to be stable */ |
666 | mdelay(30); | 660 | mdelay(30); |
@@ -670,7 +664,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b | |||
670 | * Show the current clock value of PLL control register | 664 | * Show the current clock value of PLL control register |
671 | * (maybe configured by the firmware) | 665 | * (maybe configured by the firmware) |
672 | */ | 666 | */ |
673 | pll_ctl = readw(mmio_base + PDC_PLL_CTL); | 667 | pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); |
674 | 668 | ||
675 | PDPRINTK("pll_ctl[%X]\n", pll_ctl); | 669 | PDPRINTK("pll_ctl[%X]\n", pll_ctl); |
676 | #endif | 670 | #endif |
@@ -693,10 +687,10 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
693 | long pll_clock, usec_elapsed; | 687 | long pll_clock, usec_elapsed; |
694 | 688 | ||
695 | /* Start the test mode */ | 689 | /* Start the test mode */ |
696 | scr = readl(mmio_base + PDC_SYS_CTL); | 690 | scr = ioread32(mmio_base + PDC_SYS_CTL); |
697 | PDPRINTK("scr[%X]\n", scr); | 691 | PDPRINTK("scr[%X]\n", scr); |
698 | writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL); | 692 | iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL); |
699 | readl(mmio_base + PDC_SYS_CTL); /* flush */ | 693 | ioread32(mmio_base + PDC_SYS_CTL); /* flush */ |
700 | 694 | ||
701 | /* Read current counter value */ | 695 | /* Read current counter value */ |
702 | start_count = pdc_read_counter(host); | 696 | start_count = pdc_read_counter(host); |
@@ -710,10 +704,10 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
710 | do_gettimeofday(&end_time); | 704 | do_gettimeofday(&end_time); |
711 | 705 | ||
712 | /* Stop the test mode */ | 706 | /* Stop the test mode */ |
713 | scr = readl(mmio_base + PDC_SYS_CTL); | 707 | scr = ioread32(mmio_base + PDC_SYS_CTL); |
714 | PDPRINTK("scr[%X]\n", scr); | 708 | PDPRINTK("scr[%X]\n", scr); |
715 | writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL); | 709 | iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL); |
716 | readl(mmio_base + PDC_SYS_CTL); /* flush */ | 710 | ioread32(mmio_base + PDC_SYS_CTL); /* flush */ |
717 | 711 | ||
718 | /* calculate the input clock in Hz */ | 712 | /* calculate the input clock in Hz */ |
719 | usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + | 713 | usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + |
@@ -745,9 +739,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) | |||
745 | */ | 739 | */ |
746 | pll_clock = pdc_detect_pll_input_clock(host); | 740 | pll_clock = pdc_detect_pll_input_clock(host); |
747 | 741 | ||
748 | if (pll_clock < 0) /* counter overflow? Try again. */ | ||
749 | pll_clock = pdc_detect_pll_input_clock(host); | ||
750 | |||
751 | dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000); | 742 | dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000); |
752 | 743 | ||
753 | /* Adjust PLL control register */ | 744 | /* Adjust PLL control register */ |
@@ -791,12 +782,14 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base) | |||
791 | static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 782 | static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
792 | { | 783 | { |
793 | static int printed_version; | 784 | static int printed_version; |
785 | static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 }; | ||
786 | static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 }; | ||
794 | unsigned int board_idx = (unsigned int) ent->driver_data; | 787 | unsigned int board_idx = (unsigned int) ent->driver_data; |
795 | const struct ata_port_info *ppi[] = | 788 | const struct ata_port_info *ppi[] = |
796 | { &pdc2027x_port_info[board_idx], NULL }; | 789 | { &pdc2027x_port_info[board_idx], NULL }; |
797 | struct ata_host *host; | 790 | struct ata_host *host; |
798 | void __iomem *mmio_base; | 791 | void __iomem *mmio_base; |
799 | int rc; | 792 | int i, rc; |
800 | 793 | ||
801 | if (!printed_version++) | 794 | if (!printed_version++) |
802 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 795 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -826,10 +819,15 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de | |||
826 | 819 | ||
827 | mmio_base = host->iomap[PDC_MMIO_BAR]; | 820 | mmio_base = host->iomap[PDC_MMIO_BAR]; |
828 | 821 | ||
829 | pdc_ata_setup_port(&host->ports[0]->ioaddr, mmio_base + 0x17c0); | 822 | for (i = 0; i < 2; i++) { |
830 | host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x1000; | 823 | struct ata_port *ap = host->ports[i]; |
831 | pdc_ata_setup_port(&host->ports[1]->ioaddr, mmio_base + 0x15c0); | 824 | |
832 | host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x1008; | 825 | pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]); |
826 | ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i]; | ||
827 | |||
828 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); | ||
829 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd"); | ||
830 | } | ||
833 | 831 | ||
834 | //pci_enable_intx(pdev); | 832 | //pci_enable_intx(pdev); |
835 | 833 | ||
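Besides the ata_link changes, the pdc2027x hunks switch every MMIO access from readl()/writel() to ioread32()/iowrite32(), since the base pointer comes from pcim_iomap_table() and iomap cookies are meant to be used with the ioread/iowrite family. A minimal hedged sketch of the resulting read-modify-write pattern; the offset and bit are illustrative, not pdc2027x registers:

#include <linux/types.h>
#include <linux/io.h>

static void example_rmw(void __iomem *mmio_base)
{
	u32 val = ioread32(mmio_base + 0x60);	/* illustrative register offset */

	val |= (1 << 25);			/* set an illustrative control bit */
	iowrite32(val, mmio_base + 0x60);
	ioread32(mmio_base + 0x60);		/* read back to flush the posted write */
}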
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 92447bed5e77..65d951618c60 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * First cut with LBA48/ATAPI | 9 | * First cut with LBA48/ATAPI |
10 | * | 10 | * |
11 | * TODO: | 11 | * TODO: |
12 | * Channel interlock/reset on both required | 12 | * Channel interlock/reset on both required ? |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/libata.h> | 22 | #include <linux/libata.h> |
23 | 23 | ||
24 | #define DRV_NAME "pata_pdc202xx_old" | 24 | #define DRV_NAME "pata_pdc202xx_old" |
25 | #define DRV_VERSION "0.4.2" | 25 | #define DRV_VERSION "0.4.3" |
26 | 26 | ||
27 | static int pdc2026x_cable_detect(struct ata_port *ap) | 27 | static int pdc2026x_cable_detect(struct ata_port *ap) |
28 | { | 28 | { |
@@ -106,9 +106,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
106 | { 0x20, 0x01 } | 106 | { 0x20, 0x01 } |
107 | }; | 107 | }; |
108 | static u8 mdma_timing[3][2] = { | 108 | static u8 mdma_timing[3][2] = { |
109 | { 0x60, 0x03 }, | ||
110 | { 0x60, 0x04 }, | ||
111 | { 0xe0, 0x0f }, | 109 | { 0xe0, 0x0f }, |
110 | { 0x60, 0x04 }, | ||
111 | { 0x60, 0x03 }, | ||
112 | }; | 112 | }; |
113 | u8 r_bp, r_cp; | 113 | u8 r_bp, r_cp; |
114 | 114 | ||
@@ -139,6 +139,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
139 | * | 139 | * |
140 | * In UDMA3 or higher we have to clock switch for the duration of the | 140 | * In UDMA3 or higher we have to clock switch for the duration of the |
141 | * DMA transfer sequence. | 141 | * DMA transfer sequence. |
142 | * | ||
143 | * Note: The host lock held by the libata layer protects | ||
144 | * us from two channels both trying to set DMA bits at once | ||
142 | */ | 145 | */ |
143 | 146 | ||
144 | static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) | 147 | static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) |
@@ -187,6 +190,9 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) | |||
187 | * | 190 | * |
188 | * After a DMA completes we need to put the clock back to 33MHz for | 191 | * After a DMA completes we need to put the clock back to 33MHz for |
189 | * PIO timings. | 192 | * PIO timings. |
193 | * | ||
194 | * Note: The host lock held by the libata layer protects | ||
195 | * us from two channels both trying to set DMA bits at once | ||
190 | */ | 196 | */ |
191 | 197 | ||
192 | static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) | 198 | static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) |
@@ -206,7 +212,6 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) | |||
206 | iowrite32(0, atapi_reg); | 212 | iowrite32(0, atapi_reg); |
207 | iowrite8(ioread8(clock) & ~sel66, clock); | 213 | iowrite8(ioread8(clock) & ~sel66, clock); |
208 | } | 214 | } |
209 | /* Check we keep host level locking here */ | ||
210 | /* Flip back to 33Mhz for PIO */ | 215 | /* Flip back to 33Mhz for PIO */ |
211 | if (adev->dma_mode >= XFER_UDMA_2) | 216 | if (adev->dma_mode >= XFER_UDMA_2) |
212 | iowrite8(ioread8(clock) & ~sel66, clock); | 217 | iowrite8(ioread8(clock) & ~sel66, clock); |
@@ -247,7 +252,6 @@ static struct scsi_host_template pdc202xx_sht = { | |||
247 | }; | 252 | }; |
248 | 253 | ||
249 | static struct ata_port_operations pdc2024x_port_ops = { | 254 | static struct ata_port_operations pdc2024x_port_ops = { |
250 | .port_disable = ata_port_disable, | ||
251 | .set_piomode = pdc202xx_set_piomode, | 255 | .set_piomode = pdc202xx_set_piomode, |
252 | .set_dmamode = pdc202xx_set_dmamode, | 256 | .set_dmamode = pdc202xx_set_dmamode, |
253 | .mode_filter = ata_pci_default_filter, | 257 | .mode_filter = ata_pci_default_filter, |
@@ -275,13 +279,11 @@ static struct ata_port_operations pdc2024x_port_ops = { | |||
275 | .irq_handler = ata_interrupt, | 279 | .irq_handler = ata_interrupt, |
276 | .irq_clear = ata_bmdma_irq_clear, | 280 | .irq_clear = ata_bmdma_irq_clear, |
277 | .irq_on = ata_irq_on, | 281 | .irq_on = ata_irq_on, |
278 | .irq_ack = ata_irq_ack, | ||
279 | 282 | ||
280 | .port_start = ata_port_start, | 283 | .port_start = ata_sff_port_start, |
281 | }; | 284 | }; |
282 | 285 | ||
283 | static struct ata_port_operations pdc2026x_port_ops = { | 286 | static struct ata_port_operations pdc2026x_port_ops = { |
284 | .port_disable = ata_port_disable, | ||
285 | .set_piomode = pdc202xx_set_piomode, | 287 | .set_piomode = pdc202xx_set_piomode, |
286 | .set_dmamode = pdc202xx_set_dmamode, | 288 | .set_dmamode = pdc202xx_set_dmamode, |
287 | .mode_filter = ata_pci_default_filter, | 289 | .mode_filter = ata_pci_default_filter, |
@@ -310,9 +312,8 @@ static struct ata_port_operations pdc2026x_port_ops = { | |||
310 | .irq_handler = ata_interrupt, | 312 | .irq_handler = ata_interrupt, |
311 | .irq_clear = ata_bmdma_irq_clear, | 313 | .irq_clear = ata_bmdma_irq_clear, |
312 | .irq_on = ata_irq_on, | 314 | .irq_on = ata_irq_on, |
313 | .irq_ack = ata_irq_ack, | ||
314 | 315 | ||
315 | .port_start = ata_port_start, | 316 | .port_start = ata_sff_port_start, |
316 | }; | 317 | }; |
317 | 318 | ||
318 | static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 319 | static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
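The pdc202xx_old hunk reverses the rows of mdma_timing[]. Assuming the table is indexed by the MWDMA mode number (the lookup itself is outside the visible hunk), the reversal makes row 0 hold the MWDMA0 values rather than the MWDMA2 ones. A sketch of that assumed indexing:

#include <linux/libata.h>

/* Row order after the change above; the index expression is an assumption,
 * since the lookup code is not shown in the hunk. */
static const u8 example_mdma_timing[3][2] = {
	{ 0xe0, 0x0f },		/* MWDMA0 */
	{ 0x60, 0x04 },		/* MWDMA1 */
	{ 0x60, 0x03 },		/* MWDMA2 */
};

static void example_mdma_lookup(struct ata_device *adev, u8 *bp, u8 *cp)
{
	int i = adev->dma_mode - XFER_MW_DMA_0;	/* 0, 1 or 2 */

	*bp = example_mdma_timing[i][0];
	*cp = example_mdma_timing[i][1];
}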
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 5086d03f2d7c..fc72a965643d 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c | |||
@@ -30,13 +30,11 @@ static int pio_mask = 1; | |||
30 | * Provide our own set_mode() as we don't want to change anything that has | 30 | * Provide our own set_mode() as we don't want to change anything that has |
31 | * already been configured.. | 31 | * already been configured.. |
32 | */ | 32 | */ |
33 | static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unused) | 33 | static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused) |
34 | { | 34 | { |
35 | int i; | 35 | struct ata_device *dev; |
36 | |||
37 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
38 | struct ata_device *dev = &ap->device[i]; | ||
39 | 36 | ||
37 | ata_link_for_each_dev(dev, link) { | ||
40 | if (ata_dev_enabled(dev)) { | 38 | if (ata_dev_enabled(dev)) { |
41 | /* We don't really care */ | 39 | /* We don't really care */ |
42 | dev->pio_mode = dev->xfer_mode = XFER_PIO_0; | 40 | dev->pio_mode = dev->xfer_mode = XFER_PIO_0; |
@@ -71,7 +69,6 @@ static struct scsi_host_template pata_platform_sht = { | |||
71 | static struct ata_port_operations pata_platform_port_ops = { | 69 | static struct ata_port_operations pata_platform_port_ops = { |
72 | .set_mode = pata_platform_set_mode, | 70 | .set_mode = pata_platform_set_mode, |
73 | 71 | ||
74 | .port_disable = ata_port_disable, | ||
75 | .tf_load = ata_tf_load, | 72 | .tf_load = ata_tf_load, |
76 | .tf_read = ata_tf_read, | 73 | .tf_read = ata_tf_read, |
77 | .check_status = ata_check_status, | 74 | .check_status = ata_check_status, |
@@ -91,7 +88,6 @@ static struct ata_port_operations pata_platform_port_ops = { | |||
91 | 88 | ||
92 | .irq_clear = ata_bmdma_irq_clear, | 89 | .irq_clear = ata_bmdma_irq_clear, |
93 | .irq_on = ata_irq_on, | 90 | .irq_on = ata_irq_on, |
94 | .irq_ack = ata_irq_ack, | ||
95 | 91 | ||
96 | .port_start = ata_dummy_ret0, | 92 | .port_start = ata_dummy_ret0, |
97 | }; | 93 | }; |
@@ -209,9 +205,13 @@ static int __devinit pata_platform_probe(struct platform_device *pdev) | |||
209 | 205 | ||
210 | ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; | 206 | ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; |
211 | 207 | ||
212 | pp_info = (struct pata_platform_info *)(pdev->dev.platform_data); | 208 | pp_info = pdev->dev.platform_data; |
213 | pata_platform_setup_port(&ap->ioaddr, pp_info); | 209 | pata_platform_setup_port(&ap->ioaddr, pp_info); |
214 | 210 | ||
211 | ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport", | ||
212 | (unsigned long long)io_res->start, | ||
213 | (unsigned long long)ctl_res->start); | ||
214 | |||
215 | /* activate */ | 215 | /* activate */ |
216 | return ata_host_activate(host, platform_get_irq(pdev, 0), | 216 | return ata_host_activate(host, platform_get_irq(pdev, 0), |
217 | ata_interrupt, pp_info ? pp_info->irq_flags | 217 | ata_interrupt, pp_info ? pp_info->irq_flags |
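pata_platform_set_mode() above shows the idiom that replaces the old ATA_MAX_DEVICES loop over ap->device[]: iterate the link's devices with ata_link_for_each_dev() and act only on the enabled ones. A hedged sketch of the full shape (the lines beyond the visible hunk are filled in from the usual force-PIO0 pattern):

#include <linux/libata.h>

static int example_force_pio0(struct ata_link *link, struct ata_device **unused)
{
	struct ata_device *dev;

	ata_link_for_each_dev(dev, link) {	/* walk devices on this link */
		if (!ata_dev_enabled(dev))
			continue;
		/* leave the hardware alone, just record PIO mode 0 */
		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
		dev->xfer_shift = ATA_SHIFT_PIO;
		dev->flags |= ATA_DFLAG_PIO;
	}
	return 0;
}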
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c index 1998c19e8743..7d4c696c4cb6 100644 --- a/drivers/ata/pata_qdi.c +++ b/drivers/ata/pata_qdi.c | |||
@@ -126,7 +126,7 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) | |||
126 | 126 | ||
127 | static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) | 127 | static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) |
128 | { | 128 | { |
129 | struct ata_port *ap = adev->ap; | 129 | struct ata_port *ap = adev->link->ap; |
130 | int slop = buflen & 3; | 130 | int slop = buflen & 3; |
131 | 131 | ||
132 | if (ata_id_has_dword_io(adev->id)) { | 132 | if (ata_id_has_dword_io(adev->id)) { |
@@ -170,7 +170,6 @@ static struct scsi_host_template qdi_sht = { | |||
170 | }; | 170 | }; |
171 | 171 | ||
172 | static struct ata_port_operations qdi6500_port_ops = { | 172 | static struct ata_port_operations qdi6500_port_ops = { |
173 | .port_disable = ata_port_disable, | ||
174 | .set_piomode = qdi6500_set_piomode, | 173 | .set_piomode = qdi6500_set_piomode, |
175 | 174 | ||
176 | .tf_load = ata_tf_load, | 175 | .tf_load = ata_tf_load, |
@@ -192,13 +191,11 @@ static struct ata_port_operations qdi6500_port_ops = { | |||
192 | 191 | ||
193 | .irq_clear = ata_bmdma_irq_clear, | 192 | .irq_clear = ata_bmdma_irq_clear, |
194 | .irq_on = ata_irq_on, | 193 | .irq_on = ata_irq_on, |
195 | .irq_ack = ata_irq_ack, | ||
196 | 194 | ||
197 | .port_start = ata_port_start, | 195 | .port_start = ata_sff_port_start, |
198 | }; | 196 | }; |
199 | 197 | ||
200 | static struct ata_port_operations qdi6580_port_ops = { | 198 | static struct ata_port_operations qdi6580_port_ops = { |
201 | .port_disable = ata_port_disable, | ||
202 | .set_piomode = qdi6580_set_piomode, | 199 | .set_piomode = qdi6580_set_piomode, |
203 | 200 | ||
204 | .tf_load = ata_tf_load, | 201 | .tf_load = ata_tf_load, |
@@ -220,9 +217,8 @@ static struct ata_port_operations qdi6580_port_ops = { | |||
220 | 217 | ||
221 | .irq_clear = ata_bmdma_irq_clear, | 218 | .irq_clear = ata_bmdma_irq_clear, |
222 | .irq_on = ata_irq_on, | 219 | .irq_on = ata_irq_on, |
223 | .irq_ack = ata_irq_ack, | ||
224 | 220 | ||
225 | .port_start = ata_port_start, | 221 | .port_start = ata_sff_port_start, |
226 | }; | 222 | }; |
227 | 223 | ||
228 | /** | 224 | /** |
@@ -238,6 +234,7 @@ static struct ata_port_operations qdi6580_port_ops = { | |||
238 | 234 | ||
239 | static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast) | 235 | static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast) |
240 | { | 236 | { |
237 | unsigned long ctl = io + 0x206; | ||
241 | struct platform_device *pdev; | 238 | struct platform_device *pdev; |
242 | struct ata_host *host; | 239 | struct ata_host *host; |
243 | struct ata_port *ap; | 240 | struct ata_port *ap; |
@@ -254,7 +251,7 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i | |||
254 | 251 | ||
255 | ret = -ENOMEM; | 252 | ret = -ENOMEM; |
256 | io_addr = devm_ioport_map(&pdev->dev, io, 8); | 253 | io_addr = devm_ioport_map(&pdev->dev, io, 8); |
257 | ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1); | 254 | ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1); |
258 | if (!io_addr || !ctl_addr) | 255 | if (!io_addr || !ctl_addr) |
259 | goto fail; | 256 | goto fail; |
260 | 257 | ||
@@ -279,6 +276,8 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i | |||
279 | ap->ioaddr.ctl_addr = ctl_addr; | 276 | ap->ioaddr.ctl_addr = ctl_addr; |
280 | ata_std_ports(&ap->ioaddr); | 277 | ata_std_ports(&ap->ioaddr); |
281 | 278 | ||
279 | ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl); | ||
280 | |||
282 | /* | 281 | /* |
283 | * Hook in a private data structure per channel | 282 | * Hook in a private data structure per channel |
284 | */ | 283 | */ |
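The qdi_data_xfer() change above reflects the other half of the ata_link split: a device no longer points straight at its port, so per-device methods reach it as adev->link->ap. A minimal hedged sketch (16-bit transfers only; the driver's dword and odd-byte handling is omitted):

#include <linux/libata.h>

static void example_data_xfer(struct ata_device *adev, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;	/* was adev->ap before the split */

	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, buflen >> 1);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, buflen >> 1);
}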
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index 7d1aabed422d..d5b76497f4a2 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c | |||
@@ -203,7 +203,6 @@ static struct scsi_host_template radisys_sht = { | |||
203 | }; | 203 | }; |
204 | 204 | ||
205 | static const struct ata_port_operations radisys_pata_ops = { | 205 | static const struct ata_port_operations radisys_pata_ops = { |
206 | .port_disable = ata_port_disable, | ||
207 | .set_piomode = radisys_set_piomode, | 206 | .set_piomode = radisys_set_piomode, |
208 | .set_dmamode = radisys_set_dmamode, | 207 | .set_dmamode = radisys_set_dmamode, |
209 | .mode_filter = ata_pci_default_filter, | 208 | .mode_filter = ata_pci_default_filter, |
@@ -231,9 +230,8 @@ static const struct ata_port_operations radisys_pata_ops = { | |||
231 | .irq_handler = ata_interrupt, | 230 | .irq_handler = ata_interrupt, |
232 | .irq_clear = ata_bmdma_irq_clear, | 231 | .irq_clear = ata_bmdma_irq_clear, |
233 | .irq_on = ata_irq_on, | 232 | .irq_on = ata_irq_on, |
234 | .irq_ack = ata_irq_ack, | ||
235 | 233 | ||
236 | .port_start = ata_port_start, | 234 | .port_start = ata_sff_port_start, |
237 | }; | 235 | }; |
238 | 236 | ||
239 | 237 | ||
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index 7632fcb070ca..ba8a31c55edb 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | /** | 27 | /** |
28 | * rz1000_set_mode - mode setting function | 28 | * rz1000_set_mode - mode setting function |
29 | * @ap: ATA interface | 29 | * @link: ATA link |
30 | * @unused: returned device on set_mode failure | 30 | * @unused: returned device on set_mode failure |
31 | * | 31 | * |
32 | * Use a non standard set_mode function. We don't want to be tuned. We | 32 | * Use a non standard set_mode function. We don't want to be tuned. We |
@@ -34,12 +34,11 @@ | |||
34 | * whacked out. | 34 | * whacked out. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | static int rz1000_set_mode(struct ata_port *ap, struct ata_device **unused) | 37 | static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused) |
38 | { | 38 | { |
39 | int i; | 39 | struct ata_device *dev; |
40 | 40 | ||
41 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 41 | ata_link_for_each_dev(dev, link) { |
42 | struct ata_device *dev = &ap->device[i]; | ||
43 | if (ata_dev_enabled(dev)) { | 42 | if (ata_dev_enabled(dev)) { |
44 | /* We don't really care */ | 43 | /* We don't really care */ |
45 | dev->pio_mode = XFER_PIO_0; | 44 | dev->pio_mode = XFER_PIO_0; |
@@ -74,7 +73,6 @@ static struct scsi_host_template rz1000_sht = { | |||
74 | static struct ata_port_operations rz1000_port_ops = { | 73 | static struct ata_port_operations rz1000_port_ops = { |
75 | .set_mode = rz1000_set_mode, | 74 | .set_mode = rz1000_set_mode, |
76 | 75 | ||
77 | .port_disable = ata_port_disable, | ||
78 | .tf_load = ata_tf_load, | 76 | .tf_load = ata_tf_load, |
79 | .tf_read = ata_tf_read, | 77 | .tf_read = ata_tf_read, |
80 | .check_status = ata_check_status, | 78 | .check_status = ata_check_status, |
@@ -100,9 +98,8 @@ static struct ata_port_operations rz1000_port_ops = { | |||
100 | .irq_handler = ata_interrupt, | 98 | .irq_handler = ata_interrupt, |
101 | .irq_clear = ata_bmdma_irq_clear, | 99 | .irq_clear = ata_bmdma_irq_clear, |
102 | .irq_on = ata_irq_on, | 100 | .irq_on = ata_irq_on, |
103 | .irq_ack = ata_irq_ack, | ||
104 | 101 | ||
105 | .port_start = ata_port_start, | 102 | .port_start = ata_sff_port_start, |
106 | }; | 103 | }; |
107 | 104 | ||
108 | static int rz1000_fifo_disable(struct pci_dev *pdev) | 105 | static int rz1000_fifo_disable(struct pci_dev *pdev) |
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 5edf67b1f3bf..21ebc485ca4b 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c | |||
@@ -197,7 +197,6 @@ static struct scsi_host_template sc1200_sht = { | |||
197 | }; | 197 | }; |
198 | 198 | ||
199 | static struct ata_port_operations sc1200_port_ops = { | 199 | static struct ata_port_operations sc1200_port_ops = { |
200 | .port_disable = ata_port_disable, | ||
201 | .set_piomode = sc1200_set_piomode, | 200 | .set_piomode = sc1200_set_piomode, |
202 | .set_dmamode = sc1200_set_dmamode, | 201 | .set_dmamode = sc1200_set_dmamode, |
203 | .mode_filter = ata_pci_default_filter, | 202 | .mode_filter = ata_pci_default_filter, |
@@ -227,9 +226,8 @@ static struct ata_port_operations sc1200_port_ops = { | |||
227 | .irq_handler = ata_interrupt, | 226 | .irq_handler = ata_interrupt, |
228 | .irq_clear = ata_bmdma_irq_clear, | 227 | .irq_clear = ata_bmdma_irq_clear, |
229 | .irq_on = ata_irq_on, | 228 | .irq_on = ata_irq_on, |
230 | .irq_ack = ata_irq_ack, | ||
231 | 229 | ||
232 | .port_start = ata_port_start, | 230 | .port_start = ata_sff_port_start, |
233 | }; | 231 | }; |
234 | 232 | ||
235 | /** | 233 | /** |
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 2d048ef25a5a..55576138faea 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
@@ -603,16 +603,17 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
603 | * Note: Original code is ata_std_softreset(). | 603 | * Note: Original code is ata_std_softreset(). |
604 | */ | 604 | */ |
605 | 605 | ||
606 | static int scc_std_softreset (struct ata_port *ap, unsigned int *classes, | 606 | static int scc_std_softreset(struct ata_link *link, unsigned int *classes, |
607 | unsigned long deadline) | 607 | unsigned long deadline) |
608 | { | 608 | { |
609 | struct ata_port *ap = link->ap; | ||
609 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 610 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
610 | unsigned int devmask = 0, err_mask; | 611 | unsigned int devmask = 0, err_mask; |
611 | u8 err; | 612 | u8 err; |
612 | 613 | ||
613 | DPRINTK("ENTER\n"); | 614 | DPRINTK("ENTER\n"); |
614 | 615 | ||
615 | if (ata_port_offline(ap)) { | 616 | if (ata_link_offline(link)) { |
616 | classes[0] = ATA_DEV_NONE; | 617 | classes[0] = ATA_DEV_NONE; |
617 | goto out; | 618 | goto out; |
618 | } | 619 | } |
@@ -636,9 +637,11 @@ static int scc_std_softreset (struct ata_port *ap, unsigned int *classes, | |||
636 | } | 637 | } |
637 | 638 | ||
638 | /* determine by signature whether we have ATA or ATAPI devices */ | 639 | /* determine by signature whether we have ATA or ATAPI devices */ |
639 | classes[0] = ata_dev_try_classify(ap, 0, &err); | 640 | classes[0] = ata_dev_try_classify(&ap->link.device[0], |
641 | devmask & (1 << 0), &err); | ||
640 | if (slave_possible && err != 0x81) | 642 | if (slave_possible && err != 0x81) |
641 | classes[1] = ata_dev_try_classify(ap, 1, &err); | 643 | classes[1] = ata_dev_try_classify(&ap->link.device[1], |
644 | devmask & (1 << 1), &err); | ||
642 | 645 | ||
643 | out: | 646 | out: |
644 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | 647 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); |
@@ -701,7 +704,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) | |||
701 | printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); | 704 | printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); |
702 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); | 705 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); |
703 | /* TBD: SW reset */ | 706 | /* TBD: SW reset */ |
704 | scc_std_softreset(ap, &classes, deadline); | 707 | scc_std_softreset(&ap->link, &classes, deadline); |
705 | continue; | 708 | continue; |
706 | } | 709 | } |
707 | 710 | ||
@@ -740,7 +743,7 @@ static u8 scc_bmdma_status (struct ata_port *ap) | |||
740 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 743 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
741 | u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); | 744 | u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); |
742 | u32 int_status = in_be32(mmio + SCC_DMA_INTST); | 745 | u32 int_status = in_be32(mmio + SCC_DMA_INTST); |
743 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | 746 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); |
744 | static int retry = 0; | 747 | static int retry = 0; |
745 | 748 | ||
746 | /* return if IOS_SS is cleared */ | 749 | /* return if IOS_SS is cleared */ |
@@ -785,7 +788,7 @@ static u8 scc_bmdma_status (struct ata_port *ap) | |||
785 | static void scc_data_xfer (struct ata_device *adev, unsigned char *buf, | 788 | static void scc_data_xfer (struct ata_device *adev, unsigned char *buf, |
786 | unsigned int buflen, int write_data) | 789 | unsigned int buflen, int write_data) |
787 | { | 790 | { |
788 | struct ata_port *ap = adev->ap; | 791 | struct ata_port *ap = adev->link->ap; |
789 | unsigned int words = buflen >> 1; | 792 | unsigned int words = buflen >> 1; |
790 | unsigned int i; | 793 | unsigned int i; |
791 | u16 *buf16 = (u16 *) buf; | 794 | u16 *buf16 = (u16 *) buf; |
@@ -839,38 +842,6 @@ static u8 scc_irq_on (struct ata_port *ap) | |||
839 | } | 842 | } |
840 | 843 | ||
841 | /** | 844 | /** |
842 | * scc_irq_ack - Acknowledge a device interrupt. | ||
843 | * @ap: Port on which interrupts are enabled. | ||
844 | * | ||
845 | * Note: Original code is ata_irq_ack(). | ||
846 | */ | ||
847 | |||
848 | static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq) | ||
849 | { | ||
850 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; | ||
851 | u8 host_stat, post_stat, status; | ||
852 | |||
853 | status = ata_busy_wait(ap, bits, 1000); | ||
854 | if (status & bits) | ||
855 | if (ata_msg_err(ap)) | ||
856 | printk(KERN_ERR "abnormal status 0x%X\n", status); | ||
857 | |||
858 | /* get controller status; clear intr, err bits */ | ||
859 | host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS); | ||
860 | out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS, | ||
861 | host_stat | ATA_DMA_INTR | ATA_DMA_ERR); | ||
862 | |||
863 | post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS); | ||
864 | |||
865 | if (ata_msg_intr(ap)) | ||
866 | printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", | ||
867 | __FUNCTION__, | ||
868 | host_stat, post_stat, status); | ||
869 | |||
870 | return status; | ||
871 | } | ||
872 | |||
873 | /** | ||
874 | * scc_bmdma_freeze - Freeze BMDMA controller port | 845 | * scc_bmdma_freeze - Freeze BMDMA controller port |
875 | * @ap: port to freeze | 846 | * @ap: port to freeze |
876 | * | 847 | * |
@@ -901,10 +872,10 @@ static void scc_bmdma_freeze (struct ata_port *ap) | |||
901 | * @deadline: deadline jiffies for the operation | 872 | * @deadline: deadline jiffies for the operation |
902 | */ | 873 | */ |
903 | 874 | ||
904 | static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline) | 875 | static int scc_pata_prereset(struct ata_link *link, unsigned long deadline) |
905 | { | 876 | { |
906 | ap->cbl = ATA_CBL_PATA80; | 877 | link->ap->cbl = ATA_CBL_PATA80; |
907 | return ata_std_prereset(ap, deadline); | 878 | return ata_std_prereset(link, deadline); |
908 | } | 879 | } |
909 | 880 | ||
910 | /** | 881 | /** |
@@ -915,8 +886,10 @@ static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline) | |||
915 | * Note: Original code is ata_std_postreset(). | 886 | * Note: Original code is ata_std_postreset(). |
916 | */ | 887 | */ |
917 | 888 | ||
918 | static void scc_std_postreset (struct ata_port *ap, unsigned int *classes) | 889 | static void scc_std_postreset(struct ata_link *link, unsigned int *classes) |
919 | { | 890 | { |
891 | struct ata_port *ap = link->ap; | ||
892 | |||
920 | DPRINTK("ENTER\n"); | 893 | DPRINTK("ENTER\n"); |
921 | 894 | ||
922 | /* is double-select really necessary? */ | 895 | /* is double-select really necessary? */ |
@@ -1020,7 +993,6 @@ static struct scsi_host_template scc_sht = { | |||
1020 | }; | 993 | }; |
1021 | 994 | ||
1022 | static const struct ata_port_operations scc_pata_ops = { | 995 | static const struct ata_port_operations scc_pata_ops = { |
1023 | .port_disable = ata_port_disable, | ||
1024 | .set_piomode = scc_set_piomode, | 996 | .set_piomode = scc_set_piomode, |
1025 | .set_dmamode = scc_set_dmamode, | 997 | .set_dmamode = scc_set_dmamode, |
1026 | .mode_filter = scc_mode_filter, | 998 | .mode_filter = scc_mode_filter, |
@@ -1047,7 +1019,6 @@ static const struct ata_port_operations scc_pata_ops = { | |||
1047 | 1019 | ||
1048 | .irq_clear = scc_bmdma_irq_clear, | 1020 | .irq_clear = scc_bmdma_irq_clear, |
1049 | .irq_on = scc_irq_on, | 1021 | .irq_on = scc_irq_on, |
1050 | .irq_ack = scc_irq_ack, | ||
1051 | 1022 | ||
1052 | .port_start = scc_port_start, | 1023 | .port_start = scc_port_start, |
1053 | .port_stop = scc_port_stop, | 1024 | .port_stop = scc_port_stop, |
@@ -1193,6 +1164,9 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1193 | return rc; | 1164 | return rc; |
1194 | host->iomap = pcim_iomap_table(pdev); | 1165 | host->iomap = pcim_iomap_table(pdev); |
1195 | 1166 | ||
1167 | ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl"); | ||
1168 | ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid"); | ||
1169 | |||
1196 | rc = scc_host_init(host); | 1170 | rc = scc_host_init(host); |
1197 | if (rc) | 1171 | if (rc) |
1198 | return rc; | 1172 | return rc; |
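The scc_std_softreset() conversion above also picks up the new ata_dev_try_classify() signature: it now takes the ata_device itself plus a flag saying whether that device answered the bus reset (taken from devmask), instead of a port and device number. A sketch of just the classification step as used there:

#include <linux/libata.h>

static void example_classify(struct ata_link *link, unsigned int devmask,
			     unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u8 err;

	classes[0] = ata_dev_try_classify(&ap->link.device[0],
					  devmask & (1 << 0), &err);
	if ((ap->flags & ATA_FLAG_SLAVE_POSS) && err != 0x81)
		classes[1] = ata_dev_try_classify(&ap->link.device[1],
						  devmask & (1 << 1), &err);
}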
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 0faf99c8f13e..df68806df4be 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c | |||
@@ -318,7 +318,6 @@ static struct scsi_host_template serverworks_sht = { | |||
318 | }; | 318 | }; |
319 | 319 | ||
320 | static struct ata_port_operations serverworks_osb4_port_ops = { | 320 | static struct ata_port_operations serverworks_osb4_port_ops = { |
321 | .port_disable = ata_port_disable, | ||
322 | .set_piomode = serverworks_set_piomode, | 321 | .set_piomode = serverworks_set_piomode, |
323 | .set_dmamode = serverworks_set_dmamode, | 322 | .set_dmamode = serverworks_set_dmamode, |
324 | .mode_filter = serverworks_osb4_filter, | 323 | .mode_filter = serverworks_osb4_filter, |
@@ -348,13 +347,11 @@ static struct ata_port_operations serverworks_osb4_port_ops = { | |||
348 | .irq_handler = ata_interrupt, | 347 | .irq_handler = ata_interrupt, |
349 | .irq_clear = ata_bmdma_irq_clear, | 348 | .irq_clear = ata_bmdma_irq_clear, |
350 | .irq_on = ata_irq_on, | 349 | .irq_on = ata_irq_on, |
351 | .irq_ack = ata_irq_ack, | ||
352 | 350 | ||
353 | .port_start = ata_port_start, | 351 | .port_start = ata_sff_port_start, |
354 | }; | 352 | }; |
355 | 353 | ||
356 | static struct ata_port_operations serverworks_csb_port_ops = { | 354 | static struct ata_port_operations serverworks_csb_port_ops = { |
357 | .port_disable = ata_port_disable, | ||
358 | .set_piomode = serverworks_set_piomode, | 355 | .set_piomode = serverworks_set_piomode, |
359 | .set_dmamode = serverworks_set_dmamode, | 356 | .set_dmamode = serverworks_set_dmamode, |
360 | .mode_filter = serverworks_csb_filter, | 357 | .mode_filter = serverworks_csb_filter, |
@@ -384,9 +381,8 @@ static struct ata_port_operations serverworks_csb_port_ops = { | |||
384 | .irq_handler = ata_interrupt, | 381 | .irq_handler = ata_interrupt, |
385 | .irq_clear = ata_bmdma_irq_clear, | 382 | .irq_clear = ata_bmdma_irq_clear, |
386 | .irq_on = ata_irq_on, | 383 | .irq_on = ata_irq_on, |
387 | .irq_ack = ata_irq_ack, | ||
388 | 384 | ||
389 | .port_start = ata_port_start, | 385 | .port_start = ata_sff_port_start, |
390 | }; | 386 | }; |
391 | 387 | ||
392 | static int serverworks_fixup_osb4(struct pci_dev *pdev) | 388 | static int serverworks_fixup_osb4(struct pci_dev *pdev) |
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 40395804a66f..2eb75cd74a96 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
@@ -95,15 +95,16 @@ static int sil680_cable_detect(struct ata_port *ap) { | |||
95 | 95 | ||
96 | /** | 96 | /** |
97 | * sil680_bus_reset - reset the SIL680 bus | 97 | * sil680_bus_reset - reset the SIL680 bus |
98 | * @ap: ATA port to reset | 98 | * @link: ATA link to reset |
99 | * @deadline: deadline jiffies for the operation | 99 | * @deadline: deadline jiffies for the operation |
100 | * | 100 | * |
101 | * Perform the SIL680 housekeeping when doing an ATA bus reset | 101 | * Perform the SIL680 housekeeping when doing an ATA bus reset |
102 | */ | 102 | */ |
103 | 103 | ||
104 | static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes, | 104 | static int sil680_bus_reset(struct ata_link *link, unsigned int *classes, |
105 | unsigned long deadline) | 105 | unsigned long deadline) |
106 | { | 106 | { |
107 | struct ata_port *ap = link->ap; | ||
107 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 108 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
108 | unsigned long addr = sil680_selreg(ap, 0); | 109 | unsigned long addr = sil680_selreg(ap, 0); |
109 | u8 reset; | 110 | u8 reset; |
@@ -112,7 +113,7 @@ static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes, | |||
112 | pci_write_config_byte(pdev, addr, reset | 0x03); | 113 | pci_write_config_byte(pdev, addr, reset | 0x03); |
113 | udelay(25); | 114 | udelay(25); |
114 | pci_write_config_byte(pdev, addr, reset); | 115 | pci_write_config_byte(pdev, addr, reset); |
115 | return ata_std_softreset(ap, classes, deadline); | 116 | return ata_std_softreset(link, classes, deadline); |
116 | } | 117 | } |
117 | 118 | ||
118 | static void sil680_error_handler(struct ata_port *ap) | 119 | static void sil680_error_handler(struct ata_port *ap) |
@@ -237,7 +238,6 @@ static struct scsi_host_template sil680_sht = { | |||
237 | }; | 238 | }; |
238 | 239 | ||
239 | static struct ata_port_operations sil680_port_ops = { | 240 | static struct ata_port_operations sil680_port_ops = { |
240 | .port_disable = ata_port_disable, | ||
241 | .set_piomode = sil680_set_piomode, | 241 | .set_piomode = sil680_set_piomode, |
242 | .set_dmamode = sil680_set_dmamode, | 242 | .set_dmamode = sil680_set_dmamode, |
243 | .mode_filter = ata_pci_default_filter, | 243 | .mode_filter = ata_pci_default_filter, |
@@ -266,9 +266,8 @@ static struct ata_port_operations sil680_port_ops = { | |||
266 | .irq_handler = ata_interrupt, | 266 | .irq_handler = ata_interrupt, |
267 | .irq_clear = ata_bmdma_irq_clear, | 267 | .irq_clear = ata_bmdma_irq_clear, |
268 | .irq_on = ata_irq_on, | 268 | .irq_on = ata_irq_on, |
269 | .irq_ack = ata_irq_ack, | ||
270 | 269 | ||
271 | .port_start = ata_port_start, | 270 | .port_start = ata_sff_port_start, |
272 | }; | 271 | }; |
273 | 272 | ||
274 | /** | 273 | /** |
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index cce2834b2b60..3b5be77e861c 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c | |||
@@ -84,7 +84,7 @@ static int sis_short_ata40(struct pci_dev *dev) | |||
84 | 84 | ||
85 | static int sis_old_port_base(struct ata_device *adev) | 85 | static int sis_old_port_base(struct ata_device *adev) |
86 | { | 86 | { |
87 | return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno); | 87 | return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno); |
88 | } | 88 | } |
89 | 89 | ||
90 | /** | 90 | /** |
@@ -133,19 +133,20 @@ static int sis_66_cable_detect(struct ata_port *ap) | |||
133 | 133 | ||
134 | /** | 134 | /** |
135 | * sis_pre_reset - probe begin | 135 | * sis_pre_reset - probe begin |
136 | * @ap: ATA port | 136 | * @link: ATA link |
137 | * @deadline: deadline jiffies for the operation | 137 | * @deadline: deadline jiffies for the operation |
138 | * | 138 | * |
139 | * Set up cable type and use generic probe init | 139 | * Set up cable type and use generic probe init |
140 | */ | 140 | */ |
141 | 141 | ||
142 | static int sis_pre_reset(struct ata_port *ap, unsigned long deadline) | 142 | static int sis_pre_reset(struct ata_link *link, unsigned long deadline) |
143 | { | 143 | { |
144 | static const struct pci_bits sis_enable_bits[] = { | 144 | static const struct pci_bits sis_enable_bits[] = { |
145 | { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ | 145 | { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ |
146 | { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ | 146 | { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ |
147 | }; | 147 | }; |
148 | 148 | ||
149 | struct ata_port *ap = link->ap; | ||
149 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 150 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
150 | 151 | ||
151 | if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) | 152 | if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) |
@@ -154,7 +155,7 @@ static int sis_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
154 | /* Clear the FIFO settings. We can't enable the FIFO until | 155 | /* Clear the FIFO settings. We can't enable the FIFO until |
155 | we know we are poking at a disk */ | 156 | we know we are poking at a disk */ |
156 | pci_write_config_byte(pdev, 0x4B, 0); | 157 | pci_write_config_byte(pdev, 0x4B, 0); |
157 | return ata_std_prereset(ap, deadline); | 158 | return ata_std_prereset(link, deadline); |
158 | } | 159 | } |
159 | 160 | ||
160 | 161 | ||
@@ -530,7 +531,6 @@ static struct scsi_host_template sis_sht = { | |||
530 | }; | 531 | }; |
531 | 532 | ||
532 | static const struct ata_port_operations sis_133_ops = { | 533 | static const struct ata_port_operations sis_133_ops = { |
533 | .port_disable = ata_port_disable, | ||
534 | .set_piomode = sis_133_set_piomode, | 534 | .set_piomode = sis_133_set_piomode, |
535 | .set_dmamode = sis_133_set_dmamode, | 535 | .set_dmamode = sis_133_set_dmamode, |
536 | .mode_filter = ata_pci_default_filter, | 536 | .mode_filter = ata_pci_default_filter, |
@@ -558,13 +558,11 @@ static const struct ata_port_operations sis_133_ops = { | |||
558 | .irq_handler = ata_interrupt, | 558 | .irq_handler = ata_interrupt, |
559 | .irq_clear = ata_bmdma_irq_clear, | 559 | .irq_clear = ata_bmdma_irq_clear, |
560 | .irq_on = ata_irq_on, | 560 | .irq_on = ata_irq_on, |
561 | .irq_ack = ata_irq_ack, | ||
562 | 561 | ||
563 | .port_start = ata_port_start, | 562 | .port_start = ata_sff_port_start, |
564 | }; | 563 | }; |
565 | 564 | ||
566 | static const struct ata_port_operations sis_133_for_sata_ops = { | 565 | static const struct ata_port_operations sis_133_for_sata_ops = { |
567 | .port_disable = ata_port_disable, | ||
568 | .set_piomode = sis_133_set_piomode, | 566 | .set_piomode = sis_133_set_piomode, |
569 | .set_dmamode = sis_133_set_dmamode, | 567 | .set_dmamode = sis_133_set_dmamode, |
570 | .mode_filter = ata_pci_default_filter, | 568 | .mode_filter = ata_pci_default_filter, |
@@ -592,13 +590,11 @@ static const struct ata_port_operations sis_133_for_sata_ops = { | |||
592 | .irq_handler = ata_interrupt, | 590 | .irq_handler = ata_interrupt, |
593 | .irq_clear = ata_bmdma_irq_clear, | 591 | .irq_clear = ata_bmdma_irq_clear, |
594 | .irq_on = ata_irq_on, | 592 | .irq_on = ata_irq_on, |
595 | .irq_ack = ata_irq_ack, | ||
596 | 593 | ||
597 | .port_start = ata_port_start, | 594 | .port_start = ata_sff_port_start, |
598 | }; | 595 | }; |
599 | 596 | ||
600 | static const struct ata_port_operations sis_133_early_ops = { | 597 | static const struct ata_port_operations sis_133_early_ops = { |
601 | .port_disable = ata_port_disable, | ||
602 | .set_piomode = sis_100_set_piomode, | 598 | .set_piomode = sis_100_set_piomode, |
603 | .set_dmamode = sis_133_early_set_dmamode, | 599 | .set_dmamode = sis_133_early_set_dmamode, |
604 | .mode_filter = ata_pci_default_filter, | 600 | .mode_filter = ata_pci_default_filter, |
@@ -626,13 +622,11 @@ static const struct ata_port_operations sis_133_early_ops = { | |||
626 | .irq_handler = ata_interrupt, | 622 | .irq_handler = ata_interrupt, |
627 | .irq_clear = ata_bmdma_irq_clear, | 623 | .irq_clear = ata_bmdma_irq_clear, |
628 | .irq_on = ata_irq_on, | 624 | .irq_on = ata_irq_on, |
629 | .irq_ack = ata_irq_ack, | ||
630 | 625 | ||
631 | .port_start = ata_port_start, | 626 | .port_start = ata_sff_port_start, |
632 | }; | 627 | }; |
633 | 628 | ||
634 | static const struct ata_port_operations sis_100_ops = { | 629 | static const struct ata_port_operations sis_100_ops = { |
635 | .port_disable = ata_port_disable, | ||
636 | .set_piomode = sis_100_set_piomode, | 630 | .set_piomode = sis_100_set_piomode, |
637 | .set_dmamode = sis_100_set_dmamode, | 631 | .set_dmamode = sis_100_set_dmamode, |
638 | .mode_filter = ata_pci_default_filter, | 632 | .mode_filter = ata_pci_default_filter, |
@@ -660,13 +654,11 @@ static const struct ata_port_operations sis_100_ops = { | |||
660 | .irq_handler = ata_interrupt, | 654 | .irq_handler = ata_interrupt, |
661 | .irq_clear = ata_bmdma_irq_clear, | 655 | .irq_clear = ata_bmdma_irq_clear, |
662 | .irq_on = ata_irq_on, | 656 | .irq_on = ata_irq_on, |
663 | .irq_ack = ata_irq_ack, | ||
664 | 657 | ||
665 | .port_start = ata_port_start, | 658 | .port_start = ata_sff_port_start, |
666 | }; | 659 | }; |
667 | 660 | ||
668 | static const struct ata_port_operations sis_66_ops = { | 661 | static const struct ata_port_operations sis_66_ops = { |
669 | .port_disable = ata_port_disable, | ||
670 | .set_piomode = sis_old_set_piomode, | 662 | .set_piomode = sis_old_set_piomode, |
671 | .set_dmamode = sis_66_set_dmamode, | 663 | .set_dmamode = sis_66_set_dmamode, |
672 | .mode_filter = ata_pci_default_filter, | 664 | .mode_filter = ata_pci_default_filter, |
@@ -694,13 +686,11 @@ static const struct ata_port_operations sis_66_ops = { | |||
694 | .irq_handler = ata_interrupt, | 686 | .irq_handler = ata_interrupt, |
695 | .irq_clear = ata_bmdma_irq_clear, | 687 | .irq_clear = ata_bmdma_irq_clear, |
696 | .irq_on = ata_irq_on, | 688 | .irq_on = ata_irq_on, |
697 | .irq_ack = ata_irq_ack, | ||
698 | 689 | ||
699 | .port_start = ata_port_start, | 690 | .port_start = ata_sff_port_start, |
700 | }; | 691 | }; |
701 | 692 | ||
702 | static const struct ata_port_operations sis_old_ops = { | 693 | static const struct ata_port_operations sis_old_ops = { |
703 | .port_disable = ata_port_disable, | ||
704 | .set_piomode = sis_old_set_piomode, | 694 | .set_piomode = sis_old_set_piomode, |
705 | .set_dmamode = sis_old_set_dmamode, | 695 | .set_dmamode = sis_old_set_dmamode, |
706 | .mode_filter = ata_pci_default_filter, | 696 | .mode_filter = ata_pci_default_filter, |
@@ -728,9 +718,8 @@ static const struct ata_port_operations sis_old_ops = { | |||
728 | .irq_handler = ata_interrupt, | 718 | .irq_handler = ata_interrupt, |
729 | .irq_clear = ata_bmdma_irq_clear, | 719 | .irq_clear = ata_bmdma_irq_clear, |
730 | .irq_on = ata_irq_on, | 720 | .irq_on = ata_irq_on, |
731 | .irq_ack = ata_irq_ack, | ||
732 | 721 | ||
733 | .port_start = ata_port_start, | 722 | .port_start = ata_sff_port_start, |
734 | }; | 723 | }; |
735 | 724 | ||
736 | static const struct ata_port_info sis_info = { | 725 | static const struct ata_port_info sis_info = { |
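The sis_old_port_base() change above reflects the new object chain: an ata_device now hangs off an ata_link rather than directly off its port, so the port is reached as adev->link->ap. A short illustrative sketch (register layout mirrors the SiS code but is shown only as an example):

#include <linux/libata.h>

static int example_old_port_base(struct ata_device *adev)
{
	struct ata_port *ap = adev->link->ap;	/* was adev->ap */

	return 0x40 + (4 * ap->port_no) + (2 * adev->devno);
}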
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index c0f43bb25956..1388cef52c07 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c | |||
@@ -43,23 +43,24 @@ enum { | |||
43 | 43 | ||
44 | /** | 44 | /** |
45 | * sl82c105_pre_reset - probe begin | 45 | * sl82c105_pre_reset - probe begin |
46 | * @ap: ATA port | 46 | * @link: ATA link |
47 | * @deadline: deadline jiffies for the operation | 47 | * @deadline: deadline jiffies for the operation |
48 | * | 48 | * |
49 | * Set up cable type and use generic probe init | 49 | * Set up cable type and use generic probe init |
50 | */ | 50 | */ |
51 | 51 | ||
52 | static int sl82c105_pre_reset(struct ata_port *ap, unsigned long deadline) | 52 | static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline) |
53 | { | 53 | { |
54 | static const struct pci_bits sl82c105_enable_bits[] = { | 54 | static const struct pci_bits sl82c105_enable_bits[] = { |
55 | { 0x40, 1, 0x01, 0x01 }, | 55 | { 0x40, 1, 0x01, 0x01 }, |
56 | { 0x40, 1, 0x10, 0x10 } | 56 | { 0x40, 1, 0x10, 0x10 } |
57 | }; | 57 | }; |
58 | struct ata_port *ap = link->ap; | ||
58 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 59 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
59 | 60 | ||
60 | if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) | 61 | if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) |
61 | return -ENOENT; | 62 | return -ENOENT; |
62 | return ata_std_prereset(ap, deadline); | 63 | return ata_std_prereset(link, deadline); |
63 | } | 64 | } |
64 | 65 | ||
65 | 66 | ||
@@ -224,7 +225,6 @@ static struct scsi_host_template sl82c105_sht = { | |||
224 | }; | 225 | }; |
225 | 226 | ||
226 | static struct ata_port_operations sl82c105_port_ops = { | 227 | static struct ata_port_operations sl82c105_port_ops = { |
227 | .port_disable = ata_port_disable, | ||
228 | .set_piomode = sl82c105_set_piomode, | 228 | .set_piomode = sl82c105_set_piomode, |
229 | .mode_filter = ata_pci_default_filter, | 229 | .mode_filter = ata_pci_default_filter, |
230 | 230 | ||
@@ -253,9 +253,8 @@ static struct ata_port_operations sl82c105_port_ops = { | |||
253 | .irq_handler = ata_interrupt, | 253 | .irq_handler = ata_interrupt, |
254 | .irq_clear = ata_bmdma_irq_clear, | 254 | .irq_clear = ata_bmdma_irq_clear, |
255 | .irq_on = ata_irq_on, | 255 | .irq_on = ata_irq_on, |
256 | .irq_ack = ata_irq_ack, | ||
257 | 256 | ||
258 | .port_start = ata_port_start, | 257 | .port_start = ata_sff_port_start, |
259 | }; | 258 | }; |
260 | 259 | ||
261 | /** | 260 | /** |
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index af21f443db6e..403eafcffe12 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c | |||
@@ -47,25 +47,26 @@ | |||
47 | 47 | ||
48 | /** | 48 | /** |
49 | * triflex_prereset - probe begin | 49 | * triflex_prereset - probe begin |
50 | * @ap: ATA port | 50 | * @link: ATA link |
51 | * @deadline: deadline jiffies for the operation | 51 | * @deadline: deadline jiffies for the operation |
52 | * | 52 | * |
53 | * Set up cable type and use generic probe init | 53 | * Set up cable type and use generic probe init |
54 | */ | 54 | */ |
55 | 55 | ||
56 | static int triflex_prereset(struct ata_port *ap, unsigned long deadline) | 56 | static int triflex_prereset(struct ata_link *link, unsigned long deadline) |
57 | { | 57 | { |
58 | static const struct pci_bits triflex_enable_bits[] = { | 58 | static const struct pci_bits triflex_enable_bits[] = { |
59 | { 0x80, 1, 0x01, 0x01 }, | 59 | { 0x80, 1, 0x01, 0x01 }, |
60 | { 0x80, 1, 0x02, 0x02 } | 60 | { 0x80, 1, 0x02, 0x02 } |
61 | }; | 61 | }; |
62 | 62 | ||
63 | struct ata_port *ap = link->ap; | ||
63 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 64 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
64 | 65 | ||
65 | if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) | 66 | if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) |
66 | return -ENOENT; | 67 | return -ENOENT; |
67 | 68 | ||
68 | return ata_std_prereset(ap, deadline); | 69 | return ata_std_prereset(link, deadline); |
69 | } | 70 | } |
70 | 71 | ||
71 | 72 | ||
@@ -197,7 +198,6 @@ static struct scsi_host_template triflex_sht = { | |||
197 | }; | 198 | }; |
198 | 199 | ||
199 | static struct ata_port_operations triflex_port_ops = { | 200 | static struct ata_port_operations triflex_port_ops = { |
200 | .port_disable = ata_port_disable, | ||
201 | .set_piomode = triflex_set_piomode, | 201 | .set_piomode = triflex_set_piomode, |
202 | .mode_filter = ata_pci_default_filter, | 202 | .mode_filter = ata_pci_default_filter, |
203 | 203 | ||
@@ -226,9 +226,8 @@ static struct ata_port_operations triflex_port_ops = { | |||
226 | .irq_handler = ata_interrupt, | 226 | .irq_handler = ata_interrupt, |
227 | .irq_clear = ata_bmdma_irq_clear, | 227 | .irq_clear = ata_bmdma_irq_clear, |
228 | .irq_on = ata_irq_on, | 228 | .irq_on = ata_irq_on, |
229 | .irq_ack = ata_irq_ack, | ||
230 | 229 | ||
231 | .port_start = ata_port_start, | 230 | .port_start = ata_sff_port_start, |
232 | }; | 231 | }; |
233 | 232 | ||
234 | static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 233 | static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index f143db4559e0..5d41b6612d7f 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -184,11 +184,15 @@ static int via_cable_detect(struct ata_port *ap) { | |||
184 | two drives */ | 184 | two drives */ |
185 | if (ata66 & (0x10100000 >> (16 * ap->port_no))) | 185 | if (ata66 & (0x10100000 >> (16 * ap->port_no))) |
186 | return ATA_CBL_PATA80; | 186 | return ATA_CBL_PATA80; |
187 | /* Check with ACPI so we can spot BIOS reported SATA bridges */ | ||
188 | if (ata_acpi_cbl_80wire(ap)) | ||
189 | return ATA_CBL_PATA80; | ||
187 | return ATA_CBL_PATA40; | 190 | return ATA_CBL_PATA40; |
188 | } | 191 | } |
189 | 192 | ||
190 | static int via_pre_reset(struct ata_port *ap, unsigned long deadline) | 193 | static int via_pre_reset(struct ata_link *link, unsigned long deadline) |
191 | { | 194 | { |
195 | struct ata_port *ap = link->ap; | ||
192 | const struct via_isa_bridge *config = ap->host->private_data; | 196 | const struct via_isa_bridge *config = ap->host->private_data; |
193 | 197 | ||
194 | if (!(config->flags & VIA_NO_ENABLES)) { | 198 | if (!(config->flags & VIA_NO_ENABLES)) { |
@@ -201,7 +205,7 @@ static int via_pre_reset(struct ata_port *ap, unsigned long deadline) | |||
201 | return -ENOENT; | 205 | return -ENOENT; |
202 | } | 206 | } |
203 | 207 | ||
204 | return ata_std_prereset(ap, deadline); | 208 | return ata_std_prereset(link, deadline); |
205 | } | 209 | } |
206 | 210 | ||
207 | 211 | ||
@@ -344,7 +348,6 @@ static struct scsi_host_template via_sht = { | |||
344 | }; | 348 | }; |
345 | 349 | ||
346 | static struct ata_port_operations via_port_ops = { | 350 | static struct ata_port_operations via_port_ops = { |
347 | .port_disable = ata_port_disable, | ||
348 | .set_piomode = via_set_piomode, | 351 | .set_piomode = via_set_piomode, |
349 | .set_dmamode = via_set_dmamode, | 352 | .set_dmamode = via_set_dmamode, |
350 | .mode_filter = ata_pci_default_filter, | 353 | .mode_filter = ata_pci_default_filter, |
@@ -374,13 +377,11 @@ static struct ata_port_operations via_port_ops = { | |||
374 | .irq_handler = ata_interrupt, | 377 | .irq_handler = ata_interrupt, |
375 | .irq_clear = ata_bmdma_irq_clear, | 378 | .irq_clear = ata_bmdma_irq_clear, |
376 | .irq_on = ata_irq_on, | 379 | .irq_on = ata_irq_on, |
377 | .irq_ack = ata_irq_ack, | ||
378 | 380 | ||
379 | .port_start = ata_port_start, | 381 | .port_start = ata_sff_port_start, |
380 | }; | 382 | }; |
381 | 383 | ||
382 | static struct ata_port_operations via_port_ops_noirq = { | 384 | static struct ata_port_operations via_port_ops_noirq = { |
383 | .port_disable = ata_port_disable, | ||
384 | .set_piomode = via_set_piomode, | 385 | .set_piomode = via_set_piomode, |
385 | .set_dmamode = via_set_dmamode, | 386 | .set_dmamode = via_set_dmamode, |
386 | .mode_filter = ata_pci_default_filter, | 387 | .mode_filter = ata_pci_default_filter, |
@@ -410,9 +411,8 @@ static struct ata_port_operations via_port_ops_noirq = { | |||
410 | .irq_handler = ata_interrupt, | 411 | .irq_handler = ata_interrupt, |
411 | .irq_clear = ata_bmdma_irq_clear, | 412 | .irq_clear = ata_bmdma_irq_clear, |
412 | .irq_on = ata_irq_on, | 413 | .irq_on = ata_irq_on, |
413 | .irq_ack = ata_irq_ack, | ||
414 | 414 | ||
415 | .port_start = ata_port_start, | 415 | .port_start = ata_sff_port_start, |
416 | }; | 416 | }; |
417 | 417 | ||
418 | /** | 418 | /** |
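The pata_via cable-detect hunk adds an ACPI fallback: trust the chipset's own 80-wire indication first, then let ata_acpi_cbl_80wire() catch BIOS-reported SATA bridges, and only then assume a 40-wire cable. A hedged sketch of that ordering (the ata66 register value is passed in here purely as a stand-in for the chipset read):

#include <linux/libata.h>

static int example_cable_detect(struct ata_port *ap, u32 ata66)
{
	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
		return ATA_CBL_PATA80;	/* chipset reports 80-wire */
	if (ata_acpi_cbl_80wire(ap))
		return ATA_CBL_PATA80;	/* ACPI timing data implies 80-wire */
	return ATA_CBL_PATA40;
}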
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c index 83abfeca4057..549cbbe9fd07 100644 --- a/drivers/ata/pata_winbond.c +++ b/drivers/ata/pata_winbond.c | |||
@@ -94,7 +94,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
94 | 94 | ||
95 | static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) | 95 | static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) |
96 | { | 96 | { |
97 | struct ata_port *ap = adev->ap; | 97 | struct ata_port *ap = adev->link->ap; |
98 | int slop = buflen & 3; | 98 | int slop = buflen & 3; |
99 | 99 | ||
100 | if (ata_id_has_dword_io(adev->id)) { | 100 | if (ata_id_has_dword_io(adev->id)) { |
@@ -138,7 +138,6 @@ static struct scsi_host_template winbond_sht = { | |||
138 | }; | 138 | }; |
139 | 139 | ||
140 | static struct ata_port_operations winbond_port_ops = { | 140 | static struct ata_port_operations winbond_port_ops = { |
141 | .port_disable = ata_port_disable, | ||
142 | .set_piomode = winbond_set_piomode, | 141 | .set_piomode = winbond_set_piomode, |
143 | 142 | ||
144 | .tf_load = ata_tf_load, | 143 | .tf_load = ata_tf_load, |
@@ -160,9 +159,8 @@ static struct ata_port_operations winbond_port_ops = { | |||
160 | 159 | ||
161 | .irq_clear = ata_bmdma_irq_clear, | 160 | .irq_clear = ata_bmdma_irq_clear, |
162 | .irq_on = ata_irq_on, | 161 | .irq_on = ata_irq_on, |
163 | .irq_ack = ata_irq_ack, | ||
164 | 162 | ||
165 | .port_start = ata_port_start, | 163 | .port_start = ata_sff_port_start, |
166 | }; | 164 | }; |
167 | 165 | ||
168 | /** | 166 | /** |
@@ -199,6 +197,7 @@ static __init int winbond_init_one(unsigned long port) | |||
199 | 197 | ||
200 | for (i = 0; i < 2 ; i ++) { | 198 | for (i = 0; i < 2 ; i ++) { |
201 | unsigned long cmd_port = 0x1F0 - (0x80 * i); | 199 | unsigned long cmd_port = 0x1F0 - (0x80 * i); |
200 | unsigned long ctl_port = cmd_port + 0x206; | ||
202 | struct ata_host *host; | 201 | struct ata_host *host; |
203 | struct ata_port *ap; | 202 | struct ata_port *ap; |
204 | void __iomem *cmd_addr, *ctl_addr; | 203 | void __iomem *cmd_addr, *ctl_addr; |
@@ -214,14 +213,16 @@ static __init int winbond_init_one(unsigned long port) | |||
214 | host = ata_host_alloc(&pdev->dev, 1); | 213 | host = ata_host_alloc(&pdev->dev, 1); |
215 | if (!host) | 214 | if (!host) |
216 | goto err_unregister; | 215 | goto err_unregister; |
216 | ap = host->ports[0]; | ||
217 | 217 | ||
218 | rc = -ENOMEM; | 218 | rc = -ENOMEM; |
219 | cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8); | 219 | cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8); |
220 | ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1); | 220 | ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1); |
221 | if (!cmd_addr || !ctl_addr) | 221 | if (!cmd_addr || !ctl_addr) |
222 | goto err_unregister; | 222 | goto err_unregister; |
223 | 223 | ||
224 | ap = host->ports[0]; | 224 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port); |
225 | |||
225 | ap->ops = &winbond_port_ops; | 226 | ap->ops = &winbond_port_ops; |
226 | ap->pio_mask = 0x1F; | 227 | ap->pio_mask = 0x1F; |
227 | ap->flags |= ATA_FLAG_SLAVE_POSS; | 228 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
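The winbond_init_one() hunks factor out the control-block port (cmd + 0x206), map both regions with devm_ioport_map(), and record the raw port numbers with ata_port_desc() for the boot log. A minimal sketch of that legacy-channel setup, with error handling trimmed and the function name hypothetical:

#include <linux/device.h>
#include <linux/libata.h>

static int example_map_legacy_channel(struct ata_port *ap, struct device *dev,
				      unsigned long cmd_port)
{
	unsigned long ctl_port = cmd_port + 0x206;
	void __iomem *cmd_addr, *ctl_addr;

	cmd_addr = devm_ioport_map(dev, cmd_port, 8);
	ctl_addr = devm_ioport_map(dev, ctl_port, 1);
	if (!cmd_addr || !ctl_addr)
		return -ENOMEM;

	ap->ioaddr.cmd_addr = cmd_addr;
	ap->ioaddr.altstatus_addr = ctl_addr;
	ap->ioaddr.ctl_addr = ctl_addr;
	ata_std_ports(&ap->ioaddr);

	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
	return 0;
}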
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 5c79271401af..8d1b03d5bcb1 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c | |||
@@ -92,6 +92,8 @@ enum { | |||
92 | 92 | ||
93 | /* CPB bits */ | 93 | /* CPB bits */ |
94 | cDONE = (1 << 0), | 94 | cDONE = (1 << 0), |
95 | cATERR = (1 << 3), | ||
96 | |||
95 | cVLD = (1 << 0), | 97 | cVLD = (1 << 0), |
96 | cDAT = (1 << 2), | 98 | cDAT = (1 << 2), |
97 | cIEN = (1 << 3), | 99 | cIEN = (1 << 3), |
@@ -131,14 +133,15 @@ static int adma_ata_init_one (struct pci_dev *pdev, | |||
131 | static int adma_port_start(struct ata_port *ap); | 133 | static int adma_port_start(struct ata_port *ap); |
132 | static void adma_host_stop(struct ata_host *host); | 134 | static void adma_host_stop(struct ata_host *host); |
133 | static void adma_port_stop(struct ata_port *ap); | 135 | static void adma_port_stop(struct ata_port *ap); |
134 | static void adma_phy_reset(struct ata_port *ap); | ||
135 | static void adma_qc_prep(struct ata_queued_cmd *qc); | 136 | static void adma_qc_prep(struct ata_queued_cmd *qc); |
136 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); | 137 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); |
137 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); | 138 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); |
138 | static void adma_bmdma_stop(struct ata_queued_cmd *qc); | 139 | static void adma_bmdma_stop(struct ata_queued_cmd *qc); |
139 | static u8 adma_bmdma_status(struct ata_port *ap); | 140 | static u8 adma_bmdma_status(struct ata_port *ap); |
140 | static void adma_irq_clear(struct ata_port *ap); | 141 | static void adma_irq_clear(struct ata_port *ap); |
141 | static void adma_eng_timeout(struct ata_port *ap); | 142 | static void adma_freeze(struct ata_port *ap); |
143 | static void adma_thaw(struct ata_port *ap); | ||
144 | static void adma_error_handler(struct ata_port *ap); | ||
142 | 145 | ||
143 | static struct scsi_host_template adma_ata_sht = { | 146 | static struct scsi_host_template adma_ata_sht = { |
144 | .module = THIS_MODULE, | 147 | .module = THIS_MODULE, |
@@ -159,21 +162,20 @@ static struct scsi_host_template adma_ata_sht = { | |||
159 | }; | 162 | }; |
160 | 163 | ||
161 | static const struct ata_port_operations adma_ata_ops = { | 164 | static const struct ata_port_operations adma_ata_ops = { |
162 | .port_disable = ata_port_disable, | ||
163 | .tf_load = ata_tf_load, | 165 | .tf_load = ata_tf_load, |
164 | .tf_read = ata_tf_read, | 166 | .tf_read = ata_tf_read, |
165 | .exec_command = ata_exec_command, | 167 | .exec_command = ata_exec_command, |
166 | .check_status = ata_check_status, | 168 | .check_status = ata_check_status, |
167 | .dev_select = ata_std_dev_select, | 169 | .dev_select = ata_std_dev_select, |
168 | .phy_reset = adma_phy_reset, | ||
169 | .check_atapi_dma = adma_check_atapi_dma, | 170 | .check_atapi_dma = adma_check_atapi_dma, |
170 | .data_xfer = ata_data_xfer, | 171 | .data_xfer = ata_data_xfer, |
171 | .qc_prep = adma_qc_prep, | 172 | .qc_prep = adma_qc_prep, |
172 | .qc_issue = adma_qc_issue, | 173 | .qc_issue = adma_qc_issue, |
173 | .eng_timeout = adma_eng_timeout, | 174 | .freeze = adma_freeze, |
175 | .thaw = adma_thaw, | ||
176 | .error_handler = adma_error_handler, | ||
174 | .irq_clear = adma_irq_clear, | 177 | .irq_clear = adma_irq_clear, |
175 | .irq_on = ata_irq_on, | 178 | .irq_on = ata_irq_on, |
176 | .irq_ack = ata_irq_ack, | ||
177 | .port_start = adma_port_start, | 179 | .port_start = adma_port_start, |
178 | .port_stop = adma_port_stop, | 180 | .port_stop = adma_port_stop, |
179 | .host_stop = adma_host_stop, | 181 | .host_stop = adma_host_stop, |
@@ -184,7 +186,7 @@ static const struct ata_port_operations adma_ata_ops = { | |||
184 | static struct ata_port_info adma_port_info[] = { | 186 | static struct ata_port_info adma_port_info[] = { |
185 | /* board_1841_idx */ | 187 | /* board_1841_idx */ |
186 | { | 188 | { |
187 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | | 189 | .flags = ATA_FLAG_SLAVE_POSS | |
188 | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | | 190 | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | |
189 | ATA_FLAG_PIO_POLLING, | 191 | ATA_FLAG_PIO_POLLING, |
190 | .pio_mask = 0x10, /* pio4 */ | 192 | .pio_mask = 0x10, /* pio4 */ |
@@ -273,24 +275,42 @@ static inline void adma_enter_reg_mode(struct ata_port *ap) | |||
273 | readb(chan + ADMA_STATUS); /* flush */ | 275 | readb(chan + ADMA_STATUS); /* flush */ |
274 | } | 276 | } |
275 | 277 | ||
276 | static void adma_phy_reset(struct ata_port *ap) | 278 | static void adma_freeze(struct ata_port *ap) |
277 | { | 279 | { |
278 | struct adma_port_priv *pp = ap->private_data; | 280 | void __iomem *chan = ADMA_PORT_REGS(ap); |
281 | |||
282 | /* mask/clear ATA interrupts */ | ||
283 | writeb(ATA_NIEN, ap->ioaddr.ctl_addr); | ||
284 | ata_check_status(ap); | ||
279 | 285 | ||
280 | pp->state = adma_state_idle; | 286 | /* reset ADMA to idle state */ |
287 | writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); | ||
288 | udelay(2); | ||
289 | writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL); | ||
290 | udelay(2); | ||
291 | } | ||
292 | |||
293 | static void adma_thaw(struct ata_port *ap) | ||
294 | { | ||
281 | adma_reinit_engine(ap); | 295 | adma_reinit_engine(ap); |
282 | ata_port_probe(ap); | ||
283 | ata_bus_reset(ap); | ||
284 | } | 296 | } |
285 | 297 | ||
286 | static void adma_eng_timeout(struct ata_port *ap) | 298 | static int adma_prereset(struct ata_link *link, unsigned long deadline) |
287 | { | 299 | { |
300 | struct ata_port *ap = link->ap; | ||
288 | struct adma_port_priv *pp = ap->private_data; | 301 | struct adma_port_priv *pp = ap->private_data; |
289 | 302 | ||
290 | if (pp->state != adma_state_idle) /* healthy paranoia */ | 303 | if (pp->state != adma_state_idle) /* healthy paranoia */ |
291 | pp->state = adma_state_mmio; | 304 | pp->state = adma_state_mmio; |
292 | adma_reinit_engine(ap); | 305 | adma_reinit_engine(ap); |
293 | ata_eng_timeout(ap); | 306 | |
307 | return ata_std_prereset(link, deadline); | ||
308 | } | ||
309 | |||
310 | static void adma_error_handler(struct ata_port *ap) | ||
311 | { | ||
312 | ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL, | ||
313 | ata_std_postreset); | ||
294 | } | 314 | } |
295 | 315 | ||
296 | static int adma_fill_sg(struct ata_queued_cmd *qc) | 316 | static int adma_fill_sg(struct ata_queued_cmd *qc) |
@@ -464,14 +484,33 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host) | |||
464 | pp = ap->private_data; | 484 | pp = ap->private_data; |
465 | if (!pp || pp->state != adma_state_pkt) | 485 | if (!pp || pp->state != adma_state_pkt) |
466 | continue; | 486 | continue; |
467 | qc = ata_qc_from_tag(ap, ap->active_tag); | 487 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
468 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 488 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
469 | if ((status & (aPERR | aPSD | aUIRQ))) | 489 | if (status & aPERR) |
490 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
491 | else if ((status & (aPSD | aUIRQ))) | ||
470 | qc->err_mask |= AC_ERR_OTHER; | 492 | qc->err_mask |= AC_ERR_OTHER; |
493 | |||
494 | if (pp->pkt[0] & cATERR) | ||
495 | qc->err_mask |= AC_ERR_DEV; | ||
471 | else if (pp->pkt[0] != cDONE) | 496 | else if (pp->pkt[0] != cDONE) |
472 | qc->err_mask |= AC_ERR_OTHER; | 497 | qc->err_mask |= AC_ERR_OTHER; |
473 | 498 | ||
474 | ata_qc_complete(qc); | 499 | if (!qc->err_mask) |
500 | ata_qc_complete(qc); | ||
501 | else { | ||
502 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
503 | ata_ehi_clear_desc(ehi); | ||
504 | ata_ehi_push_desc(ehi, | ||
505 | "ADMA-status 0x%02X", status); | ||
506 | ata_ehi_push_desc(ehi, | ||
507 | "pkt[0] 0x%02X", pp->pkt[0]); | ||
508 | |||
509 | if (qc->err_mask == AC_ERR_DEV) | ||
510 | ata_port_abort(ap); | ||
511 | else | ||
512 | ata_port_freeze(ap); | ||
513 | } | ||
475 | } | 514 | } |
476 | } | 515 | } |
477 | return handled; | 516 | return handled; |
@@ -489,7 +528,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) | |||
489 | struct adma_port_priv *pp = ap->private_data; | 528 | struct adma_port_priv *pp = ap->private_data; |
490 | if (!pp || pp->state != adma_state_mmio) | 529 | if (!pp || pp->state != adma_state_mmio) |
491 | continue; | 530 | continue; |
492 | qc = ata_qc_from_tag(ap, ap->active_tag); | 531 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
493 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 532 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
494 | 533 | ||
495 | /* check main status, clearing INTRQ */ | 534 | /* check main status, clearing INTRQ */ |
@@ -502,7 +541,20 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) | |||
502 | /* complete taskfile transaction */ | 541 | /* complete taskfile transaction */ |
503 | pp->state = adma_state_idle; | 542 | pp->state = adma_state_idle; |
504 | qc->err_mask |= ac_err_mask(status); | 543 | qc->err_mask |= ac_err_mask(status); |
505 | ata_qc_complete(qc); | 544 | if (!qc->err_mask) |
545 | ata_qc_complete(qc); | ||
546 | else { | ||
547 | struct ata_eh_info *ehi = | ||
548 | &ap->link.eh_info; | ||
549 | ata_ehi_clear_desc(ehi); | ||
550 | ata_ehi_push_desc(ehi, | ||
551 | "status 0x%02X", status); | ||
552 | |||
553 | if (qc->err_mask == AC_ERR_DEV) | ||
554 | ata_port_abort(ap); | ||
555 | else | ||
556 | ata_port_freeze(ap); | ||
557 | } | ||
506 | handled = 1; | 558 | handled = 1; |
507 | } | 559 | } |
508 | } | 560 | } |
@@ -652,9 +704,16 @@ static int adma_ata_init_one(struct pci_dev *pdev, | |||
652 | if (rc) | 704 | if (rc) |
653 | return rc; | 705 | return rc; |
654 | 706 | ||
655 | for (port_no = 0; port_no < ADMA_PORTS; ++port_no) | 707 | for (port_no = 0; port_no < ADMA_PORTS; ++port_no) { |
656 | adma_ata_setup_port(&host->ports[port_no]->ioaddr, | 708 | struct ata_port *ap = host->ports[port_no]; |
657 | ADMA_ATA_REGS(mmio_base, port_no)); | 709 | void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no); |
710 | unsigned int offset = port_base - mmio_base; | ||
711 | |||
712 | adma_ata_setup_port(&ap->ioaddr, port_base); | ||
713 | |||
714 | ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio"); | ||
715 | ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port"); | ||
716 | } | ||
658 | 717 | ||
659 | /* initialize adapter */ | 718 | /* initialize adapter */ |
660 | adma_host_init(host, board_idx); | 719 | adma_host_init(host, board_idx); |
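The pdc_adma hunks convert the driver from the old ->phy_reset/->eng_timeout scheme to new-style EH: the port operations gain freeze, thaw and an error_handler that funnels into ata_do_eh() with the standard reset methods. A sketch of that wiring under the same assumptions, with "example_" names as placeholders:

#include <linux/libata.h>

static int example_prereset(struct ata_link *link, unsigned long deadline)
{
	/* quiesce controller-specific state here, then do the generic work */
	return ata_std_prereset(link, deadline);
}

static void example_error_handler(struct ata_port *ap)
{
	/* no controller hardreset method, hence the NULL slot */
	ata_do_eh(ap, example_prereset, ata_std_softreset, NULL,
		  ata_std_postreset);
}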
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index fdbed8ecdfc2..08595f34b3e8 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -285,7 +285,7 @@ static void inic_irq_clear(struct ata_port *ap) | |||
285 | static void inic_host_intr(struct ata_port *ap) | 285 | static void inic_host_intr(struct ata_port *ap) |
286 | { | 286 | { |
287 | void __iomem *port_base = inic_port_base(ap); | 287 | void __iomem *port_base = inic_port_base(ap); |
288 | struct ata_eh_info *ehi = &ap->eh_info; | 288 | struct ata_eh_info *ehi = &ap->link.eh_info; |
289 | u8 irq_stat; | 289 | u8 irq_stat; |
290 | 290 | ||
291 | /* fetch and clear irq */ | 291 | /* fetch and clear irq */ |
@@ -293,7 +293,8 @@ static void inic_host_intr(struct ata_port *ap) | |||
293 | writeb(irq_stat, port_base + PORT_IRQ_STAT); | 293 | writeb(irq_stat, port_base + PORT_IRQ_STAT); |
294 | 294 | ||
295 | if (likely(!(irq_stat & PIRQ_ERR))) { | 295 | if (likely(!(irq_stat & PIRQ_ERR))) { |
296 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | 296 | struct ata_queued_cmd *qc = |
297 | ata_qc_from_tag(ap, ap->link.active_tag); | ||
297 | 298 | ||
298 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | 299 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { |
299 | ata_chk_status(ap); /* clear ATA interrupt */ | 300 | ata_chk_status(ap); /* clear ATA interrupt */ |
@@ -416,12 +417,13 @@ static void inic_thaw(struct ata_port *ap) | |||
416 | * SRST and SControl hardreset don't give valid signature on this | 417 | * SRST and SControl hardreset don't give valid signature on this |
417 | * controller. Only controller specific hardreset mechanism works. | 418 | * controller. Only controller specific hardreset mechanism works. |
418 | */ | 419 | */ |
419 | static int inic_hardreset(struct ata_port *ap, unsigned int *class, | 420 | static int inic_hardreset(struct ata_link *link, unsigned int *class, |
420 | unsigned long deadline) | 421 | unsigned long deadline) |
421 | { | 422 | { |
423 | struct ata_port *ap = link->ap; | ||
422 | void __iomem *port_base = inic_port_base(ap); | 424 | void __iomem *port_base = inic_port_base(ap); |
423 | void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; | 425 | void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; |
424 | const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); | 426 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); |
425 | u16 val; | 427 | u16 val; |
426 | int rc; | 428 | int rc; |
427 | 429 | ||
@@ -434,15 +436,15 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class, | |||
434 | msleep(1); | 436 | msleep(1); |
435 | writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); | 437 | writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); |
436 | 438 | ||
437 | rc = sata_phy_resume(ap, timing, deadline); | 439 | rc = sata_link_resume(link, timing, deadline); |
438 | if (rc) { | 440 | if (rc) { |
439 | ata_port_printk(ap, KERN_WARNING, "failed to resume " | 441 | ata_link_printk(link, KERN_WARNING, "failed to resume " |
440 | "link after reset (errno=%d)\n", rc); | 442 | "link after reset (errno=%d)\n", rc); |
441 | return rc; | 443 | return rc; |
442 | } | 444 | } |
443 | 445 | ||
444 | *class = ATA_DEV_NONE; | 446 | *class = ATA_DEV_NONE; |
445 | if (ata_port_online(ap)) { | 447 | if (ata_link_online(link)) { |
446 | struct ata_taskfile tf; | 448 | struct ata_taskfile tf; |
447 | 449 | ||
448 | /* wait a while before checking status */ | 450 | /* wait a while before checking status */ |
@@ -451,7 +453,7 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class, | |||
451 | rc = ata_wait_ready(ap, deadline); | 453 | rc = ata_wait_ready(ap, deadline); |
452 | /* link occupied, -ENODEV too is an error */ | 454 | /* link occupied, -ENODEV too is an error */ |
453 | if (rc) { | 455 | if (rc) { |
454 | ata_port_printk(ap, KERN_WARNING, "device not ready " | 456 | ata_link_printk(link, KERN_WARNING, "device not ready " |
455 | "after hardreset (errno=%d)\n", rc); | 457 | "after hardreset (errno=%d)\n", rc); |
456 | return rc; | 458 | return rc; |
457 | } | 459 | } |
@@ -550,7 +552,6 @@ static int inic_port_start(struct ata_port *ap) | |||
550 | } | 552 | } |
551 | 553 | ||
552 | static struct ata_port_operations inic_port_ops = { | 554 | static struct ata_port_operations inic_port_ops = { |
553 | .port_disable = ata_port_disable, | ||
554 | .tf_load = ata_tf_load, | 555 | .tf_load = ata_tf_load, |
555 | .tf_read = ata_tf_read, | 556 | .tf_read = ata_tf_read, |
556 | .check_status = ata_check_status, | 557 | .check_status = ata_check_status, |
@@ -567,7 +568,6 @@ static struct ata_port_operations inic_port_ops = { | |||
567 | 568 | ||
568 | .irq_clear = inic_irq_clear, | 569 | .irq_clear = inic_irq_clear, |
569 | .irq_on = ata_irq_on, | 570 | .irq_on = ata_irq_on, |
570 | .irq_ack = ata_irq_ack, | ||
571 | 571 | ||
572 | .qc_prep = ata_qc_prep, | 572 | .qc_prep = ata_qc_prep, |
573 | .qc_issue = inic_qc_issue, | 573 | .qc_issue = inic_qc_issue, |
@@ -693,16 +693,24 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
693 | host->iomap = iomap = pcim_iomap_table(pdev); | 693 | host->iomap = iomap = pcim_iomap_table(pdev); |
694 | 694 | ||
695 | for (i = 0; i < NR_PORTS; i++) { | 695 | for (i = 0; i < NR_PORTS; i++) { |
696 | struct ata_ioports *port = &host->ports[i]->ioaddr; | 696 | struct ata_port *ap = host->ports[i]; |
697 | void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE; | 697 | struct ata_ioports *port = &ap->ioaddr; |
698 | unsigned int offset = i * PORT_SIZE; | ||
698 | 699 | ||
699 | port->cmd_addr = iomap[2 * i]; | 700 | port->cmd_addr = iomap[2 * i]; |
700 | port->altstatus_addr = | 701 | port->altstatus_addr = |
701 | port->ctl_addr = (void __iomem *) | 702 | port->ctl_addr = (void __iomem *) |
702 | ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); | 703 | ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); |
703 | port->scr_addr = port_base + PORT_SCR; | 704 | port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR; |
704 | 705 | ||
705 | ata_std_ports(port); | 706 | ata_std_ports(port); |
707 | |||
708 | ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio"); | ||
709 | ata_port_pbar_desc(ap, MMIO_BAR, offset, "port"); | ||
710 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | ||
711 | (unsigned long long)pci_resource_start(pdev, 2 * i), | ||
712 | (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) | | ||
713 | ATA_PCI_CTL_OFS); | ||
706 | } | 714 | } |
707 | 715 | ||
708 | hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); | 716 | hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); |
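Several of the probe hunks above (pata_scc, pdc_adma, sata_inic162x, sata_mv) add port resource descriptions: ata_port_pbar_desc() records which PCI BAR, and optionally which offset within it, a port uses, while ata_port_desc() takes a free-form format string. A small sketch of the idiom; the BAR number is a placeholder:

#include <linux/libata.h>

static void example_describe_port(struct ata_port *ap, void __iomem *mmio_base,
				  void __iomem *port_base)
{
	unsigned int offset = port_base - mmio_base;

	ata_port_pbar_desc(ap, 5, -1, "mmio");		/* whole BAR */
	ata_port_pbar_desc(ap, 5, offset, "port");	/* this port's window */
}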
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index d9832e234e44..4df8311968e9 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -483,8 +483,6 @@ static struct scsi_host_template mv6_sht = { | |||
483 | }; | 483 | }; |
484 | 484 | ||
485 | static const struct ata_port_operations mv5_ops = { | 485 | static const struct ata_port_operations mv5_ops = { |
486 | .port_disable = ata_port_disable, | ||
487 | |||
488 | .tf_load = ata_tf_load, | 486 | .tf_load = ata_tf_load, |
489 | .tf_read = ata_tf_read, | 487 | .tf_read = ata_tf_read, |
490 | .check_status = ata_check_status, | 488 | .check_status = ata_check_status, |
@@ -499,7 +497,6 @@ static const struct ata_port_operations mv5_ops = { | |||
499 | 497 | ||
500 | .irq_clear = mv_irq_clear, | 498 | .irq_clear = mv_irq_clear, |
501 | .irq_on = ata_irq_on, | 499 | .irq_on = ata_irq_on, |
502 | .irq_ack = ata_irq_ack, | ||
503 | 500 | ||
504 | .error_handler = mv_error_handler, | 501 | .error_handler = mv_error_handler, |
505 | .post_internal_cmd = mv_post_int_cmd, | 502 | .post_internal_cmd = mv_post_int_cmd, |
@@ -514,8 +511,6 @@ static const struct ata_port_operations mv5_ops = { | |||
514 | }; | 511 | }; |
515 | 512 | ||
516 | static const struct ata_port_operations mv6_ops = { | 513 | static const struct ata_port_operations mv6_ops = { |
517 | .port_disable = ata_port_disable, | ||
518 | |||
519 | .tf_load = ata_tf_load, | 514 | .tf_load = ata_tf_load, |
520 | .tf_read = ata_tf_read, | 515 | .tf_read = ata_tf_read, |
521 | .check_status = ata_check_status, | 516 | .check_status = ata_check_status, |
@@ -530,7 +525,6 @@ static const struct ata_port_operations mv6_ops = { | |||
530 | 525 | ||
531 | .irq_clear = mv_irq_clear, | 526 | .irq_clear = mv_irq_clear, |
532 | .irq_on = ata_irq_on, | 527 | .irq_on = ata_irq_on, |
533 | .irq_ack = ata_irq_ack, | ||
534 | 528 | ||
535 | .error_handler = mv_error_handler, | 529 | .error_handler = mv_error_handler, |
536 | .post_internal_cmd = mv_post_int_cmd, | 530 | .post_internal_cmd = mv_post_int_cmd, |
@@ -545,8 +539,6 @@ static const struct ata_port_operations mv6_ops = { | |||
545 | }; | 539 | }; |
546 | 540 | ||
547 | static const struct ata_port_operations mv_iie_ops = { | 541 | static const struct ata_port_operations mv_iie_ops = { |
548 | .port_disable = ata_port_disable, | ||
549 | |||
550 | .tf_load = ata_tf_load, | 542 | .tf_load = ata_tf_load, |
551 | .tf_read = ata_tf_read, | 543 | .tf_read = ata_tf_read, |
552 | .check_status = ata_check_status, | 544 | .check_status = ata_check_status, |
@@ -561,7 +553,6 @@ static const struct ata_port_operations mv_iie_ops = { | |||
561 | 553 | ||
562 | .irq_clear = mv_irq_clear, | 554 | .irq_clear = mv_irq_clear, |
563 | .irq_on = ata_irq_on, | 555 | .irq_on = ata_irq_on, |
564 | .irq_ack = ata_irq_ack, | ||
565 | 556 | ||
566 | .error_handler = mv_error_handler, | 557 | .error_handler = mv_error_handler, |
567 | .post_internal_cmd = mv_post_int_cmd, | 558 | .post_internal_cmd = mv_post_int_cmd, |
@@ -1415,7 +1406,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1415 | struct mv_host_priv *hpriv = ap->host->private_data; | 1406 | struct mv_host_priv *hpriv = ap->host->private_data; |
1416 | unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | 1407 | unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); |
1417 | unsigned int action = 0, err_mask = 0; | 1408 | unsigned int action = 0, err_mask = 0; |
1418 | struct ata_eh_info *ehi = &ap->eh_info; | 1409 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1419 | 1410 | ||
1420 | ata_ehi_clear_desc(ehi); | 1411 | ata_ehi_clear_desc(ehi); |
1421 | 1412 | ||
@@ -1423,8 +1414,8 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1423 | /* just a guess: do we need to do this? should we | 1414 | /* just a guess: do we need to do this? should we |
1424 | * expand this, and do it in all cases? | 1415 | * expand this, and do it in all cases? |
1425 | */ | 1416 | */ |
1426 | sata_scr_read(ap, SCR_ERROR, &serr); | 1417 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
1427 | sata_scr_write_flush(ap, SCR_ERROR, serr); | 1418 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); |
1428 | } | 1419 | } |
1429 | 1420 | ||
1430 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1421 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
@@ -1468,8 +1459,8 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1468 | } | 1459 | } |
1469 | 1460 | ||
1470 | if (edma_err_cause & EDMA_ERR_SERR) { | 1461 | if (edma_err_cause & EDMA_ERR_SERR) { |
1471 | sata_scr_read(ap, SCR_ERROR, &serr); | 1462 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
1472 | sata_scr_write_flush(ap, SCR_ERROR, serr); | 1463 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); |
1473 | err_mask = AC_ERR_ATA_BUS; | 1464 | err_mask = AC_ERR_ATA_BUS; |
1474 | action |= ATA_EH_HARDRESET; | 1465 | action |= ATA_EH_HARDRESET; |
1475 | } | 1466 | } |
@@ -1508,7 +1499,7 @@ static void mv_intr_pio(struct ata_port *ap) | |||
1508 | return; | 1499 | return; |
1509 | 1500 | ||
1510 | /* get active ATA command */ | 1501 | /* get active ATA command */ |
1511 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1502 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1512 | if (unlikely(!qc)) /* no active tag */ | 1503 | if (unlikely(!qc)) /* no active tag */ |
1513 | return; | 1504 | return; |
1514 | if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ | 1505 | if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ |
@@ -1543,7 +1534,7 @@ static void mv_intr_edma(struct ata_port *ap) | |||
1543 | 1534 | ||
1544 | /* 50xx: get active ATA command */ | 1535 | /* 50xx: get active ATA command */ |
1545 | if (IS_GEN_I(hpriv)) | 1536 | if (IS_GEN_I(hpriv)) |
1546 | tag = ap->active_tag; | 1537 | tag = ap->link.active_tag; |
1547 | 1538 | ||
1548 | /* Gen II/IIE: get active ATA command via tag, to enable | 1539 | /* Gen II/IIE: get active ATA command via tag, to enable |
1549 | * support for queueing. this works transparently for | 1540 | * support for queueing. this works transparently for |
@@ -1646,7 +1637,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1646 | if (unlikely(have_err_bits)) { | 1637 | if (unlikely(have_err_bits)) { |
1647 | struct ata_queued_cmd *qc; | 1638 | struct ata_queued_cmd *qc; |
1648 | 1639 | ||
1649 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1640 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1650 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | 1641 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) |
1651 | continue; | 1642 | continue; |
1652 | 1643 | ||
@@ -1687,15 +1678,15 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio) | |||
1687 | 1678 | ||
1688 | for (i = 0; i < host->n_ports; i++) { | 1679 | for (i = 0; i < host->n_ports; i++) { |
1689 | ap = host->ports[i]; | 1680 | ap = host->ports[i]; |
1690 | if (!ata_port_offline(ap)) { | 1681 | if (!ata_link_offline(&ap->link)) { |
1691 | ehi = &ap->eh_info; | 1682 | ehi = &ap->link.eh_info; |
1692 | ata_ehi_clear_desc(ehi); | 1683 | ata_ehi_clear_desc(ehi); |
1693 | if (!printed++) | 1684 | if (!printed++) |
1694 | ata_ehi_push_desc(ehi, | 1685 | ata_ehi_push_desc(ehi, |
1695 | "PCI err cause 0x%08x", err_cause); | 1686 | "PCI err cause 0x%08x", err_cause); |
1696 | err_mask = AC_ERR_HOST_BUS; | 1687 | err_mask = AC_ERR_HOST_BUS; |
1697 | ehi->action = ATA_EH_HARDRESET; | 1688 | ehi->action = ATA_EH_HARDRESET; |
1698 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1689 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1699 | if (qc) | 1690 | if (qc) |
1700 | qc->err_mask |= err_mask; | 1691 | qc->err_mask |= err_mask; |
1701 | else | 1692 | else |
@@ -2198,14 +2189,14 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class, | |||
2198 | 2189 | ||
2199 | /* Issue COMRESET via SControl */ | 2190 | /* Issue COMRESET via SControl */ |
2200 | comreset_retry: | 2191 | comreset_retry: |
2201 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); | 2192 | sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301); |
2202 | msleep(1); | 2193 | msleep(1); |
2203 | 2194 | ||
2204 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); | 2195 | sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300); |
2205 | msleep(20); | 2196 | msleep(20); |
2206 | 2197 | ||
2207 | do { | 2198 | do { |
2208 | sata_scr_read(ap, SCR_STATUS, &sstatus); | 2199 | sata_scr_read(&ap->link, SCR_STATUS, &sstatus); |
2209 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) | 2200 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) |
2210 | break; | 2201 | break; |
2211 | 2202 | ||
@@ -2230,7 +2221,7 @@ comreset_retry: | |||
2230 | } | 2221 | } |
2231 | #endif | 2222 | #endif |
2232 | 2223 | ||
2233 | if (ata_port_offline(ap)) { | 2224 | if (ata_link_offline(&ap->link)) { |
2234 | *class = ATA_DEV_NONE; | 2225 | *class = ATA_DEV_NONE; |
2235 | return; | 2226 | return; |
2236 | } | 2227 | } |
@@ -2257,7 +2248,7 @@ comreset_retry: | |||
2257 | */ | 2248 | */ |
2258 | 2249 | ||
2259 | /* finally, read device signature from TF registers */ | 2250 | /* finally, read device signature from TF registers */ |
2260 | *class = ata_dev_try_classify(ap, 0, NULL); | 2251 | *class = ata_dev_try_classify(ap->link.device, 1, NULL); |
2261 | 2252 | ||
2262 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 2253 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
2263 | 2254 | ||
@@ -2266,10 +2257,11 @@ comreset_retry: | |||
2266 | VPRINTK("EXIT\n"); | 2257 | VPRINTK("EXIT\n"); |
2267 | } | 2258 | } |
2268 | 2259 | ||
2269 | static int mv_prereset(struct ata_port *ap, unsigned long deadline) | 2260 | static int mv_prereset(struct ata_link *link, unsigned long deadline) |
2270 | { | 2261 | { |
2262 | struct ata_port *ap = link->ap; | ||
2271 | struct mv_port_priv *pp = ap->private_data; | 2263 | struct mv_port_priv *pp = ap->private_data; |
2272 | struct ata_eh_context *ehc = &ap->eh_context; | 2264 | struct ata_eh_context *ehc = &link->eh_context; |
2273 | int rc; | 2265 | int rc; |
2274 | 2266 | ||
2275 | rc = mv_stop_dma(ap); | 2267 | rc = mv_stop_dma(ap); |
@@ -2285,7 +2277,7 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline) | |||
2285 | if (ehc->i.action & ATA_EH_HARDRESET) | 2277 | if (ehc->i.action & ATA_EH_HARDRESET) |
2286 | return 0; | 2278 | return 0; |
2287 | 2279 | ||
2288 | if (ata_port_online(ap)) | 2280 | if (ata_link_online(link)) |
2289 | rc = ata_wait_ready(ap, deadline); | 2281 | rc = ata_wait_ready(ap, deadline); |
2290 | else | 2282 | else |
2291 | rc = -ENODEV; | 2283 | rc = -ENODEV; |
@@ -2293,9 +2285,10 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline) | |||
2293 | return rc; | 2285 | return rc; |
2294 | } | 2286 | } |
2295 | 2287 | ||
2296 | static int mv_hardreset(struct ata_port *ap, unsigned int *class, | 2288 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
2297 | unsigned long deadline) | 2289 | unsigned long deadline) |
2298 | { | 2290 | { |
2291 | struct ata_port *ap = link->ap; | ||
2299 | struct mv_host_priv *hpriv = ap->host->private_data; | 2292 | struct mv_host_priv *hpriv = ap->host->private_data; |
2300 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | 2293 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; |
2301 | 2294 | ||
@@ -2308,16 +2301,17 @@ static int mv_hardreset(struct ata_port *ap, unsigned int *class, | |||
2308 | return 0; | 2301 | return 0; |
2309 | } | 2302 | } |
2310 | 2303 | ||
2311 | static void mv_postreset(struct ata_port *ap, unsigned int *classes) | 2304 | static void mv_postreset(struct ata_link *link, unsigned int *classes) |
2312 | { | 2305 | { |
2306 | struct ata_port *ap = link->ap; | ||
2313 | u32 serr; | 2307 | u32 serr; |
2314 | 2308 | ||
2315 | /* print link status */ | 2309 | /* print link status */ |
2316 | sata_print_link_status(ap); | 2310 | sata_print_link_status(link); |
2317 | 2311 | ||
2318 | /* clear SError */ | 2312 | /* clear SError */ |
2319 | sata_scr_read(ap, SCR_ERROR, &serr); | 2313 | sata_scr_read(link, SCR_ERROR, &serr); |
2320 | sata_scr_write_flush(ap, SCR_ERROR, serr); | 2314 | sata_scr_write_flush(link, SCR_ERROR, serr); |
2321 | 2315 | ||
2322 | /* bail out if no device is present */ | 2316 | /* bail out if no device is present */ |
2323 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | 2317 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { |
@@ -2590,8 +2584,14 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
2590 | } | 2584 | } |
2591 | 2585 | ||
2592 | for (port = 0; port < host->n_ports; port++) { | 2586 | for (port = 0; port < host->n_ports; port++) { |
2587 | struct ata_port *ap = host->ports[port]; | ||
2593 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2588 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2594 | mv_port_init(&host->ports[port]->ioaddr, port_mmio); | 2589 | unsigned int offset = port_mmio - mmio; |
2590 | |||
2591 | mv_port_init(&ap->ioaddr, port_mmio); | ||
2592 | |||
2593 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); | ||
2594 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); | ||
2595 | } | 2595 | } |
2596 | 2596 | ||
2597 | for (hc = 0; hc < n_hc; hc++) { | 2597 | for (hc = 0; hc < n_hc; hc++) { |
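The sata_mv changes show the per-port fields that moved under the new embedded link: active_tag and the EH info/context now live in ap->link, and the SCR accessors take a link instead of a port. A hedged sketch of completing the active command with the link-based accessors (status value and error handling are illustrative):

#include <linux/libata.h>

static void example_complete_active_cmd(struct ata_port *ap, u8 ata_status)
{
	struct ata_queued_cmd *qc;
	u32 serr;

	/* the active tag now lives on the port's link */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (!qc)
		return;

	/* SCR access is per-link as well */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}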
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 40dc73139858..40557fe2ffdf 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -340,7 +340,6 @@ static struct scsi_host_template nv_adma_sht = { | |||
340 | }; | 340 | }; |
341 | 341 | ||
342 | static const struct ata_port_operations nv_generic_ops = { | 342 | static const struct ata_port_operations nv_generic_ops = { |
343 | .port_disable = ata_port_disable, | ||
344 | .tf_load = ata_tf_load, | 343 | .tf_load = ata_tf_load, |
345 | .tf_read = ata_tf_read, | 344 | .tf_read = ata_tf_read, |
346 | .exec_command = ata_exec_command, | 345 | .exec_command = ata_exec_command, |
@@ -359,14 +358,12 @@ static const struct ata_port_operations nv_generic_ops = { | |||
359 | .data_xfer = ata_data_xfer, | 358 | .data_xfer = ata_data_xfer, |
360 | .irq_clear = ata_bmdma_irq_clear, | 359 | .irq_clear = ata_bmdma_irq_clear, |
361 | .irq_on = ata_irq_on, | 360 | .irq_on = ata_irq_on, |
362 | .irq_ack = ata_irq_ack, | ||
363 | .scr_read = nv_scr_read, | 361 | .scr_read = nv_scr_read, |
364 | .scr_write = nv_scr_write, | 362 | .scr_write = nv_scr_write, |
365 | .port_start = ata_port_start, | 363 | .port_start = ata_port_start, |
366 | }; | 364 | }; |
367 | 365 | ||
368 | static const struct ata_port_operations nv_nf2_ops = { | 366 | static const struct ata_port_operations nv_nf2_ops = { |
369 | .port_disable = ata_port_disable, | ||
370 | .tf_load = ata_tf_load, | 367 | .tf_load = ata_tf_load, |
371 | .tf_read = ata_tf_read, | 368 | .tf_read = ata_tf_read, |
372 | .exec_command = ata_exec_command, | 369 | .exec_command = ata_exec_command, |
@@ -385,14 +382,12 @@ static const struct ata_port_operations nv_nf2_ops = { | |||
385 | .data_xfer = ata_data_xfer, | 382 | .data_xfer = ata_data_xfer, |
386 | .irq_clear = ata_bmdma_irq_clear, | 383 | .irq_clear = ata_bmdma_irq_clear, |
387 | .irq_on = ata_irq_on, | 384 | .irq_on = ata_irq_on, |
388 | .irq_ack = ata_irq_ack, | ||
389 | .scr_read = nv_scr_read, | 385 | .scr_read = nv_scr_read, |
390 | .scr_write = nv_scr_write, | 386 | .scr_write = nv_scr_write, |
391 | .port_start = ata_port_start, | 387 | .port_start = ata_port_start, |
392 | }; | 388 | }; |
393 | 389 | ||
394 | static const struct ata_port_operations nv_ck804_ops = { | 390 | static const struct ata_port_operations nv_ck804_ops = { |
395 | .port_disable = ata_port_disable, | ||
396 | .tf_load = ata_tf_load, | 391 | .tf_load = ata_tf_load, |
397 | .tf_read = ata_tf_read, | 392 | .tf_read = ata_tf_read, |
398 | .exec_command = ata_exec_command, | 393 | .exec_command = ata_exec_command, |
@@ -411,7 +406,6 @@ static const struct ata_port_operations nv_ck804_ops = { | |||
411 | .data_xfer = ata_data_xfer, | 406 | .data_xfer = ata_data_xfer, |
412 | .irq_clear = ata_bmdma_irq_clear, | 407 | .irq_clear = ata_bmdma_irq_clear, |
413 | .irq_on = ata_irq_on, | 408 | .irq_on = ata_irq_on, |
414 | .irq_ack = ata_irq_ack, | ||
415 | .scr_read = nv_scr_read, | 409 | .scr_read = nv_scr_read, |
416 | .scr_write = nv_scr_write, | 410 | .scr_write = nv_scr_write, |
417 | .port_start = ata_port_start, | 411 | .port_start = ata_port_start, |
@@ -419,7 +413,6 @@ static const struct ata_port_operations nv_ck804_ops = { | |||
419 | }; | 413 | }; |
420 | 414 | ||
421 | static const struct ata_port_operations nv_adma_ops = { | 415 | static const struct ata_port_operations nv_adma_ops = { |
422 | .port_disable = ata_port_disable, | ||
423 | .tf_load = ata_tf_load, | 416 | .tf_load = ata_tf_load, |
424 | .tf_read = nv_adma_tf_read, | 417 | .tf_read = nv_adma_tf_read, |
425 | .check_atapi_dma = nv_adma_check_atapi_dma, | 418 | .check_atapi_dma = nv_adma_check_atapi_dma, |
@@ -430,6 +423,7 @@ static const struct ata_port_operations nv_adma_ops = { | |||
430 | .bmdma_start = ata_bmdma_start, | 423 | .bmdma_start = ata_bmdma_start, |
431 | .bmdma_stop = ata_bmdma_stop, | 424 | .bmdma_stop = ata_bmdma_stop, |
432 | .bmdma_status = ata_bmdma_status, | 425 | .bmdma_status = ata_bmdma_status, |
426 | .qc_defer = ata_std_qc_defer, | ||
433 | .qc_prep = nv_adma_qc_prep, | 427 | .qc_prep = nv_adma_qc_prep, |
434 | .qc_issue = nv_adma_qc_issue, | 428 | .qc_issue = nv_adma_qc_issue, |
435 | .freeze = nv_adma_freeze, | 429 | .freeze = nv_adma_freeze, |
@@ -439,7 +433,6 @@ static const struct ata_port_operations nv_adma_ops = { | |||
439 | .data_xfer = ata_data_xfer, | 433 | .data_xfer = ata_data_xfer, |
440 | .irq_clear = nv_adma_irq_clear, | 434 | .irq_clear = nv_adma_irq_clear, |
441 | .irq_on = ata_irq_on, | 435 | .irq_on = ata_irq_on, |
442 | .irq_ack = ata_irq_ack, | ||
443 | .scr_read = nv_scr_read, | 436 | .scr_read = nv_scr_read, |
444 | .scr_write = nv_scr_write, | 437 | .scr_write = nv_scr_write, |
445 | .port_start = nv_adma_port_start, | 438 | .port_start = nv_adma_port_start, |
@@ -455,8 +448,8 @@ static const struct ata_port_info nv_port_info[] = { | |||
455 | /* generic */ | 448 | /* generic */ |
456 | { | 449 | { |
457 | .sht = &nv_sht, | 450 | .sht = &nv_sht, |
458 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 451 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
459 | ATA_FLAG_HRST_TO_RESUME, | 452 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, |
460 | .pio_mask = NV_PIO_MASK, | 453 | .pio_mask = NV_PIO_MASK, |
461 | .mwdma_mask = NV_MWDMA_MASK, | 454 | .mwdma_mask = NV_MWDMA_MASK, |
462 | .udma_mask = NV_UDMA_MASK, | 455 | .udma_mask = NV_UDMA_MASK, |
@@ -466,8 +459,8 @@ static const struct ata_port_info nv_port_info[] = { | |||
466 | /* nforce2/3 */ | 459 | /* nforce2/3 */ |
467 | { | 460 | { |
468 | .sht = &nv_sht, | 461 | .sht = &nv_sht, |
469 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 462 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
470 | ATA_FLAG_HRST_TO_RESUME, | 463 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, |
471 | .pio_mask = NV_PIO_MASK, | 464 | .pio_mask = NV_PIO_MASK, |
472 | .mwdma_mask = NV_MWDMA_MASK, | 465 | .mwdma_mask = NV_MWDMA_MASK, |
473 | .udma_mask = NV_UDMA_MASK, | 466 | .udma_mask = NV_UDMA_MASK, |
@@ -477,8 +470,8 @@ static const struct ata_port_info nv_port_info[] = { | |||
477 | /* ck804 */ | 470 | /* ck804 */ |
478 | { | 471 | { |
479 | .sht = &nv_sht, | 472 | .sht = &nv_sht, |
480 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 473 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
481 | ATA_FLAG_HRST_TO_RESUME, | 474 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, |
482 | .pio_mask = NV_PIO_MASK, | 475 | .pio_mask = NV_PIO_MASK, |
483 | .mwdma_mask = NV_MWDMA_MASK, | 476 | .mwdma_mask = NV_MWDMA_MASK, |
484 | .udma_mask = NV_UDMA_MASK, | 477 | .udma_mask = NV_UDMA_MASK, |
@@ -489,8 +482,8 @@ static const struct ata_port_info nv_port_info[] = { | |||
489 | { | 482 | { |
490 | .sht = &nv_adma_sht, | 483 | .sht = &nv_adma_sht, |
491 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 484 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
492 | ATA_FLAG_HRST_TO_RESUME | | ||
493 | ATA_FLAG_MMIO | ATA_FLAG_NCQ, | 485 | ATA_FLAG_MMIO | ATA_FLAG_NCQ, |
486 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
494 | .pio_mask = NV_PIO_MASK, | 487 | .pio_mask = NV_PIO_MASK, |
495 | .mwdma_mask = NV_MWDMA_MASK, | 488 | .mwdma_mask = NV_MWDMA_MASK, |
496 | .udma_mask = NV_UDMA_MASK, | 489 | .udma_mask = NV_UDMA_MASK, |
@@ -594,7 +587,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
594 | /* Not a proper libata device, ignore */ | 587 | /* Not a proper libata device, ignore */ |
595 | return rc; | 588 | return rc; |
596 | 589 | ||
597 | if (ap->device[sdev->id].class == ATA_DEV_ATAPI) { | 590 | if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { |
598 | /* | 591 | /* |
599 | * NVIDIA reports that ADMA mode does not support ATAPI commands. | 592 | * NVIDIA reports that ADMA mode does not support ATAPI commands. |
600 | * Therefore ATAPI commands are sent through the legacy interface. | 593 | * Therefore ATAPI commands are sent through the legacy interface. |
@@ -711,7 +704,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
711 | flags & (NV_CPB_RESP_ATA_ERR | | 704 | flags & (NV_CPB_RESP_ATA_ERR | |
712 | NV_CPB_RESP_CMD_ERR | | 705 | NV_CPB_RESP_CMD_ERR | |
713 | NV_CPB_RESP_CPB_ERR)))) { | 706 | NV_CPB_RESP_CPB_ERR)))) { |
714 | struct ata_eh_info *ehi = &ap->eh_info; | 707 | struct ata_eh_info *ehi = &ap->link.eh_info; |
715 | int freeze = 0; | 708 | int freeze = 0; |
716 | 709 | ||
717 | ata_ehi_clear_desc(ehi); | 710 | ata_ehi_clear_desc(ehi); |
@@ -747,7 +740,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
747 | DPRINTK("Completing qc from tag %d\n",cpb_num); | 740 | DPRINTK("Completing qc from tag %d\n",cpb_num); |
748 | ata_qc_complete(qc); | 741 | ata_qc_complete(qc); |
749 | } else { | 742 | } else { |
750 | struct ata_eh_info *ehi = &ap->eh_info; | 743 | struct ata_eh_info *ehi = &ap->link.eh_info; |
751 | /* Notifier bits set without a command may indicate the drive | 744 | /* Notifier bits set without a command may indicate the drive |
752 | is misbehaving. Raise host state machine violation on this | 745 | is misbehaving. Raise host state machine violation on this |
753 | condition. */ | 746 | condition. */ |
@@ -764,7 +757,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
764 | 757 | ||
765 | static int nv_host_intr(struct ata_port *ap, u8 irq_stat) | 758 | static int nv_host_intr(struct ata_port *ap, u8 irq_stat) |
766 | { | 759 | { |
767 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | 760 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); |
768 | 761 | ||
769 | /* freeze if hotplugged */ | 762 | /* freeze if hotplugged */ |
770 | if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) { | 763 | if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) { |
@@ -817,7 +810,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
817 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { | 810 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { |
818 | u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) | 811 | u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) |
819 | >> (NV_INT_PORT_SHIFT * i); | 812 | >> (NV_INT_PORT_SHIFT * i); |
820 | if(ata_tag_valid(ap->active_tag)) | 813 | if(ata_tag_valid(ap->link.active_tag)) |
821 | /** NV_INT_DEV indication seems unreliable at times | 814 | /** NV_INT_DEV indication seems unreliable at times |
822 | at least in ADMA mode. Force it on always when a | 815 | at least in ADMA mode. Force it on always when a |
823 | command is active, to prevent losing interrupts. */ | 816 | command is active, to prevent losing interrupts. */ |
@@ -852,7 +845,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
852 | NV_ADMA_STAT_HOTUNPLUG | | 845 | NV_ADMA_STAT_HOTUNPLUG | |
853 | NV_ADMA_STAT_TIMEOUT | | 846 | NV_ADMA_STAT_TIMEOUT | |
854 | NV_ADMA_STAT_SERROR))) { | 847 | NV_ADMA_STAT_SERROR))) { |
855 | struct ata_eh_info *ehi = &ap->eh_info; | 848 | struct ata_eh_info *ehi = &ap->link.eh_info; |
856 | 849 | ||
857 | ata_ehi_clear_desc(ehi); | 850 | ata_ehi_clear_desc(ehi); |
858 | __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); | 851 | __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); |
@@ -879,10 +872,10 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
879 | u32 check_commands; | 872 | u32 check_commands; |
880 | int pos, error = 0; | 873 | int pos, error = 0; |
881 | 874 | ||
882 | if(ata_tag_valid(ap->active_tag)) | 875 | if(ata_tag_valid(ap->link.active_tag)) |
883 | check_commands = 1 << ap->active_tag; | 876 | check_commands = 1 << ap->link.active_tag; |
884 | else | 877 | else |
885 | check_commands = ap->sactive; | 878 | check_commands = ap->link.sactive; |
886 | 879 | ||
887 | /** Check CPBs for completed commands */ | 880 | /** Check CPBs for completed commands */ |
888 | while ((pos = ffs(check_commands)) && !error) { | 881 | while ((pos = ffs(check_commands)) && !error) { |
@@ -1333,7 +1326,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) | |||
1333 | !(ap->flags & ATA_FLAG_DISABLED)) { | 1326 | !(ap->flags & ATA_FLAG_DISABLED)) { |
1334 | struct ata_queued_cmd *qc; | 1327 | struct ata_queued_cmd *qc; |
1335 | 1328 | ||
1336 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1329 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1337 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) | 1330 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
1338 | handled += ata_host_intr(ap, qc); | 1331 | handled += ata_host_intr(ap, qc); |
1339 | else | 1332 | else |
@@ -1459,7 +1452,7 @@ static void nv_ck804_thaw(struct ata_port *ap) | |||
1459 | writeb(mask, mmio_base + NV_INT_ENABLE_CK804); | 1452 | writeb(mask, mmio_base + NV_INT_ENABLE_CK804); |
1460 | } | 1453 | } |
1461 | 1454 | ||
1462 | static int nv_hardreset(struct ata_port *ap, unsigned int *class, | 1455 | static int nv_hardreset(struct ata_link *link, unsigned int *class, |
1463 | unsigned long deadline) | 1456 | unsigned long deadline) |
1464 | { | 1457 | { |
1465 | unsigned int dummy; | 1458 | unsigned int dummy; |
@@ -1468,7 +1461,7 @@ static int nv_hardreset(struct ata_port *ap, unsigned int *class, | |||
1468 | * some controllers. Don't classify on hardreset. For more | 1461 | * some controllers. Don't classify on hardreset. For more |
1469 | * info, see http://bugme.osdl.org/show_bug.cgi?id=3352 | 1462 | * info, see http://bugme.osdl.org/show_bug.cgi?id=3352 |
1470 | */ | 1463 | */ |
1471 | return sata_std_hardreset(ap, &dummy, deadline); | 1464 | return sata_std_hardreset(link, &dummy, deadline); |
1472 | } | 1465 | } |
1473 | 1466 | ||
1474 | static void nv_error_handler(struct ata_port *ap) | 1467 | static void nv_error_handler(struct ata_port *ap) |
@@ -1485,7 +1478,7 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1485 | int i; | 1478 | int i; |
1486 | u16 tmp; | 1479 | u16 tmp; |
1487 | 1480 | ||
1488 | if(ata_tag_valid(ap->active_tag) || ap->sactive) { | 1481 | if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { |
1489 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); | 1482 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); |
1490 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); | 1483 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); |
1491 | u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); | 1484 | u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); |
@@ -1501,8 +1494,8 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1501 | 1494 | ||
1502 | for( i=0;i<NV_ADMA_MAX_CPBS;i++) { | 1495 | for( i=0;i<NV_ADMA_MAX_CPBS;i++) { |
1503 | struct nv_adma_cpb *cpb = &pp->cpb[i]; | 1496 | struct nv_adma_cpb *cpb = &pp->cpb[i]; |
1504 | if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) || | 1497 | if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || |
1505 | ap->sactive & (1 << i) ) | 1498 | ap->link.sactive & (1 << i) ) |
1506 | ata_port_printk(ap, KERN_ERR, | 1499 | ata_port_printk(ap, KERN_ERR, |
1507 | "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", | 1500 | "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", |
1508 | i, cpb->ctl_flags, cpb->resp_flags); | 1501 | i, cpb->ctl_flags, cpb->resp_flags); |
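Two recurring conversions run through the sata_nv hunks above: per-port command and EH state (active_tag, sactive, eh_info, device[]) now lives on the port's default link, ap->link, and ATA_FLAG_HRST_TO_RESUME moves from .flags into the new .link_flags field. A compressed sketch of the accessor change, assuming the field layout visible in these hunks; nv_example_intr() is a made-up name.

	/* sketch: interrupt-path lookups after the ap->link conversion */
	static unsigned int nv_example_intr(struct ata_port *ap)
	{
		/* was ap->active_tag / ap->eh_info before this series */
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			return ata_host_intr(ap, qc);

		/* spurious or unexpected interrupt: record it against the link */
		ata_ehi_push_desc(&ap->link.eh_info, "spurious interrupt");
		return 0;
	}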
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 25698cf0dce0..903213153b5d 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
@@ -167,7 +167,6 @@ static struct scsi_host_template pdc_ata_sht = { | |||
167 | }; | 167 | }; |
168 | 168 | ||
169 | static const struct ata_port_operations pdc_sata_ops = { | 169 | static const struct ata_port_operations pdc_sata_ops = { |
170 | .port_disable = ata_port_disable, | ||
171 | .tf_load = pdc_tf_load_mmio, | 170 | .tf_load = pdc_tf_load_mmio, |
172 | .tf_read = ata_tf_read, | 171 | .tf_read = ata_tf_read, |
173 | .check_status = ata_check_status, | 172 | .check_status = ata_check_status, |
@@ -185,7 +184,6 @@ static const struct ata_port_operations pdc_sata_ops = { | |||
185 | .data_xfer = ata_data_xfer, | 184 | .data_xfer = ata_data_xfer, |
186 | .irq_clear = pdc_irq_clear, | 185 | .irq_clear = pdc_irq_clear, |
187 | .irq_on = ata_irq_on, | 186 | .irq_on = ata_irq_on, |
188 | .irq_ack = ata_irq_ack, | ||
189 | 187 | ||
190 | .scr_read = pdc_sata_scr_read, | 188 | .scr_read = pdc_sata_scr_read, |
191 | .scr_write = pdc_sata_scr_write, | 189 | .scr_write = pdc_sata_scr_write, |
@@ -194,7 +192,6 @@ static const struct ata_port_operations pdc_sata_ops = { | |||
194 | 192 | ||
195 | /* First-generation chips need a more restrictive ->check_atapi_dma op */ | 193 | /* First-generation chips need a more restrictive ->check_atapi_dma op */ |
196 | static const struct ata_port_operations pdc_old_sata_ops = { | 194 | static const struct ata_port_operations pdc_old_sata_ops = { |
197 | .port_disable = ata_port_disable, | ||
198 | .tf_load = pdc_tf_load_mmio, | 195 | .tf_load = pdc_tf_load_mmio, |
199 | .tf_read = ata_tf_read, | 196 | .tf_read = ata_tf_read, |
200 | .check_status = ata_check_status, | 197 | .check_status = ata_check_status, |
@@ -212,7 +209,6 @@ static const struct ata_port_operations pdc_old_sata_ops = { | |||
212 | .data_xfer = ata_data_xfer, | 209 | .data_xfer = ata_data_xfer, |
213 | .irq_clear = pdc_irq_clear, | 210 | .irq_clear = pdc_irq_clear, |
214 | .irq_on = ata_irq_on, | 211 | .irq_on = ata_irq_on, |
215 | .irq_ack = ata_irq_ack, | ||
216 | 212 | ||
217 | .scr_read = pdc_sata_scr_read, | 213 | .scr_read = pdc_sata_scr_read, |
218 | .scr_write = pdc_sata_scr_write, | 214 | .scr_write = pdc_sata_scr_write, |
@@ -220,7 +216,6 @@ static const struct ata_port_operations pdc_old_sata_ops = { | |||
220 | }; | 216 | }; |
221 | 217 | ||
222 | static const struct ata_port_operations pdc_pata_ops = { | 218 | static const struct ata_port_operations pdc_pata_ops = { |
223 | .port_disable = ata_port_disable, | ||
224 | .tf_load = pdc_tf_load_mmio, | 219 | .tf_load = pdc_tf_load_mmio, |
225 | .tf_read = ata_tf_read, | 220 | .tf_read = ata_tf_read, |
226 | .check_status = ata_check_status, | 221 | .check_status = ata_check_status, |
@@ -238,7 +233,6 @@ static const struct ata_port_operations pdc_pata_ops = { | |||
238 | .data_xfer = ata_data_xfer, | 233 | .data_xfer = ata_data_xfer, |
239 | .irq_clear = pdc_irq_clear, | 234 | .irq_clear = pdc_irq_clear, |
240 | .irq_on = ata_irq_on, | 235 | .irq_on = ata_irq_on, |
241 | .irq_ack = ata_irq_ack, | ||
242 | 236 | ||
243 | .port_start = pdc_common_port_start, | 237 | .port_start = pdc_common_port_start, |
244 | }; | 238 | }; |
@@ -475,7 +469,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc) | |||
475 | buf32[2] = 0; /* no next-packet */ | 469 | buf32[2] = 0; /* no next-packet */ |
476 | 470 | ||
477 | /* select drive */ | 471 | /* select drive */ |
478 | if (sata_scr_valid(ap)) { | 472 | if (sata_scr_valid(&ap->link)) { |
479 | dev_sel = PDC_DEVICE_SATA; | 473 | dev_sel = PDC_DEVICE_SATA; |
480 | } else { | 474 | } else { |
481 | dev_sel = ATA_DEVICE_OBS; | 475 | dev_sel = ATA_DEVICE_OBS; |
@@ -626,7 +620,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) | |||
626 | static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, | 620 | static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, |
627 | u32 port_status, u32 err_mask) | 621 | u32 port_status, u32 err_mask) |
628 | { | 622 | { |
629 | struct ata_eh_info *ehi = &ap->eh_info; | 623 | struct ata_eh_info *ehi = &ap->link.eh_info; |
630 | unsigned int ac_err_mask = 0; | 624 | unsigned int ac_err_mask = 0; |
631 | 625 | ||
632 | ata_ehi_clear_desc(ehi); | 626 | ata_ehi_clear_desc(ehi); |
@@ -643,7 +637,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
643 | | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) | 637 | | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) |
644 | ac_err_mask |= AC_ERR_HOST_BUS; | 638 | ac_err_mask |= AC_ERR_HOST_BUS; |
645 | 639 | ||
646 | if (sata_scr_valid(ap)) { | 640 | if (sata_scr_valid(&ap->link)) { |
647 | u32 serror; | 641 | u32 serror; |
648 | 642 | ||
649 | pdc_sata_scr_read(ap, SCR_ERROR, &serror); | 643 | pdc_sata_scr_read(ap, SCR_ERROR, &serror); |
@@ -773,7 +767,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance) | |||
773 | tmp = hotplug_status & (0x11 << ata_no); | 767 | tmp = hotplug_status & (0x11 << ata_no); |
774 | if (tmp && ap && | 768 | if (tmp && ap && |
775 | !(ap->flags & ATA_FLAG_DISABLED)) { | 769 | !(ap->flags & ATA_FLAG_DISABLED)) { |
776 | struct ata_eh_info *ehi = &ap->eh_info; | 770 | struct ata_eh_info *ehi = &ap->link.eh_info; |
777 | ata_ehi_clear_desc(ehi); | 771 | ata_ehi_clear_desc(ehi); |
778 | ata_ehi_hotplugged(ehi); | 772 | ata_ehi_hotplugged(ehi); |
779 | ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp); | 773 | ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp); |
@@ -788,7 +782,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance) | |||
788 | !(ap->flags & ATA_FLAG_DISABLED)) { | 782 | !(ap->flags & ATA_FLAG_DISABLED)) { |
789 | struct ata_queued_cmd *qc; | 783 | struct ata_queued_cmd *qc; |
790 | 784 | ||
791 | qc = ata_qc_from_tag(ap, ap->active_tag); | 785 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
792 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) | 786 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
793 | handled += pdc_host_intr(ap, qc); | 787 | handled += pdc_host_intr(ap, qc); |
794 | } | 788 | } |
@@ -1009,10 +1003,15 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
1009 | 1003 | ||
1010 | is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); | 1004 | is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); |
1011 | for (i = 0; i < host->n_ports; i++) { | 1005 | for (i = 0; i < host->n_ports; i++) { |
1006 | struct ata_port *ap = host->ports[i]; | ||
1012 | unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); | 1007 | unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); |
1013 | pdc_ata_setup_port(host->ports[i], | 1008 | unsigned int port_offset = 0x200 + ata_no * 0x80; |
1014 | base + 0x200 + ata_no * 0x80, | 1009 | unsigned int scr_offset = 0x400 + ata_no * 0x100; |
1015 | base + 0x400 + ata_no * 0x100); | 1010 | |
1011 | pdc_ata_setup_port(ap, base + port_offset, base + scr_offset); | ||
1012 | |||
1013 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); | ||
1014 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, port_offset, "port"); | ||
1016 | } | 1015 | } |
1017 | 1016 | ||
1018 | /* initialize adapter */ | 1017 | /* initialize adapter */ |
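The init_one hunk above (and the matching ones in sata_mv, sata_qstor and sata_sil) registers human-readable resource strings with ata_port_pbar_desc(): an offset of -1 describes the whole BAR, a non-negative offset names this port's window inside it, and the strings show up in the per-port messages printed at host registration. A minimal sketch for a hypothetical driver with one MMIO BAR and an assumed 0x100-byte stride per port; EXAMPLE_MMIO_BAR is invented.

	/* sketch: describe each port's resources during ->init_one() */
	static void example_describe_ports(struct ata_host *host)
	{
		int i;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];
			unsigned long offset = i * 0x100UL;	/* assumed per-port stride */

			/* whole BAR first, then this port's slice of it */
			ata_port_pbar_desc(ap, EXAMPLE_MMIO_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, EXAMPLE_MMIO_BAR, offset, "port");
		}
	}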
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 5e1dfdda698f..c4c4cd29eebb 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c | |||
@@ -145,7 +145,6 @@ static struct scsi_host_template qs_ata_sht = { | |||
145 | }; | 145 | }; |
146 | 146 | ||
147 | static const struct ata_port_operations qs_ata_ops = { | 147 | static const struct ata_port_operations qs_ata_ops = { |
148 | .port_disable = ata_port_disable, | ||
149 | .tf_load = ata_tf_load, | 148 | .tf_load = ata_tf_load, |
150 | .tf_read = ata_tf_read, | 149 | .tf_read = ata_tf_read, |
151 | .check_status = ata_check_status, | 150 | .check_status = ata_check_status, |
@@ -159,7 +158,6 @@ static const struct ata_port_operations qs_ata_ops = { | |||
159 | .eng_timeout = qs_eng_timeout, | 158 | .eng_timeout = qs_eng_timeout, |
160 | .irq_clear = qs_irq_clear, | 159 | .irq_clear = qs_irq_clear, |
161 | .irq_on = ata_irq_on, | 160 | .irq_on = ata_irq_on, |
162 | .irq_ack = ata_irq_ack, | ||
163 | .scr_read = qs_scr_read, | 161 | .scr_read = qs_scr_read, |
164 | .scr_write = qs_scr_write, | 162 | .scr_write = qs_scr_write, |
165 | .port_start = qs_port_start, | 163 | .port_start = qs_port_start, |
@@ -404,7 +402,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host) | |||
404 | struct qs_port_priv *pp = ap->private_data; | 402 | struct qs_port_priv *pp = ap->private_data; |
405 | if (!pp || pp->state != qs_state_pkt) | 403 | if (!pp || pp->state != qs_state_pkt) |
406 | continue; | 404 | continue; |
407 | qc = ata_qc_from_tag(ap, ap->active_tag); | 405 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
408 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 406 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
409 | switch (sHST) { | 407 | switch (sHST) { |
410 | case 0: /* successful CPB */ | 408 | case 0: /* successful CPB */ |
@@ -437,7 +435,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) | |||
437 | struct qs_port_priv *pp = ap->private_data; | 435 | struct qs_port_priv *pp = ap->private_data; |
438 | if (!pp || pp->state != qs_state_mmio) | 436 | if (!pp || pp->state != qs_state_mmio) |
439 | continue; | 437 | continue; |
440 | qc = ata_qc_from_tag(ap, ap->active_tag); | 438 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
441 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 439 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
442 | 440 | ||
443 | /* check main status, clearing INTRQ */ | 441 | /* check main status, clearing INTRQ */ |
@@ -637,9 +635,14 @@ static int qs_ata_init_one(struct pci_dev *pdev, | |||
637 | return rc; | 635 | return rc; |
638 | 636 | ||
639 | for (port_no = 0; port_no < host->n_ports; ++port_no) { | 637 | for (port_no = 0; port_no < host->n_ports; ++port_no) { |
640 | void __iomem *chan = | 638 | struct ata_port *ap = host->ports[port_no]; |
641 | host->iomap[QS_MMIO_BAR] + (port_no * 0x4000); | 639 | unsigned int offset = port_no * 0x4000; |
642 | qs_ata_setup_port(&host->ports[port_no]->ioaddr, chan); | 640 | void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset; |
641 | |||
642 | qs_ata_setup_port(&ap->ioaddr, chan); | ||
643 | |||
644 | ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio"); | ||
645 | ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port"); | ||
643 | } | 646 | } |
644 | 647 | ||
645 | /* initialize adapter */ | 648 | /* initialize adapter */ |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 8c72e714b456..ea3a0ab7e027 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -59,7 +59,8 @@ enum { | |||
59 | SIL_FLAG_MOD15WRITE = (1 << 30), | 59 | SIL_FLAG_MOD15WRITE = (1 << 30), |
60 | 60 | ||
61 | SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 61 | SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
62 | ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME, | 62 | ATA_FLAG_MMIO, |
63 | SIL_DFL_LINK_FLAGS = ATA_LFLAG_HRST_TO_RESUME, | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * Controller IDs | 66 | * Controller IDs |
@@ -117,7 +118,7 @@ static int sil_pci_device_resume(struct pci_dev *pdev); | |||
117 | static void sil_dev_config(struct ata_device *dev); | 118 | static void sil_dev_config(struct ata_device *dev); |
118 | static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 119 | static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
119 | static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); | 120 | static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
120 | static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed); | 121 | static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); |
121 | static void sil_freeze(struct ata_port *ap); | 122 | static void sil_freeze(struct ata_port *ap); |
122 | static void sil_thaw(struct ata_port *ap); | 123 | static void sil_thaw(struct ata_port *ap); |
123 | 124 | ||
@@ -185,7 +186,6 @@ static struct scsi_host_template sil_sht = { | |||
185 | }; | 186 | }; |
186 | 187 | ||
187 | static const struct ata_port_operations sil_ops = { | 188 | static const struct ata_port_operations sil_ops = { |
188 | .port_disable = ata_port_disable, | ||
189 | .dev_config = sil_dev_config, | 189 | .dev_config = sil_dev_config, |
190 | .tf_load = ata_tf_load, | 190 | .tf_load = ata_tf_load, |
191 | .tf_read = ata_tf_read, | 191 | .tf_read = ata_tf_read, |
@@ -206,7 +206,6 @@ static const struct ata_port_operations sil_ops = { | |||
206 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 206 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
207 | .irq_clear = ata_bmdma_irq_clear, | 207 | .irq_clear = ata_bmdma_irq_clear, |
208 | .irq_on = ata_irq_on, | 208 | .irq_on = ata_irq_on, |
209 | .irq_ack = ata_irq_ack, | ||
210 | .scr_read = sil_scr_read, | 209 | .scr_read = sil_scr_read, |
211 | .scr_write = sil_scr_write, | 210 | .scr_write = sil_scr_write, |
212 | .port_start = ata_port_start, | 211 | .port_start = ata_port_start, |
@@ -216,6 +215,7 @@ static const struct ata_port_info sil_port_info[] = { | |||
216 | /* sil_3112 */ | 215 | /* sil_3112 */ |
217 | { | 216 | { |
218 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, | 217 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, |
218 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
219 | .pio_mask = 0x1f, /* pio0-4 */ | 219 | .pio_mask = 0x1f, /* pio0-4 */ |
220 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 220 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
221 | .udma_mask = ATA_UDMA5, | 221 | .udma_mask = ATA_UDMA5, |
@@ -225,6 +225,7 @@ static const struct ata_port_info sil_port_info[] = { | |||
225 | { | 225 | { |
226 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | | 226 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | |
227 | SIL_FLAG_NO_SATA_IRQ, | 227 | SIL_FLAG_NO_SATA_IRQ, |
228 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
228 | .pio_mask = 0x1f, /* pio0-4 */ | 229 | .pio_mask = 0x1f, /* pio0-4 */ |
229 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 230 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
230 | .udma_mask = ATA_UDMA5, | 231 | .udma_mask = ATA_UDMA5, |
@@ -233,6 +234,7 @@ static const struct ata_port_info sil_port_info[] = { | |||
233 | /* sil_3512 */ | 234 | /* sil_3512 */ |
234 | { | 235 | { |
235 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, | 236 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, |
237 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
236 | .pio_mask = 0x1f, /* pio0-4 */ | 238 | .pio_mask = 0x1f, /* pio0-4 */ |
237 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 239 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
238 | .udma_mask = ATA_UDMA5, | 240 | .udma_mask = ATA_UDMA5, |
@@ -241,6 +243,7 @@ static const struct ata_port_info sil_port_info[] = { | |||
241 | /* sil_3114 */ | 243 | /* sil_3114 */ |
242 | { | 244 | { |
243 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, | 245 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, |
246 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
244 | .pio_mask = 0x1f, /* pio0-4 */ | 247 | .pio_mask = 0x1f, /* pio0-4 */ |
245 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 248 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
246 | .udma_mask = ATA_UDMA5, | 249 | .udma_mask = ATA_UDMA5, |
@@ -290,35 +293,33 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) | |||
290 | 293 | ||
291 | /** | 294 | /** |
292 | * sil_set_mode - wrap set_mode functions | 295 | * sil_set_mode - wrap set_mode functions |
293 | * @ap: port to set up | 296 | * @link: link to set up |
294 | * @r_failed: returned device when we fail | 297 | * @r_failed: returned device when we fail |
295 | * | 298 | * |
296 | * Wrap the libata method for device setup as after the setup we need | 299 | * Wrap the libata method for device setup as after the setup we need |
297 | * to inspect the results and do some configuration work | 300 | * to inspect the results and do some configuration work |
298 | */ | 301 | */ |
299 | 302 | ||
300 | static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed) | 303 | static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed) |
301 | { | 304 | { |
302 | struct ata_host *host = ap->host; | 305 | struct ata_port *ap = link->ap; |
303 | struct ata_device *dev; | 306 | void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; |
304 | void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR]; | ||
305 | void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; | 307 | void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; |
306 | u32 tmp, dev_mode[2]; | 308 | struct ata_device *dev; |
307 | unsigned int i; | 309 | u32 tmp, dev_mode[2] = { }; |
308 | int rc; | 310 | int rc; |
309 | 311 | ||
310 | rc = ata_do_set_mode(ap, r_failed); | 312 | rc = ata_do_set_mode(link, r_failed); |
311 | if (rc) | 313 | if (rc) |
312 | return rc; | 314 | return rc; |
313 | 315 | ||
314 | for (i = 0; i < 2; i++) { | 316 | ata_link_for_each_dev(dev, link) { |
315 | dev = &ap->device[i]; | ||
316 | if (!ata_dev_enabled(dev)) | 317 | if (!ata_dev_enabled(dev)) |
317 | dev_mode[i] = 0; /* PIO0/1/2 */ | 318 | dev_mode[dev->devno] = 0; /* PIO0/1/2 */ |
318 | else if (dev->flags & ATA_DFLAG_PIO) | 319 | else if (dev->flags & ATA_DFLAG_PIO) |
319 | dev_mode[i] = 1; /* PIO3/4 */ | 320 | dev_mode[dev->devno] = 1; /* PIO3/4 */ |
320 | else | 321 | else |
321 | dev_mode[i] = 3; /* UDMA */ | 322 | dev_mode[dev->devno] = 3; /* UDMA */ |
322 | /* value 2 indicates MDMA */ | 323 | /* value 2 indicates MDMA */ |
323 | } | 324 | } |
324 | 325 | ||
@@ -374,8 +375,8 @@ static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
374 | 375 | ||
375 | static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | 376 | static void sil_host_intr(struct ata_port *ap, u32 bmdma2) |
376 | { | 377 | { |
377 | struct ata_eh_info *ehi = &ap->eh_info; | 378 | struct ata_eh_info *ehi = &ap->link.eh_info; |
378 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | 379 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); |
379 | u8 status; | 380 | u8 status; |
380 | 381 | ||
381 | if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { | 382 | if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { |
@@ -394,8 +395,8 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
394 | * repeat probing needlessly. | 395 | * repeat probing needlessly. |
395 | */ | 396 | */ |
396 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { | 397 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { |
397 | ata_ehi_hotplugged(&ap->eh_info); | 398 | ata_ehi_hotplugged(&ap->link.eh_info); |
398 | ap->eh_info.serror |= serror; | 399 | ap->link.eh_info.serror |= serror; |
399 | } | 400 | } |
400 | 401 | ||
401 | goto freeze; | 402 | goto freeze; |
@@ -562,8 +563,8 @@ static void sil_thaw(struct ata_port *ap) | |||
562 | */ | 563 | */ |
563 | static void sil_dev_config(struct ata_device *dev) | 564 | static void sil_dev_config(struct ata_device *dev) |
564 | { | 565 | { |
565 | struct ata_port *ap = dev->ap; | 566 | struct ata_port *ap = dev->link->ap; |
566 | int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO; | 567 | int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO; |
567 | unsigned int n, quirks = 0; | 568 | unsigned int n, quirks = 0; |
568 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; | 569 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; |
569 | 570 | ||
@@ -686,7 +687,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
686 | mmio_base = host->iomap[SIL_MMIO_BAR]; | 687 | mmio_base = host->iomap[SIL_MMIO_BAR]; |
687 | 688 | ||
688 | for (i = 0; i < host->n_ports; i++) { | 689 | for (i = 0; i < host->n_ports; i++) { |
689 | struct ata_ioports *ioaddr = &host->ports[i]->ioaddr; | 690 | struct ata_port *ap = host->ports[i]; |
691 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
690 | 692 | ||
691 | ioaddr->cmd_addr = mmio_base + sil_port[i].tf; | 693 | ioaddr->cmd_addr = mmio_base + sil_port[i].tf; |
692 | ioaddr->altstatus_addr = | 694 | ioaddr->altstatus_addr = |
@@ -694,6 +696,9 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
694 | ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; | 696 | ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; |
695 | ioaddr->scr_addr = mmio_base + sil_port[i].scr; | 697 | ioaddr->scr_addr = mmio_base + sil_port[i].scr; |
696 | ata_std_ports(ioaddr); | 698 | ata_std_ports(ioaddr); |
699 | |||
700 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); | ||
701 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); | ||
697 | } | 702 | } |
698 | 703 | ||
699 | /* initialize and activate */ | 704 | /* initialize and activate */ |
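sata_sil also shows the flag split in its port info table: reset behaviour flags (here ATA_LFLAG_HRST_TO_RESUME, formerly ATA_FLAG_HRST_TO_RESUME) move out of .flags into the per-link .link_flags field. A minimal sketch of the pattern; the EXAMPLE_* names are invented and the masks are only placeholders.

	enum {
		EXAMPLE_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
		EXAMPLE_LINK_FLAGS = ATA_LFLAG_HRST_TO_RESUME,
	};

	static const struct ata_port_info example_port_info = {
		.flags		= EXAMPLE_PORT_FLAGS,
		.link_flags	= EXAMPLE_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
	};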
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 233e88693395..b0619278454a 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/libata.h> | 30 | #include <linux/libata.h> |
31 | 31 | ||
32 | #define DRV_NAME "sata_sil24" | 32 | #define DRV_NAME "sata_sil24" |
33 | #define DRV_VERSION "1.0" | 33 | #define DRV_VERSION "1.1" |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Port request block (PRB) 32 bytes | 36 | * Port request block (PRB) 32 bytes |
@@ -168,7 +168,7 @@ enum { | |||
168 | 168 | ||
169 | DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | | 169 | DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | |
170 | PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG | | 170 | PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG | |
171 | PORT_IRQ_UNK_FIS, | 171 | PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY, |
172 | 172 | ||
173 | /* bits[27:16] are unmasked (raw) */ | 173 | /* bits[27:16] are unmasked (raw) */ |
174 | PORT_IRQ_RAW_SHIFT = 16, | 174 | PORT_IRQ_RAW_SHIFT = 16, |
@@ -237,8 +237,9 @@ enum { | |||
237 | /* host flags */ | 237 | /* host flags */ |
238 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 238 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
239 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 239 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
240 | ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY | | 240 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | |
241 | ATA_FLAG_ACPI_SATA, | 241 | ATA_FLAG_AN | ATA_FLAG_PMP, |
242 | SIL24_COMMON_LFLAGS = ATA_LFLAG_SKIP_D2H_BSY, | ||
242 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ | 243 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ |
243 | 244 | ||
244 | IRQ_STAT_4PORTS = 0xf, | 245 | IRQ_STAT_4PORTS = 0xf, |
@@ -322,6 +323,7 @@ struct sil24_port_priv { | |||
322 | union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ | 323 | union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ |
323 | dma_addr_t cmd_block_dma; /* DMA base addr for them */ | 324 | dma_addr_t cmd_block_dma; /* DMA base addr for them */ |
324 | struct ata_taskfile tf; /* Cached taskfile registers */ | 325 | struct ata_taskfile tf; /* Cached taskfile registers */ |
326 | int do_port_rst; | ||
325 | }; | 327 | }; |
326 | 328 | ||
327 | static void sil24_dev_config(struct ata_device *dev); | 329 | static void sil24_dev_config(struct ata_device *dev); |
@@ -329,9 +331,12 @@ static u8 sil24_check_status(struct ata_port *ap); | |||
329 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); | 331 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); |
330 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); | 332 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); |
331 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | 333 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); |
334 | static int sil24_qc_defer(struct ata_queued_cmd *qc); | ||
332 | static void sil24_qc_prep(struct ata_queued_cmd *qc); | 335 | static void sil24_qc_prep(struct ata_queued_cmd *qc); |
333 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); | 336 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); |
334 | static void sil24_irq_clear(struct ata_port *ap); | 337 | static void sil24_irq_clear(struct ata_port *ap); |
338 | static void sil24_pmp_attach(struct ata_port *ap); | ||
339 | static void sil24_pmp_detach(struct ata_port *ap); | ||
335 | static void sil24_freeze(struct ata_port *ap); | 340 | static void sil24_freeze(struct ata_port *ap); |
336 | static void sil24_thaw(struct ata_port *ap); | 341 | static void sil24_thaw(struct ata_port *ap); |
337 | static void sil24_error_handler(struct ata_port *ap); | 342 | static void sil24_error_handler(struct ata_port *ap); |
@@ -340,6 +345,7 @@ static int sil24_port_start(struct ata_port *ap); | |||
340 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 345 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
341 | #ifdef CONFIG_PM | 346 | #ifdef CONFIG_PM |
342 | static int sil24_pci_device_resume(struct pci_dev *pdev); | 347 | static int sil24_pci_device_resume(struct pci_dev *pdev); |
348 | static int sil24_port_resume(struct ata_port *ap); | ||
343 | #endif | 349 | #endif |
344 | 350 | ||
345 | static const struct pci_device_id sil24_pci_tbl[] = { | 351 | static const struct pci_device_id sil24_pci_tbl[] = { |
@@ -384,8 +390,6 @@ static struct scsi_host_template sil24_sht = { | |||
384 | }; | 390 | }; |
385 | 391 | ||
386 | static const struct ata_port_operations sil24_ops = { | 392 | static const struct ata_port_operations sil24_ops = { |
387 | .port_disable = ata_port_disable, | ||
388 | |||
389 | .dev_config = sil24_dev_config, | 393 | .dev_config = sil24_dev_config, |
390 | 394 | ||
391 | .check_status = sil24_check_status, | 395 | .check_status = sil24_check_status, |
@@ -394,22 +398,28 @@ static const struct ata_port_operations sil24_ops = { | |||
394 | 398 | ||
395 | .tf_read = sil24_tf_read, | 399 | .tf_read = sil24_tf_read, |
396 | 400 | ||
401 | .qc_defer = sil24_qc_defer, | ||
397 | .qc_prep = sil24_qc_prep, | 402 | .qc_prep = sil24_qc_prep, |
398 | .qc_issue = sil24_qc_issue, | 403 | .qc_issue = sil24_qc_issue, |
399 | 404 | ||
400 | .irq_clear = sil24_irq_clear, | 405 | .irq_clear = sil24_irq_clear, |
401 | .irq_on = ata_dummy_irq_on, | ||
402 | .irq_ack = ata_dummy_irq_ack, | ||
403 | 406 | ||
404 | .scr_read = sil24_scr_read, | 407 | .scr_read = sil24_scr_read, |
405 | .scr_write = sil24_scr_write, | 408 | .scr_write = sil24_scr_write, |
406 | 409 | ||
410 | .pmp_attach = sil24_pmp_attach, | ||
411 | .pmp_detach = sil24_pmp_detach, | ||
412 | |||
407 | .freeze = sil24_freeze, | 413 | .freeze = sil24_freeze, |
408 | .thaw = sil24_thaw, | 414 | .thaw = sil24_thaw, |
409 | .error_handler = sil24_error_handler, | 415 | .error_handler = sil24_error_handler, |
410 | .post_internal_cmd = sil24_post_internal_cmd, | 416 | .post_internal_cmd = sil24_post_internal_cmd, |
411 | 417 | ||
412 | .port_start = sil24_port_start, | 418 | .port_start = sil24_port_start, |
419 | |||
420 | #ifdef CONFIG_PM | ||
421 | .port_resume = sil24_port_resume, | ||
422 | #endif | ||
413 | }; | 423 | }; |
414 | 424 | ||
415 | /* | 425 | /* |
@@ -424,6 +434,7 @@ static const struct ata_port_info sil24_port_info[] = { | |||
424 | { | 434 | { |
425 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | | 435 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | |
426 | SIL24_FLAG_PCIX_IRQ_WOC, | 436 | SIL24_FLAG_PCIX_IRQ_WOC, |
437 | .link_flags = SIL24_COMMON_LFLAGS, | ||
427 | .pio_mask = 0x1f, /* pio0-4 */ | 438 | .pio_mask = 0x1f, /* pio0-4 */ |
428 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 439 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
429 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 440 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -432,6 +443,7 @@ static const struct ata_port_info sil24_port_info[] = { | |||
432 | /* sil_3132 */ | 443 | /* sil_3132 */ |
433 | { | 444 | { |
434 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), | 445 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), |
446 | .link_flags = SIL24_COMMON_LFLAGS, | ||
435 | .pio_mask = 0x1f, /* pio0-4 */ | 447 | .pio_mask = 0x1f, /* pio0-4 */ |
436 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 448 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
437 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 449 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -440,6 +452,7 @@ static const struct ata_port_info sil24_port_info[] = { | |||
440 | /* sil_3131/sil_3531 */ | 452 | /* sil_3131/sil_3531 */ |
441 | { | 453 | { |
442 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), | 454 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), |
455 | .link_flags = SIL24_COMMON_LFLAGS, | ||
443 | .pio_mask = 0x1f, /* pio0-4 */ | 456 | .pio_mask = 0x1f, /* pio0-4 */ |
444 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 457 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
445 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 458 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -456,7 +469,7 @@ static int sil24_tag(int tag) | |||
456 | 469 | ||
457 | static void sil24_dev_config(struct ata_device *dev) | 470 | static void sil24_dev_config(struct ata_device *dev) |
458 | { | 471 | { |
459 | void __iomem *port = dev->ap->ioaddr.cmd_addr; | 472 | void __iomem *port = dev->link->ap->ioaddr.cmd_addr; |
460 | 473 | ||
461 | if (dev->cdb_len == 16) | 474 | if (dev->cdb_len == 16) |
462 | writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); | 475 | writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); |
@@ -520,19 +533,78 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
520 | *tf = pp->tf; | 533 | *tf = pp->tf; |
521 | } | 534 | } |
522 | 535 | ||
536 | static void sil24_config_port(struct ata_port *ap) | ||
537 | { | ||
538 | void __iomem *port = ap->ioaddr.cmd_addr; | ||
539 | |||
540 | /* configure IRQ WoC */ | ||
541 | if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) | ||
542 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); | ||
543 | else | ||
544 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); | ||
545 | |||
546 | /* zero error counters. */ | ||
547 | writel(0x8000, port + PORT_DECODE_ERR_THRESH); | ||
548 | writel(0x8000, port + PORT_CRC_ERR_THRESH); | ||
549 | writel(0x8000, port + PORT_HSHK_ERR_THRESH); | ||
550 | writel(0x0000, port + PORT_DECODE_ERR_CNT); | ||
551 | writel(0x0000, port + PORT_CRC_ERR_CNT); | ||
552 | writel(0x0000, port + PORT_HSHK_ERR_CNT); | ||
553 | |||
554 | /* always use 64bit activation */ | ||
555 | writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); | ||
556 | |||
557 | /* clear port multiplier enable and resume bits */ | ||
558 | writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); | ||
559 | } | ||
560 | |||
561 | static void sil24_config_pmp(struct ata_port *ap, int attached) | ||
562 | { | ||
563 | void __iomem *port = ap->ioaddr.cmd_addr; | ||
564 | |||
565 | if (attached) | ||
566 | writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT); | ||
567 | else | ||
568 | writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR); | ||
569 | } | ||
570 | |||
571 | static void sil24_clear_pmp(struct ata_port *ap) | ||
572 | { | ||
573 | void __iomem *port = ap->ioaddr.cmd_addr; | ||
574 | int i; | ||
575 | |||
576 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); | ||
577 | |||
578 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) { | ||
579 | void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE; | ||
580 | |||
581 | writel(0, pmp_base + PORT_PMP_STATUS); | ||
582 | writel(0, pmp_base + PORT_PMP_QACTIVE); | ||
583 | } | ||
584 | } | ||
585 | |||
523 | static int sil24_init_port(struct ata_port *ap) | 586 | static int sil24_init_port(struct ata_port *ap) |
524 | { | 587 | { |
525 | void __iomem *port = ap->ioaddr.cmd_addr; | 588 | void __iomem *port = ap->ioaddr.cmd_addr; |
589 | struct sil24_port_priv *pp = ap->private_data; | ||
526 | u32 tmp; | 590 | u32 tmp; |
527 | 591 | ||
592 | /* clear PMP error status */ | ||
593 | if (ap->nr_pmp_links) | ||
594 | sil24_clear_pmp(ap); | ||
595 | |||
528 | writel(PORT_CS_INIT, port + PORT_CTRL_STAT); | 596 | writel(PORT_CS_INIT, port + PORT_CTRL_STAT); |
529 | ata_wait_register(port + PORT_CTRL_STAT, | 597 | ata_wait_register(port + PORT_CTRL_STAT, |
530 | PORT_CS_INIT, PORT_CS_INIT, 10, 100); | 598 | PORT_CS_INIT, PORT_CS_INIT, 10, 100); |
531 | tmp = ata_wait_register(port + PORT_CTRL_STAT, | 599 | tmp = ata_wait_register(port + PORT_CTRL_STAT, |
532 | PORT_CS_RDY, 0, 10, 100); | 600 | PORT_CS_RDY, 0, 10, 100); |
533 | 601 | ||
534 | if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) | 602 | if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { |
603 | pp->do_port_rst = 1; | ||
604 | ap->link.eh_context.i.action |= ATA_EH_HARDRESET; | ||
535 | return -EIO; | 605 | return -EIO; |
606 | } | ||
607 | |||
536 | return 0; | 608 | return 0; |
537 | } | 609 | } |
538 | 610 | ||
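sil24_init_port() above leans on ata_wait_register() twice: first to wait for PORT_CS_INIT to clear, then for PORT_CS_RDY to come up. As a reading aid, the helper's behaviour is paraphrased below (an approximation, not the kernel's exact implementation): poll until the masked value moves away from the given value or the timeout expires, returning the last value read.

	/* paraphrase of ata_wait_register(); treat details as approximate */
	static u32 example_wait_register(void __iomem *reg, u32 mask, u32 val,
					 unsigned long interval_msec,
					 unsigned long timeout_msec)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_msec);
		u32 tmp = readl(reg);

		/* poll while the masked value still equals 'val' */
		while ((tmp & mask) == val && time_before(jiffies, timeout)) {
			msleep(interval_msec);
			tmp = readl(reg);
		}
		return tmp;	/* caller checks whether the bits finally changed */
	}

So the (tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY check afterwards fails either if INIT never cleared or RDY never set, which is what now flags do_port_rst and requests a hardreset.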
@@ -583,9 +655,10 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, | |||
583 | return rc; | 655 | return rc; |
584 | } | 656 | } |
585 | 657 | ||
586 | static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, | 658 | static int sil24_do_softreset(struct ata_link *link, unsigned int *class, |
587 | int pmp, unsigned long deadline) | 659 | int pmp, unsigned long deadline) |
588 | { | 660 | { |
661 | struct ata_port *ap = link->ap; | ||
589 | unsigned long timeout_msec = 0; | 662 | unsigned long timeout_msec = 0; |
590 | struct ata_taskfile tf; | 663 | struct ata_taskfile tf; |
591 | const char *reason; | 664 | const char *reason; |
@@ -593,7 +666,7 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, | |||
593 | 666 | ||
594 | DPRINTK("ENTER\n"); | 667 | DPRINTK("ENTER\n"); |
595 | 668 | ||
596 | if (ata_port_offline(ap)) { | 669 | if (ata_link_offline(link)) { |
597 | DPRINTK("PHY reports no device\n"); | 670 | DPRINTK("PHY reports no device\n"); |
598 | *class = ATA_DEV_NONE; | 671 | *class = ATA_DEV_NONE; |
599 | goto out; | 672 | goto out; |
@@ -609,7 +682,7 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, | |||
609 | if (time_after(deadline, jiffies)) | 682 | if (time_after(deadline, jiffies)) |
610 | timeout_msec = jiffies_to_msecs(deadline - jiffies); | 683 | timeout_msec = jiffies_to_msecs(deadline - jiffies); |
611 | 684 | ||
612 | ata_tf_init(ap->device, &tf); /* doesn't really matter */ | 685 | ata_tf_init(link->device, &tf); /* doesn't really matter */ |
613 | rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, | 686 | rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, |
614 | timeout_msec); | 687 | timeout_msec); |
615 | if (rc == -EBUSY) { | 688 | if (rc == -EBUSY) { |
@@ -631,29 +704,54 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, | |||
631 | return 0; | 704 | return 0; |
632 | 705 | ||
633 | err: | 706 | err: |
634 | ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); | 707 | ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); |
635 | return -EIO; | 708 | return -EIO; |
636 | } | 709 | } |
637 | 710 | ||
638 | static int sil24_softreset(struct ata_port *ap, unsigned int *class, | 711 | static int sil24_softreset(struct ata_link *link, unsigned int *class, |
639 | unsigned long deadline) | 712 | unsigned long deadline) |
640 | { | 713 | { |
641 | return sil24_do_softreset(ap, class, 0, deadline); | 714 | return sil24_do_softreset(link, class, SATA_PMP_CTRL_PORT, deadline); |
642 | } | 715 | } |
643 | 716 | ||
644 | static int sil24_hardreset(struct ata_port *ap, unsigned int *class, | 717 | static int sil24_hardreset(struct ata_link *link, unsigned int *class, |
645 | unsigned long deadline) | 718 | unsigned long deadline) |
646 | { | 719 | { |
720 | struct ata_port *ap = link->ap; | ||
647 | void __iomem *port = ap->ioaddr.cmd_addr; | 721 | void __iomem *port = ap->ioaddr.cmd_addr; |
722 | struct sil24_port_priv *pp = ap->private_data; | ||
723 | int did_port_rst = 0; | ||
648 | const char *reason; | 724 | const char *reason; |
649 | int tout_msec, rc; | 725 | int tout_msec, rc; |
650 | u32 tmp; | 726 | u32 tmp; |
651 | 727 | ||
728 | retry: | ||
729 | /* Sometimes, DEV_RST is not enough to recover the controller. | ||
730 | * This happens often after PM DMA CS errata. | ||
731 | */ | ||
732 | if (pp->do_port_rst) { | ||
733 | ata_port_printk(ap, KERN_WARNING, "controller in dubious " | ||
734 | "state, performing PORT_RST\n"); | ||
735 | |||
736 | writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT); | ||
737 | msleep(10); | ||
738 | writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); | ||
739 | ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0, | ||
740 | 10, 5000); | ||
741 | |||
742 | /* restore port configuration */ | ||
743 | sil24_config_port(ap); | ||
744 | sil24_config_pmp(ap, ap->nr_pmp_links); | ||
745 | |||
746 | pp->do_port_rst = 0; | ||
747 | did_port_rst = 1; | ||
748 | } | ||
749 | |||
652 | /* sil24 does the right thing(tm) without any protection */ | 750 | /* sil24 does the right thing(tm) without any protection */ |
653 | sata_set_spd(ap); | 751 | sata_set_spd(link); |
654 | 752 | ||
655 | tout_msec = 100; | 753 | tout_msec = 100; |
656 | if (ata_port_online(ap)) | 754 | if (ata_link_online(link)) |
657 | tout_msec = 5000; | 755 | tout_msec = 5000; |
658 | 756 | ||
659 | writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); | 757 | writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); |
@@ -663,14 +761,14 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class, | |||
663 | /* SStatus oscillates between zero and valid status after | 761 | /* SStatus oscillates between zero and valid status after |
664 | * DEV_RST, debounce it. | 762 | * DEV_RST, debounce it. |
665 | */ | 763 | */ |
666 | rc = sata_phy_debounce(ap, sata_deb_timing_long, deadline); | 764 | rc = sata_link_debounce(link, sata_deb_timing_long, deadline); |
667 | if (rc) { | 765 | if (rc) { |
668 | reason = "PHY debouncing failed"; | 766 | reason = "PHY debouncing failed"; |
669 | goto err; | 767 | goto err; |
670 | } | 768 | } |
671 | 769 | ||
672 | if (tmp & PORT_CS_DEV_RST) { | 770 | if (tmp & PORT_CS_DEV_RST) { |
673 | if (ata_port_offline(ap)) | 771 | if (ata_link_offline(link)) |
674 | return 0; | 772 | return 0; |
675 | reason = "link not ready"; | 773 | reason = "link not ready"; |
676 | goto err; | 774 | goto err; |
@@ -685,7 +783,12 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class, | |||
685 | return -EAGAIN; | 783 | return -EAGAIN; |
686 | 784 | ||
687 | err: | 785 | err: |
688 | ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason); | 786 | if (!did_port_rst) { |
787 | pp->do_port_rst = 1; | ||
788 | goto retry; | ||
789 | } | ||
790 | |||
791 | ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason); | ||
689 | return -EIO; | 792 | return -EIO; |
690 | } | 793 | } |
691 | 794 | ||
@@ -705,6 +808,38 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc, | |||
705 | } | 808 | } |
706 | } | 809 | } |
707 | 810 | ||
811 | static int sil24_qc_defer(struct ata_queued_cmd *qc) | ||
812 | { | ||
813 | struct ata_link *link = qc->dev->link; | ||
814 | struct ata_port *ap = link->ap; | ||
815 | u8 prot = qc->tf.protocol; | ||
816 | int is_atapi = (prot == ATA_PROT_ATAPI || | ||
817 | prot == ATA_PROT_ATAPI_NODATA || | ||
818 | prot == ATA_PROT_ATAPI_DMA); | ||
819 | |||
820 | /* ATAPI commands completing with CHECK_SENSE cause various | ||
821 | * weird problems if other commands are active. PMP DMA CS | ||
822 | * errata doesn't cover all and HSM violation occurs even with | ||
823 | * only one other device active. Always run an ATAPI command | ||
824 | * by itself. | ||
825 | */ | ||
826 | if (unlikely(ap->excl_link)) { | ||
827 | if (link == ap->excl_link) { | ||
828 | if (ap->nr_active_links) | ||
829 | return ATA_DEFER_PORT; | ||
830 | qc->flags |= ATA_QCFLAG_CLEAR_EXCL; | ||
831 | } else | ||
832 | return ATA_DEFER_PORT; | ||
833 | } else if (unlikely(is_atapi)) { | ||
834 | ap->excl_link = link; | ||
835 | if (ap->nr_active_links) | ||
836 | return ATA_DEFER_PORT; | ||
837 | qc->flags |= ATA_QCFLAG_CLEAR_EXCL; | ||
838 | } | ||
839 | |||
840 | return ata_std_qc_defer(qc); | ||
841 | } | ||
842 | |||
708 | static void sil24_qc_prep(struct ata_queued_cmd *qc) | 843 | static void sil24_qc_prep(struct ata_queued_cmd *qc) |
709 | { | 844 | { |
710 | struct ata_port *ap = qc->ap; | 845 | struct ata_port *ap = qc->ap; |
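sil24_qc_defer() above serializes ATAPI commands against everything else on the port by claiming ap->excl_link and deferring other commands; ATA_QCFLAG_CLEAR_EXCL releases the claim once the exclusive command completes. Below is a stripped-down sketch of the same idea that deliberately omits the excl_link bookkeeping (so the ATAPI command itself is not guaranteed to win the race) and falls back to ata_std_qc_defer() for the usual NCQ/non-NCQ exclusion; example_qc_defer() is a made-up name.

	static int example_qc_defer(struct ata_queued_cmd *qc)
	{
		struct ata_port *ap = qc->dev->link->ap;
		u8 prot = qc->tf.protocol;
		int is_atapi = (prot == ATA_PROT_ATAPI ||
				prot == ATA_PROT_ATAPI_NODATA ||
				prot == ATA_PROT_ATAPI_DMA);

		/* run ATAPI strictly alone: defer while anything else is active */
		if (is_atapi && ap->nr_active_links)
			return ATA_DEFER_PORT;

		return ata_std_qc_defer(qc);
	}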
@@ -748,7 +883,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) | |||
748 | } | 883 | } |
749 | 884 | ||
750 | prb->ctrl = cpu_to_le16(ctrl); | 885 | prb->ctrl = cpu_to_le16(ctrl); |
751 | ata_tf_to_fis(&qc->tf, 0, 1, prb->fis); | 886 | ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis); |
752 | 887 | ||
753 | if (qc->flags & ATA_QCFLAG_DMAMAP) | 888 | if (qc->flags & ATA_QCFLAG_DMAMAP) |
754 | sil24_fill_sg(qc, sge); | 889 | sil24_fill_sg(qc, sge); |
@@ -777,6 +912,39 @@ static void sil24_irq_clear(struct ata_port *ap) | |||
777 | /* unused */ | 912 | /* unused */ |
778 | } | 913 | } |
779 | 914 | ||
915 | static void sil24_pmp_attach(struct ata_port *ap) | ||
916 | { | ||
917 | sil24_config_pmp(ap, 1); | ||
918 | sil24_init_port(ap); | ||
919 | } | ||
920 | |||
921 | static void sil24_pmp_detach(struct ata_port *ap) | ||
922 | { | ||
923 | sil24_init_port(ap); | ||
924 | sil24_config_pmp(ap, 0); | ||
925 | } | ||
926 | |||
927 | static int sil24_pmp_softreset(struct ata_link *link, unsigned int *class, | ||
928 | unsigned long deadline) | ||
929 | { | ||
930 | return sil24_do_softreset(link, class, link->pmp, deadline); | ||
931 | } | ||
932 | |||
933 | static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, | ||
934 | unsigned long deadline) | ||
935 | { | ||
936 | int rc; | ||
937 | |||
938 | rc = sil24_init_port(link->ap); | ||
939 | if (rc) { | ||
940 | ata_link_printk(link, KERN_ERR, | ||
941 | "hardreset failed (port not ready)\n"); | ||
942 | return rc; | ||
943 | } | ||
944 | |||
945 | return sata_pmp_std_hardreset(link, class, deadline); | ||
946 | } | ||
947 | |||
780 | static void sil24_freeze(struct ata_port *ap) | 948 | static void sil24_freeze(struct ata_port *ap) |
781 | { | 949 | { |
782 | void __iomem *port = ap->ioaddr.cmd_addr; | 950 | void __iomem *port = ap->ioaddr.cmd_addr; |
@@ -804,8 +972,10 @@ static void sil24_error_intr(struct ata_port *ap) | |||
804 | { | 972 | { |
805 | void __iomem *port = ap->ioaddr.cmd_addr; | 973 | void __iomem *port = ap->ioaddr.cmd_addr; |
806 | struct sil24_port_priv *pp = ap->private_data; | 974 | struct sil24_port_priv *pp = ap->private_data; |
807 | struct ata_eh_info *ehi = &ap->eh_info; | 975 | struct ata_queued_cmd *qc = NULL; |
808 | int freeze = 0; | 976 | struct ata_link *link; |
977 | struct ata_eh_info *ehi; | ||
978 | int abort = 0, freeze = 0; | ||
809 | u32 irq_stat; | 979 | u32 irq_stat; |
810 | 980 | ||
811 | /* on error, we need to clear IRQ explicitly */ | 981 | /* on error, we need to clear IRQ explicitly */ |
@@ -813,10 +983,17 @@ static void sil24_error_intr(struct ata_port *ap) | |||
813 | writel(irq_stat, port + PORT_IRQ_STAT); | 983 | writel(irq_stat, port + PORT_IRQ_STAT); |
814 | 984 | ||
815 | /* first, analyze and record host port events */ | 985 | /* first, analyze and record host port events */ |
986 | link = &ap->link; | ||
987 | ehi = &link->eh_info; | ||
816 | ata_ehi_clear_desc(ehi); | 988 | ata_ehi_clear_desc(ehi); |
817 | 989 | ||
818 | ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); | 990 | ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); |
819 | 991 | ||
992 | if (irq_stat & PORT_IRQ_SDB_NOTIFY) { | ||
993 | ata_ehi_push_desc(ehi, "SDB notify"); | ||
994 | sata_async_notification(ap); | ||
995 | } | ||
996 | |||
820 | if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { | 997 | if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { |
821 | ata_ehi_hotplugged(ehi); | 998 | ata_ehi_hotplugged(ehi); |
822 | ata_ehi_push_desc(ehi, "%s", | 999 | ata_ehi_push_desc(ehi, "%s", |
@@ -836,8 +1013,44 @@ static void sil24_error_intr(struct ata_port *ap) | |||
836 | if (irq_stat & PORT_IRQ_ERROR) { | 1013 | if (irq_stat & PORT_IRQ_ERROR) { |
837 | struct sil24_cerr_info *ci = NULL; | 1014 | struct sil24_cerr_info *ci = NULL; |
838 | unsigned int err_mask = 0, action = 0; | 1015 | unsigned int err_mask = 0, action = 0; |
839 | struct ata_queued_cmd *qc; | 1016 | u32 context, cerr; |
840 | u32 cerr; | 1017 | int pmp; |
1018 | |||
1019 | abort = 1; | ||
1020 | |||
1021 | /* DMA Context Switch Failure in Port Multiplier Mode | ||
1022 | * errata. If we have active commands to 3 or more | ||
1023 | * devices, any error condition on active devices can | ||
1024 | * corrupt DMA context switching. | ||
1025 | */ | ||
1026 | if (ap->nr_active_links >= 3) { | ||
1027 | ehi->err_mask |= AC_ERR_OTHER; | ||
1028 | ehi->action |= ATA_EH_HARDRESET; | ||
1029 | ata_ehi_push_desc(ehi, "PMP DMA CS errata"); | ||
1030 | pp->do_port_rst = 1; | ||
1031 | freeze = 1; | ||
1032 | } | ||
1033 | |||
1034 | /* find out the offending link and qc */ | ||
1035 | if (ap->nr_pmp_links) { | ||
1036 | context = readl(port + PORT_CONTEXT); | ||
1037 | pmp = (context >> 5) & 0xf; | ||
1038 | |||
1039 | if (pmp < ap->nr_pmp_links) { | ||
1040 | link = &ap->pmp_link[pmp]; | ||
1041 | ehi = &link->eh_info; | ||
1042 | qc = ata_qc_from_tag(ap, link->active_tag); | ||
1043 | |||
1044 | ata_ehi_clear_desc(ehi); | ||
1045 | ata_ehi_push_desc(ehi, "irq_stat 0x%08x", | ||
1046 | irq_stat); | ||
1047 | } else { | ||
1048 | err_mask |= AC_ERR_HSM; | ||
1049 | action |= ATA_EH_HARDRESET; | ||
1050 | freeze = 1; | ||
1051 | } | ||
1052 | } else | ||
1053 | qc = ata_qc_from_tag(ap, link->active_tag); | ||
841 | 1054 | ||
842 | /* analyze CMD_ERR */ | 1055 | /* analyze CMD_ERR */ |
843 | cerr = readl(port + PORT_CMD_ERR); | 1056 | cerr = readl(port + PORT_CMD_ERR); |
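
To make the PORT_CONTEXT decode above concrete, a short worked example (the register value is made up):

    /* Example: PORT_CONTEXT reads 0x000000a3
     *   pmp = (0xa3 >> 5) & 0xf = 0x05
     * so the error is attributed to ap->pmp_link[5]. A decoded value at
     * or beyond nr_pmp_links is treated as an HSM violation instead and
     * forces a hardreset with the port frozen.
     */
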
@@ -856,7 +1069,6 @@ static void sil24_error_intr(struct ata_port *ap) | |||
856 | } | 1069 | } |
857 | 1070 | ||
858 | /* record error info */ | 1071 | /* record error info */ |
859 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
860 | if (qc) { | 1072 | if (qc) { |
861 | sil24_read_tf(ap, qc->tag, &pp->tf); | 1073 | sil24_read_tf(ap, qc->tag, &pp->tf); |
862 | qc->err_mask |= err_mask; | 1074 | qc->err_mask |= err_mask; |
@@ -864,13 +1076,21 @@ static void sil24_error_intr(struct ata_port *ap) | |||
864 | ehi->err_mask |= err_mask; | 1076 | ehi->err_mask |= err_mask; |
865 | 1077 | ||
866 | ehi->action |= action; | 1078 | ehi->action |= action; |
1079 | |||
1080 | /* if PMP, resume */ | ||
1081 | if (ap->nr_pmp_links) | ||
1082 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT); | ||
867 | } | 1083 | } |
868 | 1084 | ||
869 | /* freeze or abort */ | 1085 | /* freeze or abort */ |
870 | if (freeze) | 1086 | if (freeze) |
871 | ata_port_freeze(ap); | 1087 | ata_port_freeze(ap); |
872 | else | 1088 | else if (abort) { |
873 | ata_port_abort(ap); | 1089 | if (qc) |
1090 | ata_link_abort(qc->dev->link); | ||
1091 | else | ||
1092 | ata_port_abort(ap); | ||
1093 | } | ||
874 | } | 1094 | } |
875 | 1095 | ||
876 | static void sil24_finish_qc(struct ata_queued_cmd *qc) | 1096 | static void sil24_finish_qc(struct ata_queued_cmd *qc) |
@@ -910,7 +1130,7 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
910 | if (rc > 0) | 1130 | if (rc > 0) |
911 | return; | 1131 | return; |
912 | if (rc < 0) { | 1132 | if (rc < 0) { |
913 | struct ata_eh_info *ehi = &ap->eh_info; | 1133 | struct ata_eh_info *ehi = &ap->link.eh_info; |
914 | ehi->err_mask |= AC_ERR_HSM; | 1134 | ehi->err_mask |= AC_ERR_HSM; |
915 | ehi->action |= ATA_EH_SOFTRESET; | 1135 | ehi->action |= ATA_EH_SOFTRESET; |
916 | ata_port_freeze(ap); | 1136 | ata_port_freeze(ap); |
@@ -921,7 +1141,7 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
921 | if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) | 1141 | if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) |
922 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " | 1142 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " |
923 | "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", | 1143 | "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", |
924 | slot_stat, ap->active_tag, ap->sactive); | 1144 | slot_stat, ap->link.active_tag, ap->link.sactive); |
925 | } | 1145 | } |
926 | 1146 | ||
927 | static irqreturn_t sil24_interrupt(int irq, void *dev_instance) | 1147 | static irqreturn_t sil24_interrupt(int irq, void *dev_instance) |
@@ -963,16 +1183,18 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance) | |||
963 | 1183 | ||
964 | static void sil24_error_handler(struct ata_port *ap) | 1184 | static void sil24_error_handler(struct ata_port *ap) |
965 | { | 1185 | { |
966 | struct ata_eh_context *ehc = &ap->eh_context; | 1186 | struct sil24_port_priv *pp = ap->private_data; |
967 | 1187 | ||
968 | if (sil24_init_port(ap)) { | 1188 | if (sil24_init_port(ap)) |
969 | ata_eh_freeze_port(ap); | 1189 | ata_eh_freeze_port(ap); |
970 | ehc->i.action |= ATA_EH_HARDRESET; | ||
971 | } | ||
972 | 1190 | ||
973 | /* perform recovery */ | 1191 | /* perform recovery */ |
974 | ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset, | 1192 | sata_pmp_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset, |
975 | ata_std_postreset); | 1193 | ata_std_postreset, sata_pmp_std_prereset, |
1194 | sil24_pmp_softreset, sil24_pmp_hardreset, | ||
1195 | sata_pmp_std_postreset); | ||
1196 | |||
1197 | pp->do_port_rst = 0; | ||
976 | } | 1198 | } |
977 | 1199 | ||
978 | static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) | 1200 | static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) |
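
The error handler now hands both the host-link and the PMP-link reset methods to the PMP-aware EH entry point. As I recall the helper's prototype from this series (treat the exact parameter list as an assumption), host-link methods come first, then the PMP variants, which matches the call above:

    void sata_pmp_do_eh(struct ata_port *ap,
                        ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
                        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
                        ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
                        ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset);
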
@@ -980,8 +1202,8 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) | |||
980 | struct ata_port *ap = qc->ap; | 1202 | struct ata_port *ap = qc->ap; |
981 | 1203 | ||
982 | /* make DMA engine forget about the failed command */ | 1204 | /* make DMA engine forget about the failed command */ |
983 | if (qc->flags & ATA_QCFLAG_FAILED) | 1205 | if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap)) |
984 | sil24_init_port(ap); | 1206 | ata_eh_freeze_port(ap); |
985 | } | 1207 | } |
986 | 1208 | ||
987 | static int sil24_port_start(struct ata_port *ap) | 1209 | static int sil24_port_start(struct ata_port *ap) |
@@ -1019,7 +1241,6 @@ static int sil24_port_start(struct ata_port *ap) | |||
1019 | static void sil24_init_controller(struct ata_host *host) | 1241 | static void sil24_init_controller(struct ata_host *host) |
1020 | { | 1242 | { |
1021 | void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; | 1243 | void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; |
1022 | void __iomem *port_base = host->iomap[SIL24_PORT_BAR]; | ||
1023 | u32 tmp; | 1244 | u32 tmp; |
1024 | int i; | 1245 | int i; |
1025 | 1246 | ||
@@ -1031,7 +1252,8 @@ static void sil24_init_controller(struct ata_host *host) | |||
1031 | 1252 | ||
1032 | /* init ports */ | 1253 | /* init ports */ |
1033 | for (i = 0; i < host->n_ports; i++) { | 1254 | for (i = 0; i < host->n_ports; i++) { |
1034 | void __iomem *port = port_base + i * PORT_REGS_SIZE; | 1255 | struct ata_port *ap = host->ports[i]; |
1256 | void __iomem *port = ap->ioaddr.cmd_addr; | ||
1035 | 1257 | ||
1036 | /* Initial PHY setting */ | 1258 | /* Initial PHY setting */ |
1037 | writel(0x20c, port + PORT_PHY_CFG); | 1259 | writel(0x20c, port + PORT_PHY_CFG); |
@@ -1048,26 +1270,8 @@ static void sil24_init_controller(struct ata_host *host) | |||
1048 | "failed to clear port RST\n"); | 1270 | "failed to clear port RST\n"); |
1049 | } | 1271 | } |
1050 | 1272 | ||
1051 | /* Configure IRQ WoC */ | 1273 | /* configure port */ |
1052 | if (host->ports[0]->flags & SIL24_FLAG_PCIX_IRQ_WOC) | 1274 | sil24_config_port(ap); |
1053 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); | ||
1054 | else | ||
1055 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); | ||
1056 | |||
1057 | /* Zero error counters. */ | ||
1058 | writel(0x8000, port + PORT_DECODE_ERR_THRESH); | ||
1059 | writel(0x8000, port + PORT_CRC_ERR_THRESH); | ||
1060 | writel(0x8000, port + PORT_HSHK_ERR_THRESH); | ||
1061 | writel(0x0000, port + PORT_DECODE_ERR_CNT); | ||
1062 | writel(0x0000, port + PORT_CRC_ERR_CNT); | ||
1063 | writel(0x0000, port + PORT_HSHK_ERR_CNT); | ||
1064 | |||
1065 | /* Always use 64bit activation */ | ||
1066 | writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); | ||
1067 | |||
1068 | /* Clear port multiplier enable and resume bits */ | ||
1069 | writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, | ||
1070 | port + PORT_CTRL_CLR); | ||
1071 | } | 1275 | } |
1072 | 1276 | ||
1073 | /* Turn on interrupts */ | 1277 | /* Turn on interrupts */ |
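
The per-port register writes deleted above (IRQ write-one-to-clear setup, error-counter thresholds, 64-bit activation, PMP enable/resume clearing) move into the new sil24_config_port() helper, which is outside this excerpt. Reassembled from the removed lines, it presumably looks roughly like this:

    static void sil24_config_port(struct ata_port *ap)
    {
        void __iomem *port = ap->ioaddr.cmd_addr;

        /* configure IRQ WoC */
        if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
            writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
        else
            writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

        /* zero error counters */
        writel(0x8000, port + PORT_DECODE_ERR_THRESH);
        writel(0x8000, port + PORT_CRC_ERR_THRESH);
        writel(0x8000, port + PORT_HSHK_ERR_THRESH);
        writel(0x0000, port + PORT_DECODE_ERR_CNT);
        writel(0x0000, port + PORT_CRC_ERR_CNT);
        writel(0x0000, port + PORT_HSHK_ERR_CNT);

        /* always use 64bit activation */
        writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

        /* clear port multiplier enable and resume bits */
        writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
    }
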
@@ -1118,12 +1322,15 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1118 | host->iomap = iomap; | 1322 | host->iomap = iomap; |
1119 | 1323 | ||
1120 | for (i = 0; i < host->n_ports; i++) { | 1324 | for (i = 0; i < host->n_ports; i++) { |
1121 | void __iomem *port = iomap[SIL24_PORT_BAR] + i * PORT_REGS_SIZE; | 1325 | struct ata_port *ap = host->ports[i]; |
1326 | size_t offset = ap->port_no * PORT_REGS_SIZE; | ||
1327 | void __iomem *port = iomap[SIL24_PORT_BAR] + offset; | ||
1122 | 1328 | ||
1123 | host->ports[i]->ioaddr.cmd_addr = port; | 1329 | host->ports[i]->ioaddr.cmd_addr = port; |
1124 | host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL; | 1330 | host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL; |
1125 | 1331 | ||
1126 | ata_std_ports(&host->ports[i]->ioaddr); | 1332 | ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); |
1333 | ata_port_pbar_desc(ap, SIL24_PORT_BAR, offset, "port"); | ||
1127 | } | 1334 | } |
1128 | 1335 | ||
1129 | /* configure and activate the device */ | 1336 | /* configure and activate the device */ |
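
ata_port_pbar_desc() is the new libata helper used here and in the remaining drivers of this series to record where each port's registers live, so the information ends up in the per-port boot message rather than being printed ad hoc. My understanding of its contract (a simplified sketch, not the verbatim implementation): a negative offset describes the whole BAR, a non-negative offset describes an address inside it.

    /* sketch of the helper's contract, not a verbatim copy */
    void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                            const char *name)
    {
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long long start = pci_resource_start(pdev, bar);

        if (offset < 0)
            ata_port_desc(ap, "%s 0x%llx", name, start);
        else
            ata_port_desc(ap, "%s 0x%llx", name,
                          start + (unsigned long long)offset);
    }
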
@@ -1179,6 +1386,12 @@ static int sil24_pci_device_resume(struct pci_dev *pdev) | |||
1179 | 1386 | ||
1180 | return 0; | 1387 | return 0; |
1181 | } | 1388 | } |
1389 | |||
1390 | static int sil24_port_resume(struct ata_port *ap) | ||
1391 | { | ||
1392 | sil24_config_pmp(ap, ap->nr_pmp_links); | ||
1393 | return 0; | ||
1394 | } | ||
1182 | #endif | 1395 | #endif |
1183 | 1396 | ||
1184 | static int __init sil24_init(void) | 1397 | static int __init sil24_init(void) |
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 41c1d6e8f1fe..8d98a9fb0a42 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -104,7 +104,6 @@ static struct scsi_host_template sis_sht = { | |||
104 | }; | 104 | }; |
105 | 105 | ||
106 | static const struct ata_port_operations sis_ops = { | 106 | static const struct ata_port_operations sis_ops = { |
107 | .port_disable = ata_port_disable, | ||
108 | .tf_load = ata_tf_load, | 107 | .tf_load = ata_tf_load, |
109 | .tf_read = ata_tf_read, | 108 | .tf_read = ata_tf_read, |
110 | .check_status = ata_check_status, | 109 | .check_status = ata_check_status, |
@@ -123,7 +122,6 @@ static const struct ata_port_operations sis_ops = { | |||
123 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 122 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
124 | .irq_clear = ata_bmdma_irq_clear, | 123 | .irq_clear = ata_bmdma_irq_clear, |
125 | .irq_on = ata_irq_on, | 124 | .irq_on = ata_irq_on, |
126 | .irq_ack = ata_irq_ack, | ||
127 | .scr_read = sis_scr_read, | 125 | .scr_read = sis_scr_read, |
128 | .scr_write = sis_scr_write, | 126 | .scr_write = sis_scr_write, |
129 | .port_start = ata_port_start, | 127 | .port_start = ata_port_start, |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index d9678e7bc3a9..12d613c48c19 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -329,7 +329,6 @@ static struct scsi_host_template k2_sata_sht = { | |||
329 | 329 | ||
330 | 330 | ||
331 | static const struct ata_port_operations k2_sata_ops = { | 331 | static const struct ata_port_operations k2_sata_ops = { |
332 | .port_disable = ata_port_disable, | ||
333 | .tf_load = k2_sata_tf_load, | 332 | .tf_load = k2_sata_tf_load, |
334 | .tf_read = k2_sata_tf_read, | 333 | .tf_read = k2_sata_tf_read, |
335 | .check_status = k2_stat_check_status, | 334 | .check_status = k2_stat_check_status, |
@@ -349,7 +348,6 @@ static const struct ata_port_operations k2_sata_ops = { | |||
349 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 348 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
350 | .irq_clear = ata_bmdma_irq_clear, | 349 | .irq_clear = ata_bmdma_irq_clear, |
351 | .irq_on = ata_irq_on, | 350 | .irq_on = ata_irq_on, |
352 | .irq_ack = ata_irq_ack, | ||
353 | .scr_read = k2_sata_scr_read, | 351 | .scr_read = k2_sata_scr_read, |
354 | .scr_write = k2_sata_scr_write, | 352 | .scr_write = k2_sata_scr_write, |
355 | .port_start = ata_port_start, | 353 | .port_start = ata_port_start, |
@@ -445,9 +443,15 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
445 | /* different controllers have different number of ports - currently 4 or 8 */ | 443 | /* different controllers have different number of ports - currently 4 or 8 */ |
446 | /* All ports are on the same function. Multi-function device is no | 444 | /* All ports are on the same function. Multi-function device is no |
447 | * longer available. This should not be seen in any system. */ | 445 | * longer available. This should not be seen in any system. */ |
448 | for (i = 0; i < host->n_ports; i++) | 446 | for (i = 0; i < host->n_ports; i++) { |
449 | k2_sata_setup_port(&host->ports[i]->ioaddr, | 447 | struct ata_port *ap = host->ports[i]; |
450 | mmio_base + i * K2_SATA_PORT_OFFSET); | 448 | unsigned int offset = i * K2_SATA_PORT_OFFSET; |
449 | |||
450 | k2_sata_setup_port(&ap->ioaddr, mmio_base + offset); | ||
451 | |||
452 | ata_port_pbar_desc(ap, 5, -1, "mmio"); | ||
453 | ata_port_pbar_desc(ap, 5, offset, "port"); | ||
454 | } | ||
451 | 455 | ||
452 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 456 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); |
453 | if (rc) | 457 | if (rc) |
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 97aefdd87be4..9f9f7b30654a 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c | |||
@@ -254,7 +254,6 @@ static struct scsi_host_template pdc_sata_sht = { | |||
254 | }; | 254 | }; |
255 | 255 | ||
256 | static const struct ata_port_operations pdc_20621_ops = { | 256 | static const struct ata_port_operations pdc_20621_ops = { |
257 | .port_disable = ata_port_disable, | ||
258 | .tf_load = pdc_tf_load_mmio, | 257 | .tf_load = pdc_tf_load_mmio, |
259 | .tf_read = ata_tf_read, | 258 | .tf_read = ata_tf_read, |
260 | .check_status = ata_check_status, | 259 | .check_status = ata_check_status, |
@@ -267,7 +266,6 @@ static const struct ata_port_operations pdc_20621_ops = { | |||
267 | .eng_timeout = pdc_eng_timeout, | 266 | .eng_timeout = pdc_eng_timeout, |
268 | .irq_clear = pdc20621_irq_clear, | 267 | .irq_clear = pdc20621_irq_clear, |
269 | .irq_on = ata_irq_on, | 268 | .irq_on = ata_irq_on, |
270 | .irq_ack = ata_irq_ack, | ||
271 | .port_start = pdc_port_start, | 269 | .port_start = pdc_port_start, |
272 | }; | 270 | }; |
273 | 271 | ||
@@ -854,7 +852,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance) | |||
854 | !(ap->flags & ATA_FLAG_DISABLED)) { | 852 | !(ap->flags & ATA_FLAG_DISABLED)) { |
855 | struct ata_queued_cmd *qc; | 853 | struct ata_queued_cmd *qc; |
856 | 854 | ||
857 | qc = ata_qc_from_tag(ap, ap->active_tag); | 855 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
858 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) | 856 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
859 | handled += pdc20621_host_intr(ap, qc, (i > 4), | 857 | handled += pdc20621_host_intr(ap, qc, (i > 4), |
860 | mmio_base); | 858 | mmio_base); |
@@ -881,7 +879,7 @@ static void pdc_eng_timeout(struct ata_port *ap) | |||
881 | 879 | ||
882 | spin_lock_irqsave(&host->lock, flags); | 880 | spin_lock_irqsave(&host->lock, flags); |
883 | 881 | ||
884 | qc = ata_qc_from_tag(ap, ap->active_tag); | 882 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
885 | 883 | ||
886 | switch (qc->tf.protocol) { | 884 | switch (qc->tf.protocol) { |
887 | case ATA_PROT_DMA: | 885 | case ATA_PROT_DMA: |
@@ -1383,9 +1381,8 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1383 | const struct ata_port_info *ppi[] = | 1381 | const struct ata_port_info *ppi[] = |
1384 | { &pdc_port_info[ent->driver_data], NULL }; | 1382 | { &pdc_port_info[ent->driver_data], NULL }; |
1385 | struct ata_host *host; | 1383 | struct ata_host *host; |
1386 | void __iomem *base; | ||
1387 | struct pdc_host_priv *hpriv; | 1384 | struct pdc_host_priv *hpriv; |
1388 | int rc; | 1385 | int i, rc; |
1389 | 1386 | ||
1390 | if (!printed_version++) | 1387 | if (!printed_version++) |
1391 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 1388 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -1411,11 +1408,17 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1411 | return rc; | 1408 | return rc; |
1412 | host->iomap = pcim_iomap_table(pdev); | 1409 | host->iomap = pcim_iomap_table(pdev); |
1413 | 1410 | ||
1414 | base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; | 1411 | for (i = 0; i < 4; i++) { |
1415 | pdc_sata_setup_port(&host->ports[0]->ioaddr, base + 0x200); | 1412 | struct ata_port *ap = host->ports[i]; |
1416 | pdc_sata_setup_port(&host->ports[1]->ioaddr, base + 0x280); | 1413 | void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; |
1417 | pdc_sata_setup_port(&host->ports[2]->ioaddr, base + 0x300); | 1414 | unsigned int offset = 0x200 + i * 0x80; |
1418 | pdc_sata_setup_port(&host->ports[3]->ioaddr, base + 0x380); | 1415 | |
1416 | pdc_sata_setup_port(&ap->ioaddr, base + offset); | ||
1417 | |||
1418 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); | ||
1419 | ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); | ||
1420 | ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); | ||
1421 | } | ||
1419 | 1422 | ||
1420 | /* configure and activate */ | 1423 | /* configure and activate */ |
1421 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 1424 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); |
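
The loop above replaces the four explicit pdc_sata_setup_port() calls with one offset computation; spelled out, the mapping is unchanged:

    /*
     * i = 0: offset = 0x200 + 0 * 0x80 = 0x200   (was base + 0x200)
     * i = 1: offset = 0x200 + 1 * 0x80 = 0x280   (was base + 0x280)
     * i = 2: offset = 0x200 + 2 * 0x80 = 0x300   (was base + 0x300)
     * i = 3: offset = 0x200 + 3 * 0x80 = 0x380   (was base + 0x380)
     */
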
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index e6b8b45279af..d394da085ae4 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
@@ -94,8 +94,6 @@ static struct scsi_host_template uli_sht = { | |||
94 | }; | 94 | }; |
95 | 95 | ||
96 | static const struct ata_port_operations uli_ops = { | 96 | static const struct ata_port_operations uli_ops = { |
97 | .port_disable = ata_port_disable, | ||
98 | |||
99 | .tf_load = ata_tf_load, | 97 | .tf_load = ata_tf_load, |
100 | .tf_read = ata_tf_read, | 98 | .tf_read = ata_tf_read, |
101 | .check_status = ata_check_status, | 99 | .check_status = ata_check_status, |
@@ -117,7 +115,6 @@ static const struct ata_port_operations uli_ops = { | |||
117 | 115 | ||
118 | .irq_clear = ata_bmdma_irq_clear, | 116 | .irq_clear = ata_bmdma_irq_clear, |
119 | .irq_on = ata_irq_on, | 117 | .irq_on = ata_irq_on, |
120 | .irq_ack = ata_irq_ack, | ||
121 | 118 | ||
122 | .scr_read = uli_scr_read, | 119 | .scr_read = uli_scr_read, |
123 | .scr_write = uli_scr_write, | 120 | .scr_write = uli_scr_write, |
@@ -242,6 +239,12 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
242 | hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; | 239 | hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; |
243 | ata_std_ports(ioaddr); | 240 | ata_std_ports(ioaddr); |
244 | 241 | ||
242 | ata_port_desc(host->ports[2], | ||
243 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", | ||
244 | (unsigned long long)pci_resource_start(pdev, 0) + 8, | ||
245 | ((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4, | ||
246 | (unsigned long long)pci_resource_start(pdev, 4) + 16); | ||
247 | |||
245 | ioaddr = &host->ports[3]->ioaddr; | 248 | ioaddr = &host->ports[3]->ioaddr; |
246 | ioaddr->cmd_addr = iomap[2] + 8; | 249 | ioaddr->cmd_addr = iomap[2] + 8; |
247 | ioaddr->altstatus_addr = | 250 | ioaddr->altstatus_addr = |
@@ -250,6 +253,13 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
250 | ioaddr->bmdma_addr = iomap[4] + 24; | 253 | ioaddr->bmdma_addr = iomap[4] + 24; |
251 | hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; | 254 | hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; |
252 | ata_std_ports(ioaddr); | 255 | ata_std_ports(ioaddr); |
256 | |||
257 | ata_port_desc(host->ports[3], | ||

258 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", | ||
259 | (unsigned long long)pci_resource_start(pdev, 2) + 9, | ||
260 | ((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4, | ||
261 | (unsigned long long)pci_resource_start(pdev, 4) + 24); | ||
262 | |||
253 | break; | 263 | break; |
254 | 264 | ||
255 | case uli_5289: | 265 | case uli_5289: |
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 57fd30de8f0d..1dc9b4f2b2dc 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -57,7 +57,6 @@ enum { | |||
57 | SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ | 57 | SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ |
58 | SATA_INT_GATE = 0x41, /* SATA interrupt gating */ | 58 | SATA_INT_GATE = 0x41, /* SATA interrupt gating */ |
59 | SATA_NATIVE_MODE = 0x42, /* Native mode enable */ | 59 | SATA_NATIVE_MODE = 0x42, /* Native mode enable */ |
60 | SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */ | ||
61 | PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ | 60 | PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ |
62 | PATA_PIO_TIMING = 0xAB, /* PATA timing register */ | 61 | PATA_PIO_TIMING = 0xAB, /* PATA timing register */ |
63 | 62 | ||
@@ -68,7 +67,6 @@ enum { | |||
68 | NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), | 67 | NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), |
69 | 68 | ||
70 | SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ | 69 | SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ |
71 | SATA_2DEV = (1 << 5), /* SATA is master/slave */ | ||
72 | }; | 70 | }; |
73 | 71 | ||
74 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 72 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
@@ -122,8 +120,6 @@ static struct scsi_host_template svia_sht = { | |||
122 | }; | 120 | }; |
123 | 121 | ||
124 | static const struct ata_port_operations vt6420_sata_ops = { | 122 | static const struct ata_port_operations vt6420_sata_ops = { |
125 | .port_disable = ata_port_disable, | ||
126 | |||
127 | .tf_load = ata_tf_load, | 123 | .tf_load = ata_tf_load, |
128 | .tf_read = ata_tf_read, | 124 | .tf_read = ata_tf_read, |
129 | .check_status = ata_check_status, | 125 | .check_status = ata_check_status, |
@@ -146,14 +142,11 @@ static const struct ata_port_operations vt6420_sata_ops = { | |||
146 | 142 | ||
147 | .irq_clear = ata_bmdma_irq_clear, | 143 | .irq_clear = ata_bmdma_irq_clear, |
148 | .irq_on = ata_irq_on, | 144 | .irq_on = ata_irq_on, |
149 | .irq_ack = ata_irq_ack, | ||
150 | 145 | ||
151 | .port_start = ata_port_start, | 146 | .port_start = ata_port_start, |
152 | }; | 147 | }; |
153 | 148 | ||
154 | static const struct ata_port_operations vt6421_pata_ops = { | 149 | static const struct ata_port_operations vt6421_pata_ops = { |
155 | .port_disable = ata_port_disable, | ||
156 | |||
157 | .set_piomode = vt6421_set_pio_mode, | 150 | .set_piomode = vt6421_set_pio_mode, |
158 | .set_dmamode = vt6421_set_dma_mode, | 151 | .set_dmamode = vt6421_set_dma_mode, |
159 | 152 | ||
@@ -180,14 +173,11 @@ static const struct ata_port_operations vt6421_pata_ops = { | |||
180 | 173 | ||
181 | .irq_clear = ata_bmdma_irq_clear, | 174 | .irq_clear = ata_bmdma_irq_clear, |
182 | .irq_on = ata_irq_on, | 175 | .irq_on = ata_irq_on, |
183 | .irq_ack = ata_irq_ack, | ||
184 | 176 | ||
185 | .port_start = ata_port_start, | 177 | .port_start = ata_port_start, |
186 | }; | 178 | }; |
187 | 179 | ||
188 | static const struct ata_port_operations vt6421_sata_ops = { | 180 | static const struct ata_port_operations vt6421_sata_ops = { |
189 | .port_disable = ata_port_disable, | ||
190 | |||
191 | .tf_load = ata_tf_load, | 181 | .tf_load = ata_tf_load, |
192 | .tf_read = ata_tf_read, | 182 | .tf_read = ata_tf_read, |
193 | .check_status = ata_check_status, | 183 | .check_status = ata_check_status, |
@@ -211,7 +201,6 @@ static const struct ata_port_operations vt6421_sata_ops = { | |||
211 | 201 | ||
212 | .irq_clear = ata_bmdma_irq_clear, | 202 | .irq_clear = ata_bmdma_irq_clear, |
213 | .irq_on = ata_irq_on, | 203 | .irq_on = ata_irq_on, |
214 | .irq_ack = ata_irq_ack, | ||
215 | 204 | ||
216 | .scr_read = svia_scr_read, | 205 | .scr_read = svia_scr_read, |
217 | .scr_write = svia_scr_write, | 206 | .scr_write = svia_scr_write, |
@@ -276,7 +265,7 @@ static void svia_noop_freeze(struct ata_port *ap) | |||
276 | 265 | ||
277 | /** | 266 | /** |
278 | * vt6420_prereset - prereset for vt6420 | 267 | * vt6420_prereset - prereset for vt6420 |
279 | * @ap: target ATA port | 268 | * @link: target ATA link |
280 | * @deadline: deadline jiffies for the operation | 269 | * @deadline: deadline jiffies for the operation |
281 | * | 270 | * |
282 | * SCR registers on vt6420 are unreliable and may hang the | 271 |
@@ -294,9 +283,10 @@ static void svia_noop_freeze(struct ata_port *ap) | |||
294 | * RETURNS: | 283 | * RETURNS: |
295 | * 0 on success, -errno otherwise. | 284 | * 0 on success, -errno otherwise. |
296 | */ | 285 | */ |
297 | static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) | 286 | static int vt6420_prereset(struct ata_link *link, unsigned long deadline) |
298 | { | 287 | { |
299 | struct ata_eh_context *ehc = &ap->eh_context; | 288 | struct ata_port *ap = link->ap; |
289 | struct ata_eh_context *ehc = &ap->link.eh_context; | ||
300 | unsigned long timeout = jiffies + (HZ * 5); | 290 | unsigned long timeout = jiffies + (HZ * 5); |
301 | u32 sstatus, scontrol; | 291 | u32 sstatus, scontrol; |
302 | int online; | 292 | int online; |
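
vt6420_prereset() changes from taking an ata_port to taking an ata_link, following the libata-wide link conversion this series depends on. From memory, the reset-method typedefs after that conversion look roughly like the following; the exact signatures are an assumption here, as they are not shown in the patch:

    typedef int  (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
    typedef int  (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
                                   unsigned long deadline);
    typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
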
@@ -407,6 +397,9 @@ static void vt6421_init_addrs(struct ata_port *ap) | |||
407 | ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); | 397 | ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); |
408 | 398 | ||
409 | ata_std_ports(ioaddr); | 399 | ata_std_ports(ioaddr); |
400 | |||
401 | ata_port_pbar_desc(ap, ap->port_no, -1, "port"); | ||
402 | ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma"); | ||
410 | } | 403 | } |
411 | 404 | ||
412 | static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | 405 | static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) |
@@ -513,7 +506,6 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
513 | struct ata_host *host; | 506 | struct ata_host *host; |
514 | int board_id = (int) ent->driver_data; | 507 | int board_id = (int) ent->driver_data; |
515 | const int *bar_sizes; | 508 | const int *bar_sizes; |
516 | u8 tmp8; | ||
517 | 509 | ||
518 | if (!printed_version++) | 510 | if (!printed_version++) |
519 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 511 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -522,19 +514,10 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
522 | if (rc) | 514 | if (rc) |
523 | return rc; | 515 | return rc; |
524 | 516 | ||
525 | if (board_id == vt6420) { | 517 | if (board_id == vt6420) |
526 | pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8); | ||
527 | if (tmp8 & SATA_2DEV) { | ||
528 | dev_printk(KERN_ERR, &pdev->dev, | ||
529 | "SATA master/slave not supported (0x%x)\n", | ||
530 | (int) tmp8); | ||
531 | return -EIO; | ||
532 | } | ||
533 | |||
534 | bar_sizes = &svia_bar_sizes[0]; | 518 | bar_sizes = &svia_bar_sizes[0]; |
535 | } else { | 519 | else |
536 | bar_sizes = &vt6421_bar_sizes[0]; | 520 | bar_sizes = &vt6421_bar_sizes[0]; |
537 | } | ||
538 | 521 | ||
539 | for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) | 522 | for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) |
540 | if ((pci_resource_start(pdev, i) == 0) || | 523 | if ((pci_resource_start(pdev, i) == 0) || |
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 1920915dfa2c..0d9be1684873 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
@@ -240,7 +240,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) | |||
240 | return; | 240 | return; |
241 | } | 241 | } |
242 | 242 | ||
243 | qc = ata_qc_from_tag(ap, ap->active_tag); | 243 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
244 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) | 244 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) |
245 | handled = ata_host_intr(ap, qc); | 245 | handled = ata_host_intr(ap, qc); |
246 | 246 | ||
@@ -317,7 +317,6 @@ static struct scsi_host_template vsc_sata_sht = { | |||
317 | 317 | ||
318 | 318 | ||
319 | static const struct ata_port_operations vsc_sata_ops = { | 319 | static const struct ata_port_operations vsc_sata_ops = { |
320 | .port_disable = ata_port_disable, | ||
321 | .tf_load = vsc_sata_tf_load, | 320 | .tf_load = vsc_sata_tf_load, |
322 | .tf_read = vsc_sata_tf_read, | 321 | .tf_read = vsc_sata_tf_read, |
323 | .exec_command = ata_exec_command, | 322 | .exec_command = ata_exec_command, |
@@ -336,7 +335,6 @@ static const struct ata_port_operations vsc_sata_ops = { | |||
336 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 335 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
337 | .irq_clear = ata_bmdma_irq_clear, | 336 | .irq_clear = ata_bmdma_irq_clear, |
338 | .irq_on = ata_irq_on, | 337 | .irq_on = ata_irq_on, |
339 | .irq_ack = ata_irq_ack, | ||
340 | .scr_read = vsc_sata_scr_read, | 338 | .scr_read = vsc_sata_scr_read, |
341 | .scr_write = vsc_sata_scr_write, | 339 | .scr_write = vsc_sata_scr_write, |
342 | .port_start = ata_port_start, | 340 | .port_start = ata_port_start, |
@@ -408,9 +406,15 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d | |||
408 | 406 | ||
409 | mmio_base = host->iomap[VSC_MMIO_BAR]; | 407 | mmio_base = host->iomap[VSC_MMIO_BAR]; |
410 | 408 | ||
411 | for (i = 0; i < host->n_ports; i++) | 409 | for (i = 0; i < host->n_ports; i++) { |
412 | vsc_sata_setup_port(&host->ports[i]->ioaddr, | 410 | struct ata_port *ap = host->ports[i]; |
413 | mmio_base + (i + 1) * VSC_SATA_PORT_OFFSET); | 411 | unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET; |
412 | |||
413 | vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset); | ||
414 | |||
415 | ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio"); | ||
416 | ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port"); | ||
417 | } | ||
414 | 418 | ||
415 | /* | 419 | /* |
416 | * Use 32 bit DMA mask, because 64 bit address support is poor. | 420 | * Use 32 bit DMA mask, because 64 bit address support is poor. |