 83 files changed, 4787 insertions(+), 8801 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 25aba69b59b4..292aa9a0f02f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -28,7 +28,7 @@ config ATA_NONSTANDARD
 	default n
 
 config ATA_ACPI
-	bool
+	bool "ATA ACPI Support"
 	depends on ACPI && PCI
 	select ACPI_DOCK
 	default y
@@ -41,6 +41,13 @@ config ATA_ACPI
 	  You can disable this at kernel boot time by using the
 	  option libata.noacpi=1
 
+config SATA_PMP
+	bool "SATA Port Multiplier support"
+	default y
+	help
+	  This option adds support for SATA Port Multipliers
+	  (the SATA version of an ethernet hub, or SAS expander).
+
 config SATA_AHCI
 	tristate "AHCI SATA support"
 	depends on PCI
@@ -49,6 +56,43 @@ config SATA_AHCI
 
 	  If unsure, say N.
 
+config SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_FSL
+	tristate "Freescale 3.0Gbps SATA support"
+	depends on FSL_SOC
+	help
+	  This option enables support for Freescale 3.0Gbps SATA controller.
+	  It can be found on MPC837x and MPC8315.
+
+	  If unsure, say N.
+
+config ATA_SFF
+	bool "ATA SFF support"
+	default y
+	help
+	  This option adds support for ATA controllers with SFF
+	  compliant or similar programming interface.
+
+	  SFF is the legacy IDE interface that has been around since
+	  the dawn of time.  Almost all PATA controllers have an
+	  SFF interface.  Many SATA controllers have an SFF interface
+	  when configured into a legacy compatibility mode.
+
+	  For users with exclusively modern controllers like AHCI,
+	  Silicon Image 3124, or Marvell 6440, you may choose to
+	  disable this uneeded SFF support.
+
+	  If unsure, say Y.
+
+if ATA_SFF
+
 config SATA_SVW
 	tristate "ServerWorks Frodo / Apple K2 SATA support"
 	depends on PCI
@@ -125,14 +169,6 @@ config SATA_SIL
 
 	  If unsure, say N.
 
-config SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
-	depends on PCI
-	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
-	  If unsure, say N.
-
 config SATA_SIS
 	tristate "SiS 964/965/966/180 SATA support"
 	depends on PCI
@@ -183,15 +219,6 @@ config PATA_ACPI
 	  firmware in the BIOS.  This driver can sometimes handle
 	  otherwise unsupported hardware.
 
-config SATA_FSL
-	tristate "Freescale 3.0Gbps SATA support"
-	depends on FSL_SOC
-	help
-	  This option enables support for Freescale 3.0Gbps SATA controller.
-	  It can be found on MPC837x and MPC8315.
-
-	  If unsure, say N.
-
 config PATA_ALI
 	tristate "ALi PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
@@ -679,4 +706,5 @@ config PATA_BF54X
 
 	  If unsure, say N.
 
+endif # ATA_SFF
 endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 0511e6f0bb58..1fbc2aa648b7 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
 # Should be last libata driver
 obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
 
-libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o \
-		   libata-pmp.o
+libata-objs	:= libata-core.o libata-scsi.o libata-eh.o
+libata-$(CONFIG_ATA_SFF)	+= libata-sff.o
+libata-$(CONFIG_SATA_PMP)	+= libata-pmp.o
 libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
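The per-driver conversions that follow all repeat one pattern. A minimal sketch of that pattern is below; it is illustrative only and not part of the patch. "my_sht"/"my_port_ops" are made-up names, while ATA_BMDMA_SHT, ata_bmdma_port_ops and ata_cable_40wire are the libata helpers this series uses throughout.

/* Illustrative sketch, not part of the patch: a converted driver only
 * declares its overrides.  The SHT macro fills in the boilerplate
 * scsi_host_template fields, and .inherits pulls in the default
 * SFF/BMDMA callbacks, which libata resolves when the host is set up.
 */
static struct scsi_host_template my_sht = {
	ATA_BMDMA_SHT("my_driver"),		/* common SCSI host fields */
};

static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* default SFF/BMDMA hooks */
	.cable_detect	= ata_cable_40wire,	/* override only what differs */
};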
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b1eb4e24c86a..739ba3f222e8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -62,7 +62,6 @@ enum {
 	AHCI_MAX_PORTS		= 32,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
-	AHCI_USE_CLUSTERING	= 1,
 	AHCI_MAX_CMDS		= 32,
 	AHCI_CMD_SZ		= 32,
 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
@@ -198,7 +197,6 @@ enum {
 				 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
 				 ATA_FLAG_IPM,
-	AHCI_LFLAG_COMMON	= ATA_LFLAG_SKIP_D2H_BSY,
 
 	ICH_MAP			= 0x90, /* ICH MAP register */
 };
@@ -245,19 +243,24 @@ static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static void ahci_irq_clear(struct ata_port *ap);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static u8 ahci_check_status(struct ata_port *ap);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_pmp_attach(struct ata_port *ap);
 static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+				 unsigned long deadline);
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
 static void ahci_error_handler(struct ata_port *ap);
-static void ahci_vt8251_error_handler(struct ata_port *ap);
-static void ahci_p5wdh_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_resume(struct ata_port *ap);
 static void ahci_dev_config(struct ata_device *dev);
@@ -276,129 +279,54 @@ static struct class_device_attribute *ahci_shost_attrs[] = {
 };
 
 static struct scsi_host_template ahci_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= AHCI_MAX_CMDS - 1,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= AHCI_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= AHCI_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 	.shost_attrs		= ahci_shost_attrs,
 };
 
-static const struct ata_port_operations ahci_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.dev_config		= ahci_dev_config,
-
-	.tf_read		= ahci_tf_read,
+static struct ata_port_operations ahci_ops = {
+	.inherits		= &sata_pmp_port_ops,
 
 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
+	.qc_fill_rtf		= ahci_qc_fill_rtf,
 
 	.freeze			= ahci_freeze,
 	.thaw			= ahci_thaw,
-
+	.softreset		= ahci_softreset,
+	.hardreset		= ahci_hardreset,
+	.postreset		= ahci_postreset,
+	.pmp_softreset		= ahci_softreset,
 	.error_handler		= ahci_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
-
-	.pmp_attach		= ahci_pmp_attach,
-	.pmp_detach		= ahci_pmp_detach,
-
-#ifdef CONFIG_PM
-	.port_suspend		= ahci_port_suspend,
-	.port_resume		= ahci_port_resume,
-#endif
-	.enable_pm		= ahci_enable_alpm,
-	.disable_pm		= ahci_disable_alpm,
-
-	.port_start		= ahci_port_start,
-	.port_stop		= ahci_port_stop,
-};
-
-static const struct ata_port_operations ahci_vt8251_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.tf_read		= ahci_tf_read,
-
-	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
-	.qc_prep		= ahci_qc_prep,
-	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
+	.dev_config		= ahci_dev_config,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
-
-	.freeze			= ahci_freeze,
-	.thaw			= ahci_thaw,
-
-	.error_handler		= ahci_vt8251_error_handler,
-	.post_internal_cmd	= ahci_post_internal_cmd,
-
 	.pmp_attach		= ahci_pmp_attach,
 	.pmp_detach		= ahci_pmp_detach,
 
+	.enable_pm		= ahci_enable_alpm,
+	.disable_pm		= ahci_disable_alpm,
 #ifdef CONFIG_PM
 	.port_suspend		= ahci_port_suspend,
 	.port_resume		= ahci_port_resume,
 #endif
-
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
 
-static const struct ata_port_operations ahci_p5wdh_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.tf_read		= ahci_tf_read,
-
-	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
-	.qc_prep		= ahci_qc_prep,
-	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
-
-	.freeze			= ahci_freeze,
-	.thaw			= ahci_thaw,
-
-	.error_handler		= ahci_p5wdh_error_handler,
-	.post_internal_cmd	= ahci_post_internal_cmd,
-
-	.pmp_attach		= ahci_pmp_attach,
-	.pmp_detach		= ahci_pmp_detach,
-
-#ifdef CONFIG_PM
-	.port_suspend		= ahci_port_suspend,
-	.port_resume		= ahci_port_resume,
-#endif
+static struct ata_port_operations ahci_vt8251_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_vt8251_hardreset,
+};
 
-	.port_start		= ahci_port_start,
-	.port_stop		= ahci_port_stop,
+static struct ata_port_operations ahci_p5wdh_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_p5wdh_hardreset,
 };
 
 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
@@ -407,7 +335,6 @@ static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -416,7 +343,6 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_vt8251_ops,
@@ -425,7 +351,6 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -436,7 +361,6 @@ static const struct ata_port_info ahci_port_info[] = {
 				 AHCI_HFLAG_32BIT_ONLY |
 				 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -447,7 +371,6 @@ static const struct ata_port_info ahci_port_info[] = {
 				 AHCI_HFLAG_MV_PATA),
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -457,7 +380,6 @@ static const struct ata_port_info ahci_port_info[] = {
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
 				 AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -1255,13 +1177,14 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 
 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
 {
-	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
 	u32 tmp;
 	int busy, rc;
 
 	/* do we need to kick the port? */
-	busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
+	busy = status & (ATA_BUSY | ATA_DRQ);
 	if (!busy && !force_restart)
 		return 0;
 
@@ -1328,10 +1251,21 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 	return 0;
 }
 
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
-			     int pmp, unsigned long deadline)
+static int ahci_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+	if (!(status & ATA_BUSY))
+		return 1;
+	return 0;
+}
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
+	int pmp = sata_srst_pmp(link);
 	const char *reason = NULL;
 	unsigned long now, msecs;
 	struct ata_taskfile tf;
@@ -1339,12 +1273,6 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 
 	DPRINTK("ENTER\n");
 
-	if (ata_link_offline(link)) {
-		DPRINTK("PHY reports no device\n");
-		*class = ATA_DEV_NONE;
-		return 0;
-	}
-
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_kick_engine(ap, 1);
 	if (rc && rc != -EOPNOTSUPP)
@@ -1374,10 +1302,8 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 	tf.ctl &= ~ATA_SRST;
 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
 
-	/* wait a while before checking status */
-	ata_wait_after_reset(ap, deadline);
-
-	rc = ata_wait_ready(ap, deadline);
+	/* wait for link to become ready */
+	rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
 	/* link occupied, -ENODEV too is an error */
 	if (rc) {
 		reason = "device not ready";
@@ -1393,24 +1319,15 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 	return rc;
 }
 
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
-			  unsigned long deadline)
-{
-	int pmp = 0;
-
-	if (link->ap->flags & ATA_FLAG_PMP)
-		pmp = SATA_PMP_CTRL_PORT;
-
-	return ahci_do_softreset(link, class, pmp, deadline);
-}
-
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 	struct ata_port *ap = link->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
+	bool online;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1422,14 +1339,13 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 	tf.command = 0x80;
 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
-	rc = sata_std_hardreset(link, class, deadline);
+	rc = sata_link_hardreset(link, timing, deadline, &online,
+				 ahci_check_ready);
 
 	ahci_start_engine(ap);
 
-	if (rc == 0 && ata_link_online(link))
+	if (online)
 		*class = ahci_dev_classify(ap);
-	if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
-		*class = ATA_DEV_NONE;
 
 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
 	return rc;
@@ -1439,7 +1355,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 			   unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
-	u32 serror;
+	bool online;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1447,11 +1363,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 	ahci_stop_engine(ap);
 
 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
-				 deadline);
-
-	/* vt8251 needs SError cleared for the port to operate */
-	ahci_scr_read(ap, SCR_ERROR, &serror);
-	ahci_scr_write(ap, SCR_ERROR, serror);
+				 deadline, &online, NULL);
 
 	ahci_start_engine(ap);
 
@@ -1460,7 +1372,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 	/* vt8251 doesn't clear BSY on signature FIS reception,
 	 * request follow-up softreset.
 	 */
-	return rc ?: -EAGAIN;
+	return online ? -EAGAIN : rc;
 }
 
 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -1470,6 +1382,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
+	bool online;
 	int rc;
 
 	ahci_stop_engine(ap);
@@ -1480,16 +1393,10 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
-				 deadline);
+				 deadline, &online, NULL);
 
 	ahci_start_engine(ap);
 
-	if (rc || ata_link_offline(link))
-		return rc;
-
-	/* spec mandates ">= 2ms" before checking status */
-	msleep(150);
-
 	/* The pseudo configuration device on SIMG4726 attached to
 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
 	 * hardreset if no device is attached to the first downstream
@@ -1503,11 +1410,13 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
 	 * have to be reset again.  For most cases, this should
 	 * suffice while making probing snappish enough.
 	 */
-	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
-	if (rc)
-		ahci_kick_engine(ap, 0);
-
-	return 0;
+	if (online) {
+		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
+					  ahci_check_ready);
+		if (rc)
+			ahci_kick_engine(ap, 0);
+	}
+	return rc;
 }
 
 static void ahci_postreset(struct ata_link *link, unsigned int *class)
@@ -1530,27 +1439,6 @@ static void ahci_postreset(struct ata_link *link, unsigned int *class)
 	}
 }
 
-static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
-			      unsigned long deadline)
-{
-	return ahci_do_softreset(link, class, link->pmp, deadline);
-}
-
-static u8 ahci_check_status(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->ioaddr.cmd_addr;
-
-	return readl(mmio + PORT_TFDATA) & 0xFF;
-}
-
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
-	ata_tf_from_fis(d2h_fis, tf);
-}
-
 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
 	struct scatterlist *sg;
@@ -1663,27 +1551,27 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
 		active_ehi->err_mask |= AC_ERR_HSM;
-		active_ehi->action |= ATA_EH_SOFTRESET;
+		active_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(active_ehi,
 				  "unknown FIS %08x %08x %08x %08x" ,
 				  unk[0], unk[1], unk[2], unk[3]);
 	}
 
-	if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
+	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
 		active_ehi->err_mask |= AC_ERR_HSM;
-		active_ehi->action |= ATA_EH_SOFTRESET;
+		active_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
 	}
 
 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
-		host_ehi->action |= ATA_EH_SOFTRESET;
+		host_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(host_ehi, "host bus error");
 	}
 
 	if (irq_stat & PORT_IRQ_IF_ERR) {
 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
-		host_ehi->action |= ATA_EH_SOFTRESET;
+		host_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(host_ehi, "interface fatal error");
 	}
 
@@ -1704,7 +1592,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 
 static void ahci_port_intr(struct ata_port *ap)
 {
-	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct ahci_port_priv *pp = ap->private_data;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1766,21 +1654,16 @@ static void ahci_port_intr(struct ata_port *ap)
 	else
 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
 
-	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	rc = ata_qc_complete_multiple(ap, qc_active);
 
 	/* while resetting, invalid completions are expected */
 	if (unlikely(rc < 0 && !resetting)) {
 		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		ata_port_freeze(ap);
 	}
 }
 
-static void ahci_irq_clear(struct ata_port *ap)
-{
-	/* TODO */
-}
-
 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
@@ -1854,6 +1737,15 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	return 0;
 }
 
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+	ata_tf_from_fis(d2h_fis, &qc->result_tf);
+	return true;
+}
+
 static void ahci_freeze(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
@@ -1886,37 +1778,7 @@ static void ahci_error_handler(struct ata_port *ap)
 		ahci_start_engine(ap);
 	}
 
-	/* perform recovery */
-	sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
-		       ahci_hardreset, ahci_postreset,
-		       sata_pmp_std_prereset, ahci_pmp_softreset,
-		       sata_pmp_std_hardreset, sata_pmp_std_postreset);
-}
-
-static void ahci_vt8251_error_handler(struct ata_port *ap)
-{
-	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-		/* restart engine */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
-		  ahci_postreset);
-}
-
-static void ahci_p5wdh_error_handler(struct ata_port *ap)
-{
-	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-		/* restart engine */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
-		  ahci_postreset);
+	sata_pmp_error_handler(ap);
 }
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -1961,7 +1823,7 @@ static int ahci_port_resume(struct ata_port *ap)
 	ahci_power_up(ap);
 	ahci_start_port(ap);
 
-	if (ap->nr_pmp_links)
+	if (sata_pmp_attached(ap))
 		ahci_pmp_attach(ap);
 	else
 		ahci_pmp_detach(ap);
@@ -2324,7 +2186,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-		void __iomem *port_mmio = ahci_port_base(ap);
 
 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
@@ -2333,12 +2194,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		/* set initial link pm policy */
 		ap->pm_policy = NOT_AVAILABLE;
 
-		/* standard SATA port setup */
-		if (hpriv->port_map & (1 << i))
-			ap->ioaddr.cmd_addr = port_mmio;
-
 		/* disabled/not-implemented port */
-		else
+		if (!(hpriv->port_map & (1 << i)))
 			ap->ops = &ata_dummy_port_ops;
 	}
 
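The three ahci hardresets above now share one shape: sata_link_hardreset() reports link state through an "online" flag and takes an optional readiness callback, so the per-driver wait-and-classify code collapses. A minimal sketch of that shape, not part of the patch ("my_hardreset" is a made-up name; sata_link_hardreset, sata_ehc_deb_timing, ahci_check_ready and ahci_dev_classify are the symbols used in the hunks above):

/* Sketch only: the new-style hardreset hook, stripped of the
 * controller-specific engine stop/start and D2H FIS priming that the
 * real ahci_hardreset() keeps.
 */
static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* reset the link; poll readiness through the driver's callback */
	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);
	if (online)
		*class = ahci_dev_classify(link->ap);
	return rc;
}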
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 20534202fc79..47aeccd52fa9 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -95,53 +95,13 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
 }
 
 static struct scsi_host_template generic_sht = {
-	.module		= THIS_MODULE,
-	.name		= DRV_NAME,
-	.ioctl		= ata_scsi_ioctl,
-	.queuecommand	= ata_scsi_queuecmd,
-	.can_queue	= ATA_DEF_QUEUE,
-	.this_id	= ATA_SHT_THIS_ID,
-	.sg_tablesize	= LIBATA_MAX_PRD,
-	.cmd_per_lun	= ATA_SHT_CMD_PER_LUN,
-	.emulated	= ATA_SHT_EMULATED,
-	.use_clustering	= ATA_SHT_USE_CLUSTERING,
-	.proc_name	= DRV_NAME,
-	.dma_boundary	= ATA_DMA_BOUNDARY,
-	.slave_configure = ata_scsi_slave_config,
-	.slave_destroy	= ata_scsi_slave_destroy,
-	.bios_param	= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations generic_port_ops = {
-	.set_mode	= generic_set_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select	= ata_std_dev_select,
-
-	.bmdma_setup	= ata_bmdma_setup,
-	.bmdma_start	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status	= ata_bmdma_status,
-
-	.data_xfer	= ata_data_xfer,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_bmdma_port_ops,
 	.cable_detect	= ata_cable_unknown,
-
-	.qc_prep	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_mode	= generic_set_mode,
 };
 
 static int all_generic_ide; /* Set to claim all devices */
@@ -160,7 +120,6 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 {
 	u16 command;
 	static const struct ata_port_info info = {
-		.sht = &generic_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -191,9 +150,9 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 		return -ENODEV;
 
 	if (dev->vendor == PCI_VENDOR_ID_AL)
-		ata_pci_clear_simplex(dev);
+		ata_pci_bmdma_clear_simplex(dev);
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
 static struct pci_device_id ata_generic[] = {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index fae8404254c0..b7c38eeb498f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -100,13 +100,11 @@ enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
 	ICH5_PMR		= 0x90, /* port mapping register */
 	ICH5_PCS		= 0x92, /* port control and status */
-	PIIX_SCC		= 0x0A, /* sub-class code register */
 	PIIX_SIDPR_BAR		= 5,
 	PIIX_SIDPR_LEN		= 16,
 	PIIX_SIDPR_IDX		= 0,
 	PIIX_SIDPR_DATA		= 4,
 
-	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
 	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
 
@@ -140,12 +138,11 @@ enum piix_controller_ids {
 	ich_pata_100,		/* ICH up to UDMA 100 */
 	ich5_sata,
 	ich6_sata,
-	ich6_sata_ahci,
-	ich6m_sata_ahci,
-	ich8_sata_ahci,
+	ich6m_sata,
+	ich8_sata,
 	ich8_2port_sata,
-	ich8m_apple_sata_ahci,	/* locks up on second port enable */
-	tolapai_sata_ahci,
+	ich8m_apple_sata,	/* locks up on second port enable */
+	tolapai_sata,
 	piix_pata_vmw,		/* PIIX4 for VMware, spurious DMA_ERR */
 };
 
@@ -162,7 +159,7 @@ struct piix_host_priv {
 
 static int piix_init_one(struct pci_dev *pdev,
 			 const struct pci_device_id *ent);
-static void piix_pata_error_handler(struct ata_port *ap);
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -170,7 +167,6 @@ static int ich_pata_cable_detect(struct ata_port *ap);
 static u8 piix_vmw_bmdma_status(struct ata_port *ap);
 static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
 static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
-static void piix_sidpr_error_handler(struct ata_port *ap);
 #ifdef CONFIG_PM
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -236,25 +232,27 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 82801FR/FRW (ICH6R/ICH6RW) */
-	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
-	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
+	 * Attach iff the controller is in IDE mode. */
+	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
 	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
-	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
-	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
 	/* Enterprise Southbridge 2 (631xESB/632xESB) */
-	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* SATA Controller 1 IDE (ICH8) */
-	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller 2 IDE (ICH8) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* Mobile SATA Controller IDE (ICH8M) */
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* Mobile SATA Controller IDE (ICH8M), Apple */
-	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata_ahci },
+	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
 	/* SATA Controller IDE (ICH9) */
-	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
 	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9) */
@@ -264,15 +262,15 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* SATA Controller IDE (ICH9M) */
 	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9M) */
-	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (Tolapai) */
-	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
+	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
 	/* SATA Controller IDE (ICH10) */
-	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH10) */
 	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH10) */
-	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH10) */
 	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
@@ -291,170 +289,37 @@ static struct pci_driver piix_pci_driver = { | |||
291 | }; | 289 | }; |
292 | 290 | ||
293 | static struct scsi_host_template piix_sht = { | 291 | static struct scsi_host_template piix_sht = { |
294 | .module = THIS_MODULE, | 292 | ATA_BMDMA_SHT(DRV_NAME), |
295 | .name = DRV_NAME, | ||
296 | .ioctl = ata_scsi_ioctl, | ||
297 | .queuecommand = ata_scsi_queuecmd, | ||
298 | .can_queue = ATA_DEF_QUEUE, | ||
299 | .this_id = ATA_SHT_THIS_ID, | ||
300 | .sg_tablesize = LIBATA_MAX_PRD, | ||
301 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
302 | .emulated = ATA_SHT_EMULATED, | ||
303 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
304 | .proc_name = DRV_NAME, | ||
305 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
306 | .slave_configure = ata_scsi_slave_config, | ||
307 | .slave_destroy = ata_scsi_slave_destroy, | ||
308 | .bios_param = ata_std_bios_param, | ||
309 | }; | 293 | }; |
310 | 294 | ||
311 | static const struct ata_port_operations piix_pata_ops = { | 295 | static struct ata_port_operations piix_pata_ops = { |
296 | .inherits = &ata_bmdma_port_ops, | ||
297 | .cable_detect = ata_cable_40wire, | ||
312 | .set_piomode = piix_set_piomode, | 298 | .set_piomode = piix_set_piomode, |
313 | .set_dmamode = piix_set_dmamode, | 299 | .set_dmamode = piix_set_dmamode, |
314 | .mode_filter = ata_pci_default_filter, | 300 | .prereset = piix_pata_prereset, |
315 | 301 | }; | |
316 | .tf_load = ata_tf_load, | ||
317 | .tf_read = ata_tf_read, | ||
318 | .check_status = ata_check_status, | ||
319 | .exec_command = ata_exec_command, | ||
320 | .dev_select = ata_std_dev_select, | ||
321 | |||
322 | .bmdma_setup = ata_bmdma_setup, | ||
323 | .bmdma_start = ata_bmdma_start, | ||
324 | .bmdma_stop = ata_bmdma_stop, | ||
325 | .bmdma_status = ata_bmdma_status, | ||
326 | .qc_prep = ata_qc_prep, | ||
327 | .qc_issue = ata_qc_issue_prot, | ||
328 | .data_xfer = ata_data_xfer, | ||
329 | |||
330 | .freeze = ata_bmdma_freeze, | ||
331 | .thaw = ata_bmdma_thaw, | ||
332 | .error_handler = piix_pata_error_handler, | ||
333 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
334 | .cable_detect = ata_cable_40wire, | ||
335 | |||
336 | .irq_clear = ata_bmdma_irq_clear, | ||
337 | .irq_on = ata_irq_on, | ||
338 | 302 | ||
339 | .port_start = ata_port_start, | 303 | static struct ata_port_operations piix_vmw_ops = { |
304 | .inherits = &piix_pata_ops, | ||
305 | .bmdma_status = piix_vmw_bmdma_status, | ||
340 | }; | 306 | }; |
341 | 307 | ||
342 | static const struct ata_port_operations ich_pata_ops = { | 308 | static struct ata_port_operations ich_pata_ops = { |
343 | .set_piomode = piix_set_piomode, | 309 | .inherits = &piix_pata_ops, |
344 | .set_dmamode = ich_set_dmamode, | ||
345 | .mode_filter = ata_pci_default_filter, | ||
346 | |||
347 | .tf_load = ata_tf_load, | ||
348 | .tf_read = ata_tf_read, | ||
349 | .check_status = ata_check_status, | ||
350 | .exec_command = ata_exec_command, | ||
351 | .dev_select = ata_std_dev_select, | ||
352 | |||
353 | .bmdma_setup = ata_bmdma_setup, | ||
354 | .bmdma_start = ata_bmdma_start, | ||
355 | .bmdma_stop = ata_bmdma_stop, | ||
356 | .bmdma_status = ata_bmdma_status, | ||
357 | .qc_prep = ata_qc_prep, | ||
358 | .qc_issue = ata_qc_issue_prot, | ||
359 | .data_xfer = ata_data_xfer, | ||
360 | |||
361 | .freeze = ata_bmdma_freeze, | ||
362 | .thaw = ata_bmdma_thaw, | ||
363 | .error_handler = piix_pata_error_handler, | ||
364 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
365 | .cable_detect = ich_pata_cable_detect, | 310 | .cable_detect = ich_pata_cable_detect, |
366 | 311 | .set_dmamode = ich_set_dmamode, | |
367 | .irq_clear = ata_bmdma_irq_clear, | ||
368 | .irq_on = ata_irq_on, | ||
369 | |||
370 | .port_start = ata_port_start, | ||
371 | }; | 312 | }; |
372 | 313 | ||
373 | static const struct ata_port_operations piix_sata_ops = { | 314 | static struct ata_port_operations piix_sata_ops = { |
374 | .tf_load = ata_tf_load, | 315 | .inherits = &ata_bmdma_port_ops, |
375 | .tf_read = ata_tf_read, | ||
376 | .check_status = ata_check_status, | ||
377 | .exec_command = ata_exec_command, | ||
378 | .dev_select = ata_std_dev_select, | ||
379 | |||
380 | .bmdma_setup = ata_bmdma_setup, | ||
381 | .bmdma_start = ata_bmdma_start, | ||
382 | .bmdma_stop = ata_bmdma_stop, | ||
383 | .bmdma_status = ata_bmdma_status, | ||
384 | .qc_prep = ata_qc_prep, | ||
385 | .qc_issue = ata_qc_issue_prot, | ||
386 | .data_xfer = ata_data_xfer, | ||
387 | |||
388 | .freeze = ata_bmdma_freeze, | ||
389 | .thaw = ata_bmdma_thaw, | ||
390 | .error_handler = ata_bmdma_error_handler, | ||
391 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
392 | |||
393 | .irq_clear = ata_bmdma_irq_clear, | ||
394 | .irq_on = ata_irq_on, | ||
395 | |||
396 | .port_start = ata_port_start, | ||
397 | }; | 316 | }; |
398 | 317 | ||
399 | static const struct ata_port_operations piix_vmw_ops = { | 318 | static struct ata_port_operations piix_sidpr_sata_ops = { |
400 | .set_piomode = piix_set_piomode, | 319 | .inherits = &piix_sata_ops, |
401 | .set_dmamode = piix_set_dmamode, | 320 | .hardreset = sata_std_hardreset, |
402 | .mode_filter = ata_pci_default_filter, | ||
403 | |||
404 | .tf_load = ata_tf_load, | ||
405 | .tf_read = ata_tf_read, | ||
406 | .check_status = ata_check_status, | ||
407 | .exec_command = ata_exec_command, | ||
408 | .dev_select = ata_std_dev_select, | ||
409 | |||
410 | .bmdma_setup = ata_bmdma_setup, | ||
411 | .bmdma_start = ata_bmdma_start, | ||
412 | .bmdma_stop = ata_bmdma_stop, | ||
413 | .bmdma_status = piix_vmw_bmdma_status, | ||
414 | .qc_prep = ata_qc_prep, | ||
415 | .qc_issue = ata_qc_issue_prot, | ||
416 | .data_xfer = ata_data_xfer, | ||
417 | |||
418 | .freeze = ata_bmdma_freeze, | ||
419 | .thaw = ata_bmdma_thaw, | ||
420 | .error_handler = piix_pata_error_handler, | ||
421 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
422 | .cable_detect = ata_cable_40wire, | ||
423 | |||
424 | .irq_handler = ata_interrupt, | ||
425 | .irq_clear = ata_bmdma_irq_clear, | ||
426 | .irq_on = ata_irq_on, | ||
427 | |||
428 | .port_start = ata_port_start, | ||
429 | }; | ||
430 | |||
431 | static const struct ata_port_operations piix_sidpr_sata_ops = { | ||
432 | .tf_load = ata_tf_load, | ||
433 | .tf_read = ata_tf_read, | ||
434 | .check_status = ata_check_status, | ||
435 | .exec_command = ata_exec_command, | ||
436 | .dev_select = ata_std_dev_select, | ||
437 | |||
438 | .bmdma_setup = ata_bmdma_setup, | ||
439 | .bmdma_start = ata_bmdma_start, | ||
440 | .bmdma_stop = ata_bmdma_stop, | ||
441 | .bmdma_status = ata_bmdma_status, | ||
442 | .qc_prep = ata_qc_prep, | ||
443 | .qc_issue = ata_qc_issue_prot, | ||
444 | .data_xfer = ata_data_xfer, | ||
445 | |||
446 | .scr_read = piix_sidpr_scr_read, | 321 | .scr_read = piix_sidpr_scr_read, |
447 | .scr_write = piix_sidpr_scr_write, | 322 | .scr_write = piix_sidpr_scr_write, |
448 | |||
449 | .freeze = ata_bmdma_freeze, | ||
450 | .thaw = ata_bmdma_thaw, | ||
451 | .error_handler = piix_sidpr_error_handler, | ||
452 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
453 | |||
454 | .irq_clear = ata_bmdma_irq_clear, | ||
455 | .irq_on = ata_irq_on, | ||
456 | |||
457 | .port_start = ata_port_start, | ||
458 | }; | 323 | }; |
459 | 324 | ||
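
The hunks above collapse each fully spelled-out piix vtable into an .inherits pointer plus the few hooks the driver actually overrides; at registration time libata walks the inherits chain and fills every hook the driver left NULL from the nearest ancestor that provides it. A minimal user-space sketch of that mechanic follows (invented names, not the kernel's helpers):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy model of ops inheritance: a table may name a parent via
     * .inherits; finalize() copies every hook the child left NULL from
     * the nearest ancestor that provides it.  Illustrative only. */
    struct port_ops {
            const struct port_ops *inherits;
            void (*qc_prep)(void);
            void (*error_handler)(void);
            void (*cable_detect)(void);
    };

    static void base_qc_prep(void)       { puts("base qc_prep"); }
    static void base_error_handler(void) { puts("base error_handler"); }
    static void drv_cable_detect(void)   { puts("driver cable_detect"); }

    static const struct port_ops base_ops = {
            .qc_prep       = base_qc_prep,
            .error_handler = base_error_handler,
    };

    static struct port_ops drv_ops = {
            .inherits     = &base_ops,
            .cable_detect = drv_cable_detect,   /* the only override */
    };

    static void finalize(struct port_ops *ops)
    {
            const struct port_ops *p;

            for (p = ops->inherits; p; p = p->inherits) {
                    if (!ops->qc_prep)       ops->qc_prep = p->qc_prep;
                    if (!ops->error_handler) ops->error_handler = p->error_handler;
                    if (!ops->cable_detect)  ops->cable_detect = p->cable_detect;
            }
            ops->inherits = NULL;
    }

    int main(void)
    {
            finalize(&drv_ops);
            drv_ops.qc_prep();        /* inherited from base_ops */
            drv_ops.cable_detect();   /* driver's own hook */
            return 0;
    }

The same resolution is what lets the new piix_sidpr_sata_ops above consist of nothing but a hardreset override and the two SCR accessors.
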
460 | static const struct piix_map_db ich5_map_db = { | 325 | static const struct piix_map_db ich5_map_db = { |
@@ -553,12 +418,11 @@ static const struct piix_map_db tolapai_map_db = { | |||
553 | static const struct piix_map_db *piix_map_db_table[] = { | 418 | static const struct piix_map_db *piix_map_db_table[] = { |
554 | [ich5_sata] = &ich5_map_db, | 419 | [ich5_sata] = &ich5_map_db, |
555 | [ich6_sata] = &ich6_map_db, | 420 | [ich6_sata] = &ich6_map_db, |
556 | [ich6_sata_ahci] = &ich6_map_db, | 421 | [ich6m_sata] = &ich6m_map_db, |
557 | [ich6m_sata_ahci] = &ich6m_map_db, | 422 | [ich8_sata] = &ich8_map_db, |
558 | [ich8_sata_ahci] = &ich8_map_db, | ||
559 | [ich8_2port_sata] = &ich8_2port_map_db, | 423 | [ich8_2port_sata] = &ich8_2port_map_db, |
560 | [ich8m_apple_sata_ahci] = &ich8m_apple_map_db, | 424 | [ich8m_apple_sata] = &ich8m_apple_map_db, |
561 | [tolapai_sata_ahci] = &tolapai_map_db, | 425 | [tolapai_sata] = &tolapai_map_db, |
562 | }; | 426 | }; |
563 | 427 | ||
564 | static struct ata_port_info piix_port_info[] = { | 428 | static struct ata_port_info piix_port_info[] = { |
@@ -624,28 +488,18 @@ static struct ata_port_info piix_port_info[] = { | |||
624 | .port_ops = &piix_sata_ops, | 488 | .port_ops = &piix_sata_ops, |
625 | }, | 489 | }, |
626 | 490 | ||
627 | [ich6_sata_ahci] = | 491 | [ich6m_sata] = |
628 | { | ||
629 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI, | ||
630 | .pio_mask = 0x1f, /* pio0-4 */ | ||
631 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
632 | .udma_mask = ATA_UDMA6, | ||
633 | .port_ops = &piix_sata_ops, | ||
634 | }, | ||
635 | |||
636 | [ich6m_sata_ahci] = | ||
637 | { | 492 | { |
638 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI, | 493 | .flags = PIIX_SATA_FLAGS, |
639 | .pio_mask = 0x1f, /* pio0-4 */ | 494 | .pio_mask = 0x1f, /* pio0-4 */ |
640 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 495 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
641 | .udma_mask = ATA_UDMA6, | 496 | .udma_mask = ATA_UDMA6, |
642 | .port_ops = &piix_sata_ops, | 497 | .port_ops = &piix_sata_ops, |
643 | }, | 498 | }, |
644 | 499 | ||
645 | [ich8_sata_ahci] = | 500 | [ich8_sata] = |
646 | { | 501 | { |
647 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI | | 502 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR, |
648 | PIIX_FLAG_SIDPR, | ||
649 | .pio_mask = 0x1f, /* pio0-4 */ | 503 | .pio_mask = 0x1f, /* pio0-4 */ |
650 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 504 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
651 | .udma_mask = ATA_UDMA6, | 505 | .udma_mask = ATA_UDMA6, |
@@ -654,27 +508,25 @@ static struct ata_port_info piix_port_info[] = { | |||
654 | 508 | ||
655 | [ich8_2port_sata] = | 509 | [ich8_2port_sata] = |
656 | { | 510 | { |
657 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI | | 511 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR, |
658 | PIIX_FLAG_SIDPR, | ||
659 | .pio_mask = 0x1f, /* pio0-4 */ | 512 | .pio_mask = 0x1f, /* pio0-4 */ |
660 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 513 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
661 | .udma_mask = ATA_UDMA6, | 514 | .udma_mask = ATA_UDMA6, |
662 | .port_ops = &piix_sata_ops, | 515 | .port_ops = &piix_sata_ops, |
663 | }, | 516 | }, |
664 | 517 | ||
665 | [tolapai_sata_ahci] = | 518 | [tolapai_sata] = |
666 | { | 519 | { |
667 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI, | 520 | .flags = PIIX_SATA_FLAGS, |
668 | .pio_mask = 0x1f, /* pio0-4 */ | 521 | .pio_mask = 0x1f, /* pio0-4 */ |
669 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 522 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
670 | .udma_mask = ATA_UDMA6, | 523 | .udma_mask = ATA_UDMA6, |
671 | .port_ops = &piix_sata_ops, | 524 | .port_ops = &piix_sata_ops, |
672 | }, | 525 | }, |
673 | 526 | ||
674 | [ich8m_apple_sata_ahci] = | 527 | [ich8m_apple_sata] = |
675 | { | 528 | { |
676 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_AHCI | | 529 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR, |
677 | PIIX_FLAG_SIDPR, | ||
678 | .pio_mask = 0x1f, /* pio0-4 */ | 530 | .pio_mask = 0x1f, /* pio0-4 */ |
679 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 531 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
680 | .udma_mask = ATA_UDMA6, | 532 | .udma_mask = ATA_UDMA6, |
@@ -683,7 +535,6 @@ static struct ata_port_info piix_port_info[] = { | |||
683 | 535 | ||
684 | [piix_pata_vmw] = | 536 | [piix_pata_vmw] = |
685 | { | 537 | { |
686 | .sht = &piix_sht, | ||
687 | .flags = PIIX_PATA_FLAGS, | 538 | .flags = PIIX_PATA_FLAGS, |
688 | .pio_mask = 0x1f, /* pio0-4 */ | 539 | .pio_mask = 0x1f, /* pio0-4 */ |
689 | .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */ | 540 | .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */ |
@@ -776,13 +627,7 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) | |||
776 | 627 | ||
777 | if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) | 628 | if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) |
778 | return -ENOENT; | 629 | return -ENOENT; |
779 | return ata_std_prereset(link, deadline); | 630 | return ata_sff_prereset(link, deadline); |
780 | } | ||
781 | |||
782 | static void piix_pata_error_handler(struct ata_port *ap) | ||
783 | { | ||
784 | ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL, | ||
785 | ata_std_postreset); | ||
786 | } | 631 | } |
787 | 632 | ||
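
With error handling inherited, the dedicated piix_pata_error_handler() wrapper removed above becomes unnecessary: the driver only supplies a .prereset hook that performs the PCI enable-bit check and then chains to ata_sff_prereset(). A hedged sketch of the resulting shape (hypothetical driver fragment, not compilable outside a kernel tree; only the helpers named in this patch plus the standard to_pci_dev() accessor are assumed):

    /* Hypothetical driver fragment: .prereset does the device-specific
     * check, everything else comes from ata_bmdma_port_ops. */
    static int example_prereset(struct ata_link *link, unsigned long deadline)
    {
            struct ata_port *ap = link->ap;
            struct pci_dev *pdev = to_pci_dev(ap->host->dev);

            /* port disabled in BIOS: report no device instead of resetting */
            if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
                    return -ENOENT;
            return ata_sff_prereset(link, deadline);
    }

    static struct ata_port_operations example_ops = {
            .inherits = &ata_bmdma_port_ops,
            .prereset = example_prereset,
    };
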
788 | /** | 633 | /** |
@@ -1168,35 +1013,6 @@ static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val) | |||
1168 | return 0; | 1013 | return 0; |
1169 | } | 1014 | } |
1170 | 1015 | ||
1171 | static int piix_sidpr_hardreset(struct ata_link *link, unsigned int *class, | ||
1172 | unsigned long deadline) | ||
1173 | { | ||
1174 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | ||
1175 | int rc; | ||
1176 | |||
1177 | /* do hardreset */ | ||
1178 | rc = sata_link_hardreset(link, timing, deadline); | ||
1179 | if (rc) { | ||
1180 | ata_link_printk(link, KERN_ERR, | ||
1181 | "COMRESET failed (errno=%d)\n", rc); | ||
1182 | return rc; | ||
1183 | } | ||
1184 | |||
1185 | /* TODO: phy layer with polling, timeouts, etc. */ | ||
1186 | if (ata_link_offline(link)) { | ||
1187 | *class = ATA_DEV_NONE; | ||
1188 | return 0; | ||
1189 | } | ||
1190 | |||
1191 | return -EAGAIN; | ||
1192 | } | ||
1193 | |||
1194 | static void piix_sidpr_error_handler(struct ata_port *ap) | ||
1195 | { | ||
1196 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, | ||
1197 | piix_sidpr_hardreset, ata_std_postreset); | ||
1198 | } | ||
1199 | |||
1200 | #ifdef CONFIG_PM | 1016 | #ifdef CONFIG_PM |
1201 | static int piix_broken_suspend(void) | 1017 | static int piix_broken_suspend(void) |
1202 | { | 1018 | { |
@@ -1633,6 +1449,16 @@ static int __devinit piix_init_one(struct pci_dev *pdev, | |||
1633 | if (rc) | 1449 | if (rc) |
1634 | return rc; | 1450 | return rc; |
1635 | 1451 | ||
1452 | /* ICH6R may be driven by either ata_piix or ahci driver | ||
1453 | * regardless of BIOS configuration. Make sure AHCI mode is | ||
1454 | * off. | ||
1455 | */ | ||
1456 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) { | ||
1457 | int rc = piix_disable_ahci(pdev); | ||
1458 | if (rc) | ||
1459 | return rc; | ||
1460 | } | ||
1461 | |||
1636 | /* SATA map init can change port_info, do it before prepping host */ | 1462 | /* SATA map init can change port_info, do it before prepping host */ |
1637 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); | 1463 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); |
1638 | if (!hpriv) | 1464 | if (!hpriv) |
@@ -1642,22 +1468,12 @@ static int __devinit piix_init_one(struct pci_dev *pdev, | |||
1642 | hpriv->map = piix_init_sata_map(pdev, port_info, | 1468 | hpriv->map = piix_init_sata_map(pdev, port_info, |
1643 | piix_map_db_table[ent->driver_data]); | 1469 | piix_map_db_table[ent->driver_data]); |
1644 | 1470 | ||
1645 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 1471 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
1646 | if (rc) | 1472 | if (rc) |
1647 | return rc; | 1473 | return rc; |
1648 | host->private_data = hpriv; | 1474 | host->private_data = hpriv; |
1649 | 1475 | ||
1650 | /* initialize controller */ | 1476 | /* initialize controller */ |
1651 | if (port_flags & PIIX_FLAG_AHCI) { | ||
1652 | u8 tmp; | ||
1653 | pci_read_config_byte(pdev, PIIX_SCC, &tmp); | ||
1654 | if (tmp == PIIX_AHCI_DEVICE) { | ||
1655 | rc = piix_disable_ahci(pdev); | ||
1656 | if (rc) | ||
1657 | return rc; | ||
1658 | } | ||
1659 | } | ||
1660 | |||
1661 | if (port_flags & ATA_FLAG_SATA) { | 1477 | if (port_flags & ATA_FLAG_SATA) { |
1662 | piix_init_pcs(host, piix_map_db_table[ent->driver_data]); | 1478 | piix_init_pcs(host, piix_map_db_table[ent->driver_data]); |
1663 | piix_init_sidpr(host); | 1479 | piix_init_sidpr(host); |
@@ -1686,7 +1502,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, | |||
1686 | } | 1502 | } |
1687 | 1503 | ||
1688 | pci_set_master(pdev); | 1504 | pci_set_master(pdev); |
1689 | return ata_pci_activate_sff_host(host, ata_interrupt, &piix_sht); | 1505 | return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); |
1690 | } | 1506 | } |
1691 | 1507 | ||
1692 | static int __init piix_init(void) | 1508 | static int __init piix_init(void) |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index bf98a566adac..8c1cfc645c85 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -77,7 +77,7 @@ void ata_acpi_associate_sata_port(struct ata_port *ap) | |||
77 | { | 77 | { |
78 | WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA)); | 78 | WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA)); |
79 | 79 | ||
80 | if (!ap->nr_pmp_links) { | 80 | if (!sata_pmp_attached(ap)) { |
81 | acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT); | 81 | acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT); |
82 | 82 | ||
83 | ap->link.device->acpi_handle = | 83 | ap->link.device->acpi_handle = |
@@ -839,7 +839,8 @@ void ata_acpi_on_resume(struct ata_port *ap) | |||
839 | */ | 839 | */ |
840 | ata_link_for_each_dev(dev, &ap->link) { | 840 | ata_link_for_each_dev(dev, &ap->link) { |
841 | ata_acpi_clear_gtf(dev); | 841 | ata_acpi_clear_gtf(dev); |
842 | if (ata_dev_get_GTF(dev, NULL) >= 0) | 842 | if (ata_dev_enabled(dev) && |
843 | ata_dev_get_GTF(dev, NULL) >= 0) | ||
843 | dev->flags |= ATA_DFLAG_ACPI_PENDING; | 844 | dev->flags |= ATA_DFLAG_ACPI_PENDING; |
844 | } | 845 | } |
845 | } else { | 846 | } else { |
@@ -849,7 +850,8 @@ void ata_acpi_on_resume(struct ata_port *ap) | |||
849 | */ | 850 | */ |
850 | ata_link_for_each_dev(dev, &ap->link) { | 851 | ata_link_for_each_dev(dev, &ap->link) { |
851 | ata_acpi_clear_gtf(dev); | 852 | ata_acpi_clear_gtf(dev); |
852 | dev->flags |= ATA_DFLAG_ACPI_PENDING; | 853 | if (ata_dev_enabled(dev)) |
854 | dev->flags |= ATA_DFLAG_ACPI_PENDING; | ||
853 | } | 855 | } |
854 | } | 856 | } |
855 | } | 857 | } |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index be95fdb69726..733eb94d055e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
47 | #include <linux/list.h> | 47 | #include <linux/list.h> |
48 | #include <linux/mm.h> | 48 | #include <linux/mm.h> |
49 | #include <linux/highmem.h> | ||
50 | #include <linux/spinlock.h> | 49 | #include <linux/spinlock.h> |
51 | #include <linux/blkdev.h> | 50 | #include <linux/blkdev.h> |
52 | #include <linux/delay.h> | 51 | #include <linux/delay.h> |
@@ -74,6 +73,19 @@ const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; | |||
74 | const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; | 73 | const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; |
75 | const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; | 74 | const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; |
76 | 75 | ||
76 | const struct ata_port_operations ata_base_port_ops = { | ||
77 | .prereset = ata_std_prereset, | ||
78 | .postreset = ata_std_postreset, | ||
79 | .error_handler = ata_std_error_handler, | ||
80 | }; | ||
81 | |||
82 | const struct ata_port_operations sata_port_ops = { | ||
83 | .inherits = &ata_base_port_ops, | ||
84 | |||
85 | .qc_defer = ata_std_qc_defer, | ||
86 | .hardreset = sata_std_hardreset, | ||
87 | }; | ||
88 | |||
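
These two tables are the root of the new inheritance chain: every driver ends up with the standard prereset/postreset and the generic error handler from ata_base_port_ops, and SATA drivers additionally inherit sata_std_hardreset and ata_std_qc_defer from sata_port_ops. A hypothetical pure-SATA LLD then reduces to a table like the sketch below (the mydrv_* names are invented; the member names are the ones used elsewhere in this patch):

    static struct ata_port_operations mydrv_ops = {
            .inherits   = &sata_port_ops,   /* qc_defer, hardreset, EH defaults */
            .qc_prep    = mydrv_qc_prep,
            .qc_issue   = mydrv_qc_issue,
            .scr_read   = mydrv_scr_read,
            .scr_write  = mydrv_scr_write,
            .port_start = mydrv_port_start,
    };
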
77 | static unsigned int ata_dev_init_params(struct ata_device *dev, | 89 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
78 | u16 heads, u16 sectors); | 90 | u16 heads, u16 sectors); |
79 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); | 91 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); |
@@ -1043,50 +1055,6 @@ static void ata_lpm_disable(struct ata_host *host) | |||
1043 | } | 1055 | } |
1044 | #endif /* CONFIG_PM */ | 1056 | #endif /* CONFIG_PM */ |
1045 | 1057 | ||
1046 | |||
1047 | /** | ||
1048 | * ata_devchk - PATA device presence detection | ||
1049 | * @ap: ATA channel to examine | ||
1050 | * @device: Device to examine (starting at zero) | ||
1051 | * | ||
1052 | * This technique was originally described in | ||
1053 | * Hale Landis's ATADRVR (www.ata-atapi.com), and | ||
1054 | * later found its way into the ATA/ATAPI spec. | ||
1055 | * | ||
1056 | * Write a pattern to the ATA shadow registers, | ||
1057 | * and if a device is present, it will respond by | ||
1058 | * correctly storing and echoing back the | ||
1059 | * ATA shadow register contents. | ||
1060 | * | ||
1061 | * LOCKING: | ||
1062 | * caller. | ||
1063 | */ | ||
1064 | |||
1065 | static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) | ||
1066 | { | ||
1067 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
1068 | u8 nsect, lbal; | ||
1069 | |||
1070 | ap->ops->dev_select(ap, device); | ||
1071 | |||
1072 | iowrite8(0x55, ioaddr->nsect_addr); | ||
1073 | iowrite8(0xaa, ioaddr->lbal_addr); | ||
1074 | |||
1075 | iowrite8(0xaa, ioaddr->nsect_addr); | ||
1076 | iowrite8(0x55, ioaddr->lbal_addr); | ||
1077 | |||
1078 | iowrite8(0x55, ioaddr->nsect_addr); | ||
1079 | iowrite8(0xaa, ioaddr->lbal_addr); | ||
1080 | |||
1081 | nsect = ioread8(ioaddr->nsect_addr); | ||
1082 | lbal = ioread8(ioaddr->lbal_addr); | ||
1083 | |||
1084 | if ((nsect == 0x55) && (lbal == 0xaa)) | ||
1085 | return 1; /* we found a device */ | ||
1086 | |||
1087 | return 0; /* nothing found */ | ||
1088 | } | ||
1089 | |||
1090 | /** | 1058 | /** |
1091 | * ata_dev_classify - determine device type based on ATA-spec signature | 1059 | * ata_dev_classify - determine device type based on ATA-spec signature |
1092 | * @tf: ATA taskfile register set for device to be identified | 1060 | * @tf: ATA taskfile register set for device to be identified |
@@ -1147,75 +1115,6 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) | |||
1147 | } | 1115 | } |
1148 | 1116 | ||
1149 | /** | 1117 | /** |
1150 | * ata_dev_try_classify - Parse returned ATA device signature | ||
1151 | * @dev: ATA device to classify (starting at zero) | ||
1152 | * @present: device seems present | ||
1153 | * @r_err: Value of error register on completion | ||
1154 | * | ||
1155 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, | ||
1156 | * an ATA/ATAPI-defined set of values is placed in the ATA | ||
1157 | * shadow registers, indicating the results of device detection | ||
1158 | * and diagnostics. | ||
1159 | * | ||
1160 | * Select the ATA device, and read the values from the ATA shadow | ||
1161 | * registers. Then parse according to the Error register value, | ||
1162 | * and the spec-defined values examined by ata_dev_classify(). | ||
1163 | * | ||
1164 | * LOCKING: | ||
1165 | * caller. | ||
1166 | * | ||
1167 | * RETURNS: | ||
1168 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. | ||
1169 | */ | ||
1170 | unsigned int ata_dev_try_classify(struct ata_device *dev, int present, | ||
1171 | u8 *r_err) | ||
1172 | { | ||
1173 | struct ata_port *ap = dev->link->ap; | ||
1174 | struct ata_taskfile tf; | ||
1175 | unsigned int class; | ||
1176 | u8 err; | ||
1177 | |||
1178 | ap->ops->dev_select(ap, dev->devno); | ||
1179 | |||
1180 | memset(&tf, 0, sizeof(tf)); | ||
1181 | |||
1182 | ap->ops->tf_read(ap, &tf); | ||
1183 | err = tf.feature; | ||
1184 | if (r_err) | ||
1185 | *r_err = err; | ||
1186 | |||
1187 | /* see if device passed diags: continue and warn later */ | ||
1188 | if (err == 0) | ||
1189 | /* diagnostic fail : do nothing _YET_ */ | ||
1190 | dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; | ||
1191 | else if (err == 1) | ||
1192 | /* do nothing */ ; | ||
1193 | else if ((dev->devno == 0) && (err == 0x81)) | ||
1194 | /* do nothing */ ; | ||
1195 | else | ||
1196 | return ATA_DEV_NONE; | ||
1197 | |||
1198 | /* determine if device is ATA or ATAPI */ | ||
1199 | class = ata_dev_classify(&tf); | ||
1200 | |||
1201 | if (class == ATA_DEV_UNKNOWN) { | ||
1202 | /* If the device failed diagnostic, it's likely to | ||
1203 | * have reported incorrect device signature too. | ||
1204 | * Assume ATA device if the device seems present but | ||
1205 | * device signature is invalid with diagnostic | ||
1206 | * failure. | ||
1207 | */ | ||
1208 | if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) | ||
1209 | class = ATA_DEV_ATA; | ||
1210 | else | ||
1211 | class = ATA_DEV_NONE; | ||
1212 | } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) | ||
1213 | class = ATA_DEV_NONE; | ||
1214 | |||
1215 | return class; | ||
1216 | } | ||
1217 | |||
1218 | /** | ||
1219 | * ata_id_string - Convert IDENTIFY DEVICE page into string | 1118 | * ata_id_string - Convert IDENTIFY DEVICE page into string |
1220 | * @id: IDENTIFY DEVICE results we will examine | 1119 | * @id: IDENTIFY DEVICE results we will examine |
1221 | * @s: string into which data is output | 1120 | * @s: string into which data is output |
@@ -1293,7 +1192,7 @@ static u64 ata_id_n_sectors(const u16 *id) | |||
1293 | } | 1192 | } |
1294 | } | 1193 | } |
1295 | 1194 | ||
1296 | static u64 ata_tf_to_lba48(struct ata_taskfile *tf) | 1195 | u64 ata_tf_to_lba48(const struct ata_taskfile *tf) |
1297 | { | 1196 | { |
1298 | u64 sectors = 0; | 1197 | u64 sectors = 0; |
1299 | 1198 | ||
@@ -1304,10 +1203,10 @@ static u64 ata_tf_to_lba48(struct ata_taskfile *tf) | |||
1304 | sectors |= (tf->lbam & 0xff) << 8; | 1203 | sectors |= (tf->lbam & 0xff) << 8; |
1305 | sectors |= (tf->lbal & 0xff); | 1204 | sectors |= (tf->lbal & 0xff); |
1306 | 1205 | ||
1307 | return ++sectors; | 1206 | return sectors; |
1308 | } | 1207 | } |
1309 | 1208 | ||
1310 | static u64 ata_tf_to_lba(struct ata_taskfile *tf) | 1209 | u64 ata_tf_to_lba(const struct ata_taskfile *tf) |
1311 | { | 1210 | { |
1312 | u64 sectors = 0; | 1211 | u64 sectors = 0; |
1313 | 1212 | ||
@@ -1316,7 +1215,7 @@ static u64 ata_tf_to_lba(struct ata_taskfile *tf) | |||
1316 | sectors |= (tf->lbam & 0xff) << 8; | 1215 | sectors |= (tf->lbam & 0xff) << 8; |
1317 | sectors |= (tf->lbal & 0xff); | 1216 | sectors |= (tf->lbal & 0xff); |
1318 | 1217 | ||
1319 | return ++sectors; | 1218 | return sectors; |
1320 | } | 1219 | } |
1321 | 1220 | ||
1322 | /** | 1221 | /** |
@@ -1361,9 +1260,9 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) | |||
1361 | } | 1260 | } |
1362 | 1261 | ||
1363 | if (lba48) | 1262 | if (lba48) |
1364 | *max_sectors = ata_tf_to_lba48(&tf); | 1263 | *max_sectors = ata_tf_to_lba48(&tf) + 1; |
1365 | else | 1264 | else |
1366 | *max_sectors = ata_tf_to_lba(&tf); | 1265 | *max_sectors = ata_tf_to_lba(&tf) + 1; |
1367 | if (dev->horkage & ATA_HORKAGE_HPA_SIZE) | 1266 | if (dev->horkage & ATA_HORKAGE_HPA_SIZE) |
1368 | (*max_sectors)--; | 1267 | (*max_sectors)--; |
1369 | return 0; | 1268 | return 0; |
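
ata_tf_to_lba48()/ata_tf_to_lba() now return the raw LBA encoded in the taskfile instead of pre-incrementing it, and the READ NATIVE MAX ADDRESS caller adds the +1 itself, since the command reports the highest addressable sector rather than a sector count. A self-contained sketch of the arithmetic (user-space model; field names follow the kernel taskfile layout):

    #include <stdint.h>
    #include <stdio.h>

    struct taskfile {                             /* only the LBA registers */
            uint8_t hob_lbah, hob_lbam, hob_lbal; /* "previous" bytes 47..24 */
            uint8_t lbah, lbam, lbal;             /* "current" bytes 23..0   */
    };

    static uint64_t tf_to_lba48(const struct taskfile *tf)
    {
            uint64_t lba = 0;

            lba |= (uint64_t)tf->hob_lbah << 40;
            lba |= (uint64_t)tf->hob_lbam << 32;
            lba |= (uint64_t)tf->hob_lbal << 24;
            lba |= (uint64_t)tf->lbah << 16;
            lba |= (uint64_t)tf->lbam << 8;
            lba |= tf->lbal;
            return lba;                  /* raw LBA, no +1 here any more */
    }

    int main(void)
    {
            /* highest addressable LBA 0x09502F8F -> 156250000 sectors,
             * i.e. exactly 80 * 10^9 bytes with 512-byte sectors */
            struct taskfile tf = {
                    .hob_lbal = 0x09, .lbah = 0x50, .lbam = 0x2f, .lbal = 0x8f,
            };
            uint64_t max_lba = tf_to_lba48(&tf);

            printf("native max LBA %llu, capacity %llu sectors\n",
                   (unsigned long long)max_lba,
                   (unsigned long long)(max_lba + 1));   /* caller adds 1 */
            return 0;
    }
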
@@ -1523,89 +1422,6 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
1523 | } | 1422 | } |
1524 | 1423 | ||
1525 | /** | 1424 | /** |
1526 | * ata_noop_dev_select - Select device 0/1 on ATA bus | ||
1527 | * @ap: ATA channel to manipulate | ||
1528 | * @device: ATA device (numbered from zero) to select | ||
1529 | * | ||
1530 | * This function performs no actual function. | ||
1531 | * | ||
1532 | * May be used as the dev_select() entry in ata_port_operations. | ||
1533 | * | ||
1534 | * LOCKING: | ||
1535 | * caller. | ||
1536 | */ | ||
1537 | void ata_noop_dev_select(struct ata_port *ap, unsigned int device) | ||
1538 | { | ||
1539 | } | ||
1540 | |||
1541 | |||
1542 | /** | ||
1543 | * ata_std_dev_select - Select device 0/1 on ATA bus | ||
1544 | * @ap: ATA channel to manipulate | ||
1545 | * @device: ATA device (numbered from zero) to select | ||
1546 | * | ||
1547 | * Use the method defined in the ATA specification to | ||
1548 | * make either device 0, or device 1, active on the | ||
1549 | * ATA channel. Works with both PIO and MMIO. | ||
1550 | * | ||
1551 | * May be used as the dev_select() entry in ata_port_operations. | ||
1552 | * | ||
1553 | * LOCKING: | ||
1554 | * caller. | ||
1555 | */ | ||
1556 | |||
1557 | void ata_std_dev_select(struct ata_port *ap, unsigned int device) | ||
1558 | { | ||
1559 | u8 tmp; | ||
1560 | |||
1561 | if (device == 0) | ||
1562 | tmp = ATA_DEVICE_OBS; | ||
1563 | else | ||
1564 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | ||
1565 | |||
1566 | iowrite8(tmp, ap->ioaddr.device_addr); | ||
1567 | ata_pause(ap); /* needed; also flushes, for mmio */ | ||
1568 | } | ||
1569 | |||
1570 | /** | ||
1571 | * ata_dev_select - Select device 0/1 on ATA bus | ||
1572 | * @ap: ATA channel to manipulate | ||
1573 | * @device: ATA device (numbered from zero) to select | ||
1574 | * @wait: non-zero to wait for Status register BSY bit to clear | ||
1575 | * @can_sleep: non-zero if context allows sleeping | ||
1576 | * | ||
1577 | * Use the method defined in the ATA specification to | ||
1578 | * make either device 0, or device 1, active on the | ||
1579 | * ATA channel. | ||
1580 | * | ||
1581 | * This is a high-level version of ata_std_dev_select(), | ||
1582 | * which additionally provides the services of inserting | ||
1583 | * the proper pauses and status polling, where needed. | ||
1584 | * | ||
1585 | * LOCKING: | ||
1586 | * caller. | ||
1587 | */ | ||
1588 | |||
1589 | void ata_dev_select(struct ata_port *ap, unsigned int device, | ||
1590 | unsigned int wait, unsigned int can_sleep) | ||
1591 | { | ||
1592 | if (ata_msg_probe(ap)) | ||
1593 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " | ||
1594 | "device %u, wait %u\n", device, wait); | ||
1595 | |||
1596 | if (wait) | ||
1597 | ata_wait_idle(ap); | ||
1598 | |||
1599 | ap->ops->dev_select(ap, device); | ||
1600 | |||
1601 | if (wait) { | ||
1602 | if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) | ||
1603 | msleep(150); | ||
1604 | ata_wait_idle(ap); | ||
1605 | } | ||
1606 | } | ||
1607 | |||
1608 | /** | ||
1609 | * ata_dump_id - IDENTIFY DEVICE info debugging output | 1425 | * ata_dump_id - IDENTIFY DEVICE info debugging output |
1610 | * @id: IDENTIFY DEVICE page to dump | 1426 | * @id: IDENTIFY DEVICE page to dump |
1611 | * | 1427 | * |
@@ -1732,8 +1548,7 @@ unsigned long ata_id_xfermask(const u16 *id) | |||
1732 | * LOCKING: | 1548 | * LOCKING: |
1733 | * Inherited from caller. | 1549 | * Inherited from caller. |
1734 | */ | 1550 | */ |
1735 | static void ata_pio_queue_task(struct ata_port *ap, void *data, | 1551 | void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay) |
1736 | unsigned long delay) | ||
1737 | { | 1552 | { |
1738 | ap->port_task_data = data; | 1553 | ap->port_task_data = data; |
1739 | 1554 | ||
@@ -2097,7 +1912,6 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
2097 | if (ata_msg_ctl(ap)) | 1912 | if (ata_msg_ctl(ap)) |
2098 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); | 1913 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); |
2099 | 1914 | ||
2100 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ | ||
2101 | retry: | 1915 | retry: |
2102 | ata_tf_init(dev, &tf); | 1916 | ata_tf_init(dev, &tf); |
2103 | 1917 | ||
@@ -2464,7 +2278,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2464 | * changed notifications and ATAPI ANs. | 2278 | * changed notifications and ATAPI ANs. |
2465 | */ | 2279 | */ |
2466 | if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && | 2280 | if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && |
2467 | (!ap->nr_pmp_links || | 2281 | (!sata_pmp_attached(ap) || |
2468 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { | 2282 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { |
2469 | unsigned int err_mask; | 2283 | unsigned int err_mask; |
2470 | 2284 | ||
@@ -2558,9 +2372,6 @@ int ata_dev_configure(struct ata_device *dev) | |||
2558 | } | 2372 | } |
2559 | } | 2373 | } |
2560 | 2374 | ||
2561 | if (ata_msg_probe(ap)) | ||
2562 | ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", | ||
2563 | __func__, ata_chk_status(ap)); | ||
2564 | return 0; | 2375 | return 0; |
2565 | 2376 | ||
2566 | err_out_nosup: | 2377 | err_out_nosup: |
@@ -3321,16 +3132,21 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
3321 | if (rc) | 3132 | if (rc) |
3322 | return rc; | 3133 | return rc; |
3323 | 3134 | ||
3324 | /* Old CFA may refuse this command, which is just fine */ | 3135 | if (dev->xfer_shift == ATA_SHIFT_PIO) { |
3325 | if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) | 3136 | /* Old CFA may refuse this command, which is just fine */ |
3326 | ign_dev_err = 1; | 3137 | if (ata_id_is_cfa(dev->id)) |
3327 | 3138 | ign_dev_err = 1; | |
3328 | /* Some very old devices and some bad newer ones fail any kind of | 3139 | /* Catch several broken garbage emulations plus some pre |
3329 | SET_XFERMODE request but support PIO0-2 timings and no IORDY */ | 3140 | ATA devices */ |
3330 | if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && | 3141 | if (ata_id_major_version(dev->id) == 0 && |
3331 | dev->pio_mode <= XFER_PIO_2) | 3142 | dev->pio_mode <= XFER_PIO_2) |
3332 | ign_dev_err = 1; | 3143 | ign_dev_err = 1; |
3333 | 3144 | /* Some very old devices and some bad newer ones fail | |
3145 | any kind of SET_XFERMODE request but support PIO0-2 | ||
3146 | timings and no IORDY */ | ||
3147 | if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) | ||
3148 | ign_dev_err = 1; | ||
3149 | } | ||
3334 | /* Early MWDMA devices do DMA but don't allow DMA mode setting. | 3150 | /* Early MWDMA devices do DMA but don't allow DMA mode setting. |
3335 | Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ | 3151 | Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ |
3336 | if (dev->xfer_shift == ATA_SHIFT_MWDMA && | 3152 | if (dev->xfer_shift == ATA_SHIFT_MWDMA && |
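
The restructured block groups every PIO-only excuse for ignoring a device error after SET_XFERMODE under one branch, and adds a new case for devices reporting ATA major version 0 (pre-ATA parts and broken emulations) at PIO2 or below. A stand-alone model of that decision, with the id-word helpers reduced to plain fields and the XFER_PIO_2 comparison expressed as pio_mode <= 2:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_model {
            bool is_pio;           /* dev->xfer_shift == ATA_SHIFT_PIO    */
            bool is_cfa;           /* ata_id_is_cfa()                     */
            int  major_version;    /* ata_id_major_version(): 0 = pre-ATA */
            bool has_iordy;        /* ata_id_has_iordy()                  */
            int  pio_mode;         /* 0..4                                */
    };

    static bool ignore_dev_err(const struct dev_model *d)
    {
            if (!d->is_pio)
                    return false;                   /* only the PIO cases here  */
            if (d->is_cfa)
                    return true;                    /* old CFA may refuse       */
            if (d->major_version == 0 && d->pio_mode <= 2)
                    return true;                    /* broken/pre-ATA emulation */
            if (!d->has_iordy && d->pio_mode <= 2)
                    return true;                    /* no IORDY, PIO0-2         */
            return false;
    }

    int main(void)
    {
            struct dev_model cfa  = { .is_pio = true, .is_cfa = true,
                                      .major_version = 5, .has_iordy = true, .pio_mode = 4 };
            struct dev_model emul = { .is_pio = true, .major_version = 0,
                                      .has_iordy = true, .pio_mode = 2 };
            struct dev_model disk = { .is_pio = true, .major_version = 7,
                                      .has_iordy = true, .pio_mode = 4 };

            printf("CFA %d, emulation %d, modern disk %d\n",
                   ignore_dev_err(&cfa), ignore_dev_err(&emul), ignore_dev_err(&disk));
            return 0;
    }
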
@@ -3474,170 +3290,73 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) | |||
3474 | } | 3290 | } |
3475 | 3291 | ||
3476 | /** | 3292 | /** |
3477 | * ata_tf_to_host - issue ATA taskfile to host controller | 3293 | * ata_wait_ready - wait for link to become ready |
3478 | * @ap: port to which command is being issued | 3294 | * @link: link to be waited on |
3479 | * @tf: ATA taskfile register set | ||
3480 | * | ||
3481 | * Issues ATA taskfile register set to ATA host controller, | ||
3482 | * with proper synchronization with interrupt handler and | ||
3483 | * other threads. | ||
3484 | * | ||
3485 | * LOCKING: | ||
3486 | * spin_lock_irqsave(host lock) | ||
3487 | */ | ||
3488 | |||
3489 | static inline void ata_tf_to_host(struct ata_port *ap, | ||
3490 | const struct ata_taskfile *tf) | ||
3491 | { | ||
3492 | ap->ops->tf_load(ap, tf); | ||
3493 | ap->ops->exec_command(ap, tf); | ||
3494 | } | ||
3495 | |||
3496 | /** | ||
3497 | * ata_busy_sleep - sleep until BSY clears, or timeout | ||
3498 | * @ap: port containing status register to be polled | ||
3499 | * @tmout_pat: impatience timeout | ||
3500 | * @tmout: overall timeout | ||
3501 | * | ||
3502 | * Sleep until ATA Status register bit BSY clears, | ||
3503 | * or a timeout occurs. | ||
3504 | * | ||
3505 | * LOCKING: | ||
3506 | * Kernel thread context (may sleep). | ||
3507 | * | ||
3508 | * RETURNS: | ||
3509 | * 0 on success, -errno otherwise. | ||
3510 | */ | ||
3511 | int ata_busy_sleep(struct ata_port *ap, | ||
3512 | unsigned long tmout_pat, unsigned long tmout) | ||
3513 | { | ||
3514 | unsigned long timer_start, timeout; | ||
3515 | u8 status; | ||
3516 | |||
3517 | status = ata_busy_wait(ap, ATA_BUSY, 300); | ||
3518 | timer_start = jiffies; | ||
3519 | timeout = timer_start + tmout_pat; | ||
3520 | while (status != 0xff && (status & ATA_BUSY) && | ||
3521 | time_before(jiffies, timeout)) { | ||
3522 | msleep(50); | ||
3523 | status = ata_busy_wait(ap, ATA_BUSY, 3); | ||
3524 | } | ||
3525 | |||
3526 | if (status != 0xff && (status & ATA_BUSY)) | ||
3527 | ata_port_printk(ap, KERN_WARNING, | ||
3528 | "port is slow to respond, please be patient " | ||
3529 | "(Status 0x%x)\n", status); | ||
3530 | |||
3531 | timeout = timer_start + tmout; | ||
3532 | while (status != 0xff && (status & ATA_BUSY) && | ||
3533 | time_before(jiffies, timeout)) { | ||
3534 | msleep(50); | ||
3535 | status = ata_chk_status(ap); | ||
3536 | } | ||
3537 | |||
3538 | if (status == 0xff) | ||
3539 | return -ENODEV; | ||
3540 | |||
3541 | if (status & ATA_BUSY) { | ||
3542 | ata_port_printk(ap, KERN_ERR, "port failed to respond " | ||
3543 | "(%lu secs, Status 0x%x)\n", | ||
3544 | tmout / HZ, status); | ||
3545 | return -EBUSY; | ||
3546 | } | ||
3547 | |||
3548 | return 0; | ||
3549 | } | ||
3550 | |||
3551 | /** | ||
3552 | * ata_wait_after_reset - wait before checking status after reset | ||
3553 | * @ap: port containing status register to be polled | ||
3554 | * @deadline: deadline jiffies for the operation | 3295 | * @deadline: deadline jiffies for the operation |
3296 | * @check_ready: callback to check link readiness | ||
3555 | * | 3297 | * |
3556 | * After reset, we need to pause a while before reading status. | 3298 | * Wait for @link to become ready. @check_ready should return |
3557 | * Also, certain combination of controller and device report 0xff | 3299 | * positive number if @link is ready, 0 if it isn't, -ENODEV if |
3558 | * for some duration (e.g. until SATA PHY is up and running) | 3300 | * link doesn't seem to be occupied, other errno for other error |
3559 | * which is interpreted as empty port in ATA world. This | 3301 | * conditions. |
3560 | * function also waits for such devices to get out of 0xff | ||
3561 | * status. | ||
3562 | * | 3302 | * |
3563 | * LOCKING: | 3303 | * Transient -ENODEV conditions are allowed for |
3564 | * Kernel thread context (may sleep). | 3304 | * ATA_TMOUT_FF_WAIT. |
3565 | */ | ||
3566 | void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline) | ||
3567 | { | ||
3568 | unsigned long until = jiffies + ATA_TMOUT_FF_WAIT; | ||
3569 | |||
3570 | if (time_before(until, deadline)) | ||
3571 | deadline = until; | ||
3572 | |||
3573 | /* Spec mandates ">= 2ms" before checking status. We wait | ||
3574 | * 150ms, because that was the magic delay used for ATAPI | ||
3575 | * devices in Hale Landis's ATADRVR, for the period of time | ||
3576 | * between when the ATA command register is written, and then | ||
3577 | * status is checked. Because waiting for "a while" before | ||
3578 | * checking status is fine, post SRST, we perform this magic | ||
3579 | * delay here as well. | ||
3580 | * | ||
3581 | * Old drivers/ide uses the 2mS rule and then waits for ready. | ||
3582 | */ | ||
3583 | msleep(150); | ||
3584 | |||
3585 | /* Wait for 0xff to clear. Some SATA devices take a long time | ||
3586 | * to clear 0xff after reset. For example, HHD424020F7SV00 | ||
3587 | * iVDR needs >= 800ms while. Quantum GoVault needs even more | ||
3588 | * than that. | ||
3589 | * | ||
3590 | * Note that some PATA controllers (pata_ali) explode if | ||
3591 | * status register is read more than once when there's no | ||
3592 | * device attached. | ||
3593 | */ | ||
3594 | if (ap->flags & ATA_FLAG_SATA) { | ||
3595 | while (1) { | ||
3596 | u8 status = ata_chk_status(ap); | ||
3597 | |||
3598 | if (status != 0xff || time_after(jiffies, deadline)) | ||
3599 | return; | ||
3600 | |||
3601 | msleep(50); | ||
3602 | } | ||
3603 | } | ||
3604 | } | ||
3605 | |||
3606 | /** | ||
3607 | * ata_wait_ready - sleep until BSY clears, or timeout | ||
3608 | * @ap: port containing status register to be polled | ||
3609 | * @deadline: deadline jiffies for the operation | ||
3610 | * | ||
3611 | * Sleep until ATA Status register bit BSY clears, or timeout | ||
3612 | * occurs. | ||
3613 | * | 3305 | * |
3614 | * LOCKING: | 3306 | * LOCKING: |
3615 | * Kernel thread context (may sleep). | 3307 | * EH context. |
3616 | * | 3308 | * |
3617 | * RETURNS: | 3309 | * RETURNS: |
3618 | * 0 on success, -errno otherwise. | 3310 | * 0 if @link is ready before @deadline; otherwise, -errno. |
3619 | */ | 3311 | */ |
3620 | int ata_wait_ready(struct ata_port *ap, unsigned long deadline) | 3312 | int ata_wait_ready(struct ata_link *link, unsigned long deadline, |
3313 | int (*check_ready)(struct ata_link *link)) | ||
3621 | { | 3314 | { |
3622 | unsigned long start = jiffies; | 3315 | unsigned long start = jiffies; |
3316 | unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT; | ||
3623 | int warned = 0; | 3317 | int warned = 0; |
3624 | 3318 | ||
3319 | if (time_after(nodev_deadline, deadline)) | ||
3320 | nodev_deadline = deadline; | ||
3321 | |||
3625 | while (1) { | 3322 | while (1) { |
3626 | u8 status = ata_chk_status(ap); | ||
3627 | unsigned long now = jiffies; | 3323 | unsigned long now = jiffies; |
3324 | int ready, tmp; | ||
3628 | 3325 | ||
3629 | if (!(status & ATA_BUSY)) | 3326 | ready = tmp = check_ready(link); |
3327 | if (ready > 0) | ||
3630 | return 0; | 3328 | return 0; |
3631 | if (!ata_link_online(&ap->link) && status == 0xff) | 3329 | |
3632 | return -ENODEV; | 3330 | /* -ENODEV could be transient. Ignore -ENODEV if link |
3331 | * is online. Also, some SATA devices take a long | ||
3332 | * time to clear 0xff after reset. For example, | ||
3333 | * HHD424020F7SV00 iVDR needs >= 800ms while Quantum | ||
3334 | * GoVault needs even more than that. Wait for | ||
3335 | * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline. | ||
3336 | * | ||
3337 | * Note that some PATA controllers (pata_ali) explode | ||
3338 | * if status register is read more than once when | ||
3339 | * there's no device attached. | ||
3340 | */ | ||
3341 | if (ready == -ENODEV) { | ||
3342 | if (ata_link_online(link)) | ||
3343 | ready = 0; | ||
3344 | else if ((link->ap->flags & ATA_FLAG_SATA) && | ||
3345 | !ata_link_offline(link) && | ||
3346 | time_before(now, nodev_deadline)) | ||
3347 | ready = 0; | ||
3348 | } | ||
3349 | |||
3350 | if (ready) | ||
3351 | return ready; | ||
3633 | if (time_after(now, deadline)) | 3352 | if (time_after(now, deadline)) |
3634 | return -EBUSY; | 3353 | return -EBUSY; |
3635 | 3354 | ||
3636 | if (!warned && time_after(now, start + 5 * HZ) && | 3355 | if (!warned && time_after(now, start + 5 * HZ) && |
3637 | (deadline - now > 3 * HZ)) { | 3356 | (deadline - now > 3 * HZ)) { |
3638 | ata_port_printk(ap, KERN_WARNING, | 3357 | ata_link_printk(link, KERN_WARNING, |
3639 | "port is slow to respond, please be patient " | 3358 | "link is slow to respond, please be patient " |
3640 | "(Status 0x%x)\n", status); | 3359 | "(ready=%d)\n", tmp); |
3641 | warned = 1; | 3360 | warned = 1; |
3642 | } | 3361 | } |
3643 | 3362 | ||
@@ -3645,179 +3364,26 @@ int ata_wait_ready(struct ata_port *ap, unsigned long deadline) | |||
3645 | } | 3364 | } |
3646 | } | 3365 | } |
3647 | 3366 | ||
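
The new ata_wait_ready() expresses readiness through a check_ready() callback: a positive return means ready, 0 means keep polling, and -ENODEV is tolerated as transient for a short 0xff window (ATA_TMOUT_FF_WAIT) so SATA devices that read 0xff right after reset are not declared absent too early; ata_wait_after_reset() below is simply the mandated post-reset delay followed by this wait. A simplified user-space model of the polling contract, with a tick counter standing in for jiffies and the online/offline checks omitted:

    #include <errno.h>
    #include <stdio.h>

    typedef int (*check_ready_fn)(void *ctx);

    static int wait_ready(check_ready_fn check_ready, void *ctx,
                          long deadline, long ff_deadline)
    {
            long now;

            for (now = 0; ; now++) {
                    int ready = check_ready(ctx);

                    if (ready > 0)
                            return 0;
                    if (ready == -ENODEV && now < ff_deadline)
                            ready = 0;          /* transient: keep waiting */
                    if (ready)
                            return ready;       /* hard error              */
                    if (now > deadline)
                            return -EBUSY;      /* timed out               */
            }
    }

    /* Device that reads 0xff (=> -ENODEV) for 3 polls, stays busy for 5
     * more, then reports ready. */
    static int fake_check_ready(void *ctx)
    {
            int *tick = ctx;

            (*tick)++;
            if (*tick <= 3)
                    return -ENODEV;
            if (*tick <= 8)
                    return 0;
            return 1;
    }

    int main(void)
    {
            int tick = 0;
            int rc = wait_ready(fake_check_ready, &tick, 100, 10);

            printf("wait_ready -> %d after %d polls\n", rc, tick);
            return 0;
    }
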
3648 | static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask, | ||
3649 | unsigned long deadline) | ||
3650 | { | ||
3651 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
3652 | unsigned int dev0 = devmask & (1 << 0); | ||
3653 | unsigned int dev1 = devmask & (1 << 1); | ||
3654 | int rc, ret = 0; | ||
3655 | |||
3656 | /* if device 0 was found in ata_devchk, wait for its | ||
3657 | * BSY bit to clear | ||
3658 | */ | ||
3659 | if (dev0) { | ||
3660 | rc = ata_wait_ready(ap, deadline); | ||
3661 | if (rc) { | ||
3662 | if (rc != -ENODEV) | ||
3663 | return rc; | ||
3664 | ret = rc; | ||
3665 | } | ||
3666 | } | ||
3667 | |||
3668 | /* if device 1 was found in ata_devchk, wait for register | ||
3669 | * access briefly, then wait for BSY to clear. | ||
3670 | */ | ||
3671 | if (dev1) { | ||
3672 | int i; | ||
3673 | |||
3674 | ap->ops->dev_select(ap, 1); | ||
3675 | |||
3676 | /* Wait for register access. Some ATAPI devices fail | ||
3677 | * to set nsect/lbal after reset, so don't waste too | ||
3678 | * much time on it. We're gonna wait for !BSY anyway. | ||
3679 | */ | ||
3680 | for (i = 0; i < 2; i++) { | ||
3681 | u8 nsect, lbal; | ||
3682 | |||
3683 | nsect = ioread8(ioaddr->nsect_addr); | ||
3684 | lbal = ioread8(ioaddr->lbal_addr); | ||
3685 | if ((nsect == 1) && (lbal == 1)) | ||
3686 | break; | ||
3687 | msleep(50); /* give drive a breather */ | ||
3688 | } | ||
3689 | |||
3690 | rc = ata_wait_ready(ap, deadline); | ||
3691 | if (rc) { | ||
3692 | if (rc != -ENODEV) | ||
3693 | return rc; | ||
3694 | ret = rc; | ||
3695 | } | ||
3696 | } | ||
3697 | |||
3698 | /* is all this really necessary? */ | ||
3699 | ap->ops->dev_select(ap, 0); | ||
3700 | if (dev1) | ||
3701 | ap->ops->dev_select(ap, 1); | ||
3702 | if (dev0) | ||
3703 | ap->ops->dev_select(ap, 0); | ||
3704 | |||
3705 | return ret; | ||
3706 | } | ||
3707 | |||
3708 | static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | ||
3709 | unsigned long deadline) | ||
3710 | { | ||
3711 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
3712 | |||
3713 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); | ||
3714 | |||
3715 | /* software reset. causes dev0 to be selected */ | ||
3716 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
3717 | udelay(20); /* FIXME: flush */ | ||
3718 | iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); | ||
3719 | udelay(20); /* FIXME: flush */ | ||
3720 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
3721 | |||
3722 | /* wait a while before checking status */ | ||
3723 | ata_wait_after_reset(ap, deadline); | ||
3724 | |||
3725 | /* Before we perform post reset processing we want to see if | ||
3726 | * the bus shows 0xFF because the odd clown forgets the D7 | ||
3727 | * pulldown resistor. | ||
3728 | */ | ||
3729 | if (ata_chk_status(ap) == 0xFF) | ||
3730 | return -ENODEV; | ||
3731 | |||
3732 | return ata_bus_post_reset(ap, devmask, deadline); | ||
3733 | } | ||
3734 | |||
3735 | /** | 3367 | /** |
3736 | * ata_bus_reset - reset host port and associated ATA channel | 3368 | * ata_wait_after_reset - wait for link to become ready after reset |
3737 | * @ap: port to reset | 3369 | * @link: link to be waited on |
3370 | * @deadline: deadline jiffies for the operation | ||
3371 | * @check_ready: callback to check link readiness | ||
3738 | * | 3372 | * |
3739 | * This is typically the first time we actually start issuing | 3373 | * Wait for @link to become ready after reset. |
3740 | * commands to the ATA channel. We wait for BSY to clear, then | ||
3741 | * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its | ||
3742 | * result. Determine what devices, if any, are on the channel | ||
3743 | * by looking at the device 0/1 error register. Look at the signature | ||
3744 | * stored in each device's taskfile registers, to determine if | ||
3745 | * the device is ATA or ATAPI. | ||
3746 | * | 3374 | * |
3747 | * LOCKING: | 3375 | * LOCKING: |
3748 | * PCI/etc. bus probe sem. | 3376 | * EH context. |
3749 | * Obtains host lock. | ||
3750 | * | 3377 | * |
3751 | * SIDE EFFECTS: | 3378 | * RETURNS: |
3752 | * Sets ATA_FLAG_DISABLED if bus reset fails. | 3379 | * 0 if @link is ready before @deadline; otherwise, -errno. |
3753 | */ | 3380 | */ |
3754 | 3381 | int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, |
3755 | void ata_bus_reset(struct ata_port *ap) | 3382 | int (*check_ready)(struct ata_link *link)) |
3756 | { | 3383 | { |
3757 | struct ata_device *device = ap->link.device; | 3384 | msleep(ATA_WAIT_AFTER_RESET_MSECS); |
3758 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
3759 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
3760 | u8 err; | ||
3761 | unsigned int dev0, dev1 = 0, devmask = 0; | ||
3762 | int rc; | ||
3763 | |||
3764 | DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); | ||
3765 | |||
3766 | /* determine if device 0/1 are present */ | ||
3767 | if (ap->flags & ATA_FLAG_SATA_RESET) | ||
3768 | dev0 = 1; | ||
3769 | else { | ||
3770 | dev0 = ata_devchk(ap, 0); | ||
3771 | if (slave_possible) | ||
3772 | dev1 = ata_devchk(ap, 1); | ||
3773 | } | ||
3774 | |||
3775 | if (dev0) | ||
3776 | devmask |= (1 << 0); | ||
3777 | if (dev1) | ||
3778 | devmask |= (1 << 1); | ||
3779 | 3385 | ||
3780 | /* select device 0 again */ | 3386 | return ata_wait_ready(link, deadline, check_ready); |
3781 | ap->ops->dev_select(ap, 0); | ||
3782 | |||
3783 | /* issue bus reset */ | ||
3784 | if (ap->flags & ATA_FLAG_SRST) { | ||
3785 | rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); | ||
3786 | if (rc && rc != -ENODEV) | ||
3787 | goto err_out; | ||
3788 | } | ||
3789 | |||
3790 | /* | ||
3791 | * determine by signature whether we have ATA or ATAPI devices | ||
3792 | */ | ||
3793 | device[0].class = ata_dev_try_classify(&device[0], dev0, &err); | ||
3794 | if ((slave_possible) && (err != 0x81)) | ||
3795 | device[1].class = ata_dev_try_classify(&device[1], dev1, &err); | ||
3796 | |||
3797 | /* is double-select really necessary? */ | ||
3798 | if (device[1].class != ATA_DEV_NONE) | ||
3799 | ap->ops->dev_select(ap, 1); | ||
3800 | if (device[0].class != ATA_DEV_NONE) | ||
3801 | ap->ops->dev_select(ap, 0); | ||
3802 | |||
3803 | /* if no devices were detected, disable this port */ | ||
3804 | if ((device[0].class == ATA_DEV_NONE) && | ||
3805 | (device[1].class == ATA_DEV_NONE)) | ||
3806 | goto err_out; | ||
3807 | |||
3808 | if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { | ||
3809 | /* set up device control for ATA_FLAG_SATA_RESET */ | ||
3810 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
3811 | } | ||
3812 | |||
3813 | DPRINTK("EXIT\n"); | ||
3814 | return; | ||
3815 | |||
3816 | err_out: | ||
3817 | ata_port_printk(ap, KERN_ERR, "disabling port\n"); | ||
3818 | ata_port_disable(ap); | ||
3819 | |||
3820 | DPRINTK("EXIT\n"); | ||
3821 | } | 3387 | } |
3822 | 3388 | ||
3823 | /** | 3389 | /** |
@@ -3906,7 +3472,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, | |||
3906 | int sata_link_resume(struct ata_link *link, const unsigned long *params, | 3472 | int sata_link_resume(struct ata_link *link, const unsigned long *params, |
3907 | unsigned long deadline) | 3473 | unsigned long deadline) |
3908 | { | 3474 | { |
3909 | u32 scontrol; | 3475 | u32 scontrol, serror; |
3910 | int rc; | 3476 | int rc; |
3911 | 3477 | ||
3912 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) | 3478 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
@@ -3922,7 +3488,25 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, | |||
3922 | */ | 3488 | */ |
3923 | msleep(200); | 3489 | msleep(200); |
3924 | 3490 | ||
3925 | return sata_link_debounce(link, params, deadline); | 3491 | if ((rc = sata_link_debounce(link, params, deadline))) |
3492 | return rc; | ||
3493 | |||
3494 | /* Clear SError. PMP and some host PHYs require this to | ||
3495 | * operate and clearing should be done before checking PHY | ||
3496 | * online status to avoid race condition (hotplugging between | ||
3497 | * link resume and status check). | ||
3498 | */ | ||
3499 | if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) | ||
3500 | rc = sata_scr_write(link, SCR_ERROR, serror); | ||
3501 | if (rc == 0 || rc == -EINVAL) { | ||
3502 | unsigned long flags; | ||
3503 | |||
3504 | spin_lock_irqsave(link->ap->lock, flags); | ||
3505 | link->eh_info.serror = 0; | ||
3506 | spin_unlock_irqrestore(link->ap->lock, flags); | ||
3507 | rc = 0; | ||
3508 | } | ||
3509 | return rc; | ||
3926 | } | 3510 | } |
3927 | 3511 | ||
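
sata_link_resume() now clears SError right after debouncing, because PMPs and some host PHYs will not operate with stale error bits set, and clearing before the online check closes a hotplug race between link resume and status sampling. SError is a write-1-to-clear register, so writing back the value just read clears exactly what was observed; a toy model of that idiom (register contents invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t serror_reg = 0x00040005;        /* invented PHY event bits */

    static uint32_t scr_read(void)          { return serror_reg; }
    static void     scr_write(uint32_t val) { serror_reg &= ~val; }  /* W1C */

    int main(void)
    {
            uint32_t snapshot = scr_read();

            scr_write(snapshot);                    /* clear exactly what we saw */
            printf("cleared 0x%08x, SError now 0x%08x\n",
                   (unsigned)snapshot, (unsigned)serror_reg);
            return 0;
    }
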
3928 | /** | 3512 | /** |
@@ -3949,17 +3533,6 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) | |||
3949 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | 3533 | const unsigned long *timing = sata_ehc_deb_timing(ehc); |
3950 | int rc; | 3534 | int rc; |
3951 | 3535 | ||
3952 | /* handle link resume */ | ||
3953 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && | ||
3954 | (link->flags & ATA_LFLAG_HRST_TO_RESUME)) | ||
3955 | ehc->i.action |= ATA_EH_HARDRESET; | ||
3956 | |||
3957 | /* Some PMPs don't work with only SRST, force hardreset if PMP | ||
3958 | * is supported. | ||
3959 | */ | ||
3960 | if (ap->flags & ATA_FLAG_PMP) | ||
3961 | ehc->i.action |= ATA_EH_HARDRESET; | ||
3962 | |||
3963 | /* if we're about to do hardreset, nothing more to do */ | 3536 | /* if we're about to do hardreset, nothing more to do */ |
3964 | if (ehc->i.action & ATA_EH_HARDRESET) | 3537 | if (ehc->i.action & ATA_EH_HARDRESET) |
3965 | return 0; | 3538 | return 0; |
@@ -3973,88 +3546,30 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) | |||
3973 | "link for reset (errno=%d)\n", rc); | 3546 | "link for reset (errno=%d)\n", rc); |
3974 | } | 3547 | } |
3975 | 3548 | ||
3976 | /* Wait for !BSY if the controller can wait for the first D2H | 3549 | /* no point in trying softreset on offline link */ |
3977 | * Reg FIS and we don't know that no device is attached. | 3550 | if (ata_link_offline(link)) |
3978 | */ | 3551 | ehc->i.action &= ~ATA_EH_SOFTRESET; |
3979 | if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) { | ||
3980 | rc = ata_wait_ready(ap, deadline); | ||
3981 | if (rc && rc != -ENODEV) { | ||
3982 | ata_link_printk(link, KERN_WARNING, "device not ready " | ||
3983 | "(errno=%d), forcing hardreset\n", rc); | ||
3984 | ehc->i.action |= ATA_EH_HARDRESET; | ||
3985 | } | ||
3986 | } | ||
3987 | 3552 | ||
3988 | return 0; | 3553 | return 0; |
3989 | } | 3554 | } |
3990 | 3555 | ||
3991 | /** | 3556 | /** |
3992 | * ata_std_softreset - reset host port via ATA SRST | ||
3993 | * @link: ATA link to reset | ||
3994 | * @classes: resulting classes of attached devices | ||
3995 | * @deadline: deadline jiffies for the operation | ||
3996 | * | ||
3997 | * Reset host port using ATA SRST. | ||
3998 | * | ||
3999 | * LOCKING: | ||
4000 | * Kernel thread context (may sleep) | ||
4001 | * | ||
4002 | * RETURNS: | ||
4003 | * 0 on success, -errno otherwise. | ||
4004 | */ | ||
4005 | int ata_std_softreset(struct ata_link *link, unsigned int *classes, | ||
4006 | unsigned long deadline) | ||
4007 | { | ||
4008 | struct ata_port *ap = link->ap; | ||
4009 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
4010 | unsigned int devmask = 0; | ||
4011 | int rc; | ||
4012 | u8 err; | ||
4013 | |||
4014 | DPRINTK("ENTER\n"); | ||
4015 | |||
4016 | if (ata_link_offline(link)) { | ||
4017 | classes[0] = ATA_DEV_NONE; | ||
4018 | goto out; | ||
4019 | } | ||
4020 | |||
4021 | /* determine if device 0/1 are present */ | ||
4022 | if (ata_devchk(ap, 0)) | ||
4023 | devmask |= (1 << 0); | ||
4024 | if (slave_possible && ata_devchk(ap, 1)) | ||
4025 | devmask |= (1 << 1); | ||
4026 | |||
4027 | /* select device 0 again */ | ||
4028 | ap->ops->dev_select(ap, 0); | ||
4029 | |||
4030 | /* issue bus reset */ | ||
4031 | DPRINTK("about to softreset, devmask=%x\n", devmask); | ||
4032 | rc = ata_bus_softreset(ap, devmask, deadline); | ||
4033 | /* if link is occupied, -ENODEV too is an error */ | ||
4034 | if (rc && (rc != -ENODEV || sata_scr_valid(link))) { | ||
4035 | ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); | ||
4036 | return rc; | ||
4037 | } | ||
4038 | |||
4039 | /* determine by signature whether we have ATA or ATAPI devices */ | ||
4040 | classes[0] = ata_dev_try_classify(&link->device[0], | ||
4041 | devmask & (1 << 0), &err); | ||
4042 | if (slave_possible && err != 0x81) | ||
4043 | classes[1] = ata_dev_try_classify(&link->device[1], | ||
4044 | devmask & (1 << 1), &err); | ||
4045 | |||
4046 | out: | ||
4047 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | ||
4048 | return 0; | ||
4049 | } | ||
4050 | |||
4051 | /** | ||
4052 | * sata_link_hardreset - reset link via SATA phy reset | 3557 | * sata_link_hardreset - reset link via SATA phy reset |
4053 | * @link: link to reset | 3558 | * @link: link to reset |
4054 | * @timing: timing parameters { interval, duration, timeout } in msec | 3559 | * @timing: timing parameters { interval, duration, timeout } in msec |
4055 | * @deadline: deadline jiffies for the operation | 3560 | * @deadline: deadline jiffies for the operation |
3561 | * @online: optional out parameter indicating link onlineness | ||
3562 | * @check_ready: optional callback to check link readiness | ||
4056 | * | 3563 | * |
4057 | * SATA phy-reset @link using DET bits of SControl register. | 3564 | * SATA phy-reset @link using DET bits of SControl register. |
3565 | * After hardreset, link readiness is waited upon using | ||
3566 | * ata_wait_ready() if @check_ready is specified. LLDs are | ||
3567 | * allowed to not specify @check_ready and wait itself after this | ||
3568 | * function returns. Device classification is LLD's | ||
3569 | * responsibility. | ||
3570 | * | ||
3571 | * *@online is set to one iff reset succeeded and @link is online | ||
3572 | * after reset. | ||
4058 | * | 3573 | * |
4059 | * LOCKING: | 3574 | * LOCKING: |
4060 | * Kernel thread context (may sleep) | 3575 | * Kernel thread context (may sleep) |
@@ -4063,13 +3578,17 @@ int ata_std_softreset(struct ata_link *link, unsigned int *classes, | |||
4063 | * 0 on success, -errno otherwise. | 3578 | * 0 on success, -errno otherwise. |
4064 | */ | 3579 | */ |
4065 | int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, | 3580 | int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, |
4066 | unsigned long deadline) | 3581 | unsigned long deadline, |
3582 | bool *online, int (*check_ready)(struct ata_link *)) | ||
4067 | { | 3583 | { |
4068 | u32 scontrol; | 3584 | u32 scontrol; |
4069 | int rc; | 3585 | int rc; |
4070 | 3586 | ||
4071 | DPRINTK("ENTER\n"); | 3587 | DPRINTK("ENTER\n"); |
4072 | 3588 | ||
3589 | if (online) | ||
3590 | *online = false; | ||
3591 | |||
4073 | if (sata_set_spd_needed(link)) { | 3592 | if (sata_set_spd_needed(link)) { |
4074 | /* SATA spec says nothing about how to reconfigure | 3593 | /* SATA spec says nothing about how to reconfigure |
4075 | * spd. To be on the safe side, turn off phy during | 3594 | * spd. To be on the safe side, turn off phy during |
@@ -4103,77 +3622,69 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, | |||
4103 | 3622 | ||
4104 | /* bring link back */ | 3623 | /* bring link back */ |
4105 | rc = sata_link_resume(link, timing, deadline); | 3624 | rc = sata_link_resume(link, timing, deadline); |
3625 | if (rc) | ||
3626 | goto out; | ||
3627 | /* if link is offline nothing more to do */ | ||
3628 | if (ata_link_offline(link)) | ||
3629 | goto out; | ||
3630 | |||
3631 | /* Link is online. From this point, -ENODEV too is an error. */ | ||
3632 | if (online) | ||
3633 | *online = true; | ||
3634 | |||
3635 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { | ||
3636 | /* If PMP is supported, we have to do follow-up SRST. | ||
3637 | * Some PMPs don't send D2H Reg FIS after hardreset if | ||
3638 | * the first port is empty. Wait only for | ||
3639 | * ATA_TMOUT_PMP_SRST_WAIT. | ||
3640 | */ | ||
3641 | if (check_ready) { | ||
3642 | unsigned long pmp_deadline; | ||
3643 | |||
3644 | pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT; | ||
3645 | if (time_after(pmp_deadline, deadline)) | ||
3646 | pmp_deadline = deadline; | ||
3647 | ata_wait_ready(link, pmp_deadline, check_ready); | ||
3648 | } | ||
3649 | rc = -EAGAIN; | ||
3650 | goto out; | ||
3651 | } | ||
3652 | |||
3653 | rc = 0; | ||
3654 | if (check_ready) | ||
3655 | rc = ata_wait_ready(link, deadline, check_ready); | ||
4106 | out: | 3656 | out: |
3657 | if (rc && rc != -EAGAIN) | ||
3658 | ata_link_printk(link, KERN_ERR, | ||
3659 | "COMRESET failed (errno=%d)\n", rc); | ||
4107 | DPRINTK("EXIT, rc=%d\n", rc); | 3660 | DPRINTK("EXIT, rc=%d\n", rc); |
4108 | return rc; | 3661 | return rc; |
4109 | } | 3662 | } |
4110 | 3663 | ||
4111 | /** | 3664 | /** |
4112 | * sata_std_hardreset - reset host port via SATA phy reset | 3665 | * sata_std_hardreset - COMRESET w/o waiting or classification |
4113 | * @link: link to reset | 3666 | * @link: link to reset |
4114 | * @class: resulting class of attached device | 3667 | * @class: resulting class of attached device |
4115 | * @deadline: deadline jiffies for the operation | 3668 | * @deadline: deadline jiffies for the operation |
4116 | * | 3669 | * |
4117 | * SATA phy-reset host port using DET bits of SControl register, | 3670 | * Standard SATA COMRESET w/o waiting or classification. |
4118 | * wait for !BSY and classify the attached device. | ||
4119 | * | 3671 | * |
4120 | * LOCKING: | 3672 | * LOCKING: |
4121 | * Kernel thread context (may sleep) | 3673 | * Kernel thread context (may sleep) |
4122 | * | 3674 | * |
4123 | * RETURNS: | 3675 | * RETURNS: |
4124 | * 0 on success, -errno otherwise. | 3676 | * 0 if link offline, -EAGAIN if link online, -errno on errors. |
4125 | */ | 3677 | */ |
4126 | int sata_std_hardreset(struct ata_link *link, unsigned int *class, | 3678 | int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
4127 | unsigned long deadline) | 3679 | unsigned long deadline) |
4128 | { | 3680 | { |
4129 | struct ata_port *ap = link->ap; | ||
4130 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | 3681 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); |
3682 | bool online; | ||
4131 | int rc; | 3683 | int rc; |
4132 | 3684 | ||
4133 | DPRINTK("ENTER\n"); | ||
4134 | |||
4135 | /* do hardreset */ | 3685 | /* do hardreset */ |
4136 | rc = sata_link_hardreset(link, timing, deadline); | 3686 | rc = sata_link_hardreset(link, timing, deadline, &online, NULL); |
4137 | if (rc) { | 3687 | return online ? -EAGAIN : rc; |
4138 | ata_link_printk(link, KERN_ERR, | ||
4139 | "COMRESET failed (errno=%d)\n", rc); | ||
4140 | return rc; | ||
4141 | } | ||
4142 | |||
4143 | /* TODO: phy layer with polling, timeouts, etc. */ | ||
4144 | if (ata_link_offline(link)) { | ||
4145 | *class = ATA_DEV_NONE; | ||
4146 | DPRINTK("EXIT, link offline\n"); | ||
4147 | return 0; | ||
4148 | } | ||
4149 | |||
4150 | /* wait a while before checking status */ | ||
4151 | ata_wait_after_reset(ap, deadline); | ||
4152 | |||
4153 | /* If PMP is supported, we have to do follow-up SRST. Note | ||
4154 | * that some PMPs don't send D2H Reg FIS after hardreset at | ||
4155 | * all if the first port is empty. Wait for it just for a | ||
4156 | * second and request follow-up SRST. | ||
4157 | */ | ||
4158 | if (ap->flags & ATA_FLAG_PMP) { | ||
4159 | ata_wait_ready(ap, jiffies + HZ); | ||
4160 | return -EAGAIN; | ||
4161 | } | ||
4162 | |||
4163 | rc = ata_wait_ready(ap, deadline); | ||
4164 | /* link occupied, -ENODEV too is an error */ | ||
4165 | if (rc) { | ||
4166 | ata_link_printk(link, KERN_ERR, | ||
4167 | "COMRESET failed (errno=%d)\n", rc); | ||
4168 | return rc; | ||
4169 | } | ||
4170 | |||
4171 | ap->ops->dev_select(ap, 0); /* probably unnecessary */ | ||
4172 | |||
4173 | *class = ata_dev_try_classify(link->device, 1, NULL); | ||
4174 | |||
4175 | DPRINTK("EXIT, class=%u\n", *class); | ||
4176 | return 0; | ||
4177 | } | 3688 | } |
4178 | 3689 | ||
4179 | /** | 3690 | /** |
@@ -4190,35 +3701,11 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class, | |||
4190 | */ | 3701 | */ |
4191 | void ata_std_postreset(struct ata_link *link, unsigned int *classes) | 3702 | void ata_std_postreset(struct ata_link *link, unsigned int *classes) |
4192 | { | 3703 | { |
4193 | struct ata_port *ap = link->ap; | ||
4194 | u32 serror; | ||
4195 | |||
4196 | DPRINTK("ENTER\n"); | 3704 | DPRINTK("ENTER\n"); |
4197 | 3705 | ||
4198 | /* print link status */ | 3706 | /* print link status */ |
4199 | sata_print_link_status(link); | 3707 | sata_print_link_status(link); |
4200 | 3708 | ||
4201 | /* clear SError */ | ||
4202 | if (sata_scr_read(link, SCR_ERROR, &serror) == 0) | ||
4203 | sata_scr_write(link, SCR_ERROR, serror); | ||
4204 | link->eh_info.serror = 0; | ||
4205 | |||
4206 | /* is double-select really necessary? */ | ||
4207 | if (classes[0] != ATA_DEV_NONE) | ||
4208 | ap->ops->dev_select(ap, 1); | ||
4209 | if (classes[1] != ATA_DEV_NONE) | ||
4210 | ap->ops->dev_select(ap, 0); | ||
4211 | |||
4212 | /* bail out if no device is present */ | ||
4213 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | ||
4214 | DPRINTK("EXIT, no device\n"); | ||
4215 | return; | ||
4216 | } | ||
4217 | |||
4218 | /* set up device control */ | ||
4219 | if (ap->ioaddr.ctl_addr) | ||
4220 | iowrite8(ap->ctl, ap->ioaddr.ctl_addr); | ||
4221 | |||
4222 | DPRINTK("EXIT\n"); | 3709 | DPRINTK("EXIT\n"); |
4223 | } | 3710 | } |
4224 | 3711 | ||
@@ -4528,6 +4015,53 @@ static int ata_is_40wire(struct ata_device *dev) | |||
4528 | } | 4015 | } |
4529 | 4016 | ||
4530 | /** | 4017 | /** |
4018 | * cable_is_40wire - 40/80/SATA decider | ||
4019 | * @ap: port to consider | ||
4020 | * | ||
4021 | * This function encapsulates the policy for speed management | ||
4022 | * in one place. At the moment we don't cache the result but | ||
4023 | * there is a good case for setting ap->cbl to the result when | ||
4024 | * we are called with unknown cables (and figuring out if it | ||
4025 | * impacts hotplug at all). | ||
4026 | * | ||
4027 | * Return 1 if the cable appears to be 40 wire. | ||
4028 | */ | ||
4029 | |||
4030 | static int cable_is_40wire(struct ata_port *ap) | ||
4031 | { | ||
4032 | struct ata_link *link; | ||
4033 | struct ata_device *dev; | ||
4034 | |||
4035 | /* If the controller thinks we are 40 wire, we are */ | ||
4036 | if (ap->cbl == ATA_CBL_PATA40) | ||
4037 | return 1; | ||
4038 | /* If the controller thinks we are 80 wire, we are */ | ||
4039 | if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) | ||
4040 | return 0; | ||
4042 | /* If the system is known to be 40 wire short cable (e.g. a laptop), | ||
4042 | then we allow 80 wire modes even if the drive isn't sure */ | ||
4043 | if (ap->cbl == ATA_CBL_PATA40_SHORT) | ||
4044 | return 0; | ||
4045 | /* If the controller doesn't know, we scan | ||
4046 | |||
4047 | - Note: We look for all 40 wire detects at this point. | ||
4048 | Any 80 wire detect is taken to be 80 wire cable | ||
4049 | because | ||
4050 | - In many setups only the one drive (slave if present) | ||
4051 | will give a valid detect | ||
4052 | - If you have a non-detect-capable drive you don't | ||
4053 | want it to colour the choice | ||
4054 | */ | ||
4055 | ata_port_for_each_link(link, ap) { | ||
4056 | ata_link_for_each_dev(dev, link) { | ||
4057 | if (!ata_is_40wire(dev)) | ||
4058 | return 0; | ||
4059 | } | ||
4060 | } | ||
4061 | return 1; | ||
4062 | } | ||
4063 | |||
4064 | /** | ||
4531 | * ata_dev_xfermask - Compute supported xfermask of the given device | 4065 | * ata_dev_xfermask - Compute supported xfermask of the given device |
4532 | * @dev: Device to compute xfermask for | 4066 | * @dev: Device to compute xfermask for |
4533 | * | 4067 | * |
@@ -4595,10 +4129,7 @@ static void ata_dev_xfermask(struct ata_device *dev) | |||
4595 | */ | 4129 | */ |
4596 | if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) | 4130 | if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) |
4597 | /* UDMA/44 or higher would be available */ | 4131 | /* UDMA/44 or higher would be available */ |
4598 | if ((ap->cbl == ATA_CBL_PATA40) || | 4132 | if (cable_is_40wire(ap)) { |
4599 | (ata_is_40wire(dev) && | ||
4600 | (ap->cbl == ATA_CBL_PATA_UNK || | ||
4601 | ap->cbl == ATA_CBL_PATA80))) { | ||
4602 | ata_dev_printk(dev, KERN_WARNING, | 4133 | ata_dev_printk(dev, KERN_WARNING, |
4603 | "limited to UDMA/33 due to 40-wire cable\n"); | 4134 | "limited to UDMA/33 due to 40-wire cable\n"); |
4604 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); | 4135 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
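The masking above works because the UDMA portion of xfer_mask carries one bit per UDMA mode starting at ATA_SHIFT_UDMA, so 0xF8 selects bits 3..7 of that field, i.e. UDMA3 (UDMA/44) through UDMA7; clearing them caps the device at UDMA2 (UDMA/33). A standalone sketch of the arithmetic, assuming the shift value of 8 used by libata of this era:

    #include <stdio.h>

    #define ATA_SHIFT_UDMA 8                        /* assumed value, for illustration */

    int main(void)
    {
            unsigned int xfer_mask = 0xFF << ATA_SHIFT_UDMA;  /* UDMA0..UDMA7 */

            if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))         /* UDMA/44+ available? */
                    xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);   /* cap at UDMA/33 */

            /* prints 0x07, i.e. only UDMA0..UDMA2 remain */
            printf("UDMA bits left: 0x%02x\n", (xfer_mask >> ATA_SHIFT_UDMA) & 0xFF);
            return 0;
    }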
@@ -4759,112 +4290,6 @@ void ata_sg_clean(struct ata_queued_cmd *qc) | |||
4759 | } | 4290 | } |
4760 | 4291 | ||
4761 | /** | 4292 | /** |
4762 | * ata_fill_sg - Fill PCI IDE PRD table | ||
4763 | * @qc: Metadata associated with taskfile to be transferred | ||
4764 | * | ||
4765 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
4766 | * associated with the current disk command. | ||
4767 | * | ||
4768 | * LOCKING: | ||
4769 | * spin_lock_irqsave(host lock) | ||
4770 | * | ||
4771 | */ | ||
4772 | static void ata_fill_sg(struct ata_queued_cmd *qc) | ||
4773 | { | ||
4774 | struct ata_port *ap = qc->ap; | ||
4775 | struct scatterlist *sg; | ||
4776 | unsigned int si, pi; | ||
4777 | |||
4778 | pi = 0; | ||
4779 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
4780 | u32 addr, offset; | ||
4781 | u32 sg_len, len; | ||
4782 | |||
4783 | /* determine if physical DMA addr spans 64K boundary. | ||
4784 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
4785 | * truncate dma_addr_t to u32. | ||
4786 | */ | ||
4787 | addr = (u32) sg_dma_address(sg); | ||
4788 | sg_len = sg_dma_len(sg); | ||
4789 | |||
4790 | while (sg_len) { | ||
4791 | offset = addr & 0xffff; | ||
4792 | len = sg_len; | ||
4793 | if ((offset + sg_len) > 0x10000) | ||
4794 | len = 0x10000 - offset; | ||
4795 | |||
4796 | ap->prd[pi].addr = cpu_to_le32(addr); | ||
4797 | ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff); | ||
4798 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
4799 | |||
4800 | pi++; | ||
4801 | sg_len -= len; | ||
4802 | addr += len; | ||
4803 | } | ||
4804 | } | ||
4805 | |||
4806 | ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
4807 | } | ||
4808 | |||
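The loop in ata_fill_sg() above never emits a PRD entry that crosses a 64 KiB boundary; a segment that straddles one is split at the boundary. A standalone trace of that splitting logic for a single segment (not kernel code):

    #include <stdio.h>

    /* Mirrors the boundary-splitting loop of ata_fill_sg() for one segment. */
    int main(void)
    {
            unsigned int addr = 0x0000ff00, sg_len = 0x400;   /* crosses 0x10000 */

            while (sg_len) {
                    unsigned int offset = addr & 0xffff;
                    unsigned int len = sg_len;

                    if (offset + sg_len > 0x10000)
                            len = 0x10000 - offset;           /* stop at the boundary */

                    printf("PRD: addr=0x%08x len=0x%x\n", addr, len);
                    sg_len -= len;
                    addr += len;
            }
            return 0;       /* emits 0x100 bytes, then the remaining 0x300 */
    }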
4809 | /** | ||
4810 | * ata_fill_sg_dumb - Fill PCI IDE PRD table | ||
4811 | * @qc: Metadata associated with taskfile to be transferred | ||
4812 | * | ||
4813 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
4814 | * associated with the current disk command. Perform the fill | ||
4815 | * so that we avoid writing any length 64K records for | ||
4816 | * controllers that don't follow the spec. | ||
4817 | * | ||
4818 | * LOCKING: | ||
4819 | * spin_lock_irqsave(host lock) | ||
4820 | * | ||
4821 | */ | ||
4822 | static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) | ||
4823 | { | ||
4824 | struct ata_port *ap = qc->ap; | ||
4825 | struct scatterlist *sg; | ||
4826 | unsigned int si, pi; | ||
4827 | |||
4828 | pi = 0; | ||
4829 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
4830 | u32 addr, offset; | ||
4831 | u32 sg_len, len, blen; | ||
4832 | |||
4833 | /* determine if physical DMA addr spans 64K boundary. | ||
4834 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
4835 | * truncate dma_addr_t to u32. | ||
4836 | */ | ||
4837 | addr = (u32) sg_dma_address(sg); | ||
4838 | sg_len = sg_dma_len(sg); | ||
4839 | |||
4840 | while (sg_len) { | ||
4841 | offset = addr & 0xffff; | ||
4842 | len = sg_len; | ||
4843 | if ((offset + sg_len) > 0x10000) | ||
4844 | len = 0x10000 - offset; | ||
4845 | |||
4846 | blen = len & 0xffff; | ||
4847 | ap->prd[pi].addr = cpu_to_le32(addr); | ||
4848 | if (blen == 0) { | ||
4849 | /* Some PATA chipsets like the CS5530 can't | ||
4850 | cope with 0x0000 meaning 64K as the spec says */ | ||
4851 | ap->prd[pi].flags_len = cpu_to_le32(0x8000); | ||
4852 | blen = 0x8000; | ||
4853 | ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); | ||
4854 | } | ||
4855 | ap->prd[pi].flags_len = cpu_to_le32(blen); | ||
4856 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
4857 | |||
4858 | pi++; | ||
4859 | sg_len -= len; | ||
4860 | addr += len; | ||
4861 | } | ||
4862 | } | ||
4863 | |||
4864 | ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
4865 | } | ||
4866 | |||
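The "dumb" variant exists because a PRD length field of 0x0000 means 64 KiB per the spec, and chips such as the CS5530 mishandle that encoding, so a full 64 KiB run is emitted as two 32 KiB entries instead. A standalone sketch of just that encoding decision:

    #include <stdio.h>

    /* Sketch: how the "dumb" PRD fill encodes a chunk length. */
    static void emit_prd(unsigned int addr, unsigned int len)
    {
            unsigned int blen = len & 0xffff;

            if (blen == 0) {                        /* 0x0000 would mean 64 KiB */
                    printf("PRD: addr=0x%08x len=0x8000\n", addr);
                    printf("PRD: addr=0x%08x len=0x8000\n", addr + 0x8000);
                    return;
            }
            printf("PRD: addr=0x%08x len=0x%x\n", addr, blen);
    }

    int main(void)
    {
            emit_prd(0x00100000, 0x10000);          /* split into two 32 KiB entries */
            emit_prd(0x00200000, 0x2000);           /* emitted as-is */
            return 0;
    }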
4867 | /** | ||
4868 | * ata_check_atapi_dma - Check whether ATAPI DMA can be supported | 4293 | * ata_check_atapi_dma - Check whether ATAPI DMA can be supported |
4869 | * @qc: Metadata associated with taskfile to check | 4294 | * @qc: Metadata associated with taskfile to check |
4870 | * | 4295 | * |
@@ -4924,40 +4349,6 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) | |||
4924 | return ATA_DEFER_LINK; | 4349 | return ATA_DEFER_LINK; |
4925 | } | 4350 | } |
4926 | 4351 | ||
4927 | /** | ||
4928 | * ata_qc_prep - Prepare taskfile for submission | ||
4929 | * @qc: Metadata associated with taskfile to be prepared | ||
4930 | * | ||
4931 | * Prepare ATA taskfile for submission. | ||
4932 | * | ||
4933 | * LOCKING: | ||
4934 | * spin_lock_irqsave(host lock) | ||
4935 | */ | ||
4936 | void ata_qc_prep(struct ata_queued_cmd *qc) | ||
4937 | { | ||
4938 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
4939 | return; | ||
4940 | |||
4941 | ata_fill_sg(qc); | ||
4942 | } | ||
4943 | |||
4944 | /** | ||
4945 | * ata_dumb_qc_prep - Prepare taskfile for submission | ||
4946 | * @qc: Metadata associated with taskfile to be prepared | ||
4947 | * | ||
4948 | * Prepare ATA taskfile for submission. | ||
4949 | * | ||
4950 | * LOCKING: | ||
4951 | * spin_lock_irqsave(host lock) | ||
4952 | */ | ||
4953 | void ata_dumb_qc_prep(struct ata_queued_cmd *qc) | ||
4954 | { | ||
4955 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
4956 | return; | ||
4957 | |||
4958 | ata_fill_sg_dumb(qc); | ||
4959 | } | ||
4960 | |||
4961 | void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } | 4352 | void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } |
4962 | 4353 | ||
4963 | /** | 4354 | /** |
@@ -5036,698 +4427,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) | |||
5036 | } | 4427 | } |
5037 | 4428 | ||
5038 | /** | 4429 | /** |
5039 | * ata_data_xfer - Transfer data by PIO | ||
5040 | * @dev: device to target | ||
5041 | * @buf: data buffer | ||
5042 | * @buflen: buffer length | ||
5043 | * @rw: read/write | ||
5044 | * | ||
5045 | * Transfer data from/to the device data register by PIO. | ||
5046 | * | ||
5047 | * LOCKING: | ||
5048 | * Inherited from caller. | ||
5049 | * | ||
5050 | * RETURNS: | ||
5051 | * Bytes consumed. | ||
5052 | */ | ||
5053 | unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf, | ||
5054 | unsigned int buflen, int rw) | ||
5055 | { | ||
5056 | struct ata_port *ap = dev->link->ap; | ||
5057 | void __iomem *data_addr = ap->ioaddr.data_addr; | ||
5058 | unsigned int words = buflen >> 1; | ||
5059 | |||
5060 | /* Transfer multiple of 2 bytes */ | ||
5061 | if (rw == READ) | ||
5062 | ioread16_rep(data_addr, buf, words); | ||
5063 | else | ||
5064 | iowrite16_rep(data_addr, buf, words); | ||
5065 | |||
5066 | /* Transfer trailing 1 byte, if any. */ | ||
5067 | if (unlikely(buflen & 0x01)) { | ||
5068 | __le16 align_buf[1] = { 0 }; | ||
5069 | unsigned char *trailing_buf = buf + buflen - 1; | ||
5070 | |||
5071 | if (rw == READ) { | ||
5072 | align_buf[0] = cpu_to_le16(ioread16(data_addr)); | ||
5073 | memcpy(trailing_buf, align_buf, 1); | ||
5074 | } else { | ||
5075 | memcpy(align_buf, trailing_buf, 1); | ||
5076 | iowrite16(le16_to_cpu(align_buf[0]), data_addr); | ||
5077 | } | ||
5078 | words++; | ||
5079 | } | ||
5080 | |||
5081 | return words << 1; | ||
5082 | } | ||
5083 | |||
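Because the data register is 16 bits wide, ata_data_xfer() moves whole words and has to carry an odd trailing byte as one padded word, which is why the returned byte count is rounded up. A quick standalone sketch of that accounting (no MMIO involved):

    #include <stdio.h>

    int main(void)
    {
            unsigned int buflen = 7;
            unsigned int words = buflen >> 1;       /* 3 full 16-bit words */

            if (buflen & 0x01)
                    words++;                        /* odd trailing byte moves as a padded word */

            printf("consumed %u bytes for a %u-byte buffer\n", words << 1, buflen);
            return 0;                               /* prints 8 for 7 */
    }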
5084 | /** | ||
5085 | * ata_data_xfer_noirq - Transfer data by PIO | ||
5086 | * @dev: device to target | ||
5087 | * @buf: data buffer | ||
5088 | * @buflen: buffer length | ||
5089 | * @rw: read/write | ||
5090 | * | ||
5091 | * Transfer data from/to the device data register by PIO. Do the | ||
5092 | * transfer with interrupts disabled. | ||
5093 | * | ||
5094 | * LOCKING: | ||
5095 | * Inherited from caller. | ||
5096 | * | ||
5097 | * RETURNS: | ||
5098 | * Bytes consumed. | ||
5099 | */ | ||
5100 | unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, | ||
5101 | unsigned int buflen, int rw) | ||
5102 | { | ||
5103 | unsigned long flags; | ||
5104 | unsigned int consumed; | ||
5105 | |||
5106 | local_irq_save(flags); | ||
5107 | consumed = ata_data_xfer(dev, buf, buflen, rw); | ||
5108 | local_irq_restore(flags); | ||
5109 | |||
5110 | return consumed; | ||
5111 | } | ||
5112 | |||
5113 | |||
5114 | /** | ||
5115 | * ata_pio_sector - Transfer a sector of data. | ||
5116 | * @qc: Command in progress | ||
5117 | * | ||
5118 | * Transfer qc->sect_size bytes of data from/to the ATA device. | ||
5119 | * | ||
5120 | * LOCKING: | ||
5121 | * Inherited from caller. | ||
5122 | */ | ||
5123 | |||
5124 | static void ata_pio_sector(struct ata_queued_cmd *qc) | ||
5125 | { | ||
5126 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
5127 | struct ata_port *ap = qc->ap; | ||
5128 | struct page *page; | ||
5129 | unsigned int offset; | ||
5130 | unsigned char *buf; | ||
5131 | |||
5132 | if (qc->curbytes == qc->nbytes - qc->sect_size) | ||
5133 | ap->hsm_task_state = HSM_ST_LAST; | ||
5134 | |||
5135 | page = sg_page(qc->cursg); | ||
5136 | offset = qc->cursg->offset + qc->cursg_ofs; | ||
5137 | |||
5138 | /* get the current page and offset */ | ||
5139 | page = nth_page(page, (offset >> PAGE_SHIFT)); | ||
5140 | offset %= PAGE_SIZE; | ||
5141 | |||
5142 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | ||
5143 | |||
5144 | if (PageHighMem(page)) { | ||
5145 | unsigned long flags; | ||
5146 | |||
5147 | /* FIXME: use a bounce buffer */ | ||
5148 | local_irq_save(flags); | ||
5149 | buf = kmap_atomic(page, KM_IRQ0); | ||
5150 | |||
5151 | /* do the actual data transfer */ | ||
5152 | ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); | ||
5153 | |||
5154 | kunmap_atomic(buf, KM_IRQ0); | ||
5155 | local_irq_restore(flags); | ||
5156 | } else { | ||
5157 | buf = page_address(page); | ||
5158 | ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); | ||
5159 | } | ||
5160 | |||
5161 | qc->curbytes += qc->sect_size; | ||
5162 | qc->cursg_ofs += qc->sect_size; | ||
5163 | |||
5164 | if (qc->cursg_ofs == qc->cursg->length) { | ||
5165 | qc->cursg = sg_next(qc->cursg); | ||
5166 | qc->cursg_ofs = 0; | ||
5167 | } | ||
5168 | } | ||
5169 | |||
5170 | /** | ||
5171 | * ata_pio_sectors - Transfer one or many sectors. | ||
5172 | * @qc: Command in progress | ||
5173 | * | ||
5174 | * Transfer one or many sectors of data from/to the | ||
5175 | * ATA device for the DRQ request. | ||
5176 | * | ||
5177 | * LOCKING: | ||
5178 | * Inherited from caller. | ||
5179 | */ | ||
5180 | |||
5181 | static void ata_pio_sectors(struct ata_queued_cmd *qc) | ||
5182 | { | ||
5183 | if (is_multi_taskfile(&qc->tf)) { | ||
5184 | /* READ/WRITE MULTIPLE */ | ||
5185 | unsigned int nsect; | ||
5186 | |||
5187 | WARN_ON(qc->dev->multi_count == 0); | ||
5188 | |||
5189 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, | ||
5190 | qc->dev->multi_count); | ||
5191 | while (nsect--) | ||
5192 | ata_pio_sector(qc); | ||
5193 | } else | ||
5194 | ata_pio_sector(qc); | ||
5195 | |||
5196 | ata_altstatus(qc->ap); /* flush */ | ||
5197 | } | ||
5198 | |||
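As a worked example of the nsect computation above: with 512-byte sectors, a device multi_count of 8 and 3 KiB still to move, nsect = min(3072 / 512, 8) = 6, so six ata_pio_sector() calls service a single DRQ assertion; for plain READ/WRITE SECTOR(S) exactly one sector is moved per DRQ.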
5199 | /** | ||
5200 | * atapi_send_cdb - Write CDB bytes to hardware | ||
5201 | * @ap: Port to which ATAPI device is attached. | ||
5202 | * @qc: Taskfile currently active | ||
5203 | * | ||
5204 | * When the device has indicated its readiness to accept | ||
5205 | * a CDB, this function is called. Send the CDB. | ||
5206 | * | ||
5207 | * LOCKING: | ||
5208 | * caller. | ||
5209 | */ | ||
5210 | |||
5211 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
5212 | { | ||
5213 | /* send SCSI cdb */ | ||
5214 | DPRINTK("send cdb\n"); | ||
5215 | WARN_ON(qc->dev->cdb_len < 12); | ||
5216 | |||
5217 | ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); | ||
5218 | ata_altstatus(ap); /* flush */ | ||
5219 | |||
5220 | switch (qc->tf.protocol) { | ||
5221 | case ATAPI_PROT_PIO: | ||
5222 | ap->hsm_task_state = HSM_ST; | ||
5223 | break; | ||
5224 | case ATAPI_PROT_NODATA: | ||
5225 | ap->hsm_task_state = HSM_ST_LAST; | ||
5226 | break; | ||
5227 | case ATAPI_PROT_DMA: | ||
5228 | ap->hsm_task_state = HSM_ST_LAST; | ||
5229 | /* initiate bmdma */ | ||
5230 | ap->ops->bmdma_start(qc); | ||
5231 | break; | ||
5232 | } | ||
5233 | } | ||
5234 | |||
5235 | /** | ||
5236 | * __atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
5237 | * @qc: Command in progress | ||
5238 | * @bytes: number of bytes | ||
5239 | * | ||
5240 | * Transfer data from/to the ATAPI device. | ||
5241 | * | ||
5242 | * LOCKING: | ||
5243 | * Inherited from caller. | ||
5244 | * | ||
5245 | */ | ||
5246 | static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | ||
5247 | { | ||
5248 | int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; | ||
5249 | struct ata_port *ap = qc->ap; | ||
5250 | struct ata_device *dev = qc->dev; | ||
5251 | struct ata_eh_info *ehi = &dev->link->eh_info; | ||
5252 | struct scatterlist *sg; | ||
5253 | struct page *page; | ||
5254 | unsigned char *buf; | ||
5255 | unsigned int offset, count, consumed; | ||
5256 | |||
5257 | next_sg: | ||
5258 | sg = qc->cursg; | ||
5259 | if (unlikely(!sg)) { | ||
5260 | ata_ehi_push_desc(ehi, "unexpected or too much trailing data " | ||
5261 | "buf=%u cur=%u bytes=%u", | ||
5262 | qc->nbytes, qc->curbytes, bytes); | ||
5263 | return -1; | ||
5264 | } | ||
5265 | |||
5266 | page = sg_page(sg); | ||
5267 | offset = sg->offset + qc->cursg_ofs; | ||
5268 | |||
5269 | /* get the current page and offset */ | ||
5270 | page = nth_page(page, (offset >> PAGE_SHIFT)); | ||
5271 | offset %= PAGE_SIZE; | ||
5272 | |||
5273 | /* don't overrun current sg */ | ||
5274 | count = min(sg->length - qc->cursg_ofs, bytes); | ||
5275 | |||
5276 | /* don't cross page boundaries */ | ||
5277 | count = min(count, (unsigned int)PAGE_SIZE - offset); | ||
5278 | |||
5279 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | ||
5280 | |||
5281 | if (PageHighMem(page)) { | ||
5282 | unsigned long flags; | ||
5283 | |||
5284 | /* FIXME: use bounce buffer */ | ||
5285 | local_irq_save(flags); | ||
5286 | buf = kmap_atomic(page, KM_IRQ0); | ||
5287 | |||
5288 | /* do the actual data transfer */ | ||
5289 | consumed = ap->ops->data_xfer(dev, buf + offset, count, rw); | ||
5290 | |||
5291 | kunmap_atomic(buf, KM_IRQ0); | ||
5292 | local_irq_restore(flags); | ||
5293 | } else { | ||
5294 | buf = page_address(page); | ||
5295 | consumed = ap->ops->data_xfer(dev, buf + offset, count, rw); | ||
5296 | } | ||
5297 | |||
5298 | bytes -= min(bytes, consumed); | ||
5299 | qc->curbytes += count; | ||
5300 | qc->cursg_ofs += count; | ||
5301 | |||
5302 | if (qc->cursg_ofs == sg->length) { | ||
5303 | qc->cursg = sg_next(qc->cursg); | ||
5304 | qc->cursg_ofs = 0; | ||
5305 | } | ||
5306 | |||
5307 | /* consumed can be larger than count only for the last transfer */ | ||
5308 | WARN_ON(qc->cursg && count != consumed); | ||
5309 | |||
5310 | if (bytes) | ||
5311 | goto next_sg; | ||
5312 | return 0; | ||
5313 | } | ||
5314 | |||
5315 | /** | ||
5316 | * atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
5317 | * @qc: Command in progress | ||
5318 | * | ||
5319 | * Transfer data from/to the ATAPI device. | ||
5320 | * | ||
5321 | * LOCKING: | ||
5322 | * Inherited from caller. | ||
5323 | */ | ||
5324 | |||
5325 | static void atapi_pio_bytes(struct ata_queued_cmd *qc) | ||
5326 | { | ||
5327 | struct ata_port *ap = qc->ap; | ||
5328 | struct ata_device *dev = qc->dev; | ||
5329 | struct ata_eh_info *ehi = &dev->link->eh_info; | ||
5330 | unsigned int ireason, bc_lo, bc_hi, bytes; | ||
5331 | int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; | ||
5332 | |||
5333 | /* Abuse qc->result_tf for temp storage of intermediate TF | ||
5334 | * here to save some kernel stack usage. | ||
5335 | * For normal completion, qc->result_tf is not relevant. For | ||
5336 | * error, qc->result_tf is later overwritten by ata_qc_complete(). | ||
5337 | * So, the correctness of qc->result_tf is not affected. | ||
5338 | */ | ||
5339 | ap->ops->tf_read(ap, &qc->result_tf); | ||
5340 | ireason = qc->result_tf.nsect; | ||
5341 | bc_lo = qc->result_tf.lbam; | ||
5342 | bc_hi = qc->result_tf.lbah; | ||
5343 | bytes = (bc_hi << 8) | bc_lo; | ||
5344 | |||
5345 | /* shall be cleared to zero, indicating xfer of data */ | ||
5346 | if (unlikely(ireason & (1 << 0))) | ||
5347 | goto atapi_check; | ||
5348 | |||
5349 | /* make sure transfer direction matches expected */ | ||
5350 | i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; | ||
5351 | if (unlikely(do_write != i_write)) | ||
5352 | goto atapi_check; | ||
5353 | |||
5354 | if (unlikely(!bytes)) | ||
5355 | goto atapi_check; | ||
5356 | |||
5357 | VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); | ||
5358 | |||
5359 | if (unlikely(__atapi_pio_bytes(qc, bytes))) | ||
5360 | goto err_out; | ||
5361 | ata_altstatus(ap); /* flush */ | ||
5362 | |||
5363 | return; | ||
5364 | |||
5365 | atapi_check: | ||
5366 | ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", | ||
5367 | ireason, bytes); | ||
5368 | err_out: | ||
5369 | qc->err_mask |= AC_ERR_HSM; | ||
5370 | ap->hsm_task_state = HSM_ST_ERR; | ||
5371 | } | ||
5372 | |||
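In the DRQ handshake above, the interrupt reason comes back in the Sector Count register and the byte count in LBA Mid/High: bit 0 (C/D) must be clear for a data phase and bit 1 (I/O) gives the direction. A standalone decode sketch using sample register values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nsect = 0x02, lbam = 0x00, lbah = 0x02;   /* sample taskfile values */
            unsigned int ireason = nsect;
            unsigned int bytes = (lbah << 8) | lbam;               /* 0x0200 = 512 bytes */
            int cod = ireason & (1 << 0);          /* set: command packet, clear: data */
            int io  = ireason & (1 << 1);          /* set: device-to-host */

            printf("bytes=%u, %s, %s\n", bytes,
                   cod ? "command phase" : "data phase",
                   io ? "read (to host)" : "write (to device)");
            return 0;
    }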
5373 | /** | ||
5374 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. | ||
5375 | * @ap: the target ata_port | ||
5376 | * @qc: qc in progress | ||
5377 | * | ||
5378 | * RETURNS: | ||
5379 | * 1 if ok in workqueue, 0 otherwise. | ||
5380 | */ | ||
5381 | |||
5382 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
5383 | { | ||
5384 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
5385 | return 1; | ||
5386 | |||
5387 | if (ap->hsm_task_state == HSM_ST_FIRST) { | ||
5388 | if (qc->tf.protocol == ATA_PROT_PIO && | ||
5389 | (qc->tf.flags & ATA_TFLAG_WRITE)) | ||
5390 | return 1; | ||
5391 | |||
5392 | if (ata_is_atapi(qc->tf.protocol) && | ||
5393 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
5394 | return 1; | ||
5395 | } | ||
5396 | |||
5397 | return 0; | ||
5398 | } | ||
5399 | |||
5400 | /** | ||
5401 | * ata_hsm_qc_complete - finish a qc running on standard HSM | ||
5402 | * @qc: Command to complete | ||
5403 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
5404 | * | ||
5405 | * Finish @qc which is running on standard HSM. | ||
5406 | * | ||
5407 | * LOCKING: | ||
5408 | * If @in_wq is zero, spin_lock_irqsave(host lock). | ||
5409 | * Otherwise, none on entry and grabs host lock. | ||
5410 | */ | ||
5411 | static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | ||
5412 | { | ||
5413 | struct ata_port *ap = qc->ap; | ||
5414 | unsigned long flags; | ||
5415 | |||
5416 | if (ap->ops->error_handler) { | ||
5417 | if (in_wq) { | ||
5418 | spin_lock_irqsave(ap->lock, flags); | ||
5419 | |||
5420 | /* EH might have kicked in while host lock is | ||
5421 | * released. | ||
5422 | */ | ||
5423 | qc = ata_qc_from_tag(ap, qc->tag); | ||
5424 | if (qc) { | ||
5425 | if (likely(!(qc->err_mask & AC_ERR_HSM))) { | ||
5426 | ap->ops->irq_on(ap); | ||
5427 | ata_qc_complete(qc); | ||
5428 | } else | ||
5429 | ata_port_freeze(ap); | ||
5430 | } | ||
5431 | |||
5432 | spin_unlock_irqrestore(ap->lock, flags); | ||
5433 | } else { | ||
5434 | if (likely(!(qc->err_mask & AC_ERR_HSM))) | ||
5435 | ata_qc_complete(qc); | ||
5436 | else | ||
5437 | ata_port_freeze(ap); | ||
5438 | } | ||
5439 | } else { | ||
5440 | if (in_wq) { | ||
5441 | spin_lock_irqsave(ap->lock, flags); | ||
5442 | ap->ops->irq_on(ap); | ||
5443 | ata_qc_complete(qc); | ||
5444 | spin_unlock_irqrestore(ap->lock, flags); | ||
5445 | } else | ||
5446 | ata_qc_complete(qc); | ||
5447 | } | ||
5448 | } | ||
5449 | |||
5450 | /** | ||
5451 | * ata_hsm_move - move the HSM to the next state. | ||
5452 | * @ap: the target ata_port | ||
5453 | * @qc: qc in progress | ||
5454 | * @status: current device status | ||
5455 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
5456 | * | ||
5457 | * RETURNS: | ||
5458 | * 1 when poll next status needed, 0 otherwise. | ||
5459 | */ | ||
5460 | int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
5461 | u8 status, int in_wq) | ||
5462 | { | ||
5463 | unsigned long flags = 0; | ||
5464 | int poll_next; | ||
5465 | |||
5466 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | ||
5467 | |||
5468 | /* Make sure ata_qc_issue_prot() does not throw things | ||
5469 | * like DMA polling into the workqueue. Notice that | ||
5470 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | ||
5471 | */ | ||
5472 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | ||
5473 | |||
5474 | fsm_start: | ||
5475 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | ||
5476 | ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); | ||
5477 | |||
5478 | switch (ap->hsm_task_state) { | ||
5479 | case HSM_ST_FIRST: | ||
5480 | /* Send first data block or PACKET CDB */ | ||
5481 | |||
5482 | /* If polling, we will stay in the work queue after | ||
5483 | * sending the data. Otherwise, interrupt handler | ||
5484 | * takes over after sending the data. | ||
5485 | */ | ||
5486 | poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); | ||
5487 | |||
5488 | /* check device status */ | ||
5489 | if (unlikely((status & ATA_DRQ) == 0)) { | ||
5490 | /* handle BSY=0, DRQ=0 as error */ | ||
5491 | if (likely(status & (ATA_ERR | ATA_DF))) | ||
5492 | /* device stops HSM for abort/error */ | ||
5493 | qc->err_mask |= AC_ERR_DEV; | ||
5494 | else | ||
5495 | /* HSM violation. Let EH handle this */ | ||
5496 | qc->err_mask |= AC_ERR_HSM; | ||
5497 | |||
5498 | ap->hsm_task_state = HSM_ST_ERR; | ||
5499 | goto fsm_start; | ||
5500 | } | ||
5501 | |||
5502 | /* Device should not ask for data transfer (DRQ=1) | ||
5503 | * when it finds something wrong. | ||
5504 | * We ignore DRQ here and stop the HSM by | ||
5505 | * changing hsm_task_state to HSM_ST_ERR and | ||
5506 | * let the EH abort the command or reset the device. | ||
5507 | */ | ||
5508 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
5509 | /* Some ATAPI tape drives forget to clear the ERR bit | ||
5510 | * when doing the next command (mostly request sense). | ||
5511 | * We ignore ERR here as a workaround and proceed with | ||
5512 | * sending the CDB. | ||
5513 | */ | ||
5514 | if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { | ||
5515 | ata_port_printk(ap, KERN_WARNING, | ||
5516 | "DRQ=1 with device error, " | ||
5517 | "dev_stat 0x%X\n", status); | ||
5518 | qc->err_mask |= AC_ERR_HSM; | ||
5519 | ap->hsm_task_state = HSM_ST_ERR; | ||
5520 | goto fsm_start; | ||
5521 | } | ||
5522 | } | ||
5523 | |||
5524 | /* Send the CDB (atapi) or the first data block (ata pio out). | ||
5525 | * During the state transition, interrupt handler shouldn't | ||
5526 | * be invoked before the data transfer is complete and | ||
5527 | * hsm_task_state is changed. Hence, the following locking. | ||
5528 | */ | ||
5529 | if (in_wq) | ||
5530 | spin_lock_irqsave(ap->lock, flags); | ||
5531 | |||
5532 | if (qc->tf.protocol == ATA_PROT_PIO) { | ||
5533 | /* PIO data out protocol. | ||
5534 | * send first data block. | ||
5535 | */ | ||
5536 | |||
5537 | /* ata_pio_sectors() might change the state | ||
5538 | * to HSM_ST_LAST. so, the state is changed here | ||
5539 | * before ata_pio_sectors(). | ||
5540 | */ | ||
5541 | ap->hsm_task_state = HSM_ST; | ||
5542 | ata_pio_sectors(qc); | ||
5543 | } else | ||
5544 | /* send CDB */ | ||
5545 | atapi_send_cdb(ap, qc); | ||
5546 | |||
5547 | if (in_wq) | ||
5548 | spin_unlock_irqrestore(ap->lock, flags); | ||
5549 | |||
5550 | /* if polling, ata_pio_task() handles the rest. | ||
5551 | * otherwise, interrupt handler takes over from here. | ||
5552 | */ | ||
5553 | break; | ||
5554 | |||
5555 | case HSM_ST: | ||
5556 | /* complete command or read/write the data register */ | ||
5557 | if (qc->tf.protocol == ATAPI_PROT_PIO) { | ||
5558 | /* ATAPI PIO protocol */ | ||
5559 | if ((status & ATA_DRQ) == 0) { | ||
5560 | /* No more data to transfer or device error. | ||
5561 | * Device error will be tagged in HSM_ST_LAST. | ||
5562 | */ | ||
5563 | ap->hsm_task_state = HSM_ST_LAST; | ||
5564 | goto fsm_start; | ||
5565 | } | ||
5566 | |||
5567 | /* Device should not ask for data transfer (DRQ=1) | ||
5568 | * when it finds something wrong. | ||
5569 | * We ignore DRQ here and stop the HSM by | ||
5570 | * changing hsm_task_state to HSM_ST_ERR and | ||
5571 | * let the EH abort the command or reset the device. | ||
5572 | */ | ||
5573 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
5574 | ata_port_printk(ap, KERN_WARNING, "DRQ=1 with " | ||
5575 | "device error, dev_stat 0x%X\n", | ||
5576 | status); | ||
5577 | qc->err_mask |= AC_ERR_HSM; | ||
5578 | ap->hsm_task_state = HSM_ST_ERR; | ||
5579 | goto fsm_start; | ||
5580 | } | ||
5581 | |||
5582 | atapi_pio_bytes(qc); | ||
5583 | |||
5584 | if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) | ||
5585 | /* bad ireason reported by device */ | ||
5586 | goto fsm_start; | ||
5587 | |||
5588 | } else { | ||
5589 | /* ATA PIO protocol */ | ||
5590 | if (unlikely((status & ATA_DRQ) == 0)) { | ||
5591 | /* handle BSY=0, DRQ=0 as error */ | ||
5592 | if (likely(status & (ATA_ERR | ATA_DF))) | ||
5593 | /* device stops HSM for abort/error */ | ||
5594 | qc->err_mask |= AC_ERR_DEV; | ||
5595 | else | ||
5596 | /* HSM violation. Let EH handle this. | ||
5597 | * Phantom devices also trigger this | ||
5598 | * condition. Mark hint. | ||
5599 | */ | ||
5600 | qc->err_mask |= AC_ERR_HSM | | ||
5601 | AC_ERR_NODEV_HINT; | ||
5602 | |||
5603 | ap->hsm_task_state = HSM_ST_ERR; | ||
5604 | goto fsm_start; | ||
5605 | } | ||
5606 | |||
5607 | /* For PIO reads, some devices may ask for | ||
5608 | * data transfer (DRQ=1) along with ERR=1. | ||
5609 | * We respect DRQ here and transfer one | ||
5610 | * block of junk data before changing the | ||
5611 | * hsm_task_state to HSM_ST_ERR. | ||
5612 | * | ||
5613 | * For PIO writes, ERR=1 DRQ=1 doesn't make | ||
5614 | * sense since the data block has been | ||
5615 | * transferred to the device. | ||
5616 | */ | ||
5617 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
5618 | /* data might be corrupted */ | ||
5619 | qc->err_mask |= AC_ERR_DEV; | ||
5620 | |||
5621 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | ||
5622 | ata_pio_sectors(qc); | ||
5623 | status = ata_wait_idle(ap); | ||
5624 | } | ||
5625 | |||
5626 | if (status & (ATA_BUSY | ATA_DRQ)) | ||
5627 | qc->err_mask |= AC_ERR_HSM; | ||
5628 | |||
5629 | /* ata_pio_sectors() might change the | ||
5630 | * state to HSM_ST_LAST. so, the state | ||
5631 | * is changed after ata_pio_sectors(). | ||
5632 | */ | ||
5633 | ap->hsm_task_state = HSM_ST_ERR; | ||
5634 | goto fsm_start; | ||
5635 | } | ||
5636 | |||
5637 | ata_pio_sectors(qc); | ||
5638 | |||
5639 | if (ap->hsm_task_state == HSM_ST_LAST && | ||
5640 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | ||
5641 | /* all data read */ | ||
5642 | status = ata_wait_idle(ap); | ||
5643 | goto fsm_start; | ||
5644 | } | ||
5645 | } | ||
5646 | |||
5647 | poll_next = 1; | ||
5648 | break; | ||
5649 | |||
5650 | case HSM_ST_LAST: | ||
5651 | if (unlikely(!ata_ok(status))) { | ||
5652 | qc->err_mask |= __ac_err_mask(status); | ||
5653 | ap->hsm_task_state = HSM_ST_ERR; | ||
5654 | goto fsm_start; | ||
5655 | } | ||
5656 | |||
5657 | /* no more data to transfer */ | ||
5658 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | ||
5659 | ap->print_id, qc->dev->devno, status); | ||
5660 | |||
5661 | WARN_ON(qc->err_mask); | ||
5662 | |||
5663 | ap->hsm_task_state = HSM_ST_IDLE; | ||
5664 | |||
5665 | /* complete taskfile transaction */ | ||
5666 | ata_hsm_qc_complete(qc, in_wq); | ||
5667 | |||
5668 | poll_next = 0; | ||
5669 | break; | ||
5670 | |||
5671 | case HSM_ST_ERR: | ||
5672 | /* make sure qc->err_mask is available to | ||
5673 | * know what's wrong and recover | ||
5674 | */ | ||
5675 | WARN_ON(qc->err_mask == 0); | ||
5676 | |||
5677 | ap->hsm_task_state = HSM_ST_IDLE; | ||
5678 | |||
5679 | /* complete taskfile transaction */ | ||
5680 | ata_hsm_qc_complete(qc, in_wq); | ||
5681 | |||
5682 | poll_next = 0; | ||
5683 | break; | ||
5684 | default: | ||
5685 | poll_next = 0; | ||
5686 | BUG(); | ||
5687 | } | ||
5688 | |||
5689 | return poll_next; | ||
5690 | } | ||
5691 | |||
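Outside interrupt context, a caller keeps feeding ata_hsm_move() fresh status until it reports that no further polling is needed; ata_pio_task() below does exactly that, with a short busy-wait heuristic in front. Stripped of the heuristic, the driving loop is roughly as follows (a sketch, not the actual helper):

    /* Sketch: driving a polled command through the HSM from the port task
     * (in_wq=1), re-reading status between moves. */
    static void poll_hsm(struct ata_port *ap, struct ata_queued_cmd *qc)
    {
            int poll_next;

            do {
                    u8 status = ata_chk_status(ap); /* also clears INTRQ */
                    poll_next = ata_hsm_move(ap, qc, status, 1);
            } while (poll_next);
    }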
5692 | static void ata_pio_task(struct work_struct *work) | ||
5693 | { | ||
5694 | struct ata_port *ap = | ||
5695 | container_of(work, struct ata_port, port_task.work); | ||
5696 | struct ata_queued_cmd *qc = ap->port_task_data; | ||
5697 | u8 status; | ||
5698 | int poll_next; | ||
5699 | |||
5700 | fsm_start: | ||
5701 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); | ||
5702 | |||
5703 | /* | ||
5704 | * This is purely heuristic. This is a fast path. | ||
5705 | * Sometimes when we enter, BSY will be cleared in | ||
5706 | * a chk-status or two. If not, the drive is probably seeking | ||
5707 | * or something. Snooze for a couple msecs, then | ||
5708 | * chk-status again. If still busy, queue delayed work. | ||
5709 | */ | ||
5710 | status = ata_busy_wait(ap, ATA_BUSY, 5); | ||
5711 | if (status & ATA_BUSY) { | ||
5712 | msleep(2); | ||
5713 | status = ata_busy_wait(ap, ATA_BUSY, 10); | ||
5714 | if (status & ATA_BUSY) { | ||
5715 | ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); | ||
5716 | return; | ||
5717 | } | ||
5718 | } | ||
5719 | |||
5720 | /* move the HSM */ | ||
5721 | poll_next = ata_hsm_move(ap, qc, status, 1); | ||
5722 | |||
5723 | /* another command or interrupt handler | ||
5724 | * may be running at this point. | ||
5725 | */ | ||
5726 | if (poll_next) | ||
5727 | goto fsm_start; | ||
5728 | } | ||
5729 | |||
5730 | /** | ||
5731 | * ata_qc_new - Request an available ATA command, for queueing | 4430 | * ata_qc_new - Request an available ATA command, for queueing |
5732 | * @ap: Port associated with device @dev | 4431 | * @ap: Port associated with device @dev |
5733 | * @dev: Device from whom we request an available command structure | 4432 | * @dev: Device from whom we request an available command structure |
@@ -5850,7 +4549,7 @@ static void fill_result_tf(struct ata_queued_cmd *qc) | |||
5850 | struct ata_port *ap = qc->ap; | 4549 | struct ata_port *ap = qc->ap; |
5851 | 4550 | ||
5852 | qc->result_tf.flags = qc->tf.flags; | 4551 | qc->result_tf.flags = qc->tf.flags; |
5853 | ap->ops->tf_read(ap, &qc->result_tf); | 4552 | ap->ops->qc_fill_rtf(qc); |
5854 | } | 4553 | } |
5855 | 4554 | ||
5856 | static void ata_verify_xfer(struct ata_queued_cmd *qc) | 4555 | static void ata_verify_xfer(struct ata_queued_cmd *qc) |
@@ -5960,7 +4659,6 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5960 | * ata_qc_complete_multiple - Complete multiple qcs successfully | 4659 | * ata_qc_complete_multiple - Complete multiple qcs successfully |
5961 | * @ap: port in question | 4660 | * @ap: port in question |
5962 | * @qc_active: new qc_active mask | 4661 | * @qc_active: new qc_active mask |
5963 | * @finish_qc: LLDD callback invoked before completing a qc | ||
5964 | * | 4662 | * |
5965 | * Complete in-flight commands. This function is meant to be | 4663 | * Complete in-flight commands. This function is meant to be |
5966 | * called from low-level driver's interrupt routine to complete | 4664 | * called from low-level driver's interrupt routine to complete |
@@ -5973,8 +4671,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5973 | * RETURNS: | 4671 | * RETURNS: |
5974 | * Number of completed commands on success, -errno otherwise. | 4672 | * Number of completed commands on success, -errno otherwise. |
5975 | */ | 4673 | */ |
5976 | int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, | 4674 | int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) |
5977 | void (*finish_qc)(struct ata_queued_cmd *)) | ||
5978 | { | 4675 | { |
5979 | int nr_done = 0; | 4676 | int nr_done = 0; |
5980 | u32 done_mask; | 4677 | u32 done_mask; |
@@ -5995,8 +4692,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, | |||
5995 | continue; | 4692 | continue; |
5996 | 4693 | ||
5997 | if ((qc = ata_qc_from_tag(ap, i))) { | 4694 | if ((qc = ata_qc_from_tag(ap, i))) { |
5998 | if (finish_qc) | ||
5999 | finish_qc(qc); | ||
6000 | ata_qc_complete(qc); | 4695 | ata_qc_complete(qc); |
6001 | nr_done++; | 4696 | nr_done++; |
6002 | } | 4697 | } |
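With the finish_qc callback gone, an NCQ-capable driver does any per-command fixup itself (for example via the ->qc_fill_rtf hook seen earlier) and simply hands the controller's view of active tags to ata_qc_complete_multiple(). A hedged sketch of the call site in an LLD interrupt path; my_read_active_tags() is hypothetical:

    /* Sketch: completing finished NCQ commands from an LLD interrupt handler. */
    static void my_host_intr(struct ata_port *ap)
    {
            u32 qc_active = my_read_active_tags(ap);        /* hypothetical HW register read */
            int nr_done = ata_qc_complete_multiple(ap, qc_active);

            if (nr_done < 0)                /* unexpected tag state: let EH recover */
                    ata_port_freeze(ap);
    }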
@@ -6055,9 +4750,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
6055 | if (ata_sg_setup(qc)) | 4750 | if (ata_sg_setup(qc)) |
6056 | goto sg_err; | 4751 | goto sg_err; |
6057 | 4752 | ||
6058 | /* if device is sleeping, schedule softreset and abort the link */ | 4753 | /* if device is sleeping, schedule reset and abort the link */ |
6059 | if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { | 4754 | if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { |
6060 | link->eh_info.action |= ATA_EH_SOFTRESET; | 4755 | link->eh_info.action |= ATA_EH_RESET; |
6061 | ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); | 4756 | ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); |
6062 | ata_link_abort(link); | 4757 | ata_link_abort(link); |
6063 | return; | 4758 | return; |
@@ -6077,285 +4772,6 @@ err: | |||
6077 | } | 4772 | } |
6078 | 4773 | ||
6079 | /** | 4774 | /** |
6080 | * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner | ||
6081 | * @qc: command to issue to device | ||
6082 | * | ||
6083 | * Using various libata functions and hooks, this function | ||
6084 | * starts an ATA command. ATA commands are grouped into | ||
6085 | * classes called "protocols", and issuing each type of protocol | ||
6086 | * is slightly different. | ||
6087 | * | ||
6088 | * May be used as the qc_issue() entry in ata_port_operations. | ||
6089 | * | ||
6090 | * LOCKING: | ||
6091 | * spin_lock_irqsave(host lock) | ||
6092 | * | ||
6093 | * RETURNS: | ||
6094 | * Zero on success, AC_ERR_* mask on failure | ||
6095 | */ | ||
6096 | |||
6097 | unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | ||
6098 | { | ||
6099 | struct ata_port *ap = qc->ap; | ||
6100 | |||
6101 | /* Use polling pio if the LLD doesn't handle | ||
6102 | * interrupt driven pio and atapi CDB interrupt. | ||
6103 | */ | ||
6104 | if (ap->flags & ATA_FLAG_PIO_POLLING) { | ||
6105 | switch (qc->tf.protocol) { | ||
6106 | case ATA_PROT_PIO: | ||
6107 | case ATA_PROT_NODATA: | ||
6108 | case ATAPI_PROT_PIO: | ||
6109 | case ATAPI_PROT_NODATA: | ||
6110 | qc->tf.flags |= ATA_TFLAG_POLLING; | ||
6111 | break; | ||
6112 | case ATAPI_PROT_DMA: | ||
6113 | if (qc->dev->flags & ATA_DFLAG_CDB_INTR) | ||
6114 | /* see ata_dma_blacklisted() */ | ||
6115 | BUG(); | ||
6116 | break; | ||
6117 | default: | ||
6118 | break; | ||
6119 | } | ||
6120 | } | ||
6121 | |||
6122 | /* select the device */ | ||
6123 | ata_dev_select(ap, qc->dev->devno, 1, 0); | ||
6124 | |||
6125 | /* start the command */ | ||
6126 | switch (qc->tf.protocol) { | ||
6127 | case ATA_PROT_NODATA: | ||
6128 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
6129 | ata_qc_set_polling(qc); | ||
6130 | |||
6131 | ata_tf_to_host(ap, &qc->tf); | ||
6132 | ap->hsm_task_state = HSM_ST_LAST; | ||
6133 | |||
6134 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
6135 | ata_pio_queue_task(ap, qc, 0); | ||
6136 | |||
6137 | break; | ||
6138 | |||
6139 | case ATA_PROT_DMA: | ||
6140 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
6141 | |||
6142 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | ||
6143 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
6144 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
6145 | ap->hsm_task_state = HSM_ST_LAST; | ||
6146 | break; | ||
6147 | |||
6148 | case ATA_PROT_PIO: | ||
6149 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
6150 | ata_qc_set_polling(qc); | ||
6151 | |||
6152 | ata_tf_to_host(ap, &qc->tf); | ||
6153 | |||
6154 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
6155 | /* PIO data out protocol */ | ||
6156 | ap->hsm_task_state = HSM_ST_FIRST; | ||
6157 | ata_pio_queue_task(ap, qc, 0); | ||
6158 | |||
6159 | /* always send first data block using | ||
6160 | * the ata_pio_task() codepath. | ||
6161 | */ | ||
6162 | } else { | ||
6163 | /* PIO data in protocol */ | ||
6164 | ap->hsm_task_state = HSM_ST; | ||
6165 | |||
6166 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
6167 | ata_pio_queue_task(ap, qc, 0); | ||
6168 | |||
6169 | /* if polling, ata_pio_task() handles the rest. | ||
6170 | * otherwise, interrupt handler takes over from here. | ||
6171 | */ | ||
6172 | } | ||
6173 | |||
6174 | break; | ||
6175 | |||
6176 | case ATAPI_PROT_PIO: | ||
6177 | case ATAPI_PROT_NODATA: | ||
6178 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
6179 | ata_qc_set_polling(qc); | ||
6180 | |||
6181 | ata_tf_to_host(ap, &qc->tf); | ||
6182 | |||
6183 | ap->hsm_task_state = HSM_ST_FIRST; | ||
6184 | |||
6185 | /* send cdb by polling if no cdb interrupt */ | ||
6186 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | ||
6187 | (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
6188 | ata_pio_queue_task(ap, qc, 0); | ||
6189 | break; | ||
6190 | |||
6191 | case ATAPI_PROT_DMA: | ||
6192 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
6193 | |||
6194 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | ||
6195 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
6196 | ap->hsm_task_state = HSM_ST_FIRST; | ||
6197 | |||
6198 | /* send cdb by polling if no cdb interrupt */ | ||
6199 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
6200 | ata_pio_queue_task(ap, qc, 0); | ||
6201 | break; | ||
6202 | |||
6203 | default: | ||
6204 | WARN_ON(1); | ||
6205 | return AC_ERR_SYSTEM; | ||
6206 | } | ||
6207 | |||
6208 | return 0; | ||
6209 | } | ||
6210 | |||
6211 | /** | ||
6212 | * ata_host_intr - Handle host interrupt for given (port, task) | ||
6213 | * @ap: Port on which interrupt arrived (possibly...) | ||
6214 | * @qc: Taskfile currently active in engine | ||
6215 | * | ||
6216 | * Handle host interrupt for given queued command. Currently, | ||
6217 | * only DMA interrupts are handled. All other commands are | ||
6218 | * handled via polling with interrupts disabled (nIEN bit). | ||
6219 | * | ||
6220 | * LOCKING: | ||
6221 | * spin_lock_irqsave(host lock) | ||
6222 | * | ||
6223 | * RETURNS: | ||
6224 | * One if interrupt was handled, zero if not (shared irq). | ||
6225 | */ | ||
6226 | |||
6227 | inline unsigned int ata_host_intr(struct ata_port *ap, | ||
6228 | struct ata_queued_cmd *qc) | ||
6229 | { | ||
6230 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
6231 | u8 status, host_stat = 0; | ||
6232 | |||
6233 | VPRINTK("ata%u: protocol %d task_state %d\n", | ||
6234 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); | ||
6235 | |||
6236 | /* Check whether we are expecting interrupt in this state */ | ||
6237 | switch (ap->hsm_task_state) { | ||
6238 | case HSM_ST_FIRST: | ||
6239 | /* Some pre-ATAPI-4 devices assert INTRQ | ||
6240 | * at this state when ready to receive CDB. | ||
6241 | */ | ||
6242 | |||
6243 | /* Checking the ATA_DFLAG_CDB_INTR flag is enough here. | ||
6244 | * The flag was turned on only for atapi devices. No | ||
6245 | * need to check ata_is_atapi(qc->tf.protocol) again. | ||
6246 | */ | ||
6247 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
6248 | goto idle_irq; | ||
6249 | break; | ||
6250 | case HSM_ST_LAST: | ||
6251 | if (qc->tf.protocol == ATA_PROT_DMA || | ||
6252 | qc->tf.protocol == ATAPI_PROT_DMA) { | ||
6253 | /* check status of DMA engine */ | ||
6254 | host_stat = ap->ops->bmdma_status(ap); | ||
6255 | VPRINTK("ata%u: host_stat 0x%X\n", | ||
6256 | ap->print_id, host_stat); | ||
6257 | |||
6258 | /* if it's not our irq... */ | ||
6259 | if (!(host_stat & ATA_DMA_INTR)) | ||
6260 | goto idle_irq; | ||
6261 | |||
6262 | /* before we do anything else, clear DMA-Start bit */ | ||
6263 | ap->ops->bmdma_stop(qc); | ||
6264 | |||
6265 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
6266 | /* error when transferring data to/from memory */ | ||
6267 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
6268 | ap->hsm_task_state = HSM_ST_ERR; | ||
6269 | } | ||
6270 | } | ||
6271 | break; | ||
6272 | case HSM_ST: | ||
6273 | break; | ||
6274 | default: | ||
6275 | goto idle_irq; | ||
6276 | } | ||
6277 | |||
6278 | /* check altstatus */ | ||
6279 | status = ata_altstatus(ap); | ||
6280 | if (status & ATA_BUSY) | ||
6281 | goto idle_irq; | ||
6282 | |||
6283 | /* check main status, clearing INTRQ */ | ||
6284 | status = ata_chk_status(ap); | ||
6285 | if (unlikely(status & ATA_BUSY)) | ||
6286 | goto idle_irq; | ||
6287 | |||
6288 | /* ack bmdma irq events */ | ||
6289 | ap->ops->irq_clear(ap); | ||
6290 | |||
6291 | ata_hsm_move(ap, qc, status, 0); | ||
6292 | |||
6293 | if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || | ||
6294 | qc->tf.protocol == ATAPI_PROT_DMA)) | ||
6295 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | ||
6296 | |||
6297 | return 1; /* irq handled */ | ||
6298 | |||
6299 | idle_irq: | ||
6300 | ap->stats.idle_irq++; | ||
6301 | |||
6302 | #ifdef ATA_IRQ_TRAP | ||
6303 | if ((ap->stats.idle_irq % 1000) == 0) { | ||
6304 | ata_chk_status(ap); | ||
6305 | ap->ops->irq_clear(ap); | ||
6306 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | ||
6307 | return 1; | ||
6308 | } | ||
6309 | #endif | ||
6310 | return 0; /* irq not handled */ | ||
6311 | } | ||
6312 | |||
6313 | /** | ||
6314 | * ata_interrupt - Default ATA host interrupt handler | ||
6315 | * @irq: irq line (unused) | ||
6316 | * @dev_instance: pointer to our ata_host information structure | ||
6317 | * | ||
6318 | * Default interrupt handler for PCI IDE devices. Calls | ||
6319 | * ata_host_intr() for each port that is not disabled. | ||
6320 | * | ||
6321 | * LOCKING: | ||
6322 | * Obtains host lock during operation. | ||
6323 | * | ||
6324 | * RETURNS: | ||
6325 | * IRQ_NONE or IRQ_HANDLED. | ||
6326 | */ | ||
6327 | |||
6328 | irqreturn_t ata_interrupt(int irq, void *dev_instance) | ||
6329 | { | ||
6330 | struct ata_host *host = dev_instance; | ||
6331 | unsigned int i; | ||
6332 | unsigned int handled = 0; | ||
6333 | unsigned long flags; | ||
6334 | |||
6335 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ | ||
6336 | spin_lock_irqsave(&host->lock, flags); | ||
6337 | |||
6338 | for (i = 0; i < host->n_ports; i++) { | ||
6339 | struct ata_port *ap; | ||
6340 | |||
6341 | ap = host->ports[i]; | ||
6342 | if (ap && | ||
6343 | !(ap->flags & ATA_FLAG_DISABLED)) { | ||
6344 | struct ata_queued_cmd *qc; | ||
6345 | |||
6346 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | ||
6347 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && | ||
6348 | (qc->flags & ATA_QCFLAG_ACTIVE)) | ||
6349 | handled |= ata_host_intr(ap, qc); | ||
6350 | } | ||
6351 | } | ||
6352 | |||
6353 | spin_unlock_irqrestore(&host->lock, flags); | ||
6354 | |||
6355 | return IRQ_RETVAL(handled); | ||
6356 | } | ||
6357 | |||
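This default handler is what an SFF driver typically passes when activating its host; a minimal sketch of the wiring (IRQF_SHARED because legacy IDE interrupt lines are commonly shared, and sht stands for the driver's scsi_host_template):

    /* Sketch: hooking the default SFF interrupt handler at host activation. */
    static int my_activate(struct ata_host *host, int irq,
                           struct scsi_host_template *sht)
    {
            return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED, sht);
    }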
6358 | /** | ||
6359 | * sata_scr_valid - test whether SCRs are accessible | 4775 | * sata_scr_valid - test whether SCRs are accessible |
6360 | * @link: ATA link to test SCR accessibility for | 4776 | * @link: ATA link to test SCR accessibility for |
6361 | * | 4777 | * |
@@ -6513,32 +4929,6 @@ int ata_link_offline(struct ata_link *link) | |||
6513 | return 0; | 4929 | return 0; |
6514 | } | 4930 | } |
6515 | 4931 | ||
6516 | int ata_flush_cache(struct ata_device *dev) | ||
6517 | { | ||
6518 | unsigned int err_mask; | ||
6519 | u8 cmd; | ||
6520 | |||
6521 | if (!ata_try_flush_cache(dev)) | ||
6522 | return 0; | ||
6523 | |||
6524 | if (dev->flags & ATA_DFLAG_FLUSH_EXT) | ||
6525 | cmd = ATA_CMD_FLUSH_EXT; | ||
6526 | else | ||
6527 | cmd = ATA_CMD_FLUSH; | ||
6528 | |||
6529 | /* This is wrong. On a failed flush we get back the LBA of the lost | ||
6530 | sector and we should (assuming it wasn't aborted as unknown) issue | ||
6531 | a further flush command to continue the writeback until it | ||
6532 | does not error */ | ||
6533 | err_mask = ata_do_simple_cmd(dev, cmd); | ||
6534 | if (err_mask) { | ||
6535 | ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); | ||
6536 | return -EIO; | ||
6537 | } | ||
6538 | |||
6539 | return 0; | ||
6540 | } | ||
6541 | |||
6542 | #ifdef CONFIG_PM | 4932 | #ifdef CONFIG_PM |
6543 | static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, | 4933 | static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, |
6544 | unsigned int action, unsigned int ehi_flags, | 4934 | unsigned int action, unsigned int ehi_flags, |
@@ -6634,7 +5024,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6634 | */ | 5024 | */ |
6635 | void ata_host_resume(struct ata_host *host) | 5025 | void ata_host_resume(struct ata_host *host) |
6636 | { | 5026 | { |
6637 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 5027 | ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, |
6638 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 5028 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6639 | host->dev->power.power_state = PMSG_ON; | 5029 | host->dev->power.power_state = PMSG_ON; |
6640 | 5030 | ||
@@ -6809,7 +5199,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host) | |||
6809 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; | 5199 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
6810 | #endif | 5200 | #endif |
6811 | 5201 | ||
5202 | #ifdef CONFIG_ATA_SFF | ||
6812 | INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); | 5203 | INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); |
5204 | #endif | ||
6813 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); | 5205 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
6814 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); | 5206 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
6815 | INIT_LIST_HEAD(&ap->eh_done_q); | 5207 | INIT_LIST_HEAD(&ap->eh_done_q); |
@@ -6959,8 +5351,6 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, | |||
6959 | 5351 | ||
6960 | if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) | 5352 | if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) |
6961 | host->ops = pi->port_ops; | 5353 | host->ops = pi->port_ops; |
6962 | if (!host->private_data && pi->private_data) | ||
6963 | host->private_data = pi->private_data; | ||
6964 | } | 5354 | } |
6965 | 5355 | ||
6966 | return host; | 5356 | return host; |
@@ -6985,6 +5375,56 @@ static void ata_host_stop(struct device *gendev, void *res) | |||
6985 | } | 5375 | } |
6986 | 5376 | ||
6987 | /** | 5377 | /** |
5378 | * ata_finalize_port_ops - finalize ata_port_operations | ||
5379 | * @ops: ata_port_operations to finalize | ||
5380 | * | ||
5381 | * An ata_port_operations can inherit from another ops and that | ||
5382 | * ops can again inherit from another. This can go on as many | ||
5383 | * times as necessary as long as there is no loop in the | ||
5384 | * inheritance chain. | ||
5385 | * | ||
5386 | * Ops tables are finalized when the host is started. NULL or | ||
5387 | * unspecified entries are inherited from the closest ancestor | ||
5388 | * which has the method and the entry is populated with it. | ||
5389 | * After finalization, the ops table directly points to all the | ||
5390 | * methods, and ->inherits is no longer needed and is cleared. | ||
5391 | * | ||
5392 | * Using ATA_OP_NULL, inheriting ops can force a method to NULL. | ||
5393 | * | ||
5394 | * LOCKING: | ||
5395 | * None. | ||
5396 | */ | ||
5397 | static void ata_finalize_port_ops(struct ata_port_operations *ops) | ||
5398 | { | ||
5399 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | ||
5400 | const struct ata_port_operations *cur; | ||
5401 | void **begin = (void **)ops; | ||
5402 | void **end = (void **)&ops->inherits; | ||
5403 | void **pp; | ||
5404 | |||
5405 | if (!ops || !ops->inherits) | ||
5406 | return; | ||
5407 | |||
5408 | spin_lock(&lock); | ||
5409 | |||
5410 | for (cur = ops->inherits; cur; cur = cur->inherits) { | ||
5411 | void **inherit = (void **)cur; | ||
5412 | |||
5413 | for (pp = begin; pp < end; pp++, inherit++) | ||
5414 | if (!*pp) | ||
5415 | *pp = *inherit; | ||
5416 | } | ||
5417 | |||
5418 | for (pp = begin; pp < end; pp++) | ||
5419 | if (IS_ERR(*pp)) | ||
5420 | *pp = NULL; | ||
5421 | |||
5422 | ops->inherits = NULL; | ||
5423 | |||
5424 | spin_unlock(&lock); | ||
5425 | } | ||
5426 | |||
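With finalization in place, a driver no longer copies a full ops table: it names an ancestor in .inherits and overrides only what it needs, and an entry set to ATA_OP_NULL forces the inherited method back to NULL. A hedged sketch (my_qc_prep is hypothetical; ata_base_port_ops and sata_port_ops are the ancestors exported further down):

    /* Sketch: declaring an ops table that relies on ->inherits. */
    static struct ata_port_operations my_port_ops = {
            .inherits       = &sata_port_ops,       /* pull in the generic SATA methods */
            .qc_prep        = my_qc_prep,           /* hypothetical driver override */
            /* a member set to ATA_OP_NULL would end up NULL after finalization
             * instead of inheriting the ancestor's method */
    };

The chain is flattened once, at ata_host_start() time, so method lookups at run time are direct pointer dereferences.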
5427 | /** | ||
6988 | * ata_host_start - start and freeze ports of an ATA host | 5428 | * ata_host_start - start and freeze ports of an ATA host |
6989 | * @host: ATA host to start ports for | 5429 | * @host: ATA host to start ports for |
6990 | * | 5430 | * |
@@ -7009,9 +5449,13 @@ int ata_host_start(struct ata_host *host) | |||
7009 | if (host->flags & ATA_HOST_STARTED) | 5449 | if (host->flags & ATA_HOST_STARTED) |
7010 | return 0; | 5450 | return 0; |
7011 | 5451 | ||
5452 | ata_finalize_port_ops(host->ops); | ||
5453 | |||
7012 | for (i = 0; i < host->n_ports; i++) { | 5454 | for (i = 0; i < host->n_ports; i++) { |
7013 | struct ata_port *ap = host->ports[i]; | 5455 | struct ata_port *ap = host->ports[i]; |
7014 | 5456 | ||
5457 | ata_finalize_port_ops(ap->ops); | ||
5458 | |||
7015 | if (!host->ops && !ata_port_is_dummy(ap)) | 5459 | if (!host->ops && !ata_port_is_dummy(ap)) |
7016 | host->ops = ap->ops; | 5460 | host->ops = ap->ops; |
7017 | 5461 | ||
@@ -7073,7 +5517,7 @@ int ata_host_start(struct ata_host *host) | |||
7073 | */ | 5517 | */ |
7074 | /* KILLME - the only user left is ipr */ | 5518 | /* KILLME - the only user left is ipr */ |
7075 | void ata_host_init(struct ata_host *host, struct device *dev, | 5519 | void ata_host_init(struct ata_host *host, struct device *dev, |
7076 | unsigned long flags, const struct ata_port_operations *ops) | 5520 | unsigned long flags, struct ata_port_operations *ops) |
7077 | { | 5521 | { |
7078 | spin_lock_init(&host->lock); | 5522 | spin_lock_init(&host->lock); |
7079 | host->dev = dev; | 5523 | host->dev = dev; |
@@ -7169,9 +5613,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
7169 | /* kick EH for boot probing */ | 5613 | /* kick EH for boot probing */ |
7170 | spin_lock_irqsave(ap->lock, flags); | 5614 | spin_lock_irqsave(ap->lock, flags); |
7171 | 5615 | ||
7172 | ehi->probe_mask = | 5616 | ehi->probe_mask |= ATA_ALL_DEVICES; |
7173 | (1 << ata_link_max_devices(&ap->link)) - 1; | 5617 | ehi->action |= ATA_EH_RESET; |
7174 | ehi->action |= ATA_EH_SOFTRESET; | ||
7175 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; | 5618 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; |
7176 | 5619 | ||
7177 | ap->pflags &= ~ATA_PFLAG_INITIALIZING; | 5620 | ap->pflags &= ~ATA_PFLAG_INITIALIZING; |
@@ -7336,33 +5779,6 @@ void ata_host_detach(struct ata_host *host) | |||
7336 | ata_acpi_dissociate(host); | 5779 | ata_acpi_dissociate(host); |
7337 | } | 5780 | } |
7338 | 5781 | ||
7339 | /** | ||
7340 | * ata_std_ports - initialize ioaddr with standard port offsets. | ||
7341 | * @ioaddr: IO address structure to be initialized | ||
7342 | * | ||
7343 | * Utility function which initializes data_addr, error_addr, | ||
7344 | * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, | ||
7345 | * device_addr, status_addr, and command_addr to standard offsets | ||
7346 | * relative to cmd_addr. | ||
7347 | * | ||
7348 | * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. | ||
7349 | */ | ||
7350 | |||
7351 | void ata_std_ports(struct ata_ioports *ioaddr) | ||
7352 | { | ||
7353 | ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; | ||
7354 | ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; | ||
7355 | ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; | ||
7356 | ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; | ||
7357 | ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; | ||
7358 | ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; | ||
7359 | ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; | ||
7360 | ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; | ||
7361 | ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; | ||
7362 | ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; | ||
7363 | } | ||
7364 | |||
7365 | |||
7366 | #ifdef CONFIG_PCI | 5782 | #ifdef CONFIG_PCI |
7367 | 5783 | ||
7368 | /** | 5784 | /** |
@@ -7749,33 +6165,20 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | |||
7749 | /* | 6165 | /* |
7750 | * Dummy port_ops | 6166 | * Dummy port_ops |
7751 | */ | 6167 | */ |
7752 | static void ata_dummy_noret(struct ata_port *ap) { } | 6168 | static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) |
7753 | static int ata_dummy_ret0(struct ata_port *ap) { return 0; } | ||
7754 | static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } | ||
7755 | |||
7756 | static u8 ata_dummy_check_status(struct ata_port *ap) | ||
7757 | { | 6169 | { |
7758 | return ATA_DRDY; | 6170 | return AC_ERR_SYSTEM; |
7759 | } | 6171 | } |
7760 | 6172 | ||
7761 | static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) | 6173 | static void ata_dummy_error_handler(struct ata_port *ap) |
7762 | { | 6174 | { |
7763 | return AC_ERR_SYSTEM; | 6175 | /* truly dummy */ |
7764 | } | 6176 | } |
7765 | 6177 | ||
7766 | const struct ata_port_operations ata_dummy_port_ops = { | 6178 | struct ata_port_operations ata_dummy_port_ops = { |
7767 | .check_status = ata_dummy_check_status, | ||
7768 | .check_altstatus = ata_dummy_check_status, | ||
7769 | .dev_select = ata_noop_dev_select, | ||
7770 | .qc_prep = ata_noop_qc_prep, | 6179 | .qc_prep = ata_noop_qc_prep, |
7771 | .qc_issue = ata_dummy_qc_issue, | 6180 | .qc_issue = ata_dummy_qc_issue, |
7772 | .freeze = ata_dummy_noret, | 6181 | .error_handler = ata_dummy_error_handler, |
7773 | .thaw = ata_dummy_noret, | ||
7774 | .error_handler = ata_dummy_noret, | ||
7775 | .post_internal_cmd = ata_dummy_qc_noret, | ||
7776 | .irq_clear = ata_dummy_noret, | ||
7777 | .port_start = ata_dummy_ret0, | ||
7778 | .port_stop = ata_dummy_noret, | ||
7779 | }; | 6182 | }; |
7780 | 6183 | ||
7781 | const struct ata_port_info ata_dummy_port_info = { | 6184 | const struct ata_port_info ata_dummy_port_info = { |
@@ -7791,10 +6194,11 @@ const struct ata_port_info ata_dummy_port_info = { | |||
7791 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); | 6194 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); |
7792 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); | 6195 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); |
7793 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); | 6196 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); |
6197 | EXPORT_SYMBOL_GPL(ata_base_port_ops); | ||
6198 | EXPORT_SYMBOL_GPL(sata_port_ops); | ||
7794 | EXPORT_SYMBOL_GPL(ata_dummy_port_ops); | 6199 | EXPORT_SYMBOL_GPL(ata_dummy_port_ops); |
7795 | EXPORT_SYMBOL_GPL(ata_dummy_port_info); | 6200 | EXPORT_SYMBOL_GPL(ata_dummy_port_info); |
7796 | EXPORT_SYMBOL_GPL(ata_std_bios_param); | 6201 | EXPORT_SYMBOL_GPL(ata_std_bios_param); |
7797 | EXPORT_SYMBOL_GPL(ata_std_ports); | ||
7798 | EXPORT_SYMBOL_GPL(ata_host_init); | 6202 | EXPORT_SYMBOL_GPL(ata_host_init); |
7799 | EXPORT_SYMBOL_GPL(ata_host_alloc); | 6203 | EXPORT_SYMBOL_GPL(ata_host_alloc); |
7800 | EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); | 6204 | EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); |
@@ -7803,14 +6207,8 @@ EXPORT_SYMBOL_GPL(ata_host_register); | |||
7803 | EXPORT_SYMBOL_GPL(ata_host_activate); | 6207 | EXPORT_SYMBOL_GPL(ata_host_activate); |
7804 | EXPORT_SYMBOL_GPL(ata_host_detach); | 6208 | EXPORT_SYMBOL_GPL(ata_host_detach); |
7805 | EXPORT_SYMBOL_GPL(ata_sg_init); | 6209 | EXPORT_SYMBOL_GPL(ata_sg_init); |
7806 | EXPORT_SYMBOL_GPL(ata_hsm_move); | ||
7807 | EXPORT_SYMBOL_GPL(ata_qc_complete); | 6210 | EXPORT_SYMBOL_GPL(ata_qc_complete); |
7808 | EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); | 6211 | EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); |
7809 | EXPORT_SYMBOL_GPL(ata_qc_issue_prot); | ||
7810 | EXPORT_SYMBOL_GPL(ata_tf_load); | ||
7811 | EXPORT_SYMBOL_GPL(ata_tf_read); | ||
7812 | EXPORT_SYMBOL_GPL(ata_noop_dev_select); | ||
7813 | EXPORT_SYMBOL_GPL(ata_std_dev_select); | ||
7814 | EXPORT_SYMBOL_GPL(sata_print_link_status); | 6212 | EXPORT_SYMBOL_GPL(sata_print_link_status); |
7815 | EXPORT_SYMBOL_GPL(atapi_cmd_type); | 6213 | EXPORT_SYMBOL_GPL(atapi_cmd_type); |
7816 | EXPORT_SYMBOL_GPL(ata_tf_to_fis); | 6214 | EXPORT_SYMBOL_GPL(ata_tf_to_fis); |
@@ -7822,37 +6220,17 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); | |||
7822 | EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); | 6220 | EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); |
7823 | EXPORT_SYMBOL_GPL(ata_mode_string); | 6221 | EXPORT_SYMBOL_GPL(ata_mode_string); |
7824 | EXPORT_SYMBOL_GPL(ata_id_xfermask); | 6222 | EXPORT_SYMBOL_GPL(ata_id_xfermask); |
7825 | EXPORT_SYMBOL_GPL(ata_check_status); | ||
7826 | EXPORT_SYMBOL_GPL(ata_altstatus); | ||
7827 | EXPORT_SYMBOL_GPL(ata_exec_command); | ||
7828 | EXPORT_SYMBOL_GPL(ata_port_start); | 6223 | EXPORT_SYMBOL_GPL(ata_port_start); |
7829 | EXPORT_SYMBOL_GPL(ata_sff_port_start); | ||
7830 | EXPORT_SYMBOL_GPL(ata_interrupt); | ||
7831 | EXPORT_SYMBOL_GPL(ata_do_set_mode); | 6224 | EXPORT_SYMBOL_GPL(ata_do_set_mode); |
7832 | EXPORT_SYMBOL_GPL(ata_data_xfer); | ||
7833 | EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); | ||
7834 | EXPORT_SYMBOL_GPL(ata_std_qc_defer); | 6225 | EXPORT_SYMBOL_GPL(ata_std_qc_defer); |
7835 | EXPORT_SYMBOL_GPL(ata_qc_prep); | ||
7836 | EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); | ||
7837 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); | 6226 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); |
7838 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); | ||
7839 | EXPORT_SYMBOL_GPL(ata_bmdma_start); | ||
7840 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); | ||
7841 | EXPORT_SYMBOL_GPL(ata_bmdma_status); | ||
7842 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); | ||
7843 | EXPORT_SYMBOL_GPL(ata_bmdma_freeze); | ||
7844 | EXPORT_SYMBOL_GPL(ata_bmdma_thaw); | ||
7845 | EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); | ||
7846 | EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); | ||
7847 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); | ||
7848 | EXPORT_SYMBOL_GPL(ata_port_probe); | 6227 | EXPORT_SYMBOL_GPL(ata_port_probe); |
7849 | EXPORT_SYMBOL_GPL(ata_dev_disable); | 6228 | EXPORT_SYMBOL_GPL(ata_dev_disable); |
7850 | EXPORT_SYMBOL_GPL(sata_set_spd); | 6229 | EXPORT_SYMBOL_GPL(sata_set_spd); |
6230 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); | ||
7851 | EXPORT_SYMBOL_GPL(sata_link_debounce); | 6231 | EXPORT_SYMBOL_GPL(sata_link_debounce); |
7852 | EXPORT_SYMBOL_GPL(sata_link_resume); | 6232 | EXPORT_SYMBOL_GPL(sata_link_resume); |
7853 | EXPORT_SYMBOL_GPL(ata_bus_reset); | ||
7854 | EXPORT_SYMBOL_GPL(ata_std_prereset); | 6233 | EXPORT_SYMBOL_GPL(ata_std_prereset); |
7855 | EXPORT_SYMBOL_GPL(ata_std_softreset); | ||
7856 | EXPORT_SYMBOL_GPL(sata_link_hardreset); | 6234 | EXPORT_SYMBOL_GPL(sata_link_hardreset); |
7857 | EXPORT_SYMBOL_GPL(sata_std_hardreset); | 6235 | EXPORT_SYMBOL_GPL(sata_std_hardreset); |
7858 | EXPORT_SYMBOL_GPL(ata_std_postreset); | 6236 | EXPORT_SYMBOL_GPL(ata_std_postreset); |
@@ -7861,15 +6239,11 @@ EXPORT_SYMBOL_GPL(ata_dev_pair); | |||
7861 | EXPORT_SYMBOL_GPL(ata_port_disable); | 6239 | EXPORT_SYMBOL_GPL(ata_port_disable); |
7862 | EXPORT_SYMBOL_GPL(ata_ratelimit); | 6240 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
7863 | EXPORT_SYMBOL_GPL(ata_wait_register); | 6241 | EXPORT_SYMBOL_GPL(ata_wait_register); |
7864 | EXPORT_SYMBOL_GPL(ata_busy_sleep); | ||
7865 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); | ||
7866 | EXPORT_SYMBOL_GPL(ata_wait_ready); | ||
7867 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); | 6242 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); |
7868 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); | 6243 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
7869 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); | 6244 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |
7870 | EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); | 6245 | EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); |
7871 | EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); | 6246 | EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); |
7872 | EXPORT_SYMBOL_GPL(ata_host_intr); | ||
7873 | EXPORT_SYMBOL_GPL(sata_scr_valid); | 6247 | EXPORT_SYMBOL_GPL(sata_scr_valid); |
7874 | EXPORT_SYMBOL_GPL(sata_scr_read); | 6248 | EXPORT_SYMBOL_GPL(sata_scr_read); |
7875 | EXPORT_SYMBOL_GPL(sata_scr_write); | 6249 | EXPORT_SYMBOL_GPL(sata_scr_write); |
@@ -7892,11 +6266,6 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); | |||
7892 | 6266 | ||
7893 | #ifdef CONFIG_PCI | 6267 | #ifdef CONFIG_PCI |
7894 | EXPORT_SYMBOL_GPL(pci_test_config_bits); | 6268 | EXPORT_SYMBOL_GPL(pci_test_config_bits); |
7895 | EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); | ||
7896 | EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); | ||
7897 | EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); | ||
7898 | EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host); | ||
7899 | EXPORT_SYMBOL_GPL(ata_pci_init_one); | ||
7900 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); | 6269 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
7901 | #ifdef CONFIG_PM | 6270 | #ifdef CONFIG_PM |
7902 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); | 6271 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); |
@@ -7904,16 +6273,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); | |||
7904 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); | 6273 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); |
7905 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); | 6274 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); |
7906 | #endif /* CONFIG_PM */ | 6275 | #endif /* CONFIG_PM */ |
7907 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); | ||
7908 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); | ||
7909 | #endif /* CONFIG_PCI */ | 6276 | #endif /* CONFIG_PCI */ |
7910 | 6277 | ||
7911 | EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); | ||
7912 | EXPORT_SYMBOL_GPL(sata_pmp_std_prereset); | ||
7913 | EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset); | ||
7914 | EXPORT_SYMBOL_GPL(sata_pmp_std_postreset); | ||
7915 | EXPORT_SYMBOL_GPL(sata_pmp_do_eh); | ||
7916 | |||
7917 | EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); | 6278 | EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); |
7918 | EXPORT_SYMBOL_GPL(ata_ehi_push_desc); | 6279 | EXPORT_SYMBOL_GPL(ata_ehi_push_desc); |
7919 | EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); | 6280 | EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); |
@@ -7931,8 +6292,7 @@ EXPORT_SYMBOL_GPL(ata_eh_thaw_port); | |||
7931 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); | 6292 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); |
7932 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); | 6293 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); |
7933 | EXPORT_SYMBOL_GPL(ata_do_eh); | 6294 | EXPORT_SYMBOL_GPL(ata_do_eh); |
7934 | EXPORT_SYMBOL_GPL(ata_irq_on); | 6295 | EXPORT_SYMBOL_GPL(ata_std_error_handler); |
7935 | EXPORT_SYMBOL_GPL(ata_dev_try_classify); | ||
7936 | 6296 | ||
7937 | EXPORT_SYMBOL_GPL(ata_cable_40wire); | 6297 | EXPORT_SYMBOL_GPL(ata_cable_40wire); |
7938 | EXPORT_SYMBOL_GPL(ata_cable_80wire); | 6298 | EXPORT_SYMBOL_GPL(ata_cable_80wire); |
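For orientation, and not part of the patch itself: with ata_base_port_ops/sata_port_ops exported and ata_std_error_handler available, a low-level driver no longer has to spell out every SFF/BMDMA hook the deleted exports above used to require. A minimal driver-side sketch, using only symbols exported in this series; the my_sata_port_ops name is invented:

	#include <linux/libata.h>

	/* hypothetical driver: inherit the generic SATA hooks and
	 * override only what this controller actually needs */
	static struct ata_port_operations my_sata_port_ops = {
		.inherits	= &sata_port_ops,
		.hardreset	= sata_std_hardreset,
		.error_handler	= ata_std_error_handler,
	};

Unset slots are filled from the inherited ops chain at host registration, which is why ata_host_init() now takes a non-const struct ata_port_operations pointer.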
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a5830329eda4..d94359a24d41 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -873,9 +873,9 @@ int sata_async_notification(struct ata_port *ap) | |||
873 | if (rc == 0) | 873 | if (rc == 0) |
874 | sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); | 874 | sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); |
875 | 875 | ||
876 | if (!ap->nr_pmp_links || rc) { | 876 | if (!sata_pmp_attached(ap) || rc) { |
877 | /* PMP is not attached or SNTF is not available */ | 877 | /* PMP is not attached or SNTF is not available */ |
878 | if (!ap->nr_pmp_links) { | 878 | if (!sata_pmp_attached(ap)) { |
879 | /* PMP is not attached. Check whether ATAPI | 879 | /* PMP is not attached. Check whether ATAPI |
880 | * AN is configured. If so, notify media | 880 | * AN is configured. If so, notify media |
881 | * change. | 881 | * change. |
@@ -1079,19 +1079,6 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, | |||
1079 | 1079 | ||
1080 | spin_lock_irqsave(ap->lock, flags); | 1080 | spin_lock_irqsave(ap->lock, flags); |
1081 | 1081 | ||
1082 | /* Reset is represented by combination of actions and EHI | ||
1083 | * flags. Suck in all related bits before clearing eh_info to | ||
1084 | * avoid losing requested action. | ||
1085 | */ | ||
1086 | if (action & ATA_EH_RESET_MASK) { | ||
1087 | ehc->i.action |= ehi->action & ATA_EH_RESET_MASK; | ||
1088 | ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK; | ||
1089 | |||
1090 | /* make sure all reset actions are cleared & clear EHI flags */ | ||
1091 | action |= ATA_EH_RESET_MASK; | ||
1092 | ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | ||
1093 | } | ||
1094 | |||
1095 | ata_eh_clear_action(link, dev, ehi, action); | 1082 | ata_eh_clear_action(link, dev, ehi, action); |
1096 | 1083 | ||
1097 | if (!(ehc->i.flags & ATA_EHI_QUIET)) | 1084 | if (!(ehc->i.flags & ATA_EHI_QUIET)) |
@@ -1117,12 +1104,6 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev, | |||
1117 | { | 1104 | { |
1118 | struct ata_eh_context *ehc = &link->eh_context; | 1105 | struct ata_eh_context *ehc = &link->eh_context; |
1119 | 1106 | ||
1120 | /* if reset is complete, clear all reset actions & reset modifier */ | ||
1121 | if (action & ATA_EH_RESET_MASK) { | ||
1122 | action |= ATA_EH_RESET_MASK; | ||
1123 | ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | ||
1124 | } | ||
1125 | |||
1126 | ata_eh_clear_action(link, dev, &ehc->i, action); | 1107 | ata_eh_clear_action(link, dev, &ehc->i, action); |
1127 | } | 1108 | } |
1128 | 1109 | ||
@@ -1329,20 +1310,20 @@ static void ata_eh_analyze_serror(struct ata_link *link) | |||
1329 | 1310 | ||
1330 | if (serror & SERR_PERSISTENT) { | 1311 | if (serror & SERR_PERSISTENT) { |
1331 | err_mask |= AC_ERR_ATA_BUS; | 1312 | err_mask |= AC_ERR_ATA_BUS; |
1332 | action |= ATA_EH_HARDRESET; | 1313 | action |= ATA_EH_RESET; |
1333 | } | 1314 | } |
1334 | if (serror & | 1315 | if (serror & |
1335 | (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) { | 1316 | (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) { |
1336 | err_mask |= AC_ERR_ATA_BUS; | 1317 | err_mask |= AC_ERR_ATA_BUS; |
1337 | action |= ATA_EH_SOFTRESET; | 1318 | action |= ATA_EH_RESET; |
1338 | } | 1319 | } |
1339 | if (serror & SERR_PROTOCOL) { | 1320 | if (serror & SERR_PROTOCOL) { |
1340 | err_mask |= AC_ERR_HSM; | 1321 | err_mask |= AC_ERR_HSM; |
1341 | action |= ATA_EH_SOFTRESET; | 1322 | action |= ATA_EH_RESET; |
1342 | } | 1323 | } |
1343 | if (serror & SERR_INTERNAL) { | 1324 | if (serror & SERR_INTERNAL) { |
1344 | err_mask |= AC_ERR_SYSTEM; | 1325 | err_mask |= AC_ERR_SYSTEM; |
1345 | action |= ATA_EH_HARDRESET; | 1326 | action |= ATA_EH_RESET; |
1346 | } | 1327 | } |
1347 | 1328 | ||
1348 | /* Determine whether a hotplug event has occurred. Both | 1329 | /* Determine whether a hotplug event has occurred. Both |
@@ -1448,7 +1429,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, | |||
1448 | 1429 | ||
1449 | if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { | 1430 | if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { |
1450 | qc->err_mask |= AC_ERR_HSM; | 1431 | qc->err_mask |= AC_ERR_HSM; |
1451 | return ATA_EH_SOFTRESET; | 1432 | return ATA_EH_RESET; |
1452 | } | 1433 | } |
1453 | 1434 | ||
1454 | if (stat & (ATA_ERR | ATA_DF)) | 1435 | if (stat & (ATA_ERR | ATA_DF)) |
@@ -1484,7 +1465,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, | |||
1484 | } | 1465 | } |
1485 | 1466 | ||
1486 | if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) | 1467 | if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) |
1487 | action |= ATA_EH_SOFTRESET; | 1468 | action |= ATA_EH_RESET; |
1488 | 1469 | ||
1489 | return action; | 1470 | return action; |
1490 | } | 1471 | } |
@@ -1685,7 +1666,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, | |||
1685 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { | 1666 | if (verdict & ATA_EH_SPDN_SPEED_DOWN) { |
1686 | /* speed down SATA link speed if possible */ | 1667 | /* speed down SATA link speed if possible */ |
1687 | if (sata_down_spd_limit(link) == 0) { | 1668 | if (sata_down_spd_limit(link) == 0) { |
1688 | action |= ATA_EH_HARDRESET; | 1669 | action |= ATA_EH_RESET; |
1689 | goto done; | 1670 | goto done; |
1690 | } | 1671 | } |
1691 | 1672 | ||
@@ -1705,7 +1686,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, | |||
1705 | dev->spdn_cnt++; | 1686 | dev->spdn_cnt++; |
1706 | 1687 | ||
1707 | if (ata_down_xfermask_limit(dev, sel) == 0) { | 1688 | if (ata_down_xfermask_limit(dev, sel) == 0) { |
1708 | action |= ATA_EH_SOFTRESET; | 1689 | action |= ATA_EH_RESET; |
1709 | goto done; | 1690 | goto done; |
1710 | } | 1691 | } |
1711 | } | 1692 | } |
@@ -1719,7 +1700,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, | |||
1719 | (dev->xfer_shift != ATA_SHIFT_PIO)) { | 1700 | (dev->xfer_shift != ATA_SHIFT_PIO)) { |
1720 | if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { | 1701 | if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { |
1721 | dev->spdn_cnt = 0; | 1702 | dev->spdn_cnt = 0; |
1722 | action |= ATA_EH_SOFTRESET; | 1703 | action |= ATA_EH_RESET; |
1723 | goto done; | 1704 | goto done; |
1724 | } | 1705 | } |
1725 | } | 1706 | } |
@@ -1764,9 +1745,9 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
1764 | ehc->i.serror |= serror; | 1745 | ehc->i.serror |= serror; |
1765 | ata_eh_analyze_serror(link); | 1746 | ata_eh_analyze_serror(link); |
1766 | } else if (rc != -EOPNOTSUPP) { | 1747 | } else if (rc != -EOPNOTSUPP) { |
1767 | /* SError read failed, force hardreset and probing */ | 1748 | /* SError read failed, force reset and probing */ |
1768 | ata_ehi_schedule_probe(&ehc->i); | 1749 | ehc->i.probe_mask |= ATA_ALL_DEVICES; |
1769 | ehc->i.action |= ATA_EH_HARDRESET; | 1750 | ehc->i.action |= ATA_EH_RESET; |
1770 | ehc->i.err_mask |= AC_ERR_OTHER; | 1751 | ehc->i.err_mask |= AC_ERR_OTHER; |
1771 | } | 1752 | } |
1772 | 1753 | ||
@@ -1804,6 +1785,11 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
1804 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) | 1785 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) |
1805 | qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); | 1786 | qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); |
1806 | 1787 | ||
1788 | /* determine whether the command is worth retrying */ | ||
1789 | if (!(qc->err_mask & AC_ERR_INVALID) && | ||
1790 | ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) | ||
1791 | qc->flags |= ATA_QCFLAG_RETRY; | ||
1792 | |||
1807 | /* accumulate error info */ | 1793 | /* accumulate error info */ |
1808 | ehc->i.dev = qc->dev; | 1794 | ehc->i.dev = qc->dev; |
1809 | all_err_mask |= qc->err_mask; | 1795 | all_err_mask |= qc->err_mask; |
@@ -1814,7 +1800,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
1814 | /* enforce default EH actions */ | 1800 | /* enforce default EH actions */ |
1815 | if (ap->pflags & ATA_PFLAG_FROZEN || | 1801 | if (ap->pflags & ATA_PFLAG_FROZEN || |
1816 | all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) | 1802 | all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) |
1817 | ehc->i.action |= ATA_EH_SOFTRESET; | 1803 | ehc->i.action |= ATA_EH_RESET; |
1818 | else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || | 1804 | else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || |
1819 | (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) | 1805 | (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) |
1820 | ehc->i.action |= ATA_EH_REVALIDATE; | 1806 | ehc->i.action |= ATA_EH_REVALIDATE; |
@@ -1867,7 +1853,7 @@ void ata_eh_autopsy(struct ata_port *ap) | |||
1867 | /* Autopsy of fanout ports can affect host link autopsy. | 1853 | /* Autopsy of fanout ports can affect host link autopsy. |
1868 | * Perform host link autopsy last. | 1854 | * Perform host link autopsy last. |
1869 | */ | 1855 | */ |
1870 | if (ap->nr_pmp_links) | 1856 | if (sata_pmp_attached(ap)) |
1871 | ata_eh_link_autopsy(&ap->link); | 1857 | ata_eh_link_autopsy(&ap->link); |
1872 | } | 1858 | } |
1873 | 1859 | ||
@@ -2066,41 +2052,29 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, | |||
2066 | classes[dev->devno] = ATA_DEV_UNKNOWN; | 2052 | classes[dev->devno] = ATA_DEV_UNKNOWN; |
2067 | 2053 | ||
2068 | rc = reset(link, classes, deadline); | 2054 | rc = reset(link, classes, deadline); |
2069 | if (rc) | ||
2070 | return rc; | ||
2071 | 2055 | ||
2072 | /* If any class isn't ATA_DEV_UNKNOWN, consider classification | 2056 | /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ |
2073 | * is complete and convert all ATA_DEV_UNKNOWN to | ||
2074 | * ATA_DEV_NONE. | ||
2075 | */ | ||
2076 | ata_link_for_each_dev(dev, link) | 2057 | ata_link_for_each_dev(dev, link) |
2077 | if (classes[dev->devno] != ATA_DEV_UNKNOWN) | 2058 | if (classes[dev->devno] == ATA_DEV_UNKNOWN) |
2078 | break; | 2059 | classes[dev->devno] = ATA_DEV_NONE; |
2079 | |||
2080 | if (dev) { | ||
2081 | ata_link_for_each_dev(dev, link) { | ||
2082 | if (classes[dev->devno] == ATA_DEV_UNKNOWN) | ||
2083 | classes[dev->devno] = ATA_DEV_NONE; | ||
2084 | } | ||
2085 | } | ||
2086 | 2060 | ||
2087 | return 0; | 2061 | return rc; |
2088 | } | 2062 | } |
2089 | 2063 | ||
2090 | static int ata_eh_followup_srst_needed(struct ata_link *link, | 2064 | static int ata_eh_followup_srst_needed(struct ata_link *link, |
2091 | int rc, int classify, | 2065 | int rc, int classify, |
2092 | const unsigned int *classes) | 2066 | const unsigned int *classes) |
2093 | { | 2067 | { |
2094 | if (link->flags & ATA_LFLAG_NO_SRST) | 2068 | if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) |
2095 | return 0; | 2069 | return 0; |
2096 | if (rc == -EAGAIN) | 2070 | if (rc == -EAGAIN) { |
2097 | return 1; | 2071 | if (classify) |
2072 | return 1; | ||
2073 | rc = 0; | ||
2074 | } | ||
2098 | if (rc != 0) | 2075 | if (rc != 0) |
2099 | return 0; | 2076 | return 0; |
2100 | if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) | 2077 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) |
2101 | return 1; | ||
2102 | if (classify && !(link->flags & ATA_LFLAG_ASSUME_CLASS) && | ||
2103 | classes[0] == ATA_DEV_UNKNOWN) | ||
2104 | return 1; | 2078 | return 1; |
2105 | return 0; | 2079 | return 0; |
2106 | } | 2080 | } |
@@ -2118,7 +2092,6 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2118 | int try = 0; | 2092 | int try = 0; |
2119 | struct ata_device *dev; | 2093 | struct ata_device *dev; |
2120 | unsigned long deadline, now; | 2094 | unsigned long deadline, now; |
2121 | unsigned int tmp_action; | ||
2122 | ata_reset_fn_t reset; | 2095 | ata_reset_fn_t reset; |
2123 | unsigned long flags; | 2096 | unsigned long flags; |
2124 | u32 sstatus; | 2097 | u32 sstatus; |
@@ -2129,7 +2102,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2129 | ap->pflags |= ATA_PFLAG_RESETTING; | 2102 | ap->pflags |= ATA_PFLAG_RESETTING; |
2130 | spin_unlock_irqrestore(ap->lock, flags); | 2103 | spin_unlock_irqrestore(ap->lock, flags); |
2131 | 2104 | ||
2132 | ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); | 2105 | ata_eh_about_to_do(link, NULL, ATA_EH_RESET); |
2133 | 2106 | ||
2134 | ata_link_for_each_dev(dev, link) { | 2107 | ata_link_for_each_dev(dev, link) { |
2135 | /* If we issue an SRST then an ATA drive (not ATAPI) | 2108 | /* If we issue an SRST then an ATA drive (not ATAPI) |
@@ -2159,17 +2132,20 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2159 | goto done; | 2132 | goto done; |
2160 | } | 2133 | } |
2161 | 2134 | ||
2162 | /* Determine which reset to use and record in ehc->i.action. | 2135 | /* prefer hardreset */ |
2163 | * prereset() may examine and modify it. | 2136 | ehc->i.action &= ~ATA_EH_RESET; |
2164 | */ | 2137 | if (hardreset) { |
2165 | if (softreset && (!hardreset || (!(lflags & ATA_LFLAG_NO_SRST) && | 2138 | reset = hardreset; |
2166 | !sata_set_spd_needed(link) && | 2139 | ehc->i.action = ATA_EH_HARDRESET; |
2167 | !(ehc->i.action & ATA_EH_HARDRESET)))) | 2140 | } else if (softreset) { |
2168 | tmp_action = ATA_EH_SOFTRESET; | 2141 | reset = softreset; |
2169 | else | 2142 | ehc->i.action = ATA_EH_SOFTRESET; |
2170 | tmp_action = ATA_EH_HARDRESET; | 2143 | } else { |
2171 | 2144 | ata_link_printk(link, KERN_ERR, "BUG: no reset method, " | |
2172 | ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action; | 2145 | "please report to linux-ide@vger.kernel.org\n"); |
2146 | dump_stack(); | ||
2147 | return -EINVAL; | ||
2148 | } | ||
2173 | 2149 | ||
2174 | if (prereset) { | 2150 | if (prereset) { |
2175 | rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); | 2151 | rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); |
@@ -2177,7 +2153,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2177 | if (rc == -ENOENT) { | 2153 | if (rc == -ENOENT) { |
2178 | ata_link_printk(link, KERN_DEBUG, | 2154 | ata_link_printk(link, KERN_DEBUG, |
2179 | "port disabled. ignoring.\n"); | 2155 | "port disabled. ignoring.\n"); |
2180 | ehc->i.action &= ~ATA_EH_RESET_MASK; | 2156 | ehc->i.action &= ~ATA_EH_RESET; |
2181 | 2157 | ||
2182 | ata_link_for_each_dev(dev, link) | 2158 | ata_link_for_each_dev(dev, link) |
2183 | classes[dev->devno] = ATA_DEV_NONE; | 2159 | classes[dev->devno] = ATA_DEV_NONE; |
@@ -2190,12 +2166,8 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2190 | } | 2166 | } |
2191 | } | 2167 | } |
2192 | 2168 | ||
2193 | /* prereset() might have modified ehc->i.action */ | 2169 | /* prereset() might have cleared ATA_EH_RESET */ |
2194 | if (ehc->i.action & ATA_EH_HARDRESET) | 2170 | if (!(ehc->i.action & ATA_EH_RESET)) { |
2195 | reset = hardreset; | ||
2196 | else if (ehc->i.action & ATA_EH_SOFTRESET) | ||
2197 | reset = softreset; | ||
2198 | else { | ||
2199 | /* prereset told us not to reset, bang classes and return */ | 2171 | /* prereset told us not to reset, bang classes and return */ |
2200 | ata_link_for_each_dev(dev, link) | 2172 | ata_link_for_each_dev(dev, link) |
2201 | classes[dev->devno] = ATA_DEV_NONE; | 2173 | classes[dev->devno] = ATA_DEV_NONE; |
@@ -2203,14 +2175,6 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2203 | goto out; | 2175 | goto out; |
2204 | } | 2176 | } |
2205 | 2177 | ||
2206 | /* did prereset() screw up? if so, fix up to avoid oopsing */ | ||
2207 | if (!reset) { | ||
2208 | if (softreset) | ||
2209 | reset = softreset; | ||
2210 | else | ||
2211 | reset = hardreset; | ||
2212 | } | ||
2213 | |||
2214 | retry: | 2178 | retry: |
2215 | deadline = jiffies + ata_eh_reset_timeouts[try++]; | 2179 | deadline = jiffies + ata_eh_reset_timeouts[try++]; |
2216 | 2180 | ||
@@ -2240,7 +2204,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2240 | goto fail; | 2204 | goto fail; |
2241 | } | 2205 | } |
2242 | 2206 | ||
2243 | ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK); | 2207 | ata_eh_about_to_do(link, NULL, ATA_EH_RESET); |
2244 | rc = ata_do_reset(link, reset, classes, deadline); | 2208 | rc = ata_do_reset(link, reset, classes, deadline); |
2245 | } | 2209 | } |
2246 | 2210 | ||
@@ -2248,21 +2212,6 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2248 | if (rc && rc != -EAGAIN) | 2212 | if (rc && rc != -EAGAIN) |
2249 | goto fail; | 2213 | goto fail; |
2250 | 2214 | ||
2251 | /* was classification successful? */ | ||
2252 | if (classify && classes[0] == ATA_DEV_UNKNOWN && | ||
2253 | !(lflags & ATA_LFLAG_ASSUME_CLASS)) { | ||
2254 | if (try < max_tries) { | ||
2255 | ata_link_printk(link, KERN_WARNING, | ||
2256 | "classification failed\n"); | ||
2257 | rc = -EINVAL; | ||
2258 | goto fail; | ||
2259 | } | ||
2260 | |||
2261 | ata_link_printk(link, KERN_WARNING, | ||
2262 | "classfication failed, assuming ATA\n"); | ||
2263 | lflags |= ATA_LFLAG_ASSUME_ATA; | ||
2264 | } | ||
2265 | |||
2266 | done: | 2215 | done: |
2267 | ata_link_for_each_dev(dev, link) { | 2216 | ata_link_for_each_dev(dev, link) { |
2268 | /* After the reset, the device state is PIO 0 and the | 2217 | /* After the reset, the device state is PIO 0 and the |
@@ -2290,7 +2239,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2290 | postreset(link, classes); | 2239 | postreset(link, classes); |
2291 | 2240 | ||
2292 | /* reset successful, schedule revalidation */ | 2241 | /* reset successful, schedule revalidation */ |
2293 | ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); | 2242 | ata_eh_done(link, NULL, ATA_EH_RESET); |
2294 | ehc->i.action |= ATA_EH_REVALIDATE; | 2243 | ehc->i.action |= ATA_EH_REVALIDATE; |
2295 | 2244 | ||
2296 | rc = 0; | 2245 | rc = 0; |
@@ -2305,6 +2254,11 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2305 | return rc; | 2254 | return rc; |
2306 | 2255 | ||
2307 | fail: | 2256 | fail: |
2257 | /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ | ||
2258 | if (!ata_is_host_link(link) && | ||
2259 | sata_scr_read(link, SCR_STATUS, &sstatus)) | ||
2260 | rc = -ERESTART; | ||
2261 | |||
2308 | if (rc == -ERESTART || try >= max_tries) | 2262 | if (rc == -ERESTART || try >= max_tries) |
2309 | goto out; | 2263 | goto out; |
2310 | 2264 | ||
@@ -2515,6 +2469,7 @@ static int ata_link_nr_vacant(struct ata_link *link) | |||
2515 | 2469 | ||
2516 | static int ata_eh_skip_recovery(struct ata_link *link) | 2470 | static int ata_eh_skip_recovery(struct ata_link *link) |
2517 | { | 2471 | { |
2472 | struct ata_port *ap = link->ap; | ||
2518 | struct ata_eh_context *ehc = &link->eh_context; | 2473 | struct ata_eh_context *ehc = &link->eh_context; |
2519 | struct ata_device *dev; | 2474 | struct ata_device *dev; |
2520 | 2475 | ||
@@ -2522,9 +2477,13 @@ static int ata_eh_skip_recovery(struct ata_link *link) | |||
2522 | if (link->flags & ATA_LFLAG_DISABLED) | 2477 | if (link->flags & ATA_LFLAG_DISABLED) |
2523 | return 1; | 2478 | return 1; |
2524 | 2479 | ||
2525 | /* thaw frozen port, resume link and recover failed devices */ | 2480 | /* thaw frozen port and recover failed devices */ |
2526 | if ((link->ap->pflags & ATA_PFLAG_FROZEN) || | 2481 | if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) |
2527 | (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link)) | 2482 | return 0; |
2483 | |||
2484 | /* reset at least once if reset is requested */ | ||
2485 | if ((ehc->i.action & ATA_EH_RESET) && | ||
2486 | !(ehc->i.flags & ATA_EHI_DID_RESET)) | ||
2528 | return 0; | 2487 | return 0; |
2529 | 2488 | ||
2530 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ | 2489 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ |
@@ -2548,7 +2507,7 @@ static int ata_eh_schedule_probe(struct ata_device *dev) | |||
2548 | ata_eh_detach_dev(dev); | 2507 | ata_eh_detach_dev(dev); |
2549 | ata_dev_init(dev); | 2508 | ata_dev_init(dev); |
2550 | ehc->did_probe_mask |= (1 << dev->devno); | 2509 | ehc->did_probe_mask |= (1 << dev->devno); |
2551 | ehc->i.action |= ATA_EH_SOFTRESET; | 2510 | ehc->i.action |= ATA_EH_RESET; |
2552 | ehc->saved_xfer_mode[dev->devno] = 0; | 2511 | ehc->saved_xfer_mode[dev->devno] = 0; |
2553 | ehc->saved_ncq_enabled &= ~(1 << dev->devno); | 2512 | ehc->saved_ncq_enabled &= ~(1 << dev->devno); |
2554 | 2513 | ||
@@ -2592,12 +2551,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2592 | 2551 | ||
2593 | return 1; | 2552 | return 1; |
2594 | } else { | 2553 | } else { |
2595 | /* soft didn't work? be haaaaard */ | 2554 | ehc->i.action |= ATA_EH_RESET; |
2596 | if (ehc->i.flags & ATA_EHI_DID_RESET) | ||
2597 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2598 | else | ||
2599 | ehc->i.action |= ATA_EH_SOFTRESET; | ||
2600 | |||
2601 | return 0; | 2555 | return 0; |
2602 | } | 2556 | } |
2603 | } | 2557 | } |
@@ -2690,7 +2644,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2690 | ehc->i.action = 0; | 2644 | ehc->i.action = 0; |
2691 | 2645 | ||
2692 | /* do we need to reset? */ | 2646 | /* do we need to reset? */ |
2693 | if (ehc->i.action & ATA_EH_RESET_MASK) | 2647 | if (ehc->i.action & ATA_EH_RESET) |
2694 | reset = 1; | 2648 | reset = 1; |
2695 | 2649 | ||
2696 | ata_link_for_each_dev(dev, link) | 2650 | ata_link_for_each_dev(dev, link) |
@@ -2702,13 +2656,13 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2702 | /* if PMP is attached, this function only deals with | 2656 | /* if PMP is attached, this function only deals with |
2703 | * downstream links, port should stay thawed. | 2657 | * downstream links, port should stay thawed. |
2704 | */ | 2658 | */ |
2705 | if (!ap->nr_pmp_links) | 2659 | if (!sata_pmp_attached(ap)) |
2706 | ata_eh_freeze_port(ap); | 2660 | ata_eh_freeze_port(ap); |
2707 | 2661 | ||
2708 | ata_port_for_each_link(link, ap) { | 2662 | ata_port_for_each_link(link, ap) { |
2709 | struct ata_eh_context *ehc = &link->eh_context; | 2663 | struct ata_eh_context *ehc = &link->eh_context; |
2710 | 2664 | ||
2711 | if (!(ehc->i.action & ATA_EH_RESET_MASK)) | 2665 | if (!(ehc->i.action & ATA_EH_RESET)) |
2712 | continue; | 2666 | continue; |
2713 | 2667 | ||
2714 | rc = ata_eh_reset(link, ata_link_nr_vacant(link), | 2668 | rc = ata_eh_reset(link, ata_link_nr_vacant(link), |
@@ -2721,7 +2675,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2721 | } | 2675 | } |
2722 | } | 2676 | } |
2723 | 2677 | ||
2724 | if (!ap->nr_pmp_links) | 2678 | if (!sata_pmp_attached(ap)) |
2725 | ata_eh_thaw_port(ap); | 2679 | ata_eh_thaw_port(ap); |
2726 | } | 2680 | } |
2727 | 2681 | ||
@@ -2765,7 +2719,7 @@ dev_fail: | |||
2765 | /* PMP reset requires working host port. | 2719 | /* PMP reset requires working host port. |
2766 | * Can't retry if it's frozen. | 2720 | * Can't retry if it's frozen. |
2767 | */ | 2721 | */ |
2768 | if (ap->nr_pmp_links) | 2722 | if (sata_pmp_attached(ap)) |
2769 | goto out; | 2723 | goto out; |
2770 | break; | 2724 | break; |
2771 | } | 2725 | } |
@@ -2817,18 +2771,11 @@ void ata_eh_finish(struct ata_port *ap) | |||
2817 | /* FIXME: Once EH migration is complete, | 2771 | /* FIXME: Once EH migration is complete, |
2818 | * generate sense data in this function, | 2772 | * generate sense data in this function, |
2819 | * considering both err_mask and tf. | 2773 | * considering both err_mask and tf. |
2820 | * | ||
2821 | * There's no point in retrying invalid | ||
2822 | * (detected by libata) and non-IO device | ||
2823 | * errors (rejected by device). Finish them | ||
2824 | * immediately. | ||
2825 | */ | 2774 | */ |
2826 | if ((qc->err_mask & AC_ERR_INVALID) || | 2775 | if (qc->flags & ATA_QCFLAG_RETRY) |
2827 | (!(qc->flags & ATA_QCFLAG_IO) && | ||
2828 | qc->err_mask == AC_ERR_DEV)) | ||
2829 | ata_eh_qc_complete(qc); | ||
2830 | else | ||
2831 | ata_eh_qc_retry(qc); | 2776 | ata_eh_qc_retry(qc); |
2777 | else | ||
2778 | ata_eh_qc_complete(qc); | ||
2832 | } else { | 2779 | } else { |
2833 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) { | 2780 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) { |
2834 | ata_eh_qc_complete(qc); | 2781 | ata_eh_qc_complete(qc); |
@@ -2848,6 +2795,7 @@ void ata_eh_finish(struct ata_port *ap) | |||
2848 | /** | 2795 | /** |
2849 | * ata_do_eh - do standard error handling | 2796 | * ata_do_eh - do standard error handling |
2850 | * @ap: host port to handle error for | 2797 | * @ap: host port to handle error for |
2798 | * | ||
2851 | * @prereset: prereset method (can be NULL) | 2799 | * @prereset: prereset method (can be NULL) |
2852 | * @softreset: softreset method (can be NULL) | 2800 | * @softreset: softreset method (can be NULL) |
2853 | * @hardreset: hardreset method (can be NULL) | 2801 | * @hardreset: hardreset method (can be NULL) |
@@ -2878,6 +2826,27 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2878 | ata_eh_finish(ap); | 2826 | ata_eh_finish(ap); |
2879 | } | 2827 | } |
2880 | 2828 | ||
2829 | /** | ||
2830 | * ata_std_error_handler - standard error handler | ||
2831 | * @ap: host port to handle error for | ||
2832 | * | ||
2833 | * Standard error handler | ||
2834 | * | ||
2835 | * LOCKING: | ||
2836 | * Kernel thread context (may sleep). | ||
2837 | */ | ||
2838 | void ata_std_error_handler(struct ata_port *ap) | ||
2839 | { | ||
2840 | struct ata_port_operations *ops = ap->ops; | ||
2841 | ata_reset_fn_t hardreset = ops->hardreset; | ||
2842 | |||
2843 | /* ignore built-in hardreset if SCR access is not available */ | ||
2844 | if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) | ||
2845 | hardreset = NULL; | ||
2846 | |||
2847 | ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); | ||
2848 | } | ||
2849 | |||
2881 | #ifdef CONFIG_PM | 2850 | #ifdef CONFIG_PM |
2882 | /** | 2851 | /** |
2883 | * ata_eh_handle_port_suspend - perform port suspend operation | 2852 | * ata_eh_handle_port_suspend - perform port suspend operation |
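A short sketch of the consolidated reset request (ATA_EH_SOFTRESET and ATA_EH_HARDRESET collapse into a single ATA_EH_RESET, and ata_eh_reset() now picks the method itself), mirroring the ata_host_register() hunk earlier in this patch. ata_port_schedule_eh() is assumed from the existing EH core and the my_kick_reprobe() wrapper is invented:

	/* hypothetical helper: ask EH to reprobe everything on @ap */
	static void my_kick_reprobe(struct ata_port *ap)
	{
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		spin_lock_irqsave(ap->lock, flags);

		/* one flag now covers both reset types; EH picks the method */
		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET;

		ata_port_schedule_eh(ap);
		spin_unlock_irqrestore(ap->lock, flags);
	}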
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index d91f5090ba9d..ff1822a7da38 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -11,6 +11,14 @@ | |||
11 | #include <linux/libata.h> | 11 | #include <linux/libata.h> |
12 | #include "libata.h" | 12 | #include "libata.h" |
13 | 13 | ||
14 | const struct ata_port_operations sata_pmp_port_ops = { | ||
15 | .inherits = &sata_port_ops, | ||
16 | .pmp_prereset = ata_std_prereset, | ||
17 | .pmp_hardreset = sata_std_hardreset, | ||
18 | .pmp_postreset = ata_std_postreset, | ||
19 | .error_handler = sata_pmp_error_handler, | ||
20 | }; | ||
21 | |||
14 | /** | 22 | /** |
15 | * sata_pmp_read - read PMP register | 23 | * sata_pmp_read - read PMP register |
16 | * @link: link to read PMP register for | 24 | * @link: link to read PMP register for |
@@ -176,140 +184,6 @@ int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) | |||
176 | } | 184 | } |
177 | 185 | ||
178 | /** | 186 | /** |
179 | * sata_pmp_std_prereset - prepare PMP link for reset | ||
180 | * @link: link to be reset | ||
181 | * @deadline: deadline jiffies for the operation | ||
182 | * | ||
183 | * @link is about to be reset. Initialize it. | ||
184 | * | ||
185 | * LOCKING: | ||
186 | * Kernel thread context (may sleep) | ||
187 | * | ||
188 | * RETURNS: | ||
189 | * 0 on success, -errno otherwise. | ||
190 | */ | ||
191 | int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline) | ||
192 | { | ||
193 | struct ata_eh_context *ehc = &link->eh_context; | ||
194 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | ||
195 | int rc; | ||
196 | |||
197 | /* force HRST? */ | ||
198 | if (link->flags & ATA_LFLAG_NO_SRST) | ||
199 | ehc->i.action |= ATA_EH_HARDRESET; | ||
200 | |||
201 | /* handle link resume */ | ||
202 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && | ||
203 | (link->flags & ATA_LFLAG_HRST_TO_RESUME)) | ||
204 | ehc->i.action |= ATA_EH_HARDRESET; | ||
205 | |||
206 | /* if we're about to do hardreset, nothing more to do */ | ||
207 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
208 | return 0; | ||
209 | |||
210 | /* resume link */ | ||
211 | rc = sata_link_resume(link, timing, deadline); | ||
212 | if (rc) { | ||
213 | /* phy resume failed */ | ||
214 | ata_link_printk(link, KERN_WARNING, "failed to resume link " | ||
215 | "for reset (errno=%d)\n", rc); | ||
216 | return rc; | ||
217 | } | ||
218 | |||
219 | /* clear SError bits including .X which blocks the port when set */ | ||
220 | rc = sata_scr_write(link, SCR_ERROR, 0xffffffff); | ||
221 | if (rc) { | ||
222 | ata_link_printk(link, KERN_ERR, | ||
223 | "failed to clear SError (errno=%d)\n", rc); | ||
224 | return rc; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * sata_pmp_std_hardreset - standard hardreset method for PMP link | ||
232 | * @link: link to be reset | ||
233 | * @class: resulting class of attached device | ||
234 | * @deadline: deadline jiffies for the operation | ||
235 | * | ||
236 | * Hardreset PMP port @link. Note that this function doesn't | ||
237 | * wait for BSY clearance. There simply isn't a generic way to | ||
238 | * wait the event. Instead, this function return -EAGAIN thus | ||
239 | * telling libata-EH to followup with softreset. | ||
240 | * | ||
241 | * LOCKING: | ||
242 | * Kernel thread context (may sleep) | ||
243 | * | ||
244 | * RETURNS: | ||
245 | * 0 on success, -errno otherwise. | ||
246 | */ | ||
247 | int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, | ||
248 | unsigned long deadline) | ||
249 | { | ||
250 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | ||
251 | u32 tmp; | ||
252 | int rc; | ||
253 | |||
254 | DPRINTK("ENTER\n"); | ||
255 | |||
256 | /* do hardreset */ | ||
257 | rc = sata_link_hardreset(link, timing, deadline); | ||
258 | if (rc) { | ||
259 | ata_link_printk(link, KERN_ERR, | ||
260 | "COMRESET failed (errno=%d)\n", rc); | ||
261 | goto out; | ||
262 | } | ||
263 | |||
264 | /* clear SError bits including .X which blocks the port when set */ | ||
265 | rc = sata_scr_write(link, SCR_ERROR, 0xffffffff); | ||
266 | if (rc) { | ||
267 | ata_link_printk(link, KERN_ERR, "failed to clear SError " | ||
268 | "during hardreset (errno=%d)\n", rc); | ||
269 | goto out; | ||
270 | } | ||
271 | |||
272 | /* if device is present, follow up with srst to wait for !BSY */ | ||
273 | if (ata_link_online(link)) | ||
274 | rc = -EAGAIN; | ||
275 | out: | ||
276 | /* if SCR isn't accessible, we need to reset the PMP */ | ||
277 | if (rc && rc != -EAGAIN && sata_scr_read(link, SCR_STATUS, &tmp)) | ||
278 | rc = -ERESTART; | ||
279 | |||
280 | DPRINTK("EXIT, rc=%d\n", rc); | ||
281 | return rc; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * ata_std_postreset - standard postreset method for PMP link | ||
286 | * @link: the target ata_link | ||
287 | * @classes: classes of attached devices | ||
288 | * | ||
289 | * This function is invoked after a successful reset. Note that | ||
290 | * the device might have been reset more than once using | ||
291 | * different reset methods before postreset is invoked. | ||
292 | * | ||
293 | * LOCKING: | ||
294 | * Kernel thread context (may sleep) | ||
295 | */ | ||
296 | void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class) | ||
297 | { | ||
298 | u32 serror; | ||
299 | |||
300 | DPRINTK("ENTER\n"); | ||
301 | |||
302 | /* clear SError */ | ||
303 | if (sata_scr_read(link, SCR_ERROR, &serror) == 0) | ||
304 | sata_scr_write(link, SCR_ERROR, serror); | ||
305 | |||
306 | /* print link status */ | ||
307 | sata_print_link_status(link); | ||
308 | |||
309 | DPRINTK("EXIT\n"); | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * sata_pmp_read_gscr - read GSCR block of SATA PMP | 187 | * sata_pmp_read_gscr - read GSCR block of SATA PMP |
314 | * @dev: PMP device | 188 | * @dev: PMP device |
315 | * @gscr: buffer to read GSCR block into | 189 | * @gscr: buffer to read GSCR block into |
@@ -444,9 +318,8 @@ static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) | |||
444 | struct ata_eh_context *ehc = &link->eh_context; | 318 | struct ata_eh_context *ehc = &link->eh_context; |
445 | 319 | ||
446 | link->flags = 0; | 320 | link->flags = 0; |
447 | ehc->i.probe_mask |= 1; | 321 | ehc->i.probe_mask |= ATA_ALL_DEVICES; |
448 | ehc->i.action |= ATA_EH_SOFTRESET; | 322 | ehc->i.action |= ATA_EH_RESET; |
449 | ehc->i.flags |= ATA_EHI_RESUME_LINK; | ||
450 | } | 323 | } |
451 | 324 | ||
452 | return 0; | 325 | return 0; |
@@ -462,9 +335,6 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
462 | if (vendor == 0x1095 && devid == 0x3726) { | 335 | if (vendor == 0x1095 && devid == 0x3726) { |
463 | /* sil3726 quirks */ | 336 | /* sil3726 quirks */ |
464 | ata_port_for_each_link(link, ap) { | 337 | ata_port_for_each_link(link, ap) { |
465 | /* SError.N need a kick in the ass to get working */ | ||
466 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
467 | |||
468 | /* class code report is unreliable */ | 338 | /* class code report is unreliable */ |
469 | if (link->pmp < 5) | 339 | if (link->pmp < 5) |
470 | link->flags |= ATA_LFLAG_ASSUME_ATA; | 340 | link->flags |= ATA_LFLAG_ASSUME_ATA; |
@@ -477,9 +347,6 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
477 | } else if (vendor == 0x1095 && devid == 0x4723) { | 347 | } else if (vendor == 0x1095 && devid == 0x4723) { |
478 | /* sil4723 quirks */ | 348 | /* sil4723 quirks */ |
479 | ata_port_for_each_link(link, ap) { | 349 | ata_port_for_each_link(link, ap) { |
480 | /* SError.N need a kick in the ass to get working */ | ||
481 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
482 | |||
483 | /* class code report is unreliable */ | 350 | /* class code report is unreliable */ |
484 | if (link->pmp < 2) | 351 | if (link->pmp < 2) |
485 | link->flags |= ATA_LFLAG_ASSUME_ATA; | 352 | link->flags |= ATA_LFLAG_ASSUME_ATA; |
@@ -492,9 +359,6 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
492 | } else if (vendor == 0x1095 && devid == 0x4726) { | 359 | } else if (vendor == 0x1095 && devid == 0x4726) { |
493 | /* sil4726 quirks */ | 360 | /* sil4726 quirks */ |
494 | ata_port_for_each_link(link, ap) { | 361 | ata_port_for_each_link(link, ap) { |
495 | /* SError.N need a kick in the ass to get working */ | ||
496 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
497 | |||
498 | /* Class code report is unreliable and SRST | 362 | /* Class code report is unreliable and SRST |
499 | * times out under certain configurations. | 363 | * times out under certain configurations. |
500 | * Config device can be at port 0 or 5 and | 364 | * Config device can be at port 0 or 5 and |
@@ -522,13 +386,6 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
522 | * otherwise. Don't try hard to recover it. | 386 | * otherwise. Don't try hard to recover it. |
523 | */ | 387 | */ |
524 | ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; | 388 | ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; |
525 | } else if (vendor == 0x11ab && devid == 0x4140) { | ||
526 | /* Marvell 88SM4140 quirks. Fan-out ports require PHY | ||
527 | * reset to work; other than that, it behaves very | ||
528 | * nicely. | ||
529 | */ | ||
530 | ata_port_for_each_link(link, ap) | ||
531 | link->flags |= ATA_LFLAG_HRST_TO_RESUME; | ||
532 | } | 389 | } |
533 | } | 390 | } |
534 | 391 | ||
@@ -554,7 +411,7 @@ int sata_pmp_attach(struct ata_device *dev) | |||
554 | int rc; | 411 | int rc; |
555 | 412 | ||
556 | /* is it hanging off the right place? */ | 413 | /* is it hanging off the right place? */ |
557 | if (!(ap->flags & ATA_FLAG_PMP)) { | 414 | if (!sata_pmp_supported(ap)) { |
558 | ata_dev_printk(dev, KERN_ERR, | 415 | ata_dev_printk(dev, KERN_ERR, |
559 | "host does not support Port Multiplier\n"); | 416 | "host does not support Port Multiplier\n"); |
560 | return -EINVAL; | 417 | return -EINVAL; |
@@ -840,13 +697,12 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | |||
840 | retry: | 697 | retry: |
841 | ehc->classes[0] = ATA_DEV_UNKNOWN; | 698 | ehc->classes[0] = ATA_DEV_UNKNOWN; |
842 | 699 | ||
843 | if (ehc->i.action & ATA_EH_RESET_MASK) { | 700 | if (ehc->i.action & ATA_EH_RESET) { |
844 | struct ata_link *tlink; | 701 | struct ata_link *tlink; |
845 | 702 | ||
846 | ata_eh_freeze_port(ap); | 703 | ata_eh_freeze_port(ap); |
847 | 704 | ||
848 | /* reset */ | 705 | /* reset */ |
849 | ehc->i.action = ATA_EH_HARDRESET; | ||
850 | rc = ata_eh_reset(link, 0, prereset, softreset, hardreset, | 706 | rc = ata_eh_reset(link, 0, prereset, softreset, hardreset, |
851 | postreset); | 707 | postreset); |
852 | if (rc) { | 708 | if (rc) { |
@@ -858,8 +714,12 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | |||
858 | ata_eh_thaw_port(ap); | 714 | ata_eh_thaw_port(ap); |
859 | 715 | ||
860 | /* PMP is reset, SErrors cannot be trusted, scan all */ | 716 | /* PMP is reset, SErrors cannot be trusted, scan all */ |
861 | ata_port_for_each_link(tlink, ap) | 717 | ata_port_for_each_link(tlink, ap) { |
862 | ata_ehi_schedule_probe(&tlink->eh_context.i); | 718 | struct ata_eh_context *ehc = &tlink->eh_context; |
719 | |||
720 | ehc->i.probe_mask |= ATA_ALL_DEVICES; | ||
721 | ehc->i.action |= ATA_EH_RESET; | ||
722 | } | ||
863 | } | 723 | } |
864 | 724 | ||
865 | /* If revalidation is requested, revalidate and reconfigure; | 725 | /* If revalidation is requested, revalidate and reconfigure; |
@@ -874,7 +734,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | |||
874 | tries--; | 734 | tries--; |
875 | 735 | ||
876 | if (rc == -ENODEV) { | 736 | if (rc == -ENODEV) { |
877 | ehc->i.probe_mask |= 1; | 737 | ehc->i.probe_mask |= ATA_ALL_DEVICES; |
878 | detach = 1; | 738 | detach = 1; |
879 | /* give it just two more chances */ | 739 | /* give it just two more chances */ |
880 | tries = min(tries, 2); | 740 | tries = min(tries, 2); |
@@ -890,11 +750,11 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, | |||
890 | reval_failed = 1; | 750 | reval_failed = 1; |
891 | 751 | ||
892 | ata_dev_printk(dev, KERN_WARNING, | 752 | ata_dev_printk(dev, KERN_WARNING, |
893 | "retrying hardreset%s\n", | 753 | "retrying reset%s\n", |
894 | sleep ? " in 5 secs" : ""); | 754 | sleep ? " in 5 secs" : ""); |
895 | if (sleep) | 755 | if (sleep) |
896 | ssleep(5); | 756 | ssleep(5); |
897 | ehc->i.action |= ATA_EH_HARDRESET; | 757 | ehc->i.action |= ATA_EH_RESET; |
898 | goto retry; | 758 | goto retry; |
899 | } else { | 759 | } else { |
900 | ata_dev_printk(dev, KERN_ERR, "failed to recover PMP " | 760 | ata_dev_printk(dev, KERN_ERR, "failed to recover PMP " |
@@ -938,10 +798,8 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap) | |||
938 | /* Some PMPs require hardreset sequence to get | 798 | /* Some PMPs require hardreset sequence to get |
939 | * SError.N working. | 799 | * SError.N working. |
940 | */ | 800 | */ |
941 | if ((link->flags & ATA_LFLAG_HRST_TO_RESUME) && | 801 | sata_link_hardreset(link, sata_deb_timing_normal, |
942 | (link->eh_context.i.flags & ATA_EHI_RESUME_LINK)) | 802 | jiffies + ATA_TMOUT_INTERNAL_QUICK, NULL, NULL); |
943 | sata_link_hardreset(link, sata_deb_timing_normal, | ||
944 | jiffies + ATA_TMOUT_INTERNAL_QUICK); | ||
945 | 803 | ||
946 | /* unconditionally clear SError.N */ | 804 | /* unconditionally clear SError.N */ |
947 | rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); | 805 | rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); |
@@ -987,14 +845,6 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries) | |||
987 | /** | 845 | /** |
988 | * sata_pmp_eh_recover - recover PMP-enabled port | 846 | * sata_pmp_eh_recover - recover PMP-enabled port |
989 | * @ap: ATA port to recover | 847 | * @ap: ATA port to recover |
990 | * @prereset: prereset method (can be NULL) | ||
991 | * @softreset: softreset method | ||
992 | * @hardreset: hardreset method | ||
993 | * @postreset: postreset method (can be NULL) | ||
994 | * @pmp_prereset: PMP prereset method (can be NULL) | ||
995 | * @pmp_softreset: PMP softreset method (can be NULL) | ||
996 | * @pmp_hardreset: PMP hardreset method (can be NULL) | ||
997 | * @pmp_postreset: PMP postreset method (can be NULL) | ||
998 | * | 848 | * |
999 | * Drive EH recovery operation for PMP enabled port @ap. This | 849 | * Drive EH recovery operation for PMP enabled port @ap. This |
1000 | * function recovers host and PMP ports with proper retrials and | 850 | * function recovers host and PMP ports with proper retrials and |
@@ -1007,12 +857,9 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries) | |||
1007 | * RETURNS: | 857 | * RETURNS: |
1008 | * 0 on success, -errno on failure. | 858 | * 0 on success, -errno on failure. |
1009 | */ | 859 | */ |
1010 | static int sata_pmp_eh_recover(struct ata_port *ap, | 860 | static int sata_pmp_eh_recover(struct ata_port *ap) |
1011 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1012 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1013 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1014 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset) | ||
1015 | { | 861 | { |
862 | struct ata_port_operations *ops = ap->ops; | ||
1016 | int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; | 863 | int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; |
1017 | struct ata_link *pmp_link = &ap->link; | 864 | struct ata_link *pmp_link = &ap->link; |
1018 | struct ata_device *pmp_dev = pmp_link->device; | 865 | struct ata_device *pmp_dev = pmp_link->device; |
@@ -1029,9 +876,9 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1029 | 876 | ||
1030 | retry: | 877 | retry: |
1031 | /* PMP attached? */ | 878 | /* PMP attached? */ |
1032 | if (!ap->nr_pmp_links) { | 879 | if (!sata_pmp_attached(ap)) { |
1033 | rc = ata_eh_recover(ap, prereset, softreset, hardreset, | 880 | rc = ata_eh_recover(ap, ops->prereset, ops->softreset, |
1034 | postreset, NULL); | 881 | ops->hardreset, ops->postreset, NULL); |
1035 | if (rc) { | 882 | if (rc) { |
1036 | ata_link_for_each_dev(dev, &ap->link) | 883 | ata_link_for_each_dev(dev, &ap->link) |
1037 | ata_dev_disable(dev); | 884 | ata_dev_disable(dev); |
@@ -1049,8 +896,8 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1049 | } | 896 | } |
1050 | 897 | ||
1051 | /* recover pmp */ | 898 | /* recover pmp */ |
1052 | rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset, | 899 | rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset, |
1053 | postreset); | 900 | ops->hardreset, ops->postreset); |
1054 | if (rc) | 901 | if (rc) |
1055 | goto pmp_fail; | 902 | goto pmp_fail; |
1056 | 903 | ||
@@ -1060,8 +907,8 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1060 | goto pmp_fail; | 907 | goto pmp_fail; |
1061 | 908 | ||
1062 | /* recover links */ | 909 | /* recover links */ |
1063 | rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset, | 910 | rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset, |
1064 | pmp_postreset, &link); | 911 | ops->pmp_hardreset, ops->pmp_postreset, &link); |
1065 | if (rc) | 912 | if (rc) |
1066 | goto link_fail; | 913 | goto link_fail; |
1067 | 914 | ||
@@ -1124,7 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1124 | 971 | ||
1125 | link_fail: | 972 | link_fail: |
1126 | if (sata_pmp_handle_link_fail(link, link_tries)) { | 973 | if (sata_pmp_handle_link_fail(link, link_tries)) { |
1127 | pmp_ehc->i.action |= ATA_EH_HARDRESET; | 974 | pmp_ehc->i.action |= ATA_EH_RESET; |
1128 | goto retry; | 975 | goto retry; |
1129 | } | 976 | } |
1130 | 977 | ||
@@ -1136,13 +983,13 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1136 | if (ap->pflags & ATA_PFLAG_UNLOADING) | 983 | if (ap->pflags & ATA_PFLAG_UNLOADING) |
1137 | return rc; | 984 | return rc; |
1138 | 985 | ||
1139 | if (!ap->nr_pmp_links) | 986 | if (!sata_pmp_attached(ap)) |
1140 | goto retry; | 987 | goto retry; |
1141 | 988 | ||
1142 | if (--pmp_tries) { | 989 | if (--pmp_tries) { |
1143 | ata_port_printk(ap, KERN_WARNING, | 990 | ata_port_printk(ap, KERN_WARNING, |
1144 | "failed to recover PMP, retrying in 5 secs\n"); | 991 | "failed to recover PMP, retrying in 5 secs\n"); |
1145 | pmp_ehc->i.action |= ATA_EH_HARDRESET; | 992 | pmp_ehc->i.action |= ATA_EH_RESET; |
1146 | ssleep(5); | 993 | ssleep(5); |
1147 | goto retry; | 994 | goto retry; |
1148 | } | 995 | } |
@@ -1157,16 +1004,8 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1157 | } | 1004 | } |
1158 | 1005 | ||
1159 | /** | 1006 | /** |
1160 | * sata_pmp_do_eh - do standard error handling for PMP-enabled host | 1007 | * sata_pmp_error_handler - do standard error handling for PMP-enabled host |
1161 | * @ap: host port to handle error for | 1008 | * @ap: host port to handle error for |
1162 | * @prereset: prereset method (can be NULL) | ||
1163 | * @softreset: softreset method | ||
1164 | * @hardreset: hardreset method | ||
1165 | * @postreset: postreset method (can be NULL) | ||
1166 | * @pmp_prereset: PMP prereset method (can be NULL) | ||
1167 | * @pmp_softreset: PMP softreset method (can be NULL) | ||
1168 | * @pmp_hardreset: PMP hardreset method (can be NULL) | ||
1169 | * @pmp_postreset: PMP postreset method (can be NULL) | ||
1170 | * | 1009 | * |
1171 | * Perform standard error handling sequence for PMP-enabled host | 1010 | * Perform standard error handling sequence for PMP-enabled host |
1172 | * @ap. | 1011 | * @ap. |
@@ -1174,16 +1013,14 @@ static int sata_pmp_eh_recover(struct ata_port *ap, | |||
1174 | * LOCKING: | 1013 | * LOCKING: |
1175 | * Kernel thread context (may sleep). | 1014 | * Kernel thread context (may sleep). |
1176 | */ | 1015 | */ |
1177 | void sata_pmp_do_eh(struct ata_port *ap, | 1016 | void sata_pmp_error_handler(struct ata_port *ap) |
1178 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1179 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1180 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1181 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset) | ||
1182 | { | 1017 | { |
1183 | ata_eh_autopsy(ap); | 1018 | ata_eh_autopsy(ap); |
1184 | ata_eh_report(ap); | 1019 | ata_eh_report(ap); |
1185 | sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset, | 1020 | sata_pmp_eh_recover(ap); |
1186 | pmp_prereset, pmp_softreset, pmp_hardreset, | ||
1187 | pmp_postreset); | ||
1188 | ata_eh_finish(ap); | 1021 | ata_eh_finish(ap); |
1189 | } | 1022 | } |
1023 | |||
1024 | EXPORT_SYMBOL_GPL(sata_pmp_port_ops); | ||
1025 | EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); | ||
1026 | EXPORT_SYMBOL_GPL(sata_pmp_error_handler); | ||
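With sata_pmp_port_ops exported and sata_pmp_do_eh() folded into sata_pmp_error_handler(), a PMP-capable driver inherits the whole PMP reset/EH chain instead of passing eight method pointers. A hypothetical sketch (my_pmp_aware_ops is an invented name; only symbols exported in this file are used):

	static struct ata_port_operations my_pmp_aware_ops = {
		/* pulls in sata_pmp_error_handler and the pmp_*reset methods */
		.inherits	= &sata_pmp_port_ops,
		.qc_defer	= sata_pmp_qc_defer_cmd_switch,
		/* controller-specific qc_prep/qc_issue/etc. would follow */
	};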
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 15795394b0a8..fedf62de9460 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2393,7 +2393,9 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) | |||
2393 | /* FIXME: is this needed? */ | 2393 | /* FIXME: is this needed? */ |
2394 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 2394 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); |
2395 | 2395 | ||
2396 | ap->ops->tf_read(ap, &qc->tf); | 2396 | #ifdef CONFIG_ATA_SFF |
2397 | ap->ops->sff_tf_read(ap, &qc->tf); | ||
2398 | #endif | ||
2397 | 2399 | ||
2398 | /* fill these in, for the case where they are -not- overwritten */ | 2400 | /* fill these in, for the case where they are -not- overwritten */ |
2399 | cmd->sense_buffer[0] = 0x70; | 2401 | cmd->sense_buffer[0] = 0x70; |
@@ -2615,7 +2617,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2615 | 2617 | ||
2616 | static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) | 2618 | static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) |
2617 | { | 2619 | { |
2618 | if (ap->nr_pmp_links == 0) { | 2620 | if (!sata_pmp_attached(ap)) { |
2619 | if (likely(devno < ata_link_max_devices(&ap->link))) | 2621 | if (likely(devno < ata_link_max_devices(&ap->link))) |
2620 | return &ap->link.device[devno]; | 2622 | return &ap->link.device[devno]; |
2621 | } else { | 2623 | } else { |
@@ -2632,7 +2634,7 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, | |||
2632 | int devno; | 2634 | int devno; |
2633 | 2635 | ||
2634 | /* skip commands not addressed to targets we simulate */ | 2636 | /* skip commands not addressed to targets we simulate */ |
2635 | if (ap->nr_pmp_links == 0) { | 2637 | if (!sata_pmp_attached(ap)) { |
2636 | if (unlikely(scsidev->channel || scsidev->lun)) | 2638 | if (unlikely(scsidev->channel || scsidev->lun)) |
2637 | return NULL; | 2639 | return NULL; |
2638 | devno = scsidev->id; | 2640 | devno = scsidev->id; |
@@ -3490,7 +3492,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3490 | if (lun != SCAN_WILD_CARD && lun) | 3492 | if (lun != SCAN_WILD_CARD && lun) |
3491 | return -EINVAL; | 3493 | return -EINVAL; |
3492 | 3494 | ||
3493 | if (ap->nr_pmp_links == 0) { | 3495 | if (!sata_pmp_attached(ap)) { |
3494 | if (channel != SCAN_WILD_CARD && channel) | 3496 | if (channel != SCAN_WILD_CARD && channel) |
3495 | return -EINVAL; | 3497 | return -EINVAL; |
3496 | devno = id; | 3498 | devno = id; |
@@ -3507,8 +3509,8 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3507 | 3509 | ||
3508 | ata_port_for_each_link(link, ap) { | 3510 | ata_port_for_each_link(link, ap) { |
3509 | struct ata_eh_info *ehi = &link->eh_info; | 3511 | struct ata_eh_info *ehi = &link->eh_info; |
3510 | ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; | 3512 | ehi->probe_mask |= ATA_ALL_DEVICES; |
3511 | ehi->action |= ATA_EH_SOFTRESET; | 3513 | ehi->action |= ATA_EH_RESET; |
3512 | } | 3514 | } |
3513 | } else { | 3515 | } else { |
3514 | struct ata_device *dev = ata_find_dev(ap, devno); | 3516 | struct ata_device *dev = ata_find_dev(ap, devno); |
@@ -3516,8 +3518,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3516 | if (dev) { | 3518 | if (dev) { |
3517 | struct ata_eh_info *ehi = &dev->link->eh_info; | 3519 | struct ata_eh_info *ehi = &dev->link->eh_info; |
3518 | ehi->probe_mask |= 1 << dev->devno; | 3520 | ehi->probe_mask |= 1 << dev->devno; |
3519 | ehi->action |= ATA_EH_SOFTRESET; | 3521 | ehi->action |= ATA_EH_RESET; |
3520 | ehi->flags |= ATA_EHI_RESUME_LINK; | ||
3521 | } else | 3522 | } else |
3522 | rc = -EINVAL; | 3523 | rc = -EINVAL; |
3523 | } | 3524 | } |
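The ap->nr_pmp_links tests above become calls to a sata_pmp_attached() predicate. Given the one-for-one substitution in this hunk, its shape is presumably close to the sketch below (the real helper lives in the libata headers and is stubbed out on kernels built without SATA_PMP support):

    /* Assumed shape of the predicate; not copied from the patch. */
    #ifdef CONFIG_SATA_PMP
    static inline bool sata_pmp_attached(struct ata_port *ap)
    {
            return ap->nr_pmp_links != 0;   /* a port multiplier fans this port out */
    }
    #else
    static inline bool sata_pmp_attached(struct ata_port *ap)
    {
            return false;                   /* PMP support compiled out */
    }
    #endif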
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 20dc572fb45a..15499522e642 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -35,11 +35,377 @@ | |||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/libata.h> | 37 | #include <linux/libata.h> |
38 | #include <linux/highmem.h> | ||
38 | 39 | ||
39 | #include "libata.h" | 40 | #include "libata.h" |
40 | 41 | ||
42 | const struct ata_port_operations ata_sff_port_ops = { | ||
43 | .inherits = &ata_base_port_ops, | ||
44 | |||
45 | .qc_prep = ata_sff_qc_prep, | ||
46 | .qc_issue = ata_sff_qc_issue, | ||
47 | .qc_fill_rtf = ata_sff_qc_fill_rtf, | ||
48 | |||
49 | .freeze = ata_sff_freeze, | ||
50 | .thaw = ata_sff_thaw, | ||
51 | .prereset = ata_sff_prereset, | ||
52 | .softreset = ata_sff_softreset, | ||
53 | .hardreset = sata_sff_hardreset, | ||
54 | .postreset = ata_sff_postreset, | ||
55 | .error_handler = ata_sff_error_handler, | ||
56 | .post_internal_cmd = ata_sff_post_internal_cmd, | ||
57 | |||
58 | .sff_dev_select = ata_sff_dev_select, | ||
59 | .sff_check_status = ata_sff_check_status, | ||
60 | .sff_tf_load = ata_sff_tf_load, | ||
61 | .sff_tf_read = ata_sff_tf_read, | ||
62 | .sff_exec_command = ata_sff_exec_command, | ||
63 | .sff_data_xfer = ata_sff_data_xfer, | ||
64 | .sff_irq_on = ata_sff_irq_on, | ||
65 | .sff_irq_clear = ata_sff_irq_clear, | ||
66 | |||
67 | .port_start = ata_sff_port_start, | ||
68 | }; | ||
69 | |||
70 | const struct ata_port_operations ata_bmdma_port_ops = { | ||
71 | .inherits = &ata_sff_port_ops, | ||
72 | |||
73 | .mode_filter = ata_bmdma_mode_filter, | ||
74 | |||
75 | .bmdma_setup = ata_bmdma_setup, | ||
76 | .bmdma_start = ata_bmdma_start, | ||
77 | .bmdma_stop = ata_bmdma_stop, | ||
78 | .bmdma_status = ata_bmdma_status, | ||
79 | }; | ||
80 | |||
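The two tables above form the root of the new SFF ops hierarchy: ata_bmdma_port_ops inherits from ata_sff_port_ops, which in turn inherits from ata_base_port_ops. A hypothetical BMDMA driver would now declare only its deviations, roughly as sketched below (my_set_piomode is a placeholder for the driver's own timing hook, not a symbol from this patch):

    /* Everything not listed here is filled in from ata_bmdma_port_ops (and,
     * transitively, from ata_sff_port_ops) when the host is registered. */
    static void my_set_piomode(struct ata_port *ap, struct ata_device *adev);

    static struct ata_port_operations my_bmdma_ops = {
            .inherits       = &ata_bmdma_port_ops,
            .cable_detect   = ata_cable_40wire,     /* board-specific choice */
            .set_piomode    = my_set_piomode,       /* placeholder timing hook */
    };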
81 | /** | ||
82 | * ata_fill_sg - Fill PCI IDE PRD table | ||
83 | * @qc: Metadata associated with taskfile to be transferred | ||
84 | * | ||
85 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
86 | * associated with the current disk command. | ||
87 | * | ||
88 | * LOCKING: | ||
89 | * spin_lock_irqsave(host lock) | ||
90 | * | ||
91 | */ | ||
92 | static void ata_fill_sg(struct ata_queued_cmd *qc) | ||
93 | { | ||
94 | struct ata_port *ap = qc->ap; | ||
95 | struct scatterlist *sg; | ||
96 | unsigned int si, pi; | ||
97 | |||
98 | pi = 0; | ||
99 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
100 | u32 addr, offset; | ||
101 | u32 sg_len, len; | ||
102 | |||
103 | /* determine if physical DMA addr spans 64K boundary. | ||
104 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
105 | * truncate dma_addr_t to u32. | ||
106 | */ | ||
107 | addr = (u32) sg_dma_address(sg); | ||
108 | sg_len = sg_dma_len(sg); | ||
109 | |||
110 | while (sg_len) { | ||
111 | offset = addr & 0xffff; | ||
112 | len = sg_len; | ||
113 | if ((offset + sg_len) > 0x10000) | ||
114 | len = 0x10000 - offset; | ||
115 | |||
116 | ap->prd[pi].addr = cpu_to_le32(addr); | ||
117 | ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff); | ||
118 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
119 | |||
120 | pi++; | ||
121 | sg_len -= len; | ||
122 | addr += len; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
127 | } | ||
128 | |||
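To make the 64K-boundary clipping above concrete: a 12 KiB scatterlist segment starting at bus address 0x1F000 straddles the 64 KiB line at 0x20000 and is therefore emitted as two PRD entries. A stand-alone illustration of just that arithmetic (ordinary user-space C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int addr = 0x1F000, sg_len = 0x3000;   /* 12 KiB segment */

            while (sg_len) {
                    unsigned int offset = addr & 0xffff;
                    unsigned int len = sg_len;

                    if (offset + sg_len > 0x10000)
                            len = 0x10000 - offset;         /* clip at the 64K line */

                    printf("PRD entry: addr=0x%05X len=0x%04X\n", addr, len);
                    sg_len -= len;
                    addr += len;
            }
            return 0;       /* prints 0x1F000/0x1000, then 0x20000/0x2000 */
    }

The "dumb" variant that follows differs only in refusing to emit a length field of 0x0000 (which the spec defines as 64 KiB); such a chunk is split into two 32 KiB entries instead.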
129 | /** | ||
130 | * ata_fill_sg_dumb - Fill PCI IDE PRD table | ||
131 | * @qc: Metadata associated with taskfile to be transferred | ||
132 | * | ||
133 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
134 | * associated with the current disk command. Perform the fill | ||
135 | * so that we avoid writing any 64K-length records for | ||
136 | * controllers that don't follow the spec. | ||
137 | * | ||
138 | * LOCKING: | ||
139 | * spin_lock_irqsave(host lock) | ||
140 | * | ||
141 | */ | ||
142 | static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) | ||
143 | { | ||
144 | struct ata_port *ap = qc->ap; | ||
145 | struct scatterlist *sg; | ||
146 | unsigned int si, pi; | ||
147 | |||
148 | pi = 0; | ||
149 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
150 | u32 addr, offset; | ||
151 | u32 sg_len, len, blen; | ||
152 | |||
153 | /* determine if physical DMA addr spans 64K boundary. | ||
154 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
155 | * truncate dma_addr_t to u32. | ||
156 | */ | ||
157 | addr = (u32) sg_dma_address(sg); | ||
158 | sg_len = sg_dma_len(sg); | ||
159 | |||
160 | while (sg_len) { | ||
161 | offset = addr & 0xffff; | ||
162 | len = sg_len; | ||
163 | if ((offset + sg_len) > 0x10000) | ||
164 | len = 0x10000 - offset; | ||
165 | |||
166 | blen = len & 0xffff; | ||
167 | ap->prd[pi].addr = cpu_to_le32(addr); | ||
168 | if (blen == 0) { | ||
169 | /* Some PATA chipsets like the CS5530 can't | ||
170 | cope with 0x0000 meaning 64K as the spec says */ | ||
171 | ap->prd[pi].flags_len = cpu_to_le32(0x8000); | ||
172 | blen = 0x8000; | ||
173 | ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); | ||
174 | } | ||
175 | ap->prd[pi].flags_len = cpu_to_le32(blen); | ||
176 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
177 | |||
178 | pi++; | ||
179 | sg_len -= len; | ||
180 | addr += len; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * ata_sff_qc_prep - Prepare taskfile for submission | ||
189 | * @qc: Metadata associated with taskfile to be prepared | ||
190 | * | ||
191 | * Prepare ATA taskfile for submission. | ||
192 | * | ||
193 | * LOCKING: | ||
194 | * spin_lock_irqsave(host lock) | ||
195 | */ | ||
196 | void ata_sff_qc_prep(struct ata_queued_cmd *qc) | ||
197 | { | ||
198 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
199 | return; | ||
200 | |||
201 | ata_fill_sg(qc); | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * ata_sff_dumb_qc_prep - Prepare taskfile for submission | ||
206 | * @qc: Metadata associated with taskfile to be prepared | ||
207 | * | ||
208 | * Prepare ATA taskfile for submission. | ||
209 | * | ||
210 | * LOCKING: | ||
211 | * spin_lock_irqsave(host lock) | ||
212 | */ | ||
213 | void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc) | ||
214 | { | ||
215 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
216 | return; | ||
217 | |||
218 | ata_fill_sg_dumb(qc); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * ata_sff_check_status - Read device status reg & clear interrupt | ||
223 | * @ap: port where the device is | ||
224 | * | ||
225 | * Reads ATA taskfile status register for currently-selected device | ||
226 | * and return its value. This also clears pending interrupts | ||
227 | * from this device | ||
228 | * | ||
229 | * LOCKING: | ||
230 | * Inherited from caller. | ||
231 | */ | ||
232 | u8 ata_sff_check_status(struct ata_port *ap) | ||
233 | { | ||
234 | return ioread8(ap->ioaddr.status_addr); | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * ata_sff_altstatus - Read device alternate status reg | ||
239 | * @ap: port where the device is | ||
240 | * | ||
241 | * Reads ATA taskfile alternate status register for | ||
242 | * currently-selected device and return its value. | ||
243 | * | ||
244 | * Note: may NOT be used as the check_altstatus() entry in | ||
245 | * ata_port_operations. | ||
246 | * | ||
247 | * LOCKING: | ||
248 | * Inherited from caller. | ||
249 | */ | ||
250 | u8 ata_sff_altstatus(struct ata_port *ap) | ||
251 | { | ||
252 | if (ap->ops->sff_check_altstatus) | ||
253 | return ap->ops->sff_check_altstatus(ap); | ||
254 | |||
255 | return ioread8(ap->ioaddr.altstatus_addr); | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * ata_sff_busy_sleep - sleep until BSY clears, or timeout | ||
260 | * @ap: port containing status register to be polled | ||
261 | * @tmout_pat: impatience timeout | ||
262 | * @tmout: overall timeout | ||
263 | * | ||
264 | * Sleep until ATA Status register bit BSY clears, | ||
265 | * or a timeout occurs. | ||
266 | * | ||
267 | * LOCKING: | ||
268 | * Kernel thread context (may sleep). | ||
269 | * | ||
270 | * RETURNS: | ||
271 | * 0 on success, -errno otherwise. | ||
272 | */ | ||
273 | int ata_sff_busy_sleep(struct ata_port *ap, | ||
274 | unsigned long tmout_pat, unsigned long tmout) | ||
275 | { | ||
276 | unsigned long timer_start, timeout; | ||
277 | u8 status; | ||
278 | |||
279 | status = ata_sff_busy_wait(ap, ATA_BUSY, 300); | ||
280 | timer_start = jiffies; | ||
281 | timeout = timer_start + tmout_pat; | ||
282 | while (status != 0xff && (status & ATA_BUSY) && | ||
283 | time_before(jiffies, timeout)) { | ||
284 | msleep(50); | ||
285 | status = ata_sff_busy_wait(ap, ATA_BUSY, 3); | ||
286 | } | ||
287 | |||
288 | if (status != 0xff && (status & ATA_BUSY)) | ||
289 | ata_port_printk(ap, KERN_WARNING, | ||
290 | "port is slow to respond, please be patient " | ||
291 | "(Status 0x%x)\n", status); | ||
292 | |||
293 | timeout = timer_start + tmout; | ||
294 | while (status != 0xff && (status & ATA_BUSY) && | ||
295 | time_before(jiffies, timeout)) { | ||
296 | msleep(50); | ||
297 | status = ap->ops->sff_check_status(ap); | ||
298 | } | ||
299 | |||
300 | if (status == 0xff) | ||
301 | return -ENODEV; | ||
302 | |||
303 | if (status & ATA_BUSY) { | ||
304 | ata_port_printk(ap, KERN_ERR, "port failed to respond " | ||
305 | "(%lu secs, Status 0x%x)\n", | ||
306 | tmout / HZ, status); | ||
307 | return -EBUSY; | ||
308 | } | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
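As a usage note for ata_sff_busy_sleep() above: both timeouts are in jiffies; the first controls when the "slow to respond" warning fires, the second when the wait gives up. A hypothetical call site (not taken from this patch) might look like:

    /* Warn after 5 seconds of BSY, give up after 30 seconds. */
    static int my_wait_after_reset(struct ata_port *ap)
    {
            int rc = ata_sff_busy_sleep(ap, 5 * HZ, 30 * HZ);

            /* rc: 0 = ready, -ENODEV = status reads 0xff (nothing there),
             * -EBUSY = still BSY after the full 30 seconds. */
            return rc;
    }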
313 | static int ata_sff_check_ready(struct ata_link *link) | ||
314 | { | ||
315 | u8 status = link->ap->ops->sff_check_status(link->ap); | ||
316 | |||
317 | if (!(status & ATA_BUSY)) | ||
318 | return 1; | ||
319 | if (status == 0xff) | ||
320 | return -ENODEV; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * ata_sff_wait_ready - sleep until BSY clears, or timeout | ||
326 | * @link: SFF link to wait ready status for | ||
327 | * @deadline: deadline jiffies for the operation | ||
328 | * | ||
329 | * Sleep until ATA Status register bit BSY clears, or timeout | ||
330 | * occurs. | ||
331 | * | ||
332 | * LOCKING: | ||
333 | * Kernel thread context (may sleep). | ||
334 | * | ||
335 | * RETURNS: | ||
336 | * 0 on success, -errno otherwise. | ||
337 | */ | ||
338 | int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) | ||
339 | { | ||
340 | return ata_wait_ready(link, deadline, ata_sff_check_ready); | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * ata_sff_dev_select - Select device 0/1 on ATA bus | ||
345 | * @ap: ATA channel to manipulate | ||
346 | * @device: ATA device (numbered from zero) to select | ||
347 | * | ||
348 | * Use the method defined in the ATA specification to | ||
349 | * make either device 0, or device 1, active on the | ||
350 | * ATA channel. Works with both PIO and MMIO. | ||
351 | * | ||
352 | * May be used as the dev_select() entry in ata_port_operations. | ||
353 | * | ||
354 | * LOCKING: | ||
355 | * caller. | ||
356 | */ | ||
357 | void ata_sff_dev_select(struct ata_port *ap, unsigned int device) | ||
358 | { | ||
359 | u8 tmp; | ||
360 | |||
361 | if (device == 0) | ||
362 | tmp = ATA_DEVICE_OBS; | ||
363 | else | ||
364 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | ||
365 | |||
366 | iowrite8(tmp, ap->ioaddr.device_addr); | ||
367 | ata_sff_pause(ap); /* needed; also flushes, for mmio */ | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * ata_dev_select - Select device 0/1 on ATA bus | ||
372 | * @ap: ATA channel to manipulate | ||
373 | * @device: ATA device (numbered from zero) to select | ||
374 | * @wait: non-zero to wait for Status register BSY bit to clear | ||
375 | * @can_sleep: non-zero if context allows sleeping | ||
376 | * | ||
377 | * Use the method defined in the ATA specification to | ||
378 | * make either device 0, or device 1, active on the | ||
379 | * ATA channel. | ||
380 | * | ||
381 | * This is a high-level version of ata_sff_dev_select(), which | ||
382 | * additionally provides the services of inserting the proper | ||
383 | * pauses and status polling, where needed. | ||
384 | * | ||
385 | * LOCKING: | ||
386 | * caller. | ||
387 | */ | ||
388 | void ata_dev_select(struct ata_port *ap, unsigned int device, | ||
389 | unsigned int wait, unsigned int can_sleep) | ||
390 | { | ||
391 | if (ata_msg_probe(ap)) | ||
392 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " | ||
393 | "device %u, wait %u\n", device, wait); | ||
394 | |||
395 | if (wait) | ||
396 | ata_wait_idle(ap); | ||
397 | |||
398 | ap->ops->sff_dev_select(ap, device); | ||
399 | |||
400 | if (wait) { | ||
401 | if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) | ||
402 | msleep(150); | ||
403 | ata_wait_idle(ap); | ||
404 | } | ||
405 | } | ||
406 | |||
41 | /** | 407 | /** |
42 | * ata_irq_on - Enable interrupts on a port. | 408 | * ata_sff_irq_on - Enable interrupts on a port. |
43 | * @ap: Port on which interrupts are enabled. | 409 | * @ap: Port on which interrupts are enabled. |
44 | * | 410 | * |
45 | * Enable interrupts on a legacy IDE device using MMIO or PIO, | 411 | * Enable interrupts on a legacy IDE device using MMIO or PIO, |
@@ -48,7 +414,7 @@ | |||
48 | * LOCKING: | 414 | * LOCKING: |
49 | * Inherited from caller. | 415 | * Inherited from caller. |
50 | */ | 416 | */ |
51 | u8 ata_irq_on(struct ata_port *ap) | 417 | u8 ata_sff_irq_on(struct ata_port *ap) |
52 | { | 418 | { |
53 | struct ata_ioports *ioaddr = &ap->ioaddr; | 419 | struct ata_ioports *ioaddr = &ap->ioaddr; |
54 | u8 tmp; | 420 | u8 tmp; |
@@ -60,13 +426,34 @@ u8 ata_irq_on(struct ata_port *ap) | |||
60 | iowrite8(ap->ctl, ioaddr->ctl_addr); | 426 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
61 | tmp = ata_wait_idle(ap); | 427 | tmp = ata_wait_idle(ap); |
62 | 428 | ||
63 | ap->ops->irq_clear(ap); | 429 | ap->ops->sff_irq_clear(ap); |
64 | 430 | ||
65 | return tmp; | 431 | return tmp; |
66 | } | 432 | } |
67 | 433 | ||
68 | /** | 434 | /** |
69 | * ata_tf_load - send taskfile registers to host controller | 435 | * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. |
436 | * @ap: Port associated with this ATA transaction. | ||
437 | * | ||
438 | * Clear interrupt and error flags in DMA status register. | ||
439 | * | ||
440 | * May be used as the irq_clear() entry in ata_port_operations. | ||
441 | * | ||
442 | * LOCKING: | ||
443 | * spin_lock_irqsave(host lock) | ||
444 | */ | ||
445 | void ata_sff_irq_clear(struct ata_port *ap) | ||
446 | { | ||
447 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
448 | |||
449 | if (!mmio) | ||
450 | return; | ||
451 | |||
452 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | ||
453 | } | ||
454 | |||
455 | /** | ||
456 | * ata_sff_tf_load - send taskfile registers to host controller | ||
70 | * @ap: Port to which output is sent | 457 | * @ap: Port to which output is sent |
71 | * @tf: ATA taskfile register set | 458 | * @tf: ATA taskfile register set |
72 | * | 459 | * |
@@ -75,8 +462,7 @@ u8 ata_irq_on(struct ata_port *ap) | |||
75 | * LOCKING: | 462 | * LOCKING: |
76 | * Inherited from caller. | 463 | * Inherited from caller. |
77 | */ | 464 | */ |
78 | 465 | void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |
79 | void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | ||
80 | { | 466 | { |
81 | struct ata_ioports *ioaddr = &ap->ioaddr; | 467 | struct ata_ioports *ioaddr = &ap->ioaddr; |
82 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | 468 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; |
@@ -126,26 +512,7 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
126 | } | 512 | } |
127 | 513 | ||
128 | /** | 514 | /** |
129 | * ata_exec_command - issue ATA command to host controller | 515 | * ata_sff_tf_read - input device's ATA taskfile shadow registers |
130 | * @ap: port to which command is being issued | ||
131 | * @tf: ATA taskfile register set | ||
132 | * | ||
133 | * Issues ATA command, with proper synchronization with interrupt | ||
134 | * handler / other threads. | ||
135 | * | ||
136 | * LOCKING: | ||
137 | * spin_lock_irqsave(host lock) | ||
138 | */ | ||
139 | void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) | ||
140 | { | ||
141 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); | ||
142 | |||
143 | iowrite8(tf->command, ap->ioaddr.command_addr); | ||
144 | ata_pause(ap); | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * ata_tf_read - input device's ATA taskfile shadow registers | ||
149 | * @ap: Port from which input is read | 516 | * @ap: Port from which input is read |
150 | * @tf: ATA taskfile register set for storing input | 517 | * @tf: ATA taskfile register set for storing input |
151 | * | 518 | * |
@@ -157,11 +524,11 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) | |||
157 | * LOCKING: | 524 | * LOCKING: |
158 | * Inherited from caller. | 525 | * Inherited from caller. |
159 | */ | 526 | */ |
160 | void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | 527 | void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
161 | { | 528 | { |
162 | struct ata_ioports *ioaddr = &ap->ioaddr; | 529 | struct ata_ioports *ioaddr = &ap->ioaddr; |
163 | 530 | ||
164 | tf->command = ata_check_status(ap); | 531 | tf->command = ata_sff_check_status(ap); |
165 | tf->feature = ioread8(ioaddr->error_addr); | 532 | tf->feature = ioread8(ioaddr->error_addr); |
166 | tf->nsect = ioread8(ioaddr->nsect_addr); | 533 | tf->nsect = ioread8(ioaddr->nsect_addr); |
167 | tf->lbal = ioread8(ioaddr->lbal_addr); | 534 | tf->lbal = ioread8(ioaddr->lbal_addr); |
@@ -185,165 +552,1028 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
185 | } | 552 | } |
186 | 553 | ||
187 | /** | 554 | /** |
188 | * ata_check_status - Read device status reg & clear interrupt | 555 | * ata_sff_exec_command - issue ATA command to host controller |
189 | * @ap: port where the device is | 556 | * @ap: port to which command is being issued |
557 | * @tf: ATA taskfile register set | ||
190 | * | 558 | * |
191 | * Reads ATA taskfile status register for currently-selected device | 559 | * Issues ATA command, with proper synchronization with interrupt |
192 | * and return its value. This also clears pending interrupts | 560 | * handler / other threads. |
193 | * from this device | ||
194 | * | 561 | * |
195 | * LOCKING: | 562 | * LOCKING: |
196 | * Inherited from caller. | 563 | * spin_lock_irqsave(host lock) |
197 | */ | 564 | */ |
198 | u8 ata_check_status(struct ata_port *ap) | 565 | void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) |
199 | { | 566 | { |
200 | return ioread8(ap->ioaddr.status_addr); | 567 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); |
568 | |||
569 | iowrite8(tf->command, ap->ioaddr.command_addr); | ||
570 | ata_sff_pause(ap); | ||
201 | } | 571 | } |
202 | 572 | ||
203 | /** | 573 | /** |
204 | * ata_altstatus - Read device alternate status reg | 574 | * ata_tf_to_host - issue ATA taskfile to host controller |
205 | * @ap: port where the device is | 575 | * @ap: port to which command is being issued |
576 | * @tf: ATA taskfile register set | ||
206 | * | 577 | * |
207 | * Reads ATA taskfile alternate status register for | 578 | * Issues ATA taskfile register set to ATA host controller, |
208 | * currently-selected device and return its value. | 579 | * with proper synchronization with interrupt handler and |
580 | * other threads. | ||
209 | * | 581 | * |
210 | * Note: may NOT be used as the check_altstatus() entry in | 582 | * LOCKING: |
211 | * ata_port_operations. | 583 | * spin_lock_irqsave(host lock) |
584 | */ | ||
585 | static inline void ata_tf_to_host(struct ata_port *ap, | ||
586 | const struct ata_taskfile *tf) | ||
587 | { | ||
588 | ap->ops->sff_tf_load(ap, tf); | ||
589 | ap->ops->sff_exec_command(ap, tf); | ||
590 | } | ||
591 | |||
592 | /** | ||
593 | * ata_sff_data_xfer - Transfer data by PIO | ||
594 | * @dev: device to target | ||
595 | * @buf: data buffer | ||
596 | * @buflen: buffer length | ||
597 | * @rw: read/write | ||
598 | * | ||
599 | * Transfer data from/to the device data register by PIO. | ||
212 | * | 600 | * |
213 | * LOCKING: | 601 | * LOCKING: |
214 | * Inherited from caller. | 602 | * Inherited from caller. |
603 | * | ||
604 | * RETURNS: | ||
605 | * Bytes consumed. | ||
215 | */ | 606 | */ |
216 | u8 ata_altstatus(struct ata_port *ap) | 607 | unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, |
608 | unsigned int buflen, int rw) | ||
217 | { | 609 | { |
218 | if (ap->ops->check_altstatus) | 610 | struct ata_port *ap = dev->link->ap; |
219 | return ap->ops->check_altstatus(ap); | 611 | void __iomem *data_addr = ap->ioaddr.data_addr; |
612 | unsigned int words = buflen >> 1; | ||
220 | 613 | ||
221 | return ioread8(ap->ioaddr.altstatus_addr); | 614 | /* Transfer multiple of 2 bytes */ |
615 | if (rw == READ) | ||
616 | ioread16_rep(data_addr, buf, words); | ||
617 | else | ||
618 | iowrite16_rep(data_addr, buf, words); | ||
619 | |||
620 | /* Transfer trailing 1 byte, if any. */ | ||
621 | if (unlikely(buflen & 0x01)) { | ||
622 | __le16 align_buf[1] = { 0 }; | ||
623 | unsigned char *trailing_buf = buf + buflen - 1; | ||
624 | |||
625 | if (rw == READ) { | ||
626 | align_buf[0] = cpu_to_le16(ioread16(data_addr)); | ||
627 | memcpy(trailing_buf, align_buf, 1); | ||
628 | } else { | ||
629 | memcpy(align_buf, trailing_buf, 1); | ||
630 | iowrite16(le16_to_cpu(align_buf[0]), data_addr); | ||
631 | } | ||
632 | words++; | ||
633 | } | ||
634 | |||
635 | return words << 1; | ||
222 | } | 636 | } |
223 | 637 | ||
224 | /** | 638 | /** |
225 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | 639 | * ata_sff_data_xfer_noirq - Transfer data by PIO |
226 | * @qc: Info associated with this ATA transaction. | 640 | * @dev: device to target |
641 | * @buf: data buffer | ||
642 | * @buflen: buffer length | ||
643 | * @rw: read/write | ||
644 | * | ||
645 | * Transfer data from/to the device data register by PIO. Do the | ||
646 | * transfer with interrupts disabled. | ||
227 | * | 647 | * |
228 | * LOCKING: | 648 | * LOCKING: |
229 | * spin_lock_irqsave(host lock) | 649 | * Inherited from caller. |
650 | * | ||
651 | * RETURNS: | ||
652 | * Bytes consumed. | ||
230 | */ | 653 | */ |
231 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | 654 | unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, |
655 | unsigned int buflen, int rw) | ||
656 | { | ||
657 | unsigned long flags; | ||
658 | unsigned int consumed; | ||
659 | |||
660 | local_irq_save(flags); | ||
661 | consumed = ata_sff_data_xfer(dev, buf, buflen, rw); | ||
662 | local_irq_restore(flags); | ||
663 | |||
664 | return consumed; | ||
665 | } | ||
666 | |||
667 | /** | ||
668 | * ata_pio_sector - Transfer a sector of data. | ||
669 | * @qc: Command in progress | ||
670 | * | ||
671 | * Transfer qc->sect_size bytes of data from/to the ATA device. | ||
672 | * | ||
673 | * LOCKING: | ||
674 | * Inherited from caller. | ||
675 | */ | ||
676 | static void ata_pio_sector(struct ata_queued_cmd *qc) | ||
232 | { | 677 | { |
678 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
233 | struct ata_port *ap = qc->ap; | 679 | struct ata_port *ap = qc->ap; |
234 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | 680 | struct page *page; |
235 | u8 dmactl; | 681 | unsigned int offset; |
682 | unsigned char *buf; | ||
236 | 683 | ||
237 | /* load PRD table addr. */ | 684 | if (qc->curbytes == qc->nbytes - qc->sect_size) |
238 | mb(); /* make sure PRD table writes are visible to controller */ | 685 | ap->hsm_task_state = HSM_ST_LAST; |
239 | iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
240 | 686 | ||
241 | /* specify data direction, triple-check start bit is clear */ | 687 | page = sg_page(qc->cursg); |
242 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 688 | offset = qc->cursg->offset + qc->cursg_ofs; |
243 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
244 | if (!rw) | ||
245 | dmactl |= ATA_DMA_WR; | ||
246 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
247 | 689 | ||
248 | /* issue r/w command */ | 690 | /* get the current page and offset */ |
249 | ap->ops->exec_command(ap, &qc->tf); | 691 | page = nth_page(page, (offset >> PAGE_SHIFT)); |
692 | offset %= PAGE_SIZE; | ||
693 | |||
694 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | ||
695 | |||
696 | if (PageHighMem(page)) { | ||
697 | unsigned long flags; | ||
698 | |||
699 | /* FIXME: use a bounce buffer */ | ||
700 | local_irq_save(flags); | ||
701 | buf = kmap_atomic(page, KM_IRQ0); | ||
702 | |||
703 | /* do the actual data transfer */ | ||
704 | ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, | ||
705 | do_write); | ||
706 | |||
707 | kunmap_atomic(buf, KM_IRQ0); | ||
708 | local_irq_restore(flags); | ||
709 | } else { | ||
710 | buf = page_address(page); | ||
711 | ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, | ||
712 | do_write); | ||
713 | } | ||
714 | |||
715 | qc->curbytes += qc->sect_size; | ||
716 | qc->cursg_ofs += qc->sect_size; | ||
717 | |||
718 | if (qc->cursg_ofs == qc->cursg->length) { | ||
719 | qc->cursg = sg_next(qc->cursg); | ||
720 | qc->cursg_ofs = 0; | ||
721 | } | ||
250 | } | 722 | } |
251 | 723 | ||
252 | /** | 724 | /** |
253 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | 725 | * ata_pio_sectors - Transfer one or many sectors. |
254 | * @qc: Info associated with this ATA transaction. | 726 | * @qc: Command on going |
727 | * | ||
728 | * Transfer one or many sectors of data from/to the | ||
729 | * ATA device for the DRQ request. | ||
255 | * | 730 | * |
256 | * LOCKING: | 731 | * LOCKING: |
257 | * spin_lock_irqsave(host lock) | 732 | * Inherited from caller. |
258 | */ | 733 | */ |
259 | void ata_bmdma_start(struct ata_queued_cmd *qc) | 734 | static void ata_pio_sectors(struct ata_queued_cmd *qc) |
735 | { | ||
736 | if (is_multi_taskfile(&qc->tf)) { | ||
737 | /* READ/WRITE MULTIPLE */ | ||
738 | unsigned int nsect; | ||
739 | |||
740 | WARN_ON(qc->dev->multi_count == 0); | ||
741 | |||
742 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, | ||
743 | qc->dev->multi_count); | ||
744 | while (nsect--) | ||
745 | ata_pio_sector(qc); | ||
746 | } else | ||
747 | ata_pio_sector(qc); | ||
748 | |||
749 | ata_sff_altstatus(qc->ap); /* flush */ | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * atapi_send_cdb - Write CDB bytes to hardware | ||
754 | * @ap: Port to which ATAPI device is attached. | ||
755 | * @qc: Taskfile currently active | ||
756 | * | ||
757 | * When device has indicated its readiness to accept | ||
758 | * a CDB, this function is called. Send the CDB. | ||
759 | * | ||
760 | * LOCKING: | ||
761 | * caller. | ||
762 | */ | ||
763 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
764 | { | ||
765 | /* send SCSI cdb */ | ||
766 | DPRINTK("send cdb\n"); | ||
767 | WARN_ON(qc->dev->cdb_len < 12); | ||
768 | |||
769 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); | ||
770 | ata_sff_altstatus(ap); /* flush */ | ||
771 | |||
772 | switch (qc->tf.protocol) { | ||
773 | case ATAPI_PROT_PIO: | ||
774 | ap->hsm_task_state = HSM_ST; | ||
775 | break; | ||
776 | case ATAPI_PROT_NODATA: | ||
777 | ap->hsm_task_state = HSM_ST_LAST; | ||
778 | break; | ||
779 | case ATAPI_PROT_DMA: | ||
780 | ap->hsm_task_state = HSM_ST_LAST; | ||
781 | /* initiate bmdma */ | ||
782 | ap->ops->bmdma_start(qc); | ||
783 | break; | ||
784 | } | ||
785 | } | ||
786 | |||
787 | /** | ||
788 | * __atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
789 | * @qc: Command in progress | ||
790 | * @bytes: number of bytes | ||
791 | * | ||
792 | * Transfer data from/to the ATAPI device. | ||
793 | * | ||
794 | * LOCKING: | ||
795 | * Inherited from caller. | ||
796 | * | ||
797 | */ | ||
798 | static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | ||
260 | { | 799 | { |
800 | int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; | ||
261 | struct ata_port *ap = qc->ap; | 801 | struct ata_port *ap = qc->ap; |
262 | u8 dmactl; | 802 | struct ata_device *dev = qc->dev; |
803 | struct ata_eh_info *ehi = &dev->link->eh_info; | ||
804 | struct scatterlist *sg; | ||
805 | struct page *page; | ||
806 | unsigned char *buf; | ||
807 | unsigned int offset, count, consumed; | ||
808 | |||
809 | next_sg: | ||
810 | sg = qc->cursg; | ||
811 | if (unlikely(!sg)) { | ||
812 | ata_ehi_push_desc(ehi, "unexpected or too much trailing data " | ||
813 | "buf=%u cur=%u bytes=%u", | ||
814 | qc->nbytes, qc->curbytes, bytes); | ||
815 | return -1; | ||
816 | } | ||
263 | 817 | ||
264 | /* start host DMA transaction */ | 818 | page = sg_page(sg); |
265 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 819 | offset = sg->offset + qc->cursg_ofs; |
266 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
267 | 820 | ||
268 | /* Strictly, one may wish to issue an ioread8() here, to | 821 | /* get the current page and offset */ |
269 | * flush the mmio write. However, control also passes | 822 | page = nth_page(page, (offset >> PAGE_SHIFT)); |
270 | * to the hardware at this point, and it will interrupt | 823 | offset %= PAGE_SIZE; |
271 | * us when we are to resume control. So, in effect, | 824 | |
272 | * we don't care when the mmio write flushes. | 825 | /* don't overrun current sg */ |
273 | * Further, a read of the DMA status register _immediately_ | 826 | count = min(sg->length - qc->cursg_ofs, bytes); |
274 | * following the write may not be what certain flaky hardware | 827 | |
275 | * is expected, so I think it is best to not add a readb() | 828 | /* don't cross page boundaries */ |
276 | * without first all the MMIO ATA cards/mobos. | 829 | count = min(count, (unsigned int)PAGE_SIZE - offset); |
277 | * Or maybe I'm just being paranoid. | 830 | |
278 | * | 831 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); |
279 | * FIXME: The posting of this write means I/O starts are | 832 | |
280 | * unneccessarily delayed for MMIO | 833 | if (PageHighMem(page)) { |
834 | unsigned long flags; | ||
835 | |||
836 | /* FIXME: use bounce buffer */ | ||
837 | local_irq_save(flags); | ||
838 | buf = kmap_atomic(page, KM_IRQ0); | ||
839 | |||
840 | /* do the actual data transfer */ | ||
841 | consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); | ||
842 | |||
843 | kunmap_atomic(buf, KM_IRQ0); | ||
844 | local_irq_restore(flags); | ||
845 | } else { | ||
846 | buf = page_address(page); | ||
847 | consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); | ||
848 | } | ||
849 | |||
850 | bytes -= min(bytes, consumed); | ||
851 | qc->curbytes += count; | ||
852 | qc->cursg_ofs += count; | ||
853 | |||
854 | if (qc->cursg_ofs == sg->length) { | ||
855 | qc->cursg = sg_next(qc->cursg); | ||
856 | qc->cursg_ofs = 0; | ||
857 | } | ||
858 | |||
859 | /* consumed can be larger than count only for the last transfer */ | ||
860 | WARN_ON(qc->cursg && count != consumed); | ||
861 | |||
862 | if (bytes) | ||
863 | goto next_sg; | ||
864 | return 0; | ||
865 | } | ||
866 | |||
867 | /** | ||
868 | * atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
869 | * @qc: Command in progress | ||
870 | * | ||
871 | * Transfer data from/to the ATAPI device. | ||
872 | * | ||
873 | * LOCKING: | ||
874 | * Inherited from caller. | ||
875 | */ | ||
876 | static void atapi_pio_bytes(struct ata_queued_cmd *qc) | ||
877 | { | ||
878 | struct ata_port *ap = qc->ap; | ||
879 | struct ata_device *dev = qc->dev; | ||
880 | struct ata_eh_info *ehi = &dev->link->eh_info; | ||
881 | unsigned int ireason, bc_lo, bc_hi, bytes; | ||
882 | int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; | ||
883 | |||
884 | /* Abuse qc->result_tf for temp storage of intermediate TF | ||
885 | * here to save some kernel stack usage. | ||
886 | * For normal completion, qc->result_tf is not relevant. For | ||
887 | * error, qc->result_tf is later overwritten by ata_qc_complete(). | ||
888 | * So, the correctness of qc->result_tf is not affected. | ||
281 | */ | 889 | */ |
890 | ap->ops->sff_tf_read(ap, &qc->result_tf); | ||
891 | ireason = qc->result_tf.nsect; | ||
892 | bc_lo = qc->result_tf.lbam; | ||
893 | bc_hi = qc->result_tf.lbah; | ||
894 | bytes = (bc_hi << 8) | bc_lo; | ||
895 | |||
896 | /* shall be cleared to zero, indicating xfer of data */ | ||
897 | if (unlikely(ireason & (1 << 0))) | ||
898 | goto atapi_check; | ||
899 | |||
900 | /* make sure transfer direction matches expected */ | ||
901 | i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; | ||
902 | if (unlikely(do_write != i_write)) | ||
903 | goto atapi_check; | ||
904 | |||
905 | if (unlikely(!bytes)) | ||
906 | goto atapi_check; | ||
907 | |||
908 | VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); | ||
909 | |||
910 | if (unlikely(__atapi_pio_bytes(qc, bytes))) | ||
911 | goto err_out; | ||
912 | ata_sff_altstatus(ap); /* flush */ | ||
913 | |||
914 | return; | ||
915 | |||
916 | atapi_check: | ||
917 | ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", | ||
918 | ireason, bytes); | ||
919 | err_out: | ||
920 | qc->err_mask |= AC_ERR_HSM; | ||
921 | ap->hsm_task_state = HSM_ST_ERR; | ||
282 | } | 922 | } |
283 | 923 | ||
284 | /** | 924 | /** |
285 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | 925 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. |
286 | * @ap: Port associated with this ATA transaction. | 926 | * @ap: the target ata_port |
927 | * @qc: qc in progress | ||
287 | * | 928 | * |
288 | * Clear interrupt and error flags in DMA status register. | 929 | * RETURNS: |
930 | * 1 if ok in workqueue, 0 otherwise. | ||
931 | */ | ||
932 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
933 | { | ||
934 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
935 | return 1; | ||
936 | |||
937 | if (ap->hsm_task_state == HSM_ST_FIRST) { | ||
938 | if (qc->tf.protocol == ATA_PROT_PIO && | ||
939 | (qc->tf.flags & ATA_TFLAG_WRITE)) | ||
940 | return 1; | ||
941 | |||
942 | if (ata_is_atapi(qc->tf.protocol) && | ||
943 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
944 | return 1; | ||
945 | } | ||
946 | |||
947 | return 0; | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * ata_hsm_qc_complete - finish a qc running on standard HSM | ||
952 | * @qc: Command to complete | ||
953 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
289 | * | 954 | * |
290 | * May be used as the irq_clear() entry in ata_port_operations. | 955 | * Finish @qc which is running on standard HSM. |
291 | * | 956 | * |
292 | * LOCKING: | 957 | * LOCKING: |
293 | * spin_lock_irqsave(host lock) | 958 | * If @in_wq is zero, spin_lock_irqsave(host lock). |
959 | * Otherwise, none on entry and grabs host lock. | ||
294 | */ | 960 | */ |
295 | void ata_bmdma_irq_clear(struct ata_port *ap) | 961 | static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) |
296 | { | 962 | { |
297 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 963 | struct ata_port *ap = qc->ap; |
964 | unsigned long flags; | ||
298 | 965 | ||
299 | if (!mmio) | 966 | if (ap->ops->error_handler) { |
300 | return; | 967 | if (in_wq) { |
968 | spin_lock_irqsave(ap->lock, flags); | ||
969 | |||
970 | /* EH might have kicked in while host lock is | ||
971 | * released. | ||
972 | */ | ||
973 | qc = ata_qc_from_tag(ap, qc->tag); | ||
974 | if (qc) { | ||
975 | if (likely(!(qc->err_mask & AC_ERR_HSM))) { | ||
976 | ap->ops->sff_irq_on(ap); | ||
977 | ata_qc_complete(qc); | ||
978 | } else | ||
979 | ata_port_freeze(ap); | ||
980 | } | ||
981 | |||
982 | spin_unlock_irqrestore(ap->lock, flags); | ||
983 | } else { | ||
984 | if (likely(!(qc->err_mask & AC_ERR_HSM))) | ||
985 | ata_qc_complete(qc); | ||
986 | else | ||
987 | ata_port_freeze(ap); | ||
988 | } | ||
989 | } else { | ||
990 | if (in_wq) { | ||
991 | spin_lock_irqsave(ap->lock, flags); | ||
992 | ap->ops->sff_irq_on(ap); | ||
993 | ata_qc_complete(qc); | ||
994 | spin_unlock_irqrestore(ap->lock, flags); | ||
995 | } else | ||
996 | ata_qc_complete(qc); | ||
997 | } | ||
998 | } | ||
301 | 999 | ||
302 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | 1000 | /** |
1001 | * ata_sff_hsm_move - move the HSM to the next state. | ||
1002 | * @ap: the target ata_port | ||
1003 | * @qc: qc on going | ||
1004 | * @status: current device status | ||
1005 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
1006 | * | ||
1007 | * RETURNS: | ||
1008 | * 1 when poll next status needed, 0 otherwise. | ||
1009 | */ | ||
1010 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
1011 | u8 status, int in_wq) | ||
1012 | { | ||
1013 | unsigned long flags = 0; | ||
1014 | int poll_next; | ||
1015 | |||
1016 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | ||
1017 | |||
1018 | /* Make sure ata_sff_qc_issue() does not throw things | ||
1019 | * like DMA polling into the workqueue. Notice that | ||
1020 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | ||
1021 | */ | ||
1022 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | ||
1023 | |||
1024 | fsm_start: | ||
1025 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | ||
1026 | ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); | ||
1027 | |||
1028 | switch (ap->hsm_task_state) { | ||
1029 | case HSM_ST_FIRST: | ||
1030 | /* Send first data block or PACKET CDB */ | ||
1031 | |||
1032 | /* If polling, we will stay in the work queue after | ||
1033 | * sending the data. Otherwise, interrupt handler | ||
1034 | * takes over after sending the data. | ||
1035 | */ | ||
1036 | poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); | ||
1037 | |||
1038 | /* check device status */ | ||
1039 | if (unlikely((status & ATA_DRQ) == 0)) { | ||
1040 | /* handle BSY=0, DRQ=0 as error */ | ||
1041 | if (likely(status & (ATA_ERR | ATA_DF))) | ||
1042 | /* device stops HSM for abort/error */ | ||
1043 | qc->err_mask |= AC_ERR_DEV; | ||
1044 | else | ||
1045 | /* HSM violation. Let EH handle this */ | ||
1046 | qc->err_mask |= AC_ERR_HSM; | ||
1047 | |||
1048 | ap->hsm_task_state = HSM_ST_ERR; | ||
1049 | goto fsm_start; | ||
1050 | } | ||
1051 | |||
1052 | /* Device should not ask for data transfer (DRQ=1) | ||
1053 | * when it finds something wrong. | ||
1054 | * We ignore DRQ here and stop the HSM by | ||
1055 | * changing hsm_task_state to HSM_ST_ERR and | ||
1056 | * let the EH abort the command or reset the device. | ||
1057 | */ | ||
1058 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
1059 | /* Some ATAPI tape drives forget to clear the ERR bit | ||
1060 | * when doing the next command (mostly request sense). | ||
1061 | * We ignore ERR here to workaround and proceed sending | ||
1062 | * the CDB. | ||
1063 | */ | ||
1064 | if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { | ||
1065 | ata_port_printk(ap, KERN_WARNING, | ||
1066 | "DRQ=1 with device error, " | ||
1067 | "dev_stat 0x%X\n", status); | ||
1068 | qc->err_mask |= AC_ERR_HSM; | ||
1069 | ap->hsm_task_state = HSM_ST_ERR; | ||
1070 | goto fsm_start; | ||
1071 | } | ||
1072 | } | ||
1073 | |||
1074 | /* Send the CDB (atapi) or the first data block (ata pio out). | ||
1075 | * During the state transition, interrupt handler shouldn't | ||
1076 | * be invoked before the data transfer is complete and | ||
1077 | * hsm_task_state is changed. Hence, the following locking. | ||
1078 | */ | ||
1079 | if (in_wq) | ||
1080 | spin_lock_irqsave(ap->lock, flags); | ||
1081 | |||
1082 | if (qc->tf.protocol == ATA_PROT_PIO) { | ||
1083 | /* PIO data out protocol. | ||
1084 | * send first data block. | ||
1085 | */ | ||
1086 | |||
1087 | /* ata_pio_sectors() might change the state | ||
1088 | * to HSM_ST_LAST. so, the state is changed here | ||
1089 | * before ata_pio_sectors(). | ||
1090 | */ | ||
1091 | ap->hsm_task_state = HSM_ST; | ||
1092 | ata_pio_sectors(qc); | ||
1093 | } else | ||
1094 | /* send CDB */ | ||
1095 | atapi_send_cdb(ap, qc); | ||
1096 | |||
1097 | if (in_wq) | ||
1098 | spin_unlock_irqrestore(ap->lock, flags); | ||
1099 | |||
1100 | /* if polling, ata_pio_task() handles the rest. | ||
1101 | * otherwise, interrupt handler takes over from here. | ||
1102 | */ | ||
1103 | break; | ||
1104 | |||
1105 | case HSM_ST: | ||
1106 | /* complete command or read/write the data register */ | ||
1107 | if (qc->tf.protocol == ATAPI_PROT_PIO) { | ||
1108 | /* ATAPI PIO protocol */ | ||
1109 | if ((status & ATA_DRQ) == 0) { | ||
1110 | /* No more data to transfer or device error. | ||
1111 | * Device error will be tagged in HSM_ST_LAST. | ||
1112 | */ | ||
1113 | ap->hsm_task_state = HSM_ST_LAST; | ||
1114 | goto fsm_start; | ||
1115 | } | ||
1116 | |||
1117 | /* Device should not ask for data transfer (DRQ=1) | ||
1118 | * when it finds something wrong. | ||
1119 | * We ignore DRQ here and stop the HSM by | ||
1120 | * changing hsm_task_state to HSM_ST_ERR and | ||
1121 | * let the EH abort the command or reset the device. | ||
1122 | */ | ||
1123 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
1124 | ata_port_printk(ap, KERN_WARNING, "DRQ=1 with " | ||
1125 | "device error, dev_stat 0x%X\n", | ||
1126 | status); | ||
1127 | qc->err_mask |= AC_ERR_HSM; | ||
1128 | ap->hsm_task_state = HSM_ST_ERR; | ||
1129 | goto fsm_start; | ||
1130 | } | ||
1131 | |||
1132 | atapi_pio_bytes(qc); | ||
1133 | |||
1134 | if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) | ||
1135 | /* bad ireason reported by device */ | ||
1136 | goto fsm_start; | ||
1137 | |||
1138 | } else { | ||
1139 | /* ATA PIO protocol */ | ||
1140 | if (unlikely((status & ATA_DRQ) == 0)) { | ||
1141 | /* handle BSY=0, DRQ=0 as error */ | ||
1142 | if (likely(status & (ATA_ERR | ATA_DF))) | ||
1143 | /* device stops HSM for abort/error */ | ||
1144 | qc->err_mask |= AC_ERR_DEV; | ||
1145 | else | ||
1146 | /* HSM violation. Let EH handle this. | ||
1147 | * Phantom devices also trigger this | ||
1148 | * condition. Mark hint. | ||
1149 | */ | ||
1150 | qc->err_mask |= AC_ERR_HSM | | ||
1151 | AC_ERR_NODEV_HINT; | ||
1152 | |||
1153 | ap->hsm_task_state = HSM_ST_ERR; | ||
1154 | goto fsm_start; | ||
1155 | } | ||
1156 | |||
1157 | /* For PIO reads, some devices may ask for | ||
1158 | * data transfer (DRQ=1) along with ERR=1. | ||
1159 | * We respect DRQ here and transfer one | ||
1160 | * block of junk data before changing the | ||
1161 | * hsm_task_state to HSM_ST_ERR. | ||
1162 | * | ||
1163 | * For PIO writes, ERR=1 DRQ=1 doesn't make | ||
1164 | * sense since the data block has been | ||
1165 | * transferred to the device. | ||
1166 | */ | ||
1167 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
1168 | /* data might be corrupted */ | ||
1169 | qc->err_mask |= AC_ERR_DEV; | ||
1170 | |||
1171 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | ||
1172 | ata_pio_sectors(qc); | ||
1173 | status = ata_wait_idle(ap); | ||
1174 | } | ||
1175 | |||
1176 | if (status & (ATA_BUSY | ATA_DRQ)) | ||
1177 | qc->err_mask |= AC_ERR_HSM; | ||
1178 | |||
1179 | /* ata_pio_sectors() might change the | ||
1180 | * state to HSM_ST_LAST. so, the state | ||
1181 | * is changed after ata_pio_sectors(). | ||
1182 | */ | ||
1183 | ap->hsm_task_state = HSM_ST_ERR; | ||
1184 | goto fsm_start; | ||
1185 | } | ||
1186 | |||
1187 | ata_pio_sectors(qc); | ||
1188 | |||
1189 | if (ap->hsm_task_state == HSM_ST_LAST && | ||
1190 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | ||
1191 | /* all data read */ | ||
1192 | status = ata_wait_idle(ap); | ||
1193 | goto fsm_start; | ||
1194 | } | ||
1195 | } | ||
1196 | |||
1197 | poll_next = 1; | ||
1198 | break; | ||
1199 | |||
1200 | case HSM_ST_LAST: | ||
1201 | if (unlikely(!ata_ok(status))) { | ||
1202 | qc->err_mask |= __ac_err_mask(status); | ||
1203 | ap->hsm_task_state = HSM_ST_ERR; | ||
1204 | goto fsm_start; | ||
1205 | } | ||
1206 | |||
1207 | /* no more data to transfer */ | ||
1208 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | ||
1209 | ap->print_id, qc->dev->devno, status); | ||
1210 | |||
1211 | WARN_ON(qc->err_mask); | ||
1212 | |||
1213 | ap->hsm_task_state = HSM_ST_IDLE; | ||
1214 | |||
1215 | /* complete taskfile transaction */ | ||
1216 | ata_hsm_qc_complete(qc, in_wq); | ||
1217 | |||
1218 | poll_next = 0; | ||
1219 | break; | ||
1220 | |||
1221 | case HSM_ST_ERR: | ||
1222 | /* make sure qc->err_mask is available to | ||
1223 | * know what's wrong and recover | ||
1224 | */ | ||
1225 | WARN_ON(qc->err_mask == 0); | ||
1226 | |||
1227 | ap->hsm_task_state = HSM_ST_IDLE; | ||
1228 | |||
1229 | /* complete taskfile transaction */ | ||
1230 | ata_hsm_qc_complete(qc, in_wq); | ||
1231 | |||
1232 | poll_next = 0; | ||
1233 | break; | ||
1234 | default: | ||
1235 | poll_next = 0; | ||
1236 | BUG(); | ||
1237 | } | ||
1238 | |||
1239 | return poll_next; | ||
1240 | } | ||
1241 | |||
1242 | void ata_pio_task(struct work_struct *work) | ||
1243 | { | ||
1244 | struct ata_port *ap = | ||
1245 | container_of(work, struct ata_port, port_task.work); | ||
1246 | struct ata_queued_cmd *qc = ap->port_task_data; | ||
1247 | u8 status; | ||
1248 | int poll_next; | ||
1249 | |||
1250 | fsm_start: | ||
1251 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); | ||
1252 | |||
1253 | /* | ||
1254 | * This is purely heuristic. This is a fast path. | ||
1255 | * Sometimes when we enter, BSY will be cleared in | ||
1256 | * a chk-status or two. If not, the drive is probably seeking | ||
1257 | * or something. Snooze for a couple msecs, then | ||
1258 | * chk-status again. If still busy, queue delayed work. | ||
1259 | */ | ||
1260 | status = ata_sff_busy_wait(ap, ATA_BUSY, 5); | ||
1261 | if (status & ATA_BUSY) { | ||
1262 | msleep(2); | ||
1263 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); | ||
1264 | if (status & ATA_BUSY) { | ||
1265 | ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); | ||
1266 | return; | ||
1267 | } | ||
1268 | } | ||
1269 | |||
1270 | /* move the HSM */ | ||
1271 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); | ||
1272 | |||
1273 | /* another command or interrupt handler | ||
1274 | * may be running at this point. | ||
1275 | */ | ||
1276 | if (poll_next) | ||
1277 | goto fsm_start; | ||
303 | } | 1278 | } |
304 | 1279 | ||
305 | /** | 1280 | /** |
306 | * ata_bmdma_status - Read PCI IDE BMDMA status | 1281 | * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner |
307 | * @ap: Port associated with this ATA transaction. | 1282 | * @qc: command to issue to device |
308 | * | 1283 | * |
309 | * Read and return BMDMA status register. | 1284 | * Using various libata functions and hooks, this function |
1285 | * starts an ATA command. ATA commands are grouped into | ||
1286 | * classes called "protocols", and issuing each type of protocol | ||
1287 | * is slightly different. | ||
310 | * | 1288 | * |
311 | * May be used as the bmdma_status() entry in ata_port_operations. | 1289 | * May be used as the qc_issue() entry in ata_port_operations. |
312 | * | 1290 | * |
313 | * LOCKING: | 1291 | * LOCKING: |
314 | * spin_lock_irqsave(host lock) | 1292 | * spin_lock_irqsave(host lock) |
1293 | * | ||
1294 | * RETURNS: | ||
1295 | * Zero on success, AC_ERR_* mask on failure | ||
315 | */ | 1296 | */ |
316 | u8 ata_bmdma_status(struct ata_port *ap) | 1297 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) |
317 | { | 1298 | { |
318 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 1299 | struct ata_port *ap = qc->ap; |
1300 | |||
1301 | /* Use polling pio if the LLD doesn't handle | ||
1302 | * interrupt driven pio and atapi CDB interrupt. | ||
1303 | */ | ||
1304 | if (ap->flags & ATA_FLAG_PIO_POLLING) { | ||
1305 | switch (qc->tf.protocol) { | ||
1306 | case ATA_PROT_PIO: | ||
1307 | case ATA_PROT_NODATA: | ||
1308 | case ATAPI_PROT_PIO: | ||
1309 | case ATAPI_PROT_NODATA: | ||
1310 | qc->tf.flags |= ATA_TFLAG_POLLING; | ||
1311 | break; | ||
1312 | case ATAPI_PROT_DMA: | ||
1313 | if (qc->dev->flags & ATA_DFLAG_CDB_INTR) | ||
1314 | /* see ata_dma_blacklisted() */ | ||
1315 | BUG(); | ||
1316 | break; | ||
1317 | default: | ||
1318 | break; | ||
1319 | } | ||
1320 | } | ||
1321 | |||
1322 | /* select the device */ | ||
1323 | ata_dev_select(ap, qc->dev->devno, 1, 0); | ||
1324 | |||
1325 | /* start the command */ | ||
1326 | switch (qc->tf.protocol) { | ||
1327 | case ATA_PROT_NODATA: | ||
1328 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
1329 | ata_qc_set_polling(qc); | ||
1330 | |||
1331 | ata_tf_to_host(ap, &qc->tf); | ||
1332 | ap->hsm_task_state = HSM_ST_LAST; | ||
1333 | |||
1334 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
1335 | ata_pio_queue_task(ap, qc, 0); | ||
1336 | |||
1337 | break; | ||
1338 | |||
1339 | case ATA_PROT_DMA: | ||
1340 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
1341 | |||
1342 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | ||
1343 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
1344 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
1345 | ap->hsm_task_state = HSM_ST_LAST; | ||
1346 | break; | ||
1347 | |||
1348 | case ATA_PROT_PIO: | ||
1349 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
1350 | ata_qc_set_polling(qc); | ||
1351 | |||
1352 | ata_tf_to_host(ap, &qc->tf); | ||
1353 | |||
1354 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
1355 | /* PIO data out protocol */ | ||
1356 | ap->hsm_task_state = HSM_ST_FIRST; | ||
1357 | ata_pio_queue_task(ap, qc, 0); | ||
1358 | |||
1359 | /* always send first data block using | ||
1360 | * the ata_pio_task() codepath. | ||
1361 | */ | ||
1362 | } else { | ||
1363 | /* PIO data in protocol */ | ||
1364 | ap->hsm_task_state = HSM_ST; | ||
1365 | |||
1366 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
1367 | ata_pio_queue_task(ap, qc, 0); | ||
1368 | |||
1369 | /* if polling, ata_pio_task() handles the rest. | ||
1370 | * otherwise, interrupt handler takes over from here. | ||
1371 | */ | ||
1372 | } | ||
1373 | |||
1374 | break; | ||
1375 | |||
1376 | case ATAPI_PROT_PIO: | ||
1377 | case ATAPI_PROT_NODATA: | ||
1378 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
1379 | ata_qc_set_polling(qc); | ||
1380 | |||
1381 | ata_tf_to_host(ap, &qc->tf); | ||
1382 | |||
1383 | ap->hsm_task_state = HSM_ST_FIRST; | ||
1384 | |||
1385 | /* send cdb by polling if no cdb interrupt */ | ||
1386 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | ||
1387 | (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
1388 | ata_pio_queue_task(ap, qc, 0); | ||
1389 | break; | ||
1390 | |||
1391 | case ATAPI_PROT_DMA: | ||
1392 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
1393 | |||
1394 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | ||
1395 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
1396 | ap->hsm_task_state = HSM_ST_FIRST; | ||
1397 | |||
1398 | /* send cdb by polling if no cdb interrupt */ | ||
1399 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
1400 | ata_pio_queue_task(ap, qc, 0); | ||
1401 | break; | ||
1402 | |||
1403 | default: | ||
1404 | WARN_ON(1); | ||
1405 | return AC_ERR_SYSTEM; | ||
1406 | } | ||
1407 | |||
1408 | return 0; | ||
319 | } | 1409 | } |
320 | 1410 | ||
321 | /** | 1411 | /** |
322 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | 1412 | * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read |
323 | * @qc: Command we are ending DMA for | 1413 | * @qc: qc to fill result TF for |
324 | * | 1414 | * |
325 | * Clears the ATA_DMA_START flag in the dma control register | 1415 | * @qc is finished and result TF needs to be filled. Fill it |
1416 | * using ->sff_tf_read. | ||
326 | * | 1417 | * |
327 | * May be used as the bmdma_stop() entry in ata_port_operations. | 1418 | * LOCKING: |
1419 | * spin_lock_irqsave(host lock) | ||
1420 | * | ||
1421 | * RETURNS: | ||
1422 | * true indicating that result TF is successfully filled. | ||
1423 | */ | ||
1424 | bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) | ||
1425 | { | ||
1426 | qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); | ||
1427 | return true; | ||
1428 | } | ||
1429 | |||
1430 | /** | ||
1431 | * ata_sff_host_intr - Handle host interrupt for given (port, task) | ||
1432 | * @ap: Port on which interrupt arrived (possibly...) | ||
1433 | * @qc: Taskfile currently active in engine | ||
1434 | * | ||
1435 | * Handle host interrupt for given queued command. Currently, | ||
1436 | * only DMA interrupts are handled. All other commands are | ||
1437 | * handled via polling with interrupts disabled (nIEN bit). | ||
328 | * | 1438 | * |
329 | * LOCKING: | 1439 | * LOCKING: |
330 | * spin_lock_irqsave(host lock) | 1440 | * spin_lock_irqsave(host lock) |
1441 | * | ||
1442 | * RETURNS: | ||
1443 | * One if interrupt was handled, zero if not (shared irq). | ||
331 | */ | 1444 | */ |
332 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | 1445 | inline unsigned int ata_sff_host_intr(struct ata_port *ap, |
1446 | struct ata_queued_cmd *qc) | ||
333 | { | 1447 | { |
334 | struct ata_port *ap = qc->ap; | 1448 | struct ata_eh_info *ehi = &ap->link.eh_info; |
335 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 1449 | u8 status, host_stat = 0; |
336 | 1450 | ||
337 | /* clear start/stop bit */ | 1451 | VPRINTK("ata%u: protocol %d task_state %d\n", |
338 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | 1452 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); |
339 | mmio + ATA_DMA_CMD); | ||
340 | 1453 | ||
341 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | 1454 | /* Check whether we are expecting interrupt in this state */ |
342 | ata_altstatus(ap); /* dummy read */ | 1455 | switch (ap->hsm_task_state) { |
1456 | case HSM_ST_FIRST: | ||
1457 | /* Some pre-ATAPI-4 devices assert INTRQ | ||
1458 | * at this state when ready to receive CDB. | ||
1459 | */ | ||
1460 | |||
1461 | /* Checking the ATA_DFLAG_CDB_INTR flag is enough here. | ||
1462 | * The flag is set only for ATAPI devices, so there is no | ||
1463 | * need to check ata_is_atapi(qc->tf.protocol) again. | ||
1464 | */ | ||
1465 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
1466 | goto idle_irq; | ||
1467 | break; | ||
1468 | case HSM_ST_LAST: | ||
1469 | if (qc->tf.protocol == ATA_PROT_DMA || | ||
1470 | qc->tf.protocol == ATAPI_PROT_DMA) { | ||
1471 | /* check status of DMA engine */ | ||
1472 | host_stat = ap->ops->bmdma_status(ap); | ||
1473 | VPRINTK("ata%u: host_stat 0x%X\n", | ||
1474 | ap->print_id, host_stat); | ||
1475 | |||
1476 | /* if it's not our irq... */ | ||
1477 | if (!(host_stat & ATA_DMA_INTR)) | ||
1478 | goto idle_irq; | ||
1479 | |||
1480 | /* before we do anything else, clear DMA-Start bit */ | ||
1481 | ap->ops->bmdma_stop(qc); | ||
1482 | |||
1483 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
1484 | /* error when transferring data to/from memory */ | ||
1485 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
1486 | ap->hsm_task_state = HSM_ST_ERR; | ||
1487 | } | ||
1488 | } | ||
1489 | break; | ||
1490 | case HSM_ST: | ||
1491 | break; | ||
1492 | default: | ||
1493 | goto idle_irq; | ||
1494 | } | ||
1495 | |||
1496 | /* check altstatus */ | ||
1497 | status = ata_sff_altstatus(ap); | ||
1498 | if (status & ATA_BUSY) | ||
1499 | goto idle_irq; | ||
1500 | |||
1501 | /* check main status, clearing INTRQ */ | ||
1502 | status = ap->ops->sff_check_status(ap); | ||
1503 | if (unlikely(status & ATA_BUSY)) | ||
1504 | goto idle_irq; | ||
1505 | |||
1506 | /* ack bmdma irq events */ | ||
1507 | ap->ops->sff_irq_clear(ap); | ||
1508 | |||
1509 | ata_sff_hsm_move(ap, qc, status, 0); | ||
1510 | |||
1511 | if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || | ||
1512 | qc->tf.protocol == ATAPI_PROT_DMA)) | ||
1513 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | ||
1514 | |||
1515 | return 1; /* irq handled */ | ||
1516 | |||
1517 | idle_irq: | ||
1518 | ap->stats.idle_irq++; | ||
1519 | |||
1520 | #ifdef ATA_IRQ_TRAP | ||
1521 | if ((ap->stats.idle_irq % 1000) == 0) { | ||
1522 | ap->ops->sff_check_status(ap); | ||
1523 | ap->ops->sff_irq_clear(ap); | ||
1524 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | ||
1525 | return 1; | ||
1526 | } | ||
1527 | #endif | ||
1528 | return 0; /* irq not handled */ | ||
1529 | } | ||
1530 | |||
1531 | /** | ||
1532 | * ata_sff_interrupt - Default ATA host interrupt handler | ||
1533 | * @irq: irq line (unused) | ||
1534 | * @dev_instance: pointer to our ata_host information structure | ||
1535 | * | ||
1536 | * Default interrupt handler for PCI IDE devices. Calls | ||
1537 | * ata_sff_host_intr() for each port that is not disabled. | ||
1538 | * | ||
1539 | * LOCKING: | ||
1540 | * Obtains host lock during operation. | ||
1541 | * | ||
1542 | * RETURNS: | ||
1543 | * IRQ_NONE or IRQ_HANDLED. | ||
1544 | */ | ||
1545 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | ||
1546 | { | ||
1547 | struct ata_host *host = dev_instance; | ||
1548 | unsigned int i; | ||
1549 | unsigned int handled = 0; | ||
1550 | unsigned long flags; | ||
1551 | |||
1552 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ | ||
1553 | spin_lock_irqsave(&host->lock, flags); | ||
1554 | |||
1555 | for (i = 0; i < host->n_ports; i++) { | ||
1556 | struct ata_port *ap; | ||
1557 | |||
1558 | ap = host->ports[i]; | ||
1559 | if (ap && | ||
1560 | !(ap->flags & ATA_FLAG_DISABLED)) { | ||
1561 | struct ata_queued_cmd *qc; | ||
1562 | |||
1563 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | ||
1564 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && | ||
1565 | (qc->flags & ATA_QCFLAG_ACTIVE)) | ||
1566 | handled |= ata_sff_host_intr(ap, qc); | ||
1567 | } | ||
1568 | } | ||
1569 | |||
1570 | spin_unlock_irqrestore(&host->lock, flags); | ||
1571 | |||
1572 | return IRQ_RETVAL(handled); | ||
343 | } | 1573 | } |
344 | 1574 | ||
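Drivers whose controller signals interrupts through a chip-specific register can keep the per-port service routine above and only replace this outer loop. The following is a minimal sketch, not code from this patch: foo_interrupt() and foo_irq_pending() are hypothetical driver-side names, while the libata calls are the ones exported by this file.

static int foo_irq_pending(struct ata_port *ap);	/* chip-specific test, assumed to exist in the driver */

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i, handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;
		if (!foo_irq_pending(ap))		/* hypothetical pending-IRQ check */
			continue;
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING) &&
		    (qc->flags & ATA_QCFLAG_ACTIVE))
			handled |= ata_sff_host_intr(ap, qc);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}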
345 | /** | 1575 | /** |
346 | * ata_bmdma_freeze - Freeze BMDMA controller port | 1576 | * ata_sff_freeze - Freeze SFF controller port |
347 | * @ap: port to freeze | 1577 | * @ap: port to freeze |
348 | * | 1578 | * |
349 | * Freeze BMDMA controller port. | 1579 | * Freeze BMDMA controller port. |
@@ -351,7 +1581,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc) | |||
351 | * LOCKING: | 1581 | * LOCKING: |
352 | * Inherited from caller. | 1582 | * Inherited from caller. |
353 | */ | 1583 | */ |
354 | void ata_bmdma_freeze(struct ata_port *ap) | 1584 | void ata_sff_freeze(struct ata_port *ap) |
355 | { | 1585 | { |
356 | struct ata_ioports *ioaddr = &ap->ioaddr; | 1586 | struct ata_ioports *ioaddr = &ap->ioaddr; |
357 | 1587 | ||
@@ -365,51 +1595,412 @@ void ata_bmdma_freeze(struct ata_port *ap) | |||
365 | * ATA_NIEN manipulation. Also, many controllers fail to mask | 1595 | * ATA_NIEN manipulation. Also, many controllers fail to mask |
366 | * previously pending IRQ on ATA_NIEN assertion. Clear it. | 1596 | * previously pending IRQ on ATA_NIEN assertion. Clear it. |
367 | */ | 1597 | */ |
368 | ata_chk_status(ap); | 1598 | ap->ops->sff_check_status(ap); |
369 | 1599 | ||
370 | ap->ops->irq_clear(ap); | 1600 | ap->ops->sff_irq_clear(ap); |
371 | } | 1601 | } |
372 | 1602 | ||
373 | /** | 1603 | /** |
374 | * ata_bmdma_thaw - Thaw BMDMA controller port | 1604 | * ata_sff_thaw - Thaw SFF controller port |
375 | * @ap: port to thaw | 1605 | * @ap: port to thaw |
376 | * | 1606 | * |
377 | * Thaw BMDMA controller port. | 1607 | * Thaw SFF controller port. |
378 | * | 1608 | * |
379 | * LOCKING: | 1609 | * LOCKING: |
380 | * Inherited from caller. | 1610 | * Inherited from caller. |
381 | */ | 1611 | */ |
382 | void ata_bmdma_thaw(struct ata_port *ap) | 1612 | void ata_sff_thaw(struct ata_port *ap) |
383 | { | 1613 | { |
384 | /* clear & re-enable interrupts */ | 1614 | /* clear & re-enable interrupts */ |
385 | ata_chk_status(ap); | 1615 | ap->ops->sff_check_status(ap); |
386 | ap->ops->irq_clear(ap); | 1616 | ap->ops->sff_irq_clear(ap); |
387 | ap->ops->irq_on(ap); | 1617 | ap->ops->sff_irq_on(ap); |
1618 | } | ||
1619 | |||
1620 | /** | ||
1621 | * ata_sff_prereset - prepare SFF link for reset | ||
1622 | * @link: SFF link to be reset | ||
1623 | * @deadline: deadline jiffies for the operation | ||
1624 | * | ||
1625 | * SFF link @link is about to be reset. Initialize it. It first | ||
1626 | * calls ata_std_prereset() and waits for !BSY if the port is | ||
1627 | * being softreset. | ||
1628 | * | ||
1629 | * LOCKING: | ||
1630 | * Kernel thread context (may sleep) | ||
1631 | * | ||
1632 | * RETURNS: | ||
1633 | * 0 on success, -errno otherwise. | ||
1634 | */ | ||
1635 | int ata_sff_prereset(struct ata_link *link, unsigned long deadline) | ||
1636 | { | ||
1637 | struct ata_eh_context *ehc = &link->eh_context; | ||
1638 | int rc; | ||
1639 | |||
1640 | rc = ata_std_prereset(link, deadline); | ||
1641 | if (rc) | ||
1642 | return rc; | ||
1643 | |||
1644 | /* if we're about to do hardreset, nothing more to do */ | ||
1645 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
1646 | return 0; | ||
1647 | |||
1648 | /* wait for !BSY if we don't know that no device is attached */ | ||
1649 | if (!ata_link_offline(link)) { | ||
1650 | rc = ata_sff_wait_ready(link, deadline); | ||
1651 | if (rc && rc != -ENODEV) { | ||
1652 | ata_link_printk(link, KERN_WARNING, "device not ready " | ||
1653 | "(errno=%d), forcing hardreset\n", rc); | ||
1654 | ehc->i.action |= ATA_EH_HARDRESET; | ||
1655 | } | ||
1656 | } | ||
1657 | |||
1658 | return 0; | ||
1659 | } | ||
1660 | |||
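Low-level drivers that need their own pre-reset check usually layer it on top of this helper, as pata_acpi does later in this patch. A sketch with a made-up foo_cable_ok():

static int foo_prereset(struct ata_link *link, unsigned long deadline)
{
	if (!foo_cable_ok(link->ap))	/* hypothetical driver precondition */
		return -ENODEV;

	/* fall back to the stock behaviour: std prereset plus !BSY wait */
	return ata_sff_prereset(link, deadline);
}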
1661 | /** | ||
1662 | * ata_devchk - PATA device presence detection | ||
1663 | * @ap: ATA channel to examine | ||
1664 | * @device: Device to examine (starting at zero) | ||
1665 | * | ||
1666 | * This technique was originally described in | ||
1667 | * Hale Landis's ATADRVR (www.ata-atapi.com), and | ||
1668 | * later found its way into the ATA/ATAPI spec. | ||
1669 | * | ||
1670 | * Write a pattern to the ATA shadow registers, | ||
1671 | * and if a device is present, it will respond by | ||
1672 | * correctly storing and echoing back the | ||
1673 | * ATA shadow register contents. | ||
1674 | * | ||
1675 | * LOCKING: | ||
1676 | * caller. | ||
1677 | */ | ||
1678 | static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) | ||
1679 | { | ||
1680 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
1681 | u8 nsect, lbal; | ||
1682 | |||
1683 | ap->ops->sff_dev_select(ap, device); | ||
1684 | |||
1685 | iowrite8(0x55, ioaddr->nsect_addr); | ||
1686 | iowrite8(0xaa, ioaddr->lbal_addr); | ||
1687 | |||
1688 | iowrite8(0xaa, ioaddr->nsect_addr); | ||
1689 | iowrite8(0x55, ioaddr->lbal_addr); | ||
1690 | |||
1691 | iowrite8(0x55, ioaddr->nsect_addr); | ||
1692 | iowrite8(0xaa, ioaddr->lbal_addr); | ||
1693 | |||
1694 | nsect = ioread8(ioaddr->nsect_addr); | ||
1695 | lbal = ioread8(ioaddr->lbal_addr); | ||
1696 | |||
1697 | if ((nsect == 0x55) && (lbal == 0xaa)) | ||
1698 | return 1; /* we found a device */ | ||
1699 | |||
1700 | return 0; /* nothing found */ | ||
1701 | } | ||
1702 | |||
1703 | /** | ||
1704 | * ata_sff_dev_classify - Parse returned ATA device signature | ||
1705 | * @dev: ATA device to classify (starting at zero) | ||
1706 | * @present: device seems present | ||
1707 | * @r_err: Value of error register on completion | ||
1708 | * | ||
1709 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, | ||
1710 | * an ATA/ATAPI-defined set of values is placed in the ATA | ||
1711 | * shadow registers, indicating the results of device detection | ||
1712 | * and diagnostics. | ||
1713 | * | ||
1714 | * Select the ATA device, and read the values from the ATA shadow | ||
1715 | * registers. Then parse according to the Error register value, | ||
1716 | * and the spec-defined values examined by ata_dev_classify(). | ||
1717 | * | ||
1718 | * LOCKING: | ||
1719 | * caller. | ||
1720 | * | ||
1721 | * RETURNS: | ||
1722 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. | ||
1723 | */ | ||
1724 | unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, | ||
1725 | u8 *r_err) | ||
1726 | { | ||
1727 | struct ata_port *ap = dev->link->ap; | ||
1728 | struct ata_taskfile tf; | ||
1729 | unsigned int class; | ||
1730 | u8 err; | ||
1731 | |||
1732 | ap->ops->sff_dev_select(ap, dev->devno); | ||
1733 | |||
1734 | memset(&tf, 0, sizeof(tf)); | ||
1735 | |||
1736 | ap->ops->sff_tf_read(ap, &tf); | ||
1737 | err = tf.feature; | ||
1738 | if (r_err) | ||
1739 | *r_err = err; | ||
1740 | |||
1741 | /* see if device passed diags: continue and warn later */ | ||
1742 | if (err == 0) | ||
1743 | /* diagnostic fail : do nothing _YET_ */ | ||
1744 | dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; | ||
1745 | else if (err == 1) | ||
1746 | /* do nothing */ ; | ||
1747 | else if ((dev->devno == 0) && (err == 0x81)) | ||
1748 | /* do nothing */ ; | ||
1749 | else | ||
1750 | return ATA_DEV_NONE; | ||
1751 | |||
1752 | /* determine if device is ATA or ATAPI */ | ||
1753 | class = ata_dev_classify(&tf); | ||
1754 | |||
1755 | if (class == ATA_DEV_UNKNOWN) { | ||
1756 | /* If the device failed diagnostic, it's likely to | ||
1757 | * have reported incorrect device signature too. | ||
1758 | * Assume ATA device if the device seems present but | ||
1759 | * device signature is invalid with diagnostic | ||
1760 | * failure. | ||
1761 | */ | ||
1762 | if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) | ||
1763 | class = ATA_DEV_ATA; | ||
1764 | else | ||
1765 | class = ATA_DEV_NONE; | ||
1766 | } else if ((class == ATA_DEV_ATA) && | ||
1767 | (ap->ops->sff_check_status(ap) == 0)) | ||
1768 | class = ATA_DEV_NONE; | ||
1769 | |||
1770 | return class; | ||
1771 | } | ||
1772 | |||
1773 | /** | ||
1774 | * ata_sff_wait_after_reset - wait for devices to become ready after reset | ||
1775 | * @link: SFF link which is just reset | ||
1776 | * @devmask: mask of present devices | ||
1777 | * @deadline: deadline jiffies for the operation | ||
1778 | * | ||
1779 | * Wait for devices attached to SFF @link to become ready after | ||
1780 | * reset. It includes a preceding 150ms wait to avoid accessing the | ||
1781 | * TF status register too early. | ||
1782 | * | ||
1783 | * LOCKING: | ||
1784 | * Kernel thread context (may sleep). | ||
1785 | * | ||
1786 | * RETURNS: | ||
1787 | * 0 on success, -ENODEV if some or all of devices in @devmask | ||
1788 | * don't seem to exist. -errno on other errors. | ||
1789 | */ | ||
1790 | int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, | ||
1791 | unsigned long deadline) | ||
1792 | { | ||
1793 | struct ata_port *ap = link->ap; | ||
1794 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
1795 | unsigned int dev0 = devmask & (1 << 0); | ||
1796 | unsigned int dev1 = devmask & (1 << 1); | ||
1797 | int rc, ret = 0; | ||
1798 | |||
1799 | msleep(ATA_WAIT_AFTER_RESET_MSECS); | ||
1800 | |||
1801 | /* always check readiness of the master device */ | ||
1802 | rc = ata_sff_wait_ready(link, deadline); | ||
1803 | /* -ENODEV means the odd clown forgot the D7 pulldown resistor | ||
1804 | * and TF status is 0xff, bail out on it too. | ||
1805 | */ | ||
1806 | if (rc) | ||
1807 | return rc; | ||
1808 | |||
1809 | /* if device 1 was found in ata_devchk, wait for register | ||
1810 | * access briefly, then wait for BSY to clear. | ||
1811 | */ | ||
1812 | if (dev1) { | ||
1813 | int i; | ||
1814 | |||
1815 | ap->ops->sff_dev_select(ap, 1); | ||
1816 | |||
1817 | /* Wait for register access. Some ATAPI devices fail | ||
1818 | * to set nsect/lbal after reset, so don't waste too | ||
1819 | * much time on it. We're gonna wait for !BSY anyway. | ||
1820 | */ | ||
1821 | for (i = 0; i < 2; i++) { | ||
1822 | u8 nsect, lbal; | ||
1823 | |||
1824 | nsect = ioread8(ioaddr->nsect_addr); | ||
1825 | lbal = ioread8(ioaddr->lbal_addr); | ||
1826 | if ((nsect == 1) && (lbal == 1)) | ||
1827 | break; | ||
1828 | msleep(50); /* give drive a breather */ | ||
1829 | } | ||
1830 | |||
1831 | rc = ata_sff_wait_ready(link, deadline); | ||
1832 | if (rc) { | ||
1833 | if (rc != -ENODEV) | ||
1834 | return rc; | ||
1835 | ret = rc; | ||
1836 | } | ||
1837 | } | ||
1838 | |||
1839 | /* is all this really necessary? */ | ||
1840 | ap->ops->sff_dev_select(ap, 0); | ||
1841 | if (dev1) | ||
1842 | ap->ops->sff_dev_select(ap, 1); | ||
1843 | if (dev0) | ||
1844 | ap->ops->sff_dev_select(ap, 0); | ||
1845 | |||
1846 | return ret; | ||
1847 | } | ||
1848 | |||
1849 | static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | ||
1850 | unsigned long deadline) | ||
1851 | { | ||
1852 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
1853 | |||
1854 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); | ||
1855 | |||
1856 | /* software reset. causes dev0 to be selected */ | ||
1857 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
1858 | udelay(20); /* FIXME: flush */ | ||
1859 | iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); | ||
1860 | udelay(20); /* FIXME: flush */ | ||
1861 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
1862 | |||
1863 | /* wait for the port to become ready */ | ||
1864 | return ata_sff_wait_after_reset(&ap->link, devmask, deadline); | ||
1865 | } | ||
1866 | |||
1867 | /** | ||
1868 | * ata_sff_softreset - reset host port via ATA SRST | ||
1869 | * @link: ATA link to reset | ||
1870 | * @classes: resulting classes of attached devices | ||
1871 | * @deadline: deadline jiffies for the operation | ||
1872 | * | ||
1873 | * Reset host port using ATA SRST. | ||
1874 | * | ||
1875 | * LOCKING: | ||
1876 | * Kernel thread context (may sleep) | ||
1877 | * | ||
1878 | * RETURNS: | ||
1879 | * 0 on success, -errno otherwise. | ||
1880 | */ | ||
1881 | int ata_sff_softreset(struct ata_link *link, unsigned int *classes, | ||
1882 | unsigned long deadline) | ||
1883 | { | ||
1884 | struct ata_port *ap = link->ap; | ||
1885 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
1886 | unsigned int devmask = 0; | ||
1887 | int rc; | ||
1888 | u8 err; | ||
1889 | |||
1890 | DPRINTK("ENTER\n"); | ||
1891 | |||
1892 | /* determine if device 0/1 are present */ | ||
1893 | if (ata_devchk(ap, 0)) | ||
1894 | devmask |= (1 << 0); | ||
1895 | if (slave_possible && ata_devchk(ap, 1)) | ||
1896 | devmask |= (1 << 1); | ||
1897 | |||
1898 | /* select device 0 again */ | ||
1899 | ap->ops->sff_dev_select(ap, 0); | ||
1900 | |||
1901 | /* issue bus reset */ | ||
1902 | DPRINTK("about to softreset, devmask=%x\n", devmask); | ||
1903 | rc = ata_bus_softreset(ap, devmask, deadline); | ||
1904 | /* if link is occupied, -ENODEV too is an error */ | ||
1905 | if (rc && (rc != -ENODEV || sata_scr_valid(link))) { | ||
1906 | ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); | ||
1907 | return rc; | ||
1908 | } | ||
1909 | |||
1910 | /* determine by signature whether we have ATA or ATAPI devices */ | ||
1911 | classes[0] = ata_sff_dev_classify(&link->device[0], | ||
1912 | devmask & (1 << 0), &err); | ||
1913 | if (slave_possible && err != 0x81) | ||
1914 | classes[1] = ata_sff_dev_classify(&link->device[1], | ||
1915 | devmask & (1 << 1), &err); | ||
1916 | |||
1917 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | ||
1918 | return 0; | ||
1919 | } | ||
1920 | |||
1921 | /** | ||
1922 | * sata_sff_hardreset - reset host port via SATA phy reset | ||
1923 | * @link: link to reset | ||
1924 | * @class: resulting class of attached device | ||
1925 | * @deadline: deadline jiffies for the operation | ||
1926 | * | ||
1927 | * SATA phy-reset host port using DET bits of SControl register, | ||
1928 | * wait for !BSY and classify the attached device. | ||
1929 | * | ||
1930 | * LOCKING: | ||
1931 | * Kernel thread context (may sleep) | ||
1932 | * | ||
1933 | * RETURNS: | ||
1934 | * 0 on success, -errno otherwise. | ||
1935 | */ | ||
1936 | int sata_sff_hardreset(struct ata_link *link, unsigned int *class, | ||
1937 | unsigned long deadline) | ||
1938 | { | ||
1939 | struct ata_eh_context *ehc = &link->eh_context; | ||
1940 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | ||
1941 | bool online; | ||
1942 | int rc; | ||
1943 | |||
1944 | rc = sata_link_hardreset(link, timing, deadline, &online, | ||
1945 | ata_sff_check_ready); | ||
1946 | if (online) | ||
1947 | *class = ata_sff_dev_classify(link->device, 1, NULL); | ||
1948 | |||
1949 | DPRINTK("EXIT, class=%u\n", *class); | ||
1950 | return rc; | ||
388 | } | 1951 | } |
389 | 1952 | ||
390 | /** | 1953 | /** |
391 | * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller | 1954 | * ata_sff_postreset - SFF postreset callback |
1955 | * @link: the target SFF ata_link | ||
1956 | * @classes: classes of attached devices | ||
1957 | * | ||
1958 | * This function is invoked after a successful reset. It first | ||
1959 | * calls ata_std_postreset() and performs SFF specific postreset | ||
1960 | * processing. | ||
1961 | * | ||
1962 | * LOCKING: | ||
1963 | * Kernel thread context (may sleep) | ||
1964 | */ | ||
1965 | void ata_sff_postreset(struct ata_link *link, unsigned int *classes) | ||
1966 | { | ||
1967 | struct ata_port *ap = link->ap; | ||
1968 | |||
1969 | ata_std_postreset(link, classes); | ||
1970 | |||
1971 | /* is double-select really necessary? */ | ||
1972 | if (classes[0] != ATA_DEV_NONE) | ||
1973 | ap->ops->sff_dev_select(ap, 1); | ||
1974 | if (classes[1] != ATA_DEV_NONE) | ||
1975 | ap->ops->sff_dev_select(ap, 0); | ||
1976 | |||
1977 | /* bail out if no device is present */ | ||
1978 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | ||
1979 | DPRINTK("EXIT, no device\n"); | ||
1980 | return; | ||
1981 | } | ||
1982 | |||
1983 | /* set up device control */ | ||
1984 | if (ap->ioaddr.ctl_addr) | ||
1985 | iowrite8(ap->ctl, ap->ioaddr.ctl_addr); | ||
1986 | } | ||
1987 | |||
1988 | /** | ||
1989 | * ata_sff_error_handler - Stock error handler for BMDMA controller | ||
392 | * @ap: port to handle error for | 1990 | * @ap: port to handle error for |
393 | * @prereset: prereset method (can be NULL) | ||
394 | * @softreset: softreset method (can be NULL) | ||
395 | * @hardreset: hardreset method (can be NULL) | ||
396 | * @postreset: postreset method (can be NULL) | ||
397 | * | 1991 | * |
398 | * Handle error for ATA BMDMA controller. It can handle both | 1992 | * Stock error handler for SFF controller. It can handle both |
399 | * PATA and SATA controllers. Many controllers should be able to | 1993 | * PATA and SATA controllers. Many controllers should be able to |
400 | * use this EH as-is or with some added handling before and | 1994 | * use this EH as-is or with some added handling before and |
401 | * after. | 1995 | * after. |
402 | * | 1996 | * |
403 | * This function is intended to be used for constructing | ||
404 | * ->error_handler callback by low level drivers. | ||
405 | * | ||
406 | * LOCKING: | 1997 | * LOCKING: |
407 | * Kernel thread context (may sleep) | 1998 | * Kernel thread context (may sleep) |
408 | */ | 1999 | */ |
409 | void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | 2000 | void ata_sff_error_handler(struct ata_port *ap) |
410 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | ||
411 | ata_postreset_fn_t postreset) | ||
412 | { | 2001 | { |
2002 | ata_reset_fn_t softreset = ap->ops->softreset; | ||
2003 | ata_reset_fn_t hardreset = ap->ops->hardreset; | ||
413 | struct ata_queued_cmd *qc; | 2004 | struct ata_queued_cmd *qc; |
414 | unsigned long flags; | 2005 | unsigned long flags; |
415 | int thaw = 0; | 2006 | int thaw = 0; |
@@ -423,7 +2014,8 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
423 | 2014 | ||
424 | ap->hsm_task_state = HSM_ST_IDLE; | 2015 | ap->hsm_task_state = HSM_ST_IDLE; |
425 | 2016 | ||
426 | if (qc && (qc->tf.protocol == ATA_PROT_DMA || | 2017 | if (ap->ioaddr.bmdma_addr && |
2018 | qc && (qc->tf.protocol == ATA_PROT_DMA || | ||
427 | qc->tf.protocol == ATAPI_PROT_DMA)) { | 2019 | qc->tf.protocol == ATAPI_PROT_DMA)) { |
428 | u8 host_stat; | 2020 | u8 host_stat; |
429 | 2021 | ||
@@ -442,9 +2034,9 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
442 | ap->ops->bmdma_stop(qc); | 2034 | ap->ops->bmdma_stop(qc); |
443 | } | 2035 | } |
444 | 2036 | ||
445 | ata_altstatus(ap); | 2037 | ata_sff_altstatus(ap); |
446 | ata_chk_status(ap); | 2038 | ap->ops->sff_check_status(ap); |
447 | ap->ops->irq_clear(ap); | 2039 | ap->ops->sff_irq_clear(ap); |
448 | 2040 | ||
449 | spin_unlock_irqrestore(ap->lock, flags); | 2041 | spin_unlock_irqrestore(ap->lock, flags); |
450 | 2042 | ||
@@ -452,40 +2044,27 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
452 | ata_eh_thaw_port(ap); | 2044 | ata_eh_thaw_port(ap); |
453 | 2045 | ||
454 | /* PIO and DMA engines have been stopped, perform recovery */ | 2046 | /* PIO and DMA engines have been stopped, perform recovery */ |
455 | ata_do_eh(ap, prereset, softreset, hardreset, postreset); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller | ||
460 | * @ap: port to handle error for | ||
461 | * | ||
462 | * Stock error handler for BMDMA controller. | ||
463 | * | ||
464 | * LOCKING: | ||
465 | * Kernel thread context (may sleep) | ||
466 | */ | ||
467 | void ata_bmdma_error_handler(struct ata_port *ap) | ||
468 | { | ||
469 | ata_reset_fn_t softreset = NULL, hardreset = NULL; | ||
470 | 2047 | ||
471 | if (ap->ioaddr.ctl_addr) | 2048 | /* Ignore ata_sff_softreset if ctl isn't accessible and |
472 | softreset = ata_std_softreset; | 2049 | * built-in hardresets if SCR access isn't available. |
473 | if (sata_scr_valid(&ap->link)) | 2050 | */ |
474 | hardreset = sata_std_hardreset; | 2051 | if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) |
2052 | softreset = NULL; | ||
2053 | if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) | ||
2054 | hardreset = NULL; | ||
475 | 2055 | ||
476 | ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset, | 2056 | ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, |
477 | ata_std_postreset); | 2057 | ap->ops->postreset); |
478 | } | 2058 | } |
479 | 2059 | ||
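Since the reset methods are now taken from ap->ops rather than passed in, a converted driver typically declares its operations by building on one of the stock ops tables exported at the end of this file and overriding only what it needs. A hedged sketch: the foo_* names are hypothetical, and the .inherits mechanism is assumed from the ata_sff_port_ops/ata_bmdma_port_ops tables this patch exports.

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* stock SFF + BMDMA methods */
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= foo_set_piomode,	/* hypothetical driver hook */
	.prereset	= foo_prereset,		/* e.g. the prereset sketch above */
};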
480 | /** | 2060 | /** |
481 | * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for | 2061 | * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller |
482 | * BMDMA controller | ||
483 | * @qc: internal command to clean up | 2062 | * @qc: internal command to clean up |
484 | * | 2063 | * |
485 | * LOCKING: | 2064 | * LOCKING: |
486 | * Kernel thread context (may sleep) | 2065 | * Kernel thread context (may sleep) |
487 | */ | 2066 | */ |
488 | void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | 2067 | void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) |
489 | { | 2068 | { |
490 | if (qc->ap->ioaddr.bmdma_addr) | 2069 | if (qc->ap->ioaddr.bmdma_addr) |
491 | ata_bmdma_stop(qc); | 2070 | ata_bmdma_stop(qc); |
@@ -504,7 +2083,6 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
504 | * LOCKING: | 2083 | * LOCKING: |
505 | * Inherited from caller. | 2084 | * Inherited from caller. |
506 | */ | 2085 | */ |
507 | |||
508 | int ata_sff_port_start(struct ata_port *ap) | 2086 | int ata_sff_port_start(struct ata_port *ap) |
509 | { | 2087 | { |
510 | if (ap->ioaddr.bmdma_addr) | 2088 | if (ap->ioaddr.bmdma_addr) |
@@ -512,24 +2090,262 @@ int ata_sff_port_start(struct ata_port *ap) | |||
512 | return 0; | 2090 | return 0; |
513 | } | 2091 | } |
514 | 2092 | ||
515 | #ifdef CONFIG_PCI | 2093 | /** |
2094 | * ata_sff_std_ports - initialize ioaddr with standard port offsets. | ||
2095 | * @ioaddr: IO address structure to be initialized | ||
2096 | * | ||
2097 | * Utility function which initializes data_addr, error_addr, | ||
2098 | * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, | ||
2099 | * device_addr, status_addr, and command_addr to standard offsets | ||
2100 | * relative to cmd_addr. | ||
2101 | * | ||
2102 | * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. | ||
2103 | */ | ||
2104 | void ata_sff_std_ports(struct ata_ioports *ioaddr) | ||
2105 | { | ||
2106 | ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; | ||
2107 | ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; | ||
2108 | ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; | ||
2109 | ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; | ||
2110 | ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; | ||
2111 | ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; | ||
2112 | ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; | ||
2113 | ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; | ||
2114 | ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; | ||
2115 | ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; | ||
2116 | } | ||
516 | 2117 | ||
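For a platform or legacy-I/O driver the usual pattern is to map the command and control blocks itself and let this helper fill in the taskfile registers. A minimal sketch, assuming cmd_base and ctl_base have already been iomapped by the (hypothetical) driver:

static void foo_setup_ioaddr(struct ata_port *ap, void __iomem *cmd_base,
			     void __iomem *ctl_base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = cmd_base;
	ioaddr->altstatus_addr = ioaddr->ctl_addr = ctl_base;
	ata_sff_std_ports(ioaddr);	/* derive data/error/.../command from cmd_addr */
}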
517 | static int ata_resources_present(struct pci_dev *pdev, int port) | 2118 | unsigned long ata_bmdma_mode_filter(struct ata_device *adev, |
2119 | unsigned long xfer_mask) | ||
518 | { | 2120 | { |
519 | int i; | 2121 | /* Filter out DMA modes if the device has been configured by |
2122 | the BIOS as PIO only */ | ||
520 | 2123 | ||
521 | /* Check the PCI resources for this channel are enabled */ | 2124 | if (adev->link->ap->ioaddr.bmdma_addr == NULL) |
522 | port = port * 2; | 2125 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
523 | for (i = 0; i < 2; i ++) { | 2126 | return xfer_mask; |
524 | if (pci_resource_start(pdev, port + i) == 0 || | 2127 | } |
525 | pci_resource_len(pdev, port + i) == 0) | 2128 | |
526 | return 0; | 2129 | /** |
2130 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
2131 | * @qc: Info associated with this ATA transaction. | ||
2132 | * | ||
2133 | * LOCKING: | ||
2134 | * spin_lock_irqsave(host lock) | ||
2135 | */ | ||
2136 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | ||
2137 | { | ||
2138 | struct ata_port *ap = qc->ap; | ||
2139 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
2140 | u8 dmactl; | ||
2141 | |||
2142 | /* load PRD table addr. */ | ||
2143 | mb(); /* make sure PRD table writes are visible to controller */ | ||
2144 | iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
2145 | |||
2146 | /* specify data direction, triple-check start bit is clear */ | ||
2147 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2148 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
2149 | if (!rw) | ||
2150 | dmactl |= ATA_DMA_WR; | ||
2151 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2152 | |||
2153 | /* issue r/w command */ | ||
2154 | ap->ops->sff_exec_command(ap, &qc->tf); | ||
2155 | } | ||
2156 | |||
2157 | /** | ||
2158 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | ||
2159 | * @qc: Info associated with this ATA transaction. | ||
2160 | * | ||
2161 | * LOCKING: | ||
2162 | * spin_lock_irqsave(host lock) | ||
2163 | */ | ||
2164 | void ata_bmdma_start(struct ata_queued_cmd *qc) | ||
2165 | { | ||
2166 | struct ata_port *ap = qc->ap; | ||
2167 | u8 dmactl; | ||
2168 | |||
2169 | /* start host DMA transaction */ | ||
2170 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2171 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2172 | |||
2173 | /* Strictly, one may wish to issue an ioread8() here, to | ||
2174 | * flush the mmio write. However, control also passes | ||
2175 | * to the hardware at this point, and it will interrupt | ||
2176 | * us when we are to resume control. So, in effect, | ||
2177 | * we don't care when the mmio write flushes. | ||
2178 | * Further, a read of the DMA status register _immediately_ | ||
2179 | * following the write may not be what certain flaky hardware | ||
2180 | * expects, so it is best not to add a readb() without first | ||
2181 | * auditing all the MMIO ATA cards/mobos. | ||
2182 | * Or maybe I'm just being paranoid. | ||
2183 | * | ||
2184 | * FIXME: The posting of this write means I/O starts are | ||
2185 | * unnecessarily delayed for MMIO | ||
2186 | */ | ||
2187 | } | ||
2188 | |||
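Controllers that need a chip-specific nudge around DMA start can wrap the helper instead of reimplementing it. A sketch only; foo_dma_kick() is an invented stand-in for whatever the chip requires:

static void foo_bmdma_start(struct ata_queued_cmd *qc)
{
	foo_dma_kick(qc->ap);		/* hypothetical chip-specific fixup */
	ata_bmdma_start(qc);		/* then the stock BMDMA start above */
}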
2189 | /** | ||
2190 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | ||
2191 | * @qc: Command we are ending DMA for | ||
2192 | * | ||
2193 | * Clears the ATA_DMA_START flag in the dma control register | ||
2194 | * | ||
2195 | * May be used as the bmdma_stop() entry in ata_port_operations. | ||
2196 | * | ||
2197 | * LOCKING: | ||
2198 | * spin_lock_irqsave(host lock) | ||
2199 | */ | ||
2200 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | ||
2201 | { | ||
2202 | struct ata_port *ap = qc->ap; | ||
2203 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
2204 | |||
2205 | /* clear start/stop bit */ | ||
2206 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
2207 | mmio + ATA_DMA_CMD); | ||
2208 | |||
2209 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
2210 | ata_sff_altstatus(ap); /* dummy read */ | ||
2211 | } | ||
2212 | |||
2213 | /** | ||
2214 | * ata_bmdma_status - Read PCI IDE BMDMA status | ||
2215 | * @ap: Port associated with this ATA transaction. | ||
2216 | * | ||
2217 | * Read and return BMDMA status register. | ||
2218 | * | ||
2219 | * May be used as the bmdma_status() entry in ata_port_operations. | ||
2220 | * | ||
2221 | * LOCKING: | ||
2222 | * spin_lock_irqsave(host lock) | ||
2223 | */ | ||
2224 | u8 ata_bmdma_status(struct ata_port *ap) | ||
2225 | { | ||
2226 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
2227 | } | ||
2228 | |||
2229 | /** | ||
2230 | * ata_bus_reset - reset host port and associated ATA channel | ||
2231 | * @ap: port to reset | ||
2232 | * | ||
2233 | * This is typically the first time we actually start issuing | ||
2234 | * commands to the ATA channel. We wait for BSY to clear, then | ||
2235 | * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its | ||
2236 | * result. Determine what devices, if any, are on the channel | ||
2237 | * by looking at the device 0/1 error register. Look at the signature | ||
2238 | * stored in each device's taskfile registers, to determine if | ||
2239 | * the device is ATA or ATAPI. | ||
2240 | * | ||
2241 | * LOCKING: | ||
2242 | * PCI/etc. bus probe sem. | ||
2243 | * Obtains host lock. | ||
2244 | * | ||
2245 | * SIDE EFFECTS: | ||
2246 | * Sets ATA_FLAG_DISABLED if bus reset fails. | ||
2247 | * | ||
2248 | * DEPRECATED: | ||
2249 | * This function is only for drivers which still use old EH and | ||
2250 | * will be removed soon. | ||
2251 | */ | ||
2252 | void ata_bus_reset(struct ata_port *ap) | ||
2253 | { | ||
2254 | struct ata_device *device = ap->link.device; | ||
2255 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
2256 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
2257 | u8 err; | ||
2258 | unsigned int dev0, dev1 = 0, devmask = 0; | ||
2259 | int rc; | ||
2260 | |||
2261 | DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); | ||
2262 | |||
2263 | /* determine if device 0/1 are present */ | ||
2264 | if (ap->flags & ATA_FLAG_SATA_RESET) | ||
2265 | dev0 = 1; | ||
2266 | else { | ||
2267 | dev0 = ata_devchk(ap, 0); | ||
2268 | if (slave_possible) | ||
2269 | dev1 = ata_devchk(ap, 1); | ||
527 | } | 2270 | } |
528 | return 1; | 2271 | |
2272 | if (dev0) | ||
2273 | devmask |= (1 << 0); | ||
2274 | if (dev1) | ||
2275 | devmask |= (1 << 1); | ||
2276 | |||
2277 | /* select device 0 again */ | ||
2278 | ap->ops->sff_dev_select(ap, 0); | ||
2279 | |||
2280 | /* issue bus reset */ | ||
2281 | if (ap->flags & ATA_FLAG_SRST) { | ||
2282 | rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); | ||
2283 | if (rc && rc != -ENODEV) | ||
2284 | goto err_out; | ||
2285 | } | ||
2286 | |||
2287 | /* | ||
2288 | * determine by signature whether we have ATA or ATAPI devices | ||
2289 | */ | ||
2290 | device[0].class = ata_sff_dev_classify(&device[0], dev0, &err); | ||
2291 | if ((slave_possible) && (err != 0x81)) | ||
2292 | device[1].class = ata_sff_dev_classify(&device[1], dev1, &err); | ||
2293 | |||
2294 | /* is double-select really necessary? */ | ||
2295 | if (device[1].class != ATA_DEV_NONE) | ||
2296 | ap->ops->sff_dev_select(ap, 1); | ||
2297 | if (device[0].class != ATA_DEV_NONE) | ||
2298 | ap->ops->sff_dev_select(ap, 0); | ||
2299 | |||
2300 | /* if no devices were detected, disable this port */ | ||
2301 | if ((device[0].class == ATA_DEV_NONE) && | ||
2302 | (device[1].class == ATA_DEV_NONE)) | ||
2303 | goto err_out; | ||
2304 | |||
2305 | if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { | ||
2306 | /* set up device control for ATA_FLAG_SATA_RESET */ | ||
2307 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
2308 | } | ||
2309 | |||
2310 | DPRINTK("EXIT\n"); | ||
2311 | return; | ||
2312 | |||
2313 | err_out: | ||
2314 | ata_port_printk(ap, KERN_ERR, "disabling port\n"); | ||
2315 | ata_port_disable(ap); | ||
2316 | |||
2317 | DPRINTK("EXIT\n"); | ||
529 | } | 2318 | } |
530 | 2319 | ||
2320 | #ifdef CONFIG_PCI | ||
2321 | |||
531 | /** | 2322 | /** |
532 | * ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host | 2323 | * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex |
2324 | * @pdev: PCI device | ||
2325 | * | ||
2326 | * Some PCI ATA devices report simplex mode but in fact can be told to | ||
2327 | * enter non simplex mode. This implements the necessary logic to | ||
2328 | * perform the task on such devices. Calling it on other devices will | ||
2329 | * have -undefined- behaviour. | ||
2330 | */ | ||
2331 | int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) | ||
2332 | { | ||
2333 | unsigned long bmdma = pci_resource_start(pdev, 4); | ||
2334 | u8 simplex; | ||
2335 | |||
2336 | if (bmdma == 0) | ||
2337 | return -ENOENT; | ||
2338 | |||
2339 | simplex = inb(bmdma + 0x02); | ||
2340 | outb(simplex & 0x60, bmdma + 0x02); | ||
2341 | simplex = inb(bmdma + 0x02); | ||
2342 | if (simplex & 0x80) | ||
2343 | return -EOPNOTSUPP; | ||
2344 | return 0; | ||
2345 | } | ||
2346 | |||
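A plausible use from a driver's init path, for a controller known to fake simplex mode. This is a sketch under that assumption, not code from the patch; whether to warn, fail, or keep DMA serialized on -EOPNOTSUPP is the driver's choice.

	/* hypothetical init_one fragment */
	if (ata_pci_bmdma_clear_simplex(pdev))
		dev_printk(KERN_WARNING, &pdev->dev,
			   "could not clear simplex mode, DMA will stay serialized\n");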
2347 | /** | ||
2348 | * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host | ||
533 | * @host: target ATA host | 2349 | * @host: target ATA host |
534 | * | 2350 | * |
535 | * Acquire PCI BMDMA resources and initialize @host accordingly. | 2351 | * Acquire PCI BMDMA resources and initialize @host accordingly. |
@@ -540,7 +2356,7 @@ static int ata_resources_present(struct pci_dev *pdev, int port) | |||
540 | * RETURNS: | 2356 | * RETURNS: |
541 | * 0 on success, -errno otherwise. | 2357 | * 0 on success, -errno otherwise. |
542 | */ | 2358 | */ |
543 | int ata_pci_init_bmdma(struct ata_host *host) | 2359 | int ata_pci_bmdma_init(struct ata_host *host) |
544 | { | 2360 | { |
545 | struct device *gdev = host->dev; | 2361 | struct device *gdev = host->dev; |
546 | struct pci_dev *pdev = to_pci_dev(gdev); | 2362 | struct pci_dev *pdev = to_pci_dev(gdev); |
@@ -585,8 +2401,22 @@ int ata_pci_init_bmdma(struct ata_host *host) | |||
585 | return 0; | 2401 | return 0; |
586 | } | 2402 | } |
587 | 2403 | ||
2404 | static int ata_resources_present(struct pci_dev *pdev, int port) | ||
2405 | { | ||
2406 | int i; | ||
2407 | |||
2408 | /* Check the PCI resources for this channel are enabled */ | ||
2409 | port = port * 2; | ||
2410 | for (i = 0; i < 2; i ++) { | ||
2411 | if (pci_resource_start(pdev, port + i) == 0 || | ||
2412 | pci_resource_len(pdev, port + i) == 0) | ||
2413 | return 0; | ||
2414 | } | ||
2415 | return 1; | ||
2416 | } | ||
2417 | |||
588 | /** | 2418 | /** |
589 | * ata_pci_init_sff_host - acquire native PCI ATA resources and init host | 2419 | * ata_pci_sff_init_host - acquire native PCI ATA resources and init host |
590 | * @host: target ATA host | 2420 | * @host: target ATA host |
591 | * | 2421 | * |
592 | * Acquire native PCI ATA resources for @host and initialize the | 2422 | * Acquire native PCI ATA resources for @host and initialize the |
@@ -604,7 +2434,7 @@ int ata_pci_init_bmdma(struct ata_host *host) | |||
604 | * 0 if at least one port is initialized, -ENODEV if no port is | 2434 | * 0 if at least one port is initialized, -ENODEV if no port is |
605 | * available. | 2435 | * available. |
606 | */ | 2436 | */ |
607 | int ata_pci_init_sff_host(struct ata_host *host) | 2437 | int ata_pci_sff_init_host(struct ata_host *host) |
608 | { | 2438 | { |
609 | struct device *gdev = host->dev; | 2439 | struct device *gdev = host->dev; |
610 | struct pci_dev *pdev = to_pci_dev(gdev); | 2440 | struct pci_dev *pdev = to_pci_dev(gdev); |
@@ -646,7 +2476,7 @@ int ata_pci_init_sff_host(struct ata_host *host) | |||
646 | ap->ioaddr.altstatus_addr = | 2476 | ap->ioaddr.altstatus_addr = |
647 | ap->ioaddr.ctl_addr = (void __iomem *) | 2477 | ap->ioaddr.ctl_addr = (void __iomem *) |
648 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); | 2478 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); |
649 | ata_std_ports(&ap->ioaddr); | 2479 | ata_sff_std_ports(&ap->ioaddr); |
650 | 2480 | ||
651 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | 2481 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", |
652 | (unsigned long long)pci_resource_start(pdev, base), | 2482 | (unsigned long long)pci_resource_start(pdev, base), |
@@ -664,7 +2494,7 @@ int ata_pci_init_sff_host(struct ata_host *host) | |||
664 | } | 2494 | } |
665 | 2495 | ||
666 | /** | 2496 | /** |
667 | * ata_pci_prepare_sff_host - helper to prepare native PCI ATA host | 2497 | * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host |
668 | * @pdev: target PCI device | 2498 | * @pdev: target PCI device |
669 | * @ppi: array of port_info, must be enough for two ports | 2499 | * @ppi: array of port_info, must be enough for two ports |
670 | * @r_host: out argument for the initialized ATA host | 2500 | * @r_host: out argument for the initialized ATA host |
@@ -678,7 +2508,7 @@ int ata_pci_init_sff_host(struct ata_host *host) | |||
678 | * RETURNS: | 2508 | * RETURNS: |
679 | * 0 on success, -errno otherwise. | 2509 | * 0 on success, -errno otherwise. |
680 | */ | 2510 | */ |
681 | int ata_pci_prepare_sff_host(struct pci_dev *pdev, | 2511 | int ata_pci_sff_prepare_host(struct pci_dev *pdev, |
682 | const struct ata_port_info * const * ppi, | 2512 | const struct ata_port_info * const * ppi, |
683 | struct ata_host **r_host) | 2513 | struct ata_host **r_host) |
684 | { | 2514 | { |
@@ -696,12 +2526,12 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev, | |||
696 | goto err_out; | 2526 | goto err_out; |
697 | } | 2527 | } |
698 | 2528 | ||
699 | rc = ata_pci_init_sff_host(host); | 2529 | rc = ata_pci_sff_init_host(host); |
700 | if (rc) | 2530 | if (rc) |
701 | goto err_out; | 2531 | goto err_out; |
702 | 2532 | ||
703 | /* init DMA related stuff */ | 2533 | /* init DMA related stuff */ |
704 | rc = ata_pci_init_bmdma(host); | 2534 | rc = ata_pci_bmdma_init(host); |
705 | if (rc) | 2535 | if (rc) |
706 | goto err_bmdma; | 2536 | goto err_bmdma; |
707 | 2537 | ||
@@ -722,7 +2552,7 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev, | |||
722 | } | 2552 | } |
723 | 2553 | ||
724 | /** | 2554 | /** |
725 | * ata_pci_activate_sff_host - start SFF host, request IRQ and register it | 2555 | * ata_pci_sff_activate_host - start SFF host, request IRQ and register it |
726 | * @host: target SFF ATA host | 2556 | * @host: target SFF ATA host |
727 | * @irq_handler: irq_handler used when requesting IRQ(s) | 2557 | * @irq_handler: irq_handler used when requesting IRQ(s) |
728 | * @sht: scsi_host_template to use when registering the host | 2558 | * @sht: scsi_host_template to use when registering the host |
@@ -737,7 +2567,7 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev, | |||
737 | * RETURNS: | 2567 | * RETURNS: |
738 | * 0 on success, -errno otherwise. | 2568 | * 0 on success, -errno otherwise. |
739 | */ | 2569 | */ |
740 | int ata_pci_activate_sff_host(struct ata_host *host, | 2570 | int ata_pci_sff_activate_host(struct ata_host *host, |
741 | irq_handler_t irq_handler, | 2571 | irq_handler_t irq_handler, |
742 | struct scsi_host_template *sht) | 2572 | struct scsi_host_template *sht) |
743 | { | 2573 | { |
@@ -815,9 +2645,11 @@ int ata_pci_activate_sff_host(struct ata_host *host, | |||
815 | } | 2645 | } |
816 | 2646 | ||
817 | /** | 2647 | /** |
818 | * ata_pci_init_one - Initialize/register PCI IDE host controller | 2648 | * ata_pci_sff_init_one - Initialize/register PCI IDE host controller |
819 | * @pdev: Controller to be initialized | 2649 | * @pdev: Controller to be initialized |
820 | * @ppi: array of port_info, must be enough for two ports | 2650 | * @ppi: array of port_info, must be enough for two ports |
2651 | * @sht: scsi_host_template to use when registering the host | ||
2652 | * @host_priv: host private_data | ||
821 | * | 2653 | * |
822 | * This is a helper function which can be called from a driver's | 2654 | * This is a helper function which can be called from a driver's |
823 | * xxx_init_one() probe function if the hardware uses traditional | 2655 | * xxx_init_one() probe function if the hardware uses traditional |
@@ -837,8 +2669,9 @@ int ata_pci_activate_sff_host(struct ata_host *host, | |||
837 | * RETURNS: | 2669 | * RETURNS: |
838 | * Zero on success, negative on errno-based value on error. | 2670 | * Zero on success, negative on errno-based value on error. |
839 | */ | 2671 | */ |
840 | int ata_pci_init_one(struct pci_dev *pdev, | 2672 | int ata_pci_sff_init_one(struct pci_dev *pdev, |
841 | const struct ata_port_info * const * ppi) | 2673 | const struct ata_port_info * const * ppi, |
2674 | struct scsi_host_template *sht, void *host_priv) | ||
842 | { | 2675 | { |
843 | struct device *dev = &pdev->dev; | 2676 | struct device *dev = &pdev->dev; |
844 | const struct ata_port_info *pi = NULL; | 2677 | const struct ata_port_info *pi = NULL; |
@@ -869,13 +2702,13 @@ int ata_pci_init_one(struct pci_dev *pdev, | |||
869 | goto out; | 2702 | goto out; |
870 | 2703 | ||
871 | /* prepare and activate SFF host */ | 2704 | /* prepare and activate SFF host */ |
872 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 2705 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
873 | if (rc) | 2706 | if (rc) |
874 | goto out; | 2707 | goto out; |
2708 | host->private_data = host_priv; | ||
875 | 2709 | ||
876 | pci_set_master(pdev); | 2710 | pci_set_master(pdev); |
877 | rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler, | 2711 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
878 | pi->sht); | ||
879 | out: | 2712 | out: |
880 | if (rc == 0) | 2713 | if (rc == 0) |
881 | devres_remove_group(&pdev->dev, NULL); | 2714 | devres_remove_group(&pdev->dev, NULL); |
@@ -885,41 +2718,52 @@ int ata_pci_init_one(struct pci_dev *pdev, | |||
885 | return rc; | 2718 | return rc; |
886 | } | 2719 | } |
887 | 2720 | ||
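With the new signature a converted driver's probe routine typically reduces to something like the sketch below. foo_port_ops and foo_sht are the driver's own tables (hypothetical names), and the transfer-mode masks are only examples.

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,			/* PIO0-4 */
		.mwdma_mask	= 0x07,			/* MWDMA0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &foo_port_ops,	/* e.g. the ops sketch earlier */
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL);
}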
888 | /** | ||
889 | * ata_pci_clear_simplex - attempt to kick device out of simplex | ||
890 | * @pdev: PCI device | ||
891 | * | ||
892 | * Some PCI ATA devices report simplex mode but in fact can be told to | ||
893 | * enter non simplex mode. This implements the necessary logic to | ||
894 | * perform the task on such devices. Calling it on other devices will | ||
895 | * have -undefined- behaviour. | ||
896 | */ | ||
897 | |||
898 | int ata_pci_clear_simplex(struct pci_dev *pdev) | ||
899 | { | ||
900 | unsigned long bmdma = pci_resource_start(pdev, 4); | ||
901 | u8 simplex; | ||
902 | |||
903 | if (bmdma == 0) | ||
904 | return -ENOENT; | ||
905 | |||
906 | simplex = inb(bmdma + 0x02); | ||
907 | outb(simplex & 0x60, bmdma + 0x02); | ||
908 | simplex = inb(bmdma + 0x02); | ||
909 | if (simplex & 0x80) | ||
910 | return -EOPNOTSUPP; | ||
911 | return 0; | ||
912 | } | ||
913 | |||
914 | unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask) | ||
915 | { | ||
916 | /* Filter out DMA modes if the device has been configured by | ||
917 | the BIOS as PIO only */ | ||
918 | |||
919 | if (adev->link->ap->ioaddr.bmdma_addr == NULL) | ||
920 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | ||
921 | return xfer_mask; | ||
922 | } | ||
923 | |||
924 | #endif /* CONFIG_PCI */ | 2721 | #endif /* CONFIG_PCI */ |
925 | 2722 | ||
2723 | EXPORT_SYMBOL_GPL(ata_sff_port_ops); | ||
2724 | EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); | ||
2725 | EXPORT_SYMBOL_GPL(ata_sff_qc_prep); | ||
2726 | EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); | ||
2727 | EXPORT_SYMBOL_GPL(ata_sff_dev_select); | ||
2728 | EXPORT_SYMBOL_GPL(ata_sff_check_status); | ||
2729 | EXPORT_SYMBOL_GPL(ata_sff_altstatus); | ||
2730 | EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); | ||
2731 | EXPORT_SYMBOL_GPL(ata_sff_wait_ready); | ||
2732 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); | ||
2733 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); | ||
2734 | EXPORT_SYMBOL_GPL(ata_sff_exec_command); | ||
2735 | EXPORT_SYMBOL_GPL(ata_sff_data_xfer); | ||
2736 | EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); | ||
2737 | EXPORT_SYMBOL_GPL(ata_sff_irq_on); | ||
2738 | EXPORT_SYMBOL_GPL(ata_sff_irq_clear); | ||
2739 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); | ||
2740 | EXPORT_SYMBOL_GPL(ata_sff_qc_issue); | ||
2741 | EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); | ||
2742 | EXPORT_SYMBOL_GPL(ata_sff_host_intr); | ||
2743 | EXPORT_SYMBOL_GPL(ata_sff_interrupt); | ||
2744 | EXPORT_SYMBOL_GPL(ata_sff_freeze); | ||
2745 | EXPORT_SYMBOL_GPL(ata_sff_thaw); | ||
2746 | EXPORT_SYMBOL_GPL(ata_sff_prereset); | ||
2747 | EXPORT_SYMBOL_GPL(ata_sff_dev_classify); | ||
2748 | EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); | ||
2749 | EXPORT_SYMBOL_GPL(ata_sff_softreset); | ||
2750 | EXPORT_SYMBOL_GPL(sata_sff_hardreset); | ||
2751 | EXPORT_SYMBOL_GPL(ata_sff_postreset); | ||
2752 | EXPORT_SYMBOL_GPL(ata_sff_error_handler); | ||
2753 | EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); | ||
2754 | EXPORT_SYMBOL_GPL(ata_sff_port_start); | ||
2755 | EXPORT_SYMBOL_GPL(ata_sff_std_ports); | ||
2756 | EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); | ||
2757 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); | ||
2758 | EXPORT_SYMBOL_GPL(ata_bmdma_start); | ||
2759 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); | ||
2760 | EXPORT_SYMBOL_GPL(ata_bmdma_status); | ||
2761 | EXPORT_SYMBOL_GPL(ata_bus_reset); | ||
2762 | #ifdef CONFIG_PCI | ||
2763 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); | ||
2764 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); | ||
2765 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); | ||
2766 | EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); | ||
2767 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); | ||
2768 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); | ||
2769 | #endif /* CONFIG_PCI */ | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index aa884f71a12a..4aeeabb10a47 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -38,6 +38,17 @@ struct ata_scsi_args { | |||
38 | void (*done)(struct scsi_cmnd *); | 38 | void (*done)(struct scsi_cmnd *); |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset) | ||
42 | { | ||
43 | if (reset == sata_std_hardreset) | ||
44 | return 1; | ||
45 | #ifdef CONFIG_ATA_SFF | ||
46 | if (reset == sata_sff_hardreset) | ||
47 | return 1; | ||
48 | #endif | ||
49 | return 0; | ||
50 | } | ||
51 | |||
41 | /* libata-core.c */ | 52 | /* libata-core.c */ |
42 | enum { | 53 | enum { |
43 | /* flags for ata_dev_read_id() */ | 54 | /* flags for ata_dev_read_id() */ |
@@ -61,12 +72,16 @@ extern int libata_fua; | |||
61 | extern int libata_noacpi; | 72 | extern int libata_noacpi; |
62 | extern int libata_allow_tpm; | 73 | extern int libata_allow_tpm; |
63 | extern void ata_force_cbl(struct ata_port *ap); | 74 | extern void ata_force_cbl(struct ata_port *ap); |
75 | extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); | ||
76 | extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); | ||
64 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); | 77 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); |
65 | extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, | 78 | extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, |
66 | u64 block, u32 n_block, unsigned int tf_flags, | 79 | u64 block, u32 n_block, unsigned int tf_flags, |
67 | unsigned int tag); | 80 | unsigned int tag); |
68 | extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); | 81 | extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); |
69 | extern void ata_dev_disable(struct ata_device *dev); | 82 | extern void ata_dev_disable(struct ata_device *dev); |
83 | extern void ata_pio_queue_task(struct ata_port *ap, void *data, | ||
84 | unsigned long delay); | ||
70 | extern void ata_port_flush_task(struct ata_port *ap); | 85 | extern void ata_port_flush_task(struct ata_port *ap); |
71 | extern unsigned ata_exec_internal(struct ata_device *dev, | 86 | extern unsigned ata_exec_internal(struct ata_device *dev, |
72 | struct ata_taskfile *tf, const u8 *cdb, | 87 | struct ata_taskfile *tf, const u8 *cdb, |
@@ -77,6 +92,8 @@ extern unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
77 | int dma_dir, struct scatterlist *sg, | 92 | int dma_dir, struct scatterlist *sg, |
78 | unsigned int n_elem, unsigned long timeout); | 93 | unsigned int n_elem, unsigned long timeout); |
79 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); | 94 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); |
95 | extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, | ||
96 | int (*check_ready)(struct ata_link *link)); | ||
80 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | 97 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
81 | unsigned int flags, u16 *id); | 98 | unsigned int flags, u16 *id); |
82 | extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); | 99 | extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); |
@@ -91,10 +108,7 @@ extern void ata_qc_free(struct ata_queued_cmd *qc); | |||
91 | extern void ata_qc_issue(struct ata_queued_cmd *qc); | 108 | extern void ata_qc_issue(struct ata_queued_cmd *qc); |
92 | extern void __ata_qc_complete(struct ata_queued_cmd *qc); | 109 | extern void __ata_qc_complete(struct ata_queued_cmd *qc); |
93 | extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); | 110 | extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); |
94 | extern void ata_dev_select(struct ata_port *ap, unsigned int device, | ||
95 | unsigned int wait, unsigned int can_sleep); | ||
96 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); | 111 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); |
97 | extern int ata_flush_cache(struct ata_device *dev); | ||
98 | extern void ata_dev_init(struct ata_device *dev); | 112 | extern void ata_dev_init(struct ata_device *dev); |
99 | extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); | 113 | extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); |
100 | extern int sata_link_init_spd(struct ata_link *link); | 114 | extern int sata_link_init_spd(struct ata_link *link); |
@@ -165,11 +179,6 @@ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); | |||
165 | extern void ata_scsi_dev_rescan(struct work_struct *work); | 179 | extern void ata_scsi_dev_rescan(struct work_struct *work); |
166 | extern int ata_bus_probe(struct ata_port *ap); | 180 | extern int ata_bus_probe(struct ata_port *ap); |
167 | 181 | ||
168 | /* libata-pmp.c */ | ||
169 | extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); | ||
170 | extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); | ||
171 | extern int sata_pmp_attach(struct ata_device *dev); | ||
172 | |||
173 | /* libata-eh.c */ | 182 | /* libata-eh.c */ |
174 | extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); | 183 | extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); |
175 | extern void ata_scsi_error(struct Scsi_Host *host); | 184 | extern void ata_scsi_error(struct Scsi_Host *host); |
@@ -193,8 +202,34 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
193 | struct ata_link **r_failed_disk); | 202 | struct ata_link **r_failed_disk); |
194 | extern void ata_eh_finish(struct ata_port *ap); | 203 | extern void ata_eh_finish(struct ata_port *ap); |
195 | 204 | ||
205 | /* libata-pmp.c */ | ||
206 | #ifdef CONFIG_SATA_PMP | ||
207 | extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); | ||
208 | extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); | ||
209 | extern int sata_pmp_attach(struct ata_device *dev); | ||
210 | #else /* CONFIG_SATA_PMP */ | ||
211 | static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val) | ||
212 | { | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | |||
216 | static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) | ||
217 | { | ||
218 | return -EINVAL; | ||
219 | } | ||
220 | |||
221 | static inline int sata_pmp_attach(struct ata_device *dev) | ||
222 | { | ||
223 | return -EINVAL; | ||
224 | } | ||
225 | #endif /* CONFIG_SATA_PMP */ | ||
226 | |||
196 | /* libata-sff.c */ | 227 | /* libata-sff.c */ |
228 | #ifdef CONFIG_ATA_SFF | ||
229 | extern void ata_dev_select(struct ata_port *ap, unsigned int device, | ||
230 | unsigned int wait, unsigned int can_sleep); | ||
197 | extern u8 ata_irq_on(struct ata_port *ap); | 231 | extern u8 ata_irq_on(struct ata_port *ap); |
198 | 232 | extern void ata_pio_task(struct work_struct *work); | |
233 | #endif /* CONFIG_ATA_SFF */ | ||
199 | 234 | ||
200 | #endif /* __LIBATA_H__ */ | 235 | #endif /* __LIBATA_H__ */ |
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index bdc3b9d7395c..c5f91e629945 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c | |||
@@ -47,7 +47,7 @@ static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline) | |||
47 | if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0) | 47 | if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0) |
48 | return -ENODEV; | 48 | return -ENODEV; |
49 | 49 | ||
50 | return ata_std_prereset(link, deadline); | 50 | return ata_sff_prereset(link, deadline); |
51 | } | 51 | } |
52 | 52 | ||
53 | /** | 53 | /** |
@@ -68,20 +68,6 @@ static int pacpi_cable_detect(struct ata_port *ap) | |||
68 | } | 68 | } |
69 | 69 | ||
70 | /** | 70 | /** |
71 | * pacpi_error_handler - Setup and error handler | ||
72 | * @ap: Port to handle | ||
73 | * | ||
74 | * LOCKING: | ||
75 | * None (inherited from caller). | ||
76 | */ | ||
77 | |||
78 | static void pacpi_error_handler(struct ata_port *ap) | ||
79 | { | ||
80 | ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, NULL, | ||
81 | ata_std_postreset); | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * pacpi_discover_modes - filter non ACPI modes | 71 | * pacpi_discover_modes - filter non ACPI modes |
86 | * @adev: ATA device | 72 | * @adev: ATA device |
87 | * @mask: proposed modes | 73 | * @mask: proposed modes |
@@ -120,7 +106,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device | |||
120 | static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) | 106 | static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) |
121 | { | 107 | { |
122 | struct pata_acpi *acpi = adev->link->ap->private_data; | 108 | struct pata_acpi *acpi = adev->link->ap->private_data; |
123 | return ata_pci_default_filter(adev, mask & acpi->mask[adev->devno]); | 109 | return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]); |
124 | } | 110 | } |
125 | 111 | ||
126 | /** | 112 | /** |
@@ -176,7 +162,7 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
176 | } | 162 | } |
177 | 163 | ||
178 | /** | 164 | /** |
179 | * pacpi_qc_issue_prot - command issue | 165 | * pacpi_qc_issue - command issue |
180 | * @qc: command pending | 166 | * @qc: command pending |
181 | * | 167 | * |
182 | * Called when the libata layer is about to issue a command. We wrap | 168 | * Called when the libata layer is about to issue a command. We wrap |
@@ -184,14 +170,14 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
184 | * necessary. | 170 | * necessary. |
185 | */ | 171 | */ |
186 | 172 | ||
187 | static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc) | 173 | static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc) |
188 | { | 174 | { |
189 | struct ata_port *ap = qc->ap; | 175 | struct ata_port *ap = qc->ap; |
190 | struct ata_device *adev = qc->dev; | 176 | struct ata_device *adev = qc->dev; |
191 | struct pata_acpi *acpi = ap->private_data; | 177 | struct pata_acpi *acpi = ap->private_data; |
192 | 178 | ||
193 | if (acpi->gtm.flags & 0x10) | 179 | if (acpi->gtm.flags & 0x10) |
194 | return ata_qc_issue_prot(qc); | 180 | return ata_sff_qc_issue(qc); |
195 | 181 | ||
196 | if (adev != acpi->last) { | 182 | if (adev != acpi->last) { |
197 | pacpi_set_piomode(ap, adev); | 183 | pacpi_set_piomode(ap, adev); |
@@ -199,7 +185,7 @@ static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc) | |||
199 | pacpi_set_dmamode(ap, adev); | 185 | pacpi_set_dmamode(ap, adev); |
200 | acpi->last = adev; | 186 | acpi->last = adev; |
201 | } | 187 | } |
202 | return ata_qc_issue_prot(qc); | 188 | return ata_sff_qc_issue(qc); |
203 | } | 189 | } |
204 | 190 | ||
205 | /** | 191 | /** |
@@ -232,57 +218,17 @@ static int pacpi_port_start(struct ata_port *ap) | |||
232 | } | 218 | } |
233 | 219 | ||
234 | static struct scsi_host_template pacpi_sht = { | 220 | static struct scsi_host_template pacpi_sht = { |
235 | .module = THIS_MODULE, | 221 | ATA_BMDMA_SHT(DRV_NAME), |
236 | .name = DRV_NAME, | ||
237 | .ioctl = ata_scsi_ioctl, | ||
238 | .queuecommand = ata_scsi_queuecmd, | ||
239 | .can_queue = ATA_DEF_QUEUE, | ||
240 | .this_id = ATA_SHT_THIS_ID, | ||
241 | .sg_tablesize = LIBATA_MAX_PRD, | ||
242 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
243 | .emulated = ATA_SHT_EMULATED, | ||
244 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
245 | .proc_name = DRV_NAME, | ||
246 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
247 | .slave_configure = ata_scsi_slave_config, | ||
248 | .slave_destroy = ata_scsi_slave_destroy, | ||
249 | /* Use standard CHS mapping rules */ | ||
250 | .bios_param = ata_std_bios_param, | ||
251 | }; | 222 | }; |
252 | 223 | ||
253 | static const struct ata_port_operations pacpi_ops = { | 224 | static struct ata_port_operations pacpi_ops = { |
225 | .inherits = &ata_bmdma_port_ops, | ||
226 | .qc_issue = pacpi_qc_issue, | ||
227 | .cable_detect = pacpi_cable_detect, | ||
228 | .mode_filter = pacpi_mode_filter, | ||
254 | .set_piomode = pacpi_set_piomode, | 229 | .set_piomode = pacpi_set_piomode, |
255 | .set_dmamode = pacpi_set_dmamode, | 230 | .set_dmamode = pacpi_set_dmamode, |
256 | .mode_filter = pacpi_mode_filter, | 231 | .prereset = pacpi_pre_reset, |
257 | |||
258 | /* Task file is PCI ATA format, use helpers */ | ||
259 | .tf_load = ata_tf_load, | ||
260 | .tf_read = ata_tf_read, | ||
261 | .check_status = ata_check_status, | ||
262 | .exec_command = ata_exec_command, | ||
263 | .dev_select = ata_std_dev_select, | ||
264 | |||
265 | .freeze = ata_bmdma_freeze, | ||
266 | .thaw = ata_bmdma_thaw, | ||
267 | .error_handler = pacpi_error_handler, | ||
268 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
269 | .cable_detect = pacpi_cable_detect, | ||
270 | |||
271 | /* BMDMA handling is PCI ATA format, use helpers */ | ||
272 | .bmdma_setup = ata_bmdma_setup, | ||
273 | .bmdma_start = ata_bmdma_start, | ||
274 | .bmdma_stop = ata_bmdma_stop, | ||
275 | .bmdma_status = ata_bmdma_status, | ||
276 | .qc_prep = ata_qc_prep, | ||
277 | .qc_issue = pacpi_qc_issue_prot, | ||
278 | .data_xfer = ata_data_xfer, | ||
279 | |||
280 | /* Timeout handling */ | ||
281 | .irq_handler = ata_interrupt, | ||
282 | .irq_clear = ata_bmdma_irq_clear, | ||
283 | .irq_on = ata_irq_on, | ||
284 | |||
285 | /* Generic PATA PCI ATA helpers */ | ||
286 | .port_start = pacpi_port_start, | 232 | .port_start = pacpi_port_start, |
287 | }; | 233 | }; |
288 | 234 | ||
@@ -304,7 +250,6 @@ static const struct ata_port_operations pacpi_ops = { | |||
304 | static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | 250 | static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) |
305 | { | 251 | { |
306 | static const struct ata_port_info info = { | 252 | static const struct ata_port_info info = { |
307 | .sht = &pacpi_sht, | ||
308 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, | 253 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, |
309 | 254 | ||
310 | .pio_mask = 0x1f, | 255 | .pio_mask = 0x1f, |
@@ -314,7 +259,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
314 | .port_ops = &pacpi_ops, | 259 | .port_ops = &pacpi_ops, |
315 | }; | 260 | }; |
316 | const struct ata_port_info *ppi[] = { &info, NULL }; | 261 | const struct ata_port_info *ppi[] = { &info, NULL }; |
317 | return ata_pci_init_one(pdev, ppi); | 262 | return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL); |
318 | } | 263 | } |
319 | 264 | ||
320 | static const struct pci_device_id pacpi_pci_tbl[] = { | 265 | static const struct pci_device_id pacpi_pci_tbl[] = { |
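The pata_acpi conversion above is the pattern every driver below follows: the hand-rolled scsi_host_template and fully spelled-out ata_port_operations tables collapse to the ATA_BMDMA_SHT() macro plus an ops structure that names only the hooks the driver overrides, inheriting everything else from ata_bmdma_port_ops, with probing routed through ata_pci_sff_init_one(). A condensed sketch of that shape, with hypothetical foo_* names standing in for a real driver:

static struct scsi_host_template foo_sht = {
        ATA_BMDMA_SHT("pata_foo"),              /* all standard SHT fields */
};

static struct ata_port_operations foo_ops = {
        .inherits       = &ata_bmdma_port_ops,  /* default SFF/BMDMA hooks */
        .cable_detect   = ata_cable_40wire,     /* override only what differs */
        .set_piomode    = foo_set_piomode,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        static const struct ata_port_info info = {
                .flags    = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .port_ops = &foo_ops,
        };
        const struct ata_port_info *ppi[] = { &info, NULL };

        return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL);
}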
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 511a830b6256..fcabe46f262b 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c | |||
@@ -121,7 +121,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) | |||
121 | ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); | 121 | ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); |
122 | if (strstr(model_num, "WDC")) | 122 | if (strstr(model_num, "WDC")) |
123 | return mask &= ~ATA_MASK_UDMA; | 123 | return mask &= ~ATA_MASK_UDMA; |
124 | return ata_pci_default_filter(adev, mask); | 124 | return ata_bmdma_mode_filter(adev, mask); |
125 | } | 125 | } |
126 | 126 | ||
127 | /** | 127 | /** |
@@ -339,21 +339,7 @@ static int ali_check_atapi_dma(struct ata_queued_cmd *qc) | |||
339 | } | 339 | } |
340 | 340 | ||
341 | static struct scsi_host_template ali_sht = { | 341 | static struct scsi_host_template ali_sht = { |
342 | .module = THIS_MODULE, | 342 | ATA_BMDMA_SHT(DRV_NAME), |
343 | .name = DRV_NAME, | ||
344 | .ioctl = ata_scsi_ioctl, | ||
345 | .queuecommand = ata_scsi_queuecmd, | ||
346 | .can_queue = ATA_DEF_QUEUE, | ||
347 | .this_id = ATA_SHT_THIS_ID, | ||
348 | .sg_tablesize = LIBATA_MAX_PRD, | ||
349 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
350 | .emulated = ATA_SHT_EMULATED, | ||
351 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
352 | .proc_name = DRV_NAME, | ||
353 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
354 | .slave_configure = ata_scsi_slave_config, | ||
355 | .slave_destroy = ata_scsi_slave_destroy, | ||
356 | .bios_param = ata_std_bios_param, | ||
357 | }; | 343 | }; |
358 | 344 | ||
359 | /* | 345 | /* |
@@ -361,29 +347,15 @@ static struct scsi_host_template ali_sht = { | |||
361 | */ | 347 | */ |
362 | 348 | ||
363 | static struct ata_port_operations ali_early_port_ops = { | 349 | static struct ata_port_operations ali_early_port_ops = { |
364 | .set_piomode = ali_set_piomode, | 350 | .inherits = &ata_sff_port_ops, |
365 | .tf_load = ata_tf_load, | ||
366 | .tf_read = ata_tf_read, | ||
367 | .check_status = ata_check_status, | ||
368 | .exec_command = ata_exec_command, | ||
369 | .dev_select = ata_std_dev_select, | ||
370 | |||
371 | .freeze = ata_bmdma_freeze, | ||
372 | .thaw = ata_bmdma_thaw, | ||
373 | .error_handler = ata_bmdma_error_handler, | ||
374 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
375 | .cable_detect = ata_cable_40wire, | 351 | .cable_detect = ata_cable_40wire, |
352 | .set_piomode = ali_set_piomode, | ||
353 | }; | ||
376 | 354 | ||
377 | .qc_prep = ata_qc_prep, | 355 | static const struct ata_port_operations ali_dma_base_ops = { |
378 | .qc_issue = ata_qc_issue_prot, | 356 | .inherits = &ata_bmdma_port_ops, |
379 | 357 | .set_piomode = ali_set_piomode, | |
380 | .data_xfer = ata_data_xfer, | 358 | .set_dmamode = ali_set_dmamode, |
381 | |||
382 | .irq_handler = ata_interrupt, | ||
383 | .irq_clear = ata_bmdma_irq_clear, | ||
384 | .irq_on = ata_irq_on, | ||
385 | |||
386 | .port_start = ata_sff_port_start, | ||
387 | }; | 359 | }; |
388 | 360 | ||
389 | /* | 361 | /* |
@@ -391,115 +363,31 @@ static struct ata_port_operations ali_early_port_ops = { | |||
391 | * detect | 363 | * detect |
392 | */ | 364 | */ |
393 | static struct ata_port_operations ali_20_port_ops = { | 365 | static struct ata_port_operations ali_20_port_ops = { |
394 | .set_piomode = ali_set_piomode, | 366 | .inherits = &ali_dma_base_ops, |
395 | .set_dmamode = ali_set_dmamode, | 367 | .cable_detect = ata_cable_40wire, |
396 | .mode_filter = ali_20_filter, | 368 | .mode_filter = ali_20_filter, |
397 | |||
398 | .tf_load = ata_tf_load, | ||
399 | .tf_read = ata_tf_read, | ||
400 | .check_atapi_dma = ali_check_atapi_dma, | 369 | .check_atapi_dma = ali_check_atapi_dma, |
401 | .check_status = ata_check_status, | ||
402 | .exec_command = ata_exec_command, | ||
403 | .dev_select = ata_std_dev_select, | ||
404 | .dev_config = ali_lock_sectors, | 370 | .dev_config = ali_lock_sectors, |
405 | |||
406 | .freeze = ata_bmdma_freeze, | ||
407 | .thaw = ata_bmdma_thaw, | ||
408 | .error_handler = ata_bmdma_error_handler, | ||
409 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
410 | .cable_detect = ata_cable_40wire, | ||
411 | |||
412 | .bmdma_setup = ata_bmdma_setup, | ||
413 | .bmdma_start = ata_bmdma_start, | ||
414 | .bmdma_stop = ata_bmdma_stop, | ||
415 | .bmdma_status = ata_bmdma_status, | ||
416 | |||
417 | .qc_prep = ata_qc_prep, | ||
418 | .qc_issue = ata_qc_issue_prot, | ||
419 | |||
420 | .data_xfer = ata_data_xfer, | ||
421 | |||
422 | .irq_handler = ata_interrupt, | ||
423 | .irq_clear = ata_bmdma_irq_clear, | ||
424 | .irq_on = ata_irq_on, | ||
425 | |||
426 | .port_start = ata_sff_port_start, | ||
427 | }; | 371 | }; |
428 | 372 | ||
429 | /* | 373 | /* |
430 | * Port operations for DMA capable ALi with cable detect | 374 | * Port operations for DMA capable ALi with cable detect |
431 | */ | 375 | */ |
432 | static struct ata_port_operations ali_c2_port_ops = { | 376 | static struct ata_port_operations ali_c2_port_ops = { |
433 | .set_piomode = ali_set_piomode, | 377 | .inherits = &ali_dma_base_ops, |
434 | .set_dmamode = ali_set_dmamode, | ||
435 | .mode_filter = ata_pci_default_filter, | ||
436 | .tf_load = ata_tf_load, | ||
437 | .tf_read = ata_tf_read, | ||
438 | .check_atapi_dma = ali_check_atapi_dma, | 378 | .check_atapi_dma = ali_check_atapi_dma, |
439 | .check_status = ata_check_status, | ||
440 | .exec_command = ata_exec_command, | ||
441 | .dev_select = ata_std_dev_select, | ||
442 | .dev_config = ali_lock_sectors, | ||
443 | |||
444 | .freeze = ata_bmdma_freeze, | ||
445 | .thaw = ata_bmdma_thaw, | ||
446 | .error_handler = ata_bmdma_error_handler, | ||
447 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
448 | .cable_detect = ali_c2_cable_detect, | 379 | .cable_detect = ali_c2_cable_detect, |
449 | 380 | .dev_config = ali_lock_sectors, | |
450 | .bmdma_setup = ata_bmdma_setup, | ||
451 | .bmdma_start = ata_bmdma_start, | ||
452 | .bmdma_stop = ata_bmdma_stop, | ||
453 | .bmdma_status = ata_bmdma_status, | ||
454 | |||
455 | .qc_prep = ata_qc_prep, | ||
456 | .qc_issue = ata_qc_issue_prot, | ||
457 | |||
458 | .data_xfer = ata_data_xfer, | ||
459 | |||
460 | .irq_handler = ata_interrupt, | ||
461 | .irq_clear = ata_bmdma_irq_clear, | ||
462 | .irq_on = ata_irq_on, | ||
463 | |||
464 | .port_start = ata_sff_port_start, | ||
465 | }; | 381 | }; |
466 | 382 | ||
467 | /* | 383 | /* |
468 | * Port operations for DMA capable ALi with cable detect and LBA48 | 384 | * Port operations for DMA capable ALi with cable detect and LBA48 |
469 | */ | 385 | */ |
470 | static struct ata_port_operations ali_c5_port_ops = { | 386 | static struct ata_port_operations ali_c5_port_ops = { |
471 | .set_piomode = ali_set_piomode, | 387 | .inherits = &ali_dma_base_ops, |
472 | .set_dmamode = ali_set_dmamode, | ||
473 | .mode_filter = ata_pci_default_filter, | ||
474 | .tf_load = ata_tf_load, | ||
475 | .tf_read = ata_tf_read, | ||
476 | .check_atapi_dma = ali_check_atapi_dma, | 388 | .check_atapi_dma = ali_check_atapi_dma, |
477 | .check_status = ata_check_status, | ||
478 | .exec_command = ata_exec_command, | ||
479 | .dev_select = ata_std_dev_select, | ||
480 | .dev_config = ali_warn_atapi_dma, | 389 | .dev_config = ali_warn_atapi_dma, |
481 | |||
482 | .freeze = ata_bmdma_freeze, | ||
483 | .thaw = ata_bmdma_thaw, | ||
484 | .error_handler = ata_bmdma_error_handler, | ||
485 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
486 | .cable_detect = ali_c2_cable_detect, | 390 | .cable_detect = ali_c2_cable_detect, |
487 | |||
488 | .bmdma_setup = ata_bmdma_setup, | ||
489 | .bmdma_start = ata_bmdma_start, | ||
490 | .bmdma_stop = ata_bmdma_stop, | ||
491 | .bmdma_status = ata_bmdma_status, | ||
492 | |||
493 | .qc_prep = ata_qc_prep, | ||
494 | .qc_issue = ata_qc_issue_prot, | ||
495 | |||
496 | .data_xfer = ata_data_xfer, | ||
497 | |||
498 | .irq_handler = ata_interrupt, | ||
499 | .irq_clear = ata_bmdma_irq_clear, | ||
500 | .irq_on = ata_irq_on, | ||
501 | |||
502 | .port_start = ata_sff_port_start, | ||
503 | }; | 391 | }; |
504 | 392 | ||
505 | 393 | ||
@@ -561,7 +449,7 @@ static void ali_init_chipset(struct pci_dev *pdev) | |||
561 | } | 449 | } |
562 | pci_dev_put(isa_bridge); | 450 | pci_dev_put(isa_bridge); |
563 | pci_dev_put(north); | 451 | pci_dev_put(north); |
564 | ata_pci_clear_simplex(pdev); | 452 | ata_pci_bmdma_clear_simplex(pdev); |
565 | } | 453 | } |
566 | /** | 454 | /** |
567 | * ali_init_one - discovery callback | 455 | * ali_init_one - discovery callback |
@@ -575,14 +463,12 @@ static void ali_init_chipset(struct pci_dev *pdev) | |||
575 | static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 463 | static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
576 | { | 464 | { |
577 | static const struct ata_port_info info_early = { | 465 | static const struct ata_port_info info_early = { |
578 | .sht = &ali_sht, | ||
579 | .flags = ATA_FLAG_SLAVE_POSS, | 466 | .flags = ATA_FLAG_SLAVE_POSS, |
580 | .pio_mask = 0x1f, | 467 | .pio_mask = 0x1f, |
581 | .port_ops = &ali_early_port_ops | 468 | .port_ops = &ali_early_port_ops |
582 | }; | 469 | }; |
583 | /* Revision 0x20 added DMA */ | 470 | /* Revision 0x20 added DMA */ |
584 | static const struct ata_port_info info_20 = { | 471 | static const struct ata_port_info info_20 = { |
585 | .sht = &ali_sht, | ||
586 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, | 472 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, |
587 | .pio_mask = 0x1f, | 473 | .pio_mask = 0x1f, |
588 | .mwdma_mask = 0x07, | 474 | .mwdma_mask = 0x07, |
@@ -590,7 +476,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
590 | }; | 476 | }; |
591 | /* Revision 0x20 with support logic added UDMA */ | 477 | /* Revision 0x20 with support logic added UDMA */ |
592 | static const struct ata_port_info info_20_udma = { | 478 | static const struct ata_port_info info_20_udma = { |
593 | .sht = &ali_sht, | ||
594 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, | 479 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, |
595 | .pio_mask = 0x1f, | 480 | .pio_mask = 0x1f, |
596 | .mwdma_mask = 0x07, | 481 | .mwdma_mask = 0x07, |
@@ -599,7 +484,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
599 | }; | 484 | }; |
600 | /* Revision 0xC2 adds UDMA66 */ | 485 | /* Revision 0xC2 adds UDMA66 */ |
601 | static const struct ata_port_info info_c2 = { | 486 | static const struct ata_port_info info_c2 = { |
602 | .sht = &ali_sht, | ||
603 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, | 487 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, |
604 | .pio_mask = 0x1f, | 488 | .pio_mask = 0x1f, |
605 | .mwdma_mask = 0x07, | 489 | .mwdma_mask = 0x07, |
@@ -608,7 +492,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
608 | }; | 492 | }; |
609 | /* Revision 0xC3 is UDMA66 for now */ | 493 | /* Revision 0xC3 is UDMA66 for now */ |
610 | static const struct ata_port_info info_c3 = { | 494 | static const struct ata_port_info info_c3 = { |
611 | .sht = &ali_sht, | ||
612 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, | 495 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, |
613 | .pio_mask = 0x1f, | 496 | .pio_mask = 0x1f, |
614 | .mwdma_mask = 0x07, | 497 | .mwdma_mask = 0x07, |
@@ -617,7 +500,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
617 | }; | 500 | }; |
618 | /* Revision 0xC4 is UDMA100 */ | 501 | /* Revision 0xC4 is UDMA100 */ |
619 | static const struct ata_port_info info_c4 = { | 502 | static const struct ata_port_info info_c4 = { |
620 | .sht = &ali_sht, | ||
621 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, | 503 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, |
622 | .pio_mask = 0x1f, | 504 | .pio_mask = 0x1f, |
623 | .mwdma_mask = 0x07, | 505 | .mwdma_mask = 0x07, |
@@ -626,7 +508,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
626 | }; | 508 | }; |
627 | /* Revision 0xC5 is UDMA133 with LBA48 DMA */ | 509 | /* Revision 0xC5 is UDMA133 with LBA48 DMA */ |
628 | static const struct ata_port_info info_c5 = { | 510 | static const struct ata_port_info info_c5 = { |
629 | .sht = &ali_sht, | ||
630 | .flags = ATA_FLAG_SLAVE_POSS, | 511 | .flags = ATA_FLAG_SLAVE_POSS, |
631 | .pio_mask = 0x1f, | 512 | .pio_mask = 0x1f, |
632 | .mwdma_mask = 0x07, | 513 | .mwdma_mask = 0x07, |
@@ -637,6 +518,11 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
637 | const struct ata_port_info *ppi[] = { NULL, NULL }; | 518 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
638 | u8 tmp; | 519 | u8 tmp; |
639 | struct pci_dev *isa_bridge; | 520 | struct pci_dev *isa_bridge; |
521 | int rc; | ||
522 | |||
523 | rc = pcim_enable_device(pdev); | ||
524 | if (rc) | ||
525 | return rc; | ||
640 | 526 | ||
641 | /* | 527 | /* |
642 | * The chipset revision selects the driver operations and | 528 | * The chipset revision selects the driver operations and |
@@ -666,14 +552,21 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
666 | ppi[0] = &info_20_udma; | 552 | ppi[0] = &info_20_udma; |
667 | pci_dev_put(isa_bridge); | 553 | pci_dev_put(isa_bridge); |
668 | } | 554 | } |
669 | return ata_pci_init_one(pdev, ppi); | 555 | return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL); |
670 | } | 556 | } |
671 | 557 | ||
672 | #ifdef CONFIG_PM | 558 | #ifdef CONFIG_PM |
673 | static int ali_reinit_one(struct pci_dev *pdev) | 559 | static int ali_reinit_one(struct pci_dev *pdev) |
674 | { | 560 | { |
561 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
562 | int rc; | ||
563 | |||
564 | rc = ata_pci_device_do_resume(pdev); | ||
565 | if (rc) | ||
566 | return rc; | ||
675 | ali_init_chipset(pdev); | 567 | ali_init_chipset(pdev); |
676 | return ata_pci_device_resume(pdev); | 568 | ata_host_resume(host); |
569 | return 0; | ||
677 | } | 570 | } |
678 | #endif | 571 | #endif |
679 | 572 | ||
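The suspend/resume change in ali_reinit_one() above is the split used throughout this series: resume the PCI device first, redo any chipset setup, then resume the ATA host explicitly, instead of a single ata_pci_device_resume() call. A generic sketch of the shape (foo_init_chipset is a placeholder for the driver's own re-setup):

#ifdef CONFIG_PM
static int foo_reinit_one(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        int rc;

        rc = ata_pci_device_do_resume(pdev);    /* bring the PCI device back */
        if (rc)
                return rc;

        foo_init_chipset(pdev);                 /* driver-specific re-setup */

        ata_host_resume(host);                  /* then restart the ATA host */
        return 0;
}
#endif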
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 4b8d9b592ca4..26665c396485 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c | |||
@@ -56,7 +56,9 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse | |||
56 | u8 t; | 56 | u8 t; |
57 | 57 | ||
58 | T = 1000000000 / amd_clock; | 58 | T = 1000000000 / amd_clock; |
59 | UT = T / min_t(int, max_t(int, clock, 1), 2); | 59 | UT = T; |
60 | if (clock >= 2) | ||
61 | UT = T / 2; | ||
60 | 62 | ||
61 | if (ata_timing_compute(adev, speed, &at, T, UT) < 0) { | 63 | if (ata_timing_compute(adev, speed, &at, T, UT) < 0) { |
62 | dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed); | 64 | dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed); |
@@ -141,13 +143,7 @@ static int amd_pre_reset(struct ata_link *link, unsigned long deadline) | |||
141 | if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) | 143 | if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) |
142 | return -ENOENT; | 144 | return -ENOENT; |
143 | 145 | ||
144 | return ata_std_prereset(link, deadline); | 146 | return ata_sff_prereset(link, deadline); |
145 | } | ||
146 | |||
147 | static void amd_error_handler(struct ata_port *ap) | ||
148 | { | ||
149 | ata_bmdma_drive_eh(ap, amd_pre_reset, ata_std_softreset, NULL, | ||
150 | ata_std_postreset); | ||
151 | } | 147 | } |
152 | 148 | ||
153 | static int amd_cable_detect(struct ata_port *ap) | 149 | static int amd_cable_detect(struct ata_port *ap) |
@@ -297,14 +293,7 @@ static int nv_pre_reset(struct ata_link *link, unsigned long deadline) | |||
297 | if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) | 293 | if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) |
298 | return -ENOENT; | 294 | return -ENOENT; |
299 | 295 | ||
300 | return ata_std_prereset(link, deadline); | 296 | return ata_sff_prereset(link, deadline); |
301 | } | ||
302 | |||
303 | static void nv_error_handler(struct ata_port *ap) | ||
304 | { | ||
305 | ata_bmdma_drive_eh(ap, nv_pre_reset, | ||
306 | ata_std_softreset, NULL, | ||
307 | ata_std_postreset); | ||
308 | } | 297 | } |
309 | 298 | ||
310 | /** | 299 | /** |
@@ -353,228 +342,66 @@ static void nv_host_stop(struct ata_host *host) | |||
353 | } | 342 | } |
354 | 343 | ||
355 | static struct scsi_host_template amd_sht = { | 344 | static struct scsi_host_template amd_sht = { |
356 | .module = THIS_MODULE, | 345 | ATA_BMDMA_SHT(DRV_NAME), |
357 | .name = DRV_NAME, | 346 | }; |
358 | .ioctl = ata_scsi_ioctl, | 347 | |
359 | .queuecommand = ata_scsi_queuecmd, | 348 | static const struct ata_port_operations amd_base_port_ops = { |
360 | .can_queue = ATA_DEF_QUEUE, | 349 | .inherits = &ata_bmdma_port_ops, |
361 | .this_id = ATA_SHT_THIS_ID, | 350 | .prereset = amd_pre_reset, |
362 | .sg_tablesize = LIBATA_MAX_PRD, | ||
363 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
364 | .emulated = ATA_SHT_EMULATED, | ||
365 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
366 | .proc_name = DRV_NAME, | ||
367 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
368 | .slave_configure = ata_scsi_slave_config, | ||
369 | .slave_destroy = ata_scsi_slave_destroy, | ||
370 | .bios_param = ata_std_bios_param, | ||
371 | }; | 351 | }; |
372 | 352 | ||
373 | static struct ata_port_operations amd33_port_ops = { | 353 | static struct ata_port_operations amd33_port_ops = { |
354 | .inherits = &amd_base_port_ops, | ||
355 | .cable_detect = ata_cable_40wire, | ||
374 | .set_piomode = amd33_set_piomode, | 356 | .set_piomode = amd33_set_piomode, |
375 | .set_dmamode = amd33_set_dmamode, | 357 | .set_dmamode = amd33_set_dmamode, |
376 | .mode_filter = ata_pci_default_filter, | ||
377 | .tf_load = ata_tf_load, | ||
378 | .tf_read = ata_tf_read, | ||
379 | .check_status = ata_check_status, | ||
380 | .exec_command = ata_exec_command, | ||
381 | .dev_select = ata_std_dev_select, | ||
382 | |||
383 | .freeze = ata_bmdma_freeze, | ||
384 | .thaw = ata_bmdma_thaw, | ||
385 | .error_handler = amd_error_handler, | ||
386 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
387 | .cable_detect = ata_cable_40wire, | ||
388 | |||
389 | .bmdma_setup = ata_bmdma_setup, | ||
390 | .bmdma_start = ata_bmdma_start, | ||
391 | .bmdma_stop = ata_bmdma_stop, | ||
392 | .bmdma_status = ata_bmdma_status, | ||
393 | |||
394 | .qc_prep = ata_qc_prep, | ||
395 | .qc_issue = ata_qc_issue_prot, | ||
396 | |||
397 | .data_xfer = ata_data_xfer, | ||
398 | |||
399 | .irq_handler = ata_interrupt, | ||
400 | .irq_clear = ata_bmdma_irq_clear, | ||
401 | .irq_on = ata_irq_on, | ||
402 | |||
403 | .port_start = ata_sff_port_start, | ||
404 | }; | 358 | }; |
405 | 359 | ||
406 | static struct ata_port_operations amd66_port_ops = { | 360 | static struct ata_port_operations amd66_port_ops = { |
361 | .inherits = &amd_base_port_ops, | ||
362 | .cable_detect = ata_cable_unknown, | ||
407 | .set_piomode = amd66_set_piomode, | 363 | .set_piomode = amd66_set_piomode, |
408 | .set_dmamode = amd66_set_dmamode, | 364 | .set_dmamode = amd66_set_dmamode, |
409 | .mode_filter = ata_pci_default_filter, | ||
410 | .tf_load = ata_tf_load, | ||
411 | .tf_read = ata_tf_read, | ||
412 | .check_status = ata_check_status, | ||
413 | .exec_command = ata_exec_command, | ||
414 | .dev_select = ata_std_dev_select, | ||
415 | |||
416 | .freeze = ata_bmdma_freeze, | ||
417 | .thaw = ata_bmdma_thaw, | ||
418 | .error_handler = amd_error_handler, | ||
419 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
420 | .cable_detect = ata_cable_unknown, | ||
421 | |||
422 | .bmdma_setup = ata_bmdma_setup, | ||
423 | .bmdma_start = ata_bmdma_start, | ||
424 | .bmdma_stop = ata_bmdma_stop, | ||
425 | .bmdma_status = ata_bmdma_status, | ||
426 | |||
427 | .qc_prep = ata_qc_prep, | ||
428 | .qc_issue = ata_qc_issue_prot, | ||
429 | |||
430 | .data_xfer = ata_data_xfer, | ||
431 | |||
432 | .irq_handler = ata_interrupt, | ||
433 | .irq_clear = ata_bmdma_irq_clear, | ||
434 | .irq_on = ata_irq_on, | ||
435 | |||
436 | .port_start = ata_sff_port_start, | ||
437 | }; | 365 | }; |
438 | 366 | ||
439 | static struct ata_port_operations amd100_port_ops = { | 367 | static struct ata_port_operations amd100_port_ops = { |
368 | .inherits = &amd_base_port_ops, | ||
369 | .cable_detect = ata_cable_unknown, | ||
440 | .set_piomode = amd100_set_piomode, | 370 | .set_piomode = amd100_set_piomode, |
441 | .set_dmamode = amd100_set_dmamode, | 371 | .set_dmamode = amd100_set_dmamode, |
442 | .mode_filter = ata_pci_default_filter, | ||
443 | .tf_load = ata_tf_load, | ||
444 | .tf_read = ata_tf_read, | ||
445 | .check_status = ata_check_status, | ||
446 | .exec_command = ata_exec_command, | ||
447 | .dev_select = ata_std_dev_select, | ||
448 | |||
449 | .freeze = ata_bmdma_freeze, | ||
450 | .thaw = ata_bmdma_thaw, | ||
451 | .error_handler = amd_error_handler, | ||
452 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
453 | .cable_detect = ata_cable_unknown, | ||
454 | |||
455 | .bmdma_setup = ata_bmdma_setup, | ||
456 | .bmdma_start = ata_bmdma_start, | ||
457 | .bmdma_stop = ata_bmdma_stop, | ||
458 | .bmdma_status = ata_bmdma_status, | ||
459 | |||
460 | .qc_prep = ata_qc_prep, | ||
461 | .qc_issue = ata_qc_issue_prot, | ||
462 | |||
463 | .data_xfer = ata_data_xfer, | ||
464 | |||
465 | .irq_handler = ata_interrupt, | ||
466 | .irq_clear = ata_bmdma_irq_clear, | ||
467 | .irq_on = ata_irq_on, | ||
468 | |||
469 | .port_start = ata_sff_port_start, | ||
470 | }; | 372 | }; |
471 | 373 | ||
472 | static struct ata_port_operations amd133_port_ops = { | 374 | static struct ata_port_operations amd133_port_ops = { |
375 | .inherits = &amd_base_port_ops, | ||
376 | .cable_detect = amd_cable_detect, | ||
473 | .set_piomode = amd133_set_piomode, | 377 | .set_piomode = amd133_set_piomode, |
474 | .set_dmamode = amd133_set_dmamode, | 378 | .set_dmamode = amd133_set_dmamode, |
475 | .mode_filter = ata_pci_default_filter, | 379 | }; |
476 | .tf_load = ata_tf_load, | ||
477 | .tf_read = ata_tf_read, | ||
478 | .check_status = ata_check_status, | ||
479 | .exec_command = ata_exec_command, | ||
480 | .dev_select = ata_std_dev_select, | ||
481 | |||
482 | .freeze = ata_bmdma_freeze, | ||
483 | .thaw = ata_bmdma_thaw, | ||
484 | .error_handler = amd_error_handler, | ||
485 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
486 | .cable_detect = amd_cable_detect, | ||
487 | |||
488 | .bmdma_setup = ata_bmdma_setup, | ||
489 | .bmdma_start = ata_bmdma_start, | ||
490 | .bmdma_stop = ata_bmdma_stop, | ||
491 | .bmdma_status = ata_bmdma_status, | ||
492 | |||
493 | .qc_prep = ata_qc_prep, | ||
494 | .qc_issue = ata_qc_issue_prot, | ||
495 | |||
496 | .data_xfer = ata_data_xfer, | ||
497 | |||
498 | .irq_handler = ata_interrupt, | ||
499 | .irq_clear = ata_bmdma_irq_clear, | ||
500 | .irq_on = ata_irq_on, | ||
501 | 380 | ||
502 | .port_start = ata_sff_port_start, | 381 | static const struct ata_port_operations nv_base_port_ops = { |
382 | .inherits = &ata_bmdma_port_ops, | ||
383 | .cable_detect = ata_cable_ignore, | ||
384 | .mode_filter = nv_mode_filter, | ||
385 | .prereset = nv_pre_reset, | ||
386 | .host_stop = nv_host_stop, | ||
503 | }; | 387 | }; |
504 | 388 | ||
505 | static struct ata_port_operations nv100_port_ops = { | 389 | static struct ata_port_operations nv100_port_ops = { |
390 | .inherits = &nv_base_port_ops, | ||
506 | .set_piomode = nv100_set_piomode, | 391 | .set_piomode = nv100_set_piomode, |
507 | .set_dmamode = nv100_set_dmamode, | 392 | .set_dmamode = nv100_set_dmamode, |
508 | .tf_load = ata_tf_load, | ||
509 | .tf_read = ata_tf_read, | ||
510 | .check_status = ata_check_status, | ||
511 | .exec_command = ata_exec_command, | ||
512 | .dev_select = ata_std_dev_select, | ||
513 | |||
514 | .freeze = ata_bmdma_freeze, | ||
515 | .thaw = ata_bmdma_thaw, | ||
516 | .error_handler = nv_error_handler, | ||
517 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
518 | .cable_detect = ata_cable_ignore, | ||
519 | .mode_filter = nv_mode_filter, | ||
520 | |||
521 | .bmdma_setup = ata_bmdma_setup, | ||
522 | .bmdma_start = ata_bmdma_start, | ||
523 | .bmdma_stop = ata_bmdma_stop, | ||
524 | .bmdma_status = ata_bmdma_status, | ||
525 | |||
526 | .qc_prep = ata_qc_prep, | ||
527 | .qc_issue = ata_qc_issue_prot, | ||
528 | |||
529 | .data_xfer = ata_data_xfer, | ||
530 | |||
531 | .irq_handler = ata_interrupt, | ||
532 | .irq_clear = ata_bmdma_irq_clear, | ||
533 | .irq_on = ata_irq_on, | ||
534 | |||
535 | .port_start = ata_sff_port_start, | ||
536 | .host_stop = nv_host_stop, | ||
537 | }; | 393 | }; |
538 | 394 | ||
539 | static struct ata_port_operations nv133_port_ops = { | 395 | static struct ata_port_operations nv133_port_ops = { |
396 | .inherits = &nv_base_port_ops, | ||
540 | .set_piomode = nv133_set_piomode, | 397 | .set_piomode = nv133_set_piomode, |
541 | .set_dmamode = nv133_set_dmamode, | 398 | .set_dmamode = nv133_set_dmamode, |
542 | .tf_load = ata_tf_load, | ||
543 | .tf_read = ata_tf_read, | ||
544 | .check_status = ata_check_status, | ||
545 | .exec_command = ata_exec_command, | ||
546 | .dev_select = ata_std_dev_select, | ||
547 | |||
548 | .freeze = ata_bmdma_freeze, | ||
549 | .thaw = ata_bmdma_thaw, | ||
550 | .error_handler = nv_error_handler, | ||
551 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
552 | .cable_detect = ata_cable_ignore, | ||
553 | .mode_filter = nv_mode_filter, | ||
554 | |||
555 | .bmdma_setup = ata_bmdma_setup, | ||
556 | .bmdma_start = ata_bmdma_start, | ||
557 | .bmdma_stop = ata_bmdma_stop, | ||
558 | .bmdma_status = ata_bmdma_status, | ||
559 | |||
560 | .qc_prep = ata_qc_prep, | ||
561 | .qc_issue = ata_qc_issue_prot, | ||
562 | |||
563 | .data_xfer = ata_data_xfer, | ||
564 | |||
565 | .irq_handler = ata_interrupt, | ||
566 | .irq_clear = ata_bmdma_irq_clear, | ||
567 | .irq_on = ata_irq_on, | ||
568 | |||
569 | .port_start = ata_sff_port_start, | ||
570 | .host_stop = nv_host_stop, | ||
571 | }; | 399 | }; |
572 | 400 | ||
573 | static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 401 | static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
574 | { | 402 | { |
575 | static const struct ata_port_info info[10] = { | 403 | static const struct ata_port_info info[10] = { |
576 | { /* 0: AMD 7401 */ | 404 | { /* 0: AMD 7401 */ |
577 | .sht = &amd_sht, | ||
578 | .flags = ATA_FLAG_SLAVE_POSS, | 405 | .flags = ATA_FLAG_SLAVE_POSS, |
579 | .pio_mask = 0x1f, | 406 | .pio_mask = 0x1f, |
580 | .mwdma_mask = 0x07, /* No SWDMA */ | 407 | .mwdma_mask = 0x07, /* No SWDMA */ |
@@ -582,7 +409,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
582 | .port_ops = &amd33_port_ops | 409 | .port_ops = &amd33_port_ops |
583 | }, | 410 | }, |
584 | { /* 1: Early AMD7409 - no swdma */ | 411 | { /* 1: Early AMD7409 - no swdma */ |
585 | .sht = &amd_sht, | ||
586 | .flags = ATA_FLAG_SLAVE_POSS, | 412 | .flags = ATA_FLAG_SLAVE_POSS, |
587 | .pio_mask = 0x1f, | 413 | .pio_mask = 0x1f, |
588 | .mwdma_mask = 0x07, | 414 | .mwdma_mask = 0x07, |
@@ -590,7 +416,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
590 | .port_ops = &amd66_port_ops | 416 | .port_ops = &amd66_port_ops |
591 | }, | 417 | }, |
592 | { /* 2: AMD 7409, no swdma errata */ | 418 | { /* 2: AMD 7409, no swdma errata */ |
593 | .sht = &amd_sht, | ||
594 | .flags = ATA_FLAG_SLAVE_POSS, | 419 | .flags = ATA_FLAG_SLAVE_POSS, |
595 | .pio_mask = 0x1f, | 420 | .pio_mask = 0x1f, |
596 | .mwdma_mask = 0x07, | 421 | .mwdma_mask = 0x07, |
@@ -598,7 +423,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
598 | .port_ops = &amd66_port_ops | 423 | .port_ops = &amd66_port_ops |
599 | }, | 424 | }, |
600 | { /* 3: AMD 7411 */ | 425 | { /* 3: AMD 7411 */ |
601 | .sht = &amd_sht, | ||
602 | .flags = ATA_FLAG_SLAVE_POSS, | 426 | .flags = ATA_FLAG_SLAVE_POSS, |
603 | .pio_mask = 0x1f, | 427 | .pio_mask = 0x1f, |
604 | .mwdma_mask = 0x07, | 428 | .mwdma_mask = 0x07, |
@@ -606,7 +430,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
606 | .port_ops = &amd100_port_ops | 430 | .port_ops = &amd100_port_ops |
607 | }, | 431 | }, |
608 | { /* 4: AMD 7441 */ | 432 | { /* 4: AMD 7441 */ |
609 | .sht = &amd_sht, | ||
610 | .flags = ATA_FLAG_SLAVE_POSS, | 433 | .flags = ATA_FLAG_SLAVE_POSS, |
611 | .pio_mask = 0x1f, | 434 | .pio_mask = 0x1f, |
612 | .mwdma_mask = 0x07, | 435 | .mwdma_mask = 0x07, |
@@ -614,7 +437,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
614 | .port_ops = &amd100_port_ops | 437 | .port_ops = &amd100_port_ops |
615 | }, | 438 | }, |
616 | { /* 5: AMD 8111*/ | 439 | { /* 5: AMD 8111*/ |
617 | .sht = &amd_sht, | ||
618 | .flags = ATA_FLAG_SLAVE_POSS, | 440 | .flags = ATA_FLAG_SLAVE_POSS, |
619 | .pio_mask = 0x1f, | 441 | .pio_mask = 0x1f, |
620 | .mwdma_mask = 0x07, | 442 | .mwdma_mask = 0x07, |
@@ -622,7 +444,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
622 | .port_ops = &amd133_port_ops | 444 | .port_ops = &amd133_port_ops |
623 | }, | 445 | }, |
624 | { /* 6: AMD 8111 UDMA 100 (Serenade) */ | 446 | { /* 6: AMD 8111 UDMA 100 (Serenade) */ |
625 | .sht = &amd_sht, | ||
626 | .flags = ATA_FLAG_SLAVE_POSS, | 447 | .flags = ATA_FLAG_SLAVE_POSS, |
627 | .pio_mask = 0x1f, | 448 | .pio_mask = 0x1f, |
628 | .mwdma_mask = 0x07, | 449 | .mwdma_mask = 0x07, |
@@ -630,7 +451,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
630 | .port_ops = &amd133_port_ops | 451 | .port_ops = &amd133_port_ops |
631 | }, | 452 | }, |
632 | { /* 7: Nvidia Nforce */ | 453 | { /* 7: Nvidia Nforce */ |
633 | .sht = &amd_sht, | ||
634 | .flags = ATA_FLAG_SLAVE_POSS, | 454 | .flags = ATA_FLAG_SLAVE_POSS, |
635 | .pio_mask = 0x1f, | 455 | .pio_mask = 0x1f, |
636 | .mwdma_mask = 0x07, | 456 | .mwdma_mask = 0x07, |
@@ -638,7 +458,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
638 | .port_ops = &nv100_port_ops | 458 | .port_ops = &nv100_port_ops |
639 | }, | 459 | }, |
640 | { /* 8: Nvidia Nforce2 and later */ | 460 | { /* 8: Nvidia Nforce2 and later */ |
641 | .sht = &amd_sht, | ||
642 | .flags = ATA_FLAG_SLAVE_POSS, | 461 | .flags = ATA_FLAG_SLAVE_POSS, |
643 | .pio_mask = 0x1f, | 462 | .pio_mask = 0x1f, |
644 | .mwdma_mask = 0x07, | 463 | .mwdma_mask = 0x07, |
@@ -646,7 +465,6 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
646 | .port_ops = &nv133_port_ops | 465 | .port_ops = &nv133_port_ops |
647 | }, | 466 | }, |
648 | { /* 9: AMD CS5536 (Geode companion) */ | 467 | { /* 9: AMD CS5536 (Geode companion) */ |
649 | .sht = &amd_sht, | ||
650 | .flags = ATA_FLAG_SLAVE_POSS, | 468 | .flags = ATA_FLAG_SLAVE_POSS, |
651 | .pio_mask = 0x1f, | 469 | .pio_mask = 0x1f, |
652 | .mwdma_mask = 0x07, | 470 | .mwdma_mask = 0x07, |
@@ -654,15 +472,20 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
654 | .port_ops = &amd100_port_ops | 472 | .port_ops = &amd100_port_ops |
655 | } | 473 | } |
656 | }; | 474 | }; |
657 | struct ata_port_info pi; | 475 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
658 | const struct ata_port_info *ppi[] = { &pi, NULL }; | ||
659 | static int printed_version; | 476 | static int printed_version; |
660 | int type = id->driver_data; | 477 | int type = id->driver_data; |
478 | void *hpriv = NULL; | ||
661 | u8 fifo; | 479 | u8 fifo; |
480 | int rc; | ||
662 | 481 | ||
663 | if (!printed_version++) | 482 | if (!printed_version++) |
664 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 483 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
665 | 484 | ||
485 | rc = pcim_enable_device(pdev); | ||
486 | if (rc) | ||
487 | return rc; | ||
488 | |||
666 | pci_read_config_byte(pdev, 0x41, &fifo); | 489 | pci_read_config_byte(pdev, 0x41, &fifo); |
667 | 490 | ||
668 | /* Check for AMD7409 without swdma errata and if found adjust type */ | 491 | /* Check for AMD7409 without swdma errata and if found adjust type */ |
@@ -677,10 +500,10 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
677 | /* | 500 | /* |
678 | * Okay, type is determined now. Apply type-specific workarounds. | 501 | * Okay, type is determined now. Apply type-specific workarounds. |
679 | */ | 502 | */ |
680 | pi = info[type]; | 503 | ppi[0] = &info[type]; |
681 | 504 | ||
682 | if (type < 3) | 505 | if (type < 3) |
683 | ata_pci_clear_simplex(pdev); | 506 | ata_pci_bmdma_clear_simplex(pdev); |
684 | 507 | ||
685 | /* Check for AMD7411 */ | 508 | /* Check for AMD7411 */ |
686 | if (type == 3) | 509 | if (type == 3) |
@@ -696,16 +519,23 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
696 | u32 udma; | 519 | u32 udma; |
697 | 520 | ||
698 | pci_read_config_dword(pdev, 0x60, &udma); | 521 | pci_read_config_dword(pdev, 0x60, &udma); |
699 | pi.private_data = (void *)(unsigned long)udma; | 522 | hpriv = (void *)(unsigned long)udma; |
700 | } | 523 | } |
701 | 524 | ||
702 | /* And fire it up */ | 525 | /* And fire it up */ |
703 | return ata_pci_init_one(pdev, ppi); | 526 | return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv); |
704 | } | 527 | } |
705 | 528 | ||
706 | #ifdef CONFIG_PM | 529 | #ifdef CONFIG_PM |
707 | static int amd_reinit_one(struct pci_dev *pdev) | 530 | static int amd_reinit_one(struct pci_dev *pdev) |
708 | { | 531 | { |
532 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
533 | int rc; | ||
534 | |||
535 | rc = ata_pci_device_do_resume(pdev); | ||
536 | if (rc) | ||
537 | return rc; | ||
538 | |||
709 | if (pdev->vendor == PCI_VENDOR_ID_AMD) { | 539 | if (pdev->vendor == PCI_VENDOR_ID_AMD) { |
710 | u8 fifo; | 540 | u8 fifo; |
711 | pci_read_config_byte(pdev, 0x41, &fifo); | 541 | pci_read_config_byte(pdev, 0x41, &fifo); |
@@ -716,9 +546,11 @@ static int amd_reinit_one(struct pci_dev *pdev) | |||
716 | pci_write_config_byte(pdev, 0x41, fifo | 0xF0); | 546 | pci_write_config_byte(pdev, 0x41, fifo | 0xF0); |
717 | if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 || | 547 | if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 || |
718 | pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401) | 548 | pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401) |
719 | ata_pci_clear_simplex(pdev); | 549 | ata_pci_bmdma_clear_simplex(pdev); |
720 | } | 550 | } |
721 | return ata_pci_device_resume(pdev); | 551 | |
552 | ata_host_resume(host); | ||
553 | return 0; | ||
722 | } | 554 | } |
723 | #endif | 555 | #endif |
724 | 556 | ||
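The rewritten UDMA clock selection in timing_setup() above is behaviourally identical to the old min_t/max_t expression for the values the driver passes in: for clock 0 or 1 the UDMA cycle time stays at T, for clock 2 or more it is halved. A small standalone check of that equivalence (illustrative only, not kernel code; T = 30 is just a representative cycle value):

#include <assert.h>

static int old_ut(int T, int clock)
{
        int c = clock > 1 ? clock : 1;          /* max_t(int, clock, 1) */
        c = c < 2 ? c : 2;                      /* min_t(int, c, 2)     */
        return T / c;
}

static int new_ut(int T, int clock)
{
        int UT = T;
        if (clock >= 2)
                UT = T / 2;
        return UT;
}

int main(void)
{
        for (int clock = 0; clock <= 4; clock++)
                assert(old_ut(30, clock) == new_ut(30, clock));
        return 0;
}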
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index d4218310327b..0f513bc11193 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c | |||
@@ -52,22 +52,7 @@ static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline) | |||
52 | if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 52 | if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) |
53 | return -ENOENT; | 53 | return -ENOENT; |
54 | 54 | ||
55 | return ata_std_prereset(link, deadline); | 55 | return ata_sff_prereset(link, deadline); |
56 | } | ||
57 | |||
58 | /** | ||
59 | * artop6210_error_handler - Probe specified port on PATA host controller | ||
60 | * @ap: Port to probe | ||
61 | * | ||
62 | * LOCKING: | ||
63 | * None (inherited from caller). | ||
64 | */ | ||
65 | |||
66 | static void artop6210_error_handler(struct ata_port *ap) | ||
67 | { | ||
68 | ata_bmdma_drive_eh(ap, artop6210_pre_reset, | ||
69 | ata_std_softreset, NULL, | ||
70 | ata_std_postreset); | ||
71 | } | 56 | } |
72 | 57 | ||
73 | /** | 58 | /** |
@@ -93,7 +78,7 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) | |||
93 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 78 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) |
94 | return -ENOENT; | 79 | return -ENOENT; |
95 | 80 | ||
96 | return ata_std_prereset(link, deadline); | 81 | return ata_sff_prereset(link, deadline); |
97 | } | 82 | } |
98 | 83 | ||
99 | /** | 84 | /** |
@@ -114,21 +99,6 @@ static int artop6260_cable_detect(struct ata_port *ap) | |||
114 | } | 99 | } |
115 | 100 | ||
116 | /** | 101 | /** |
117 | * artop6260_error_handler - Probe specified port on PATA host controller | ||
118 | * @ap: Port to probe | ||
119 | * | ||
120 | * LOCKING: | ||
121 | * None (inherited from caller). | ||
122 | */ | ||
123 | |||
124 | static void artop6260_error_handler(struct ata_port *ap) | ||
125 | { | ||
126 | ata_bmdma_drive_eh(ap, artop6260_pre_reset, | ||
127 | ata_std_softreset, NULL, | ||
128 | ata_std_postreset); | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * artop6210_load_piomode - Load a set of PATA PIO timings | 102 | * artop6210_load_piomode - Load a set of PATA PIO timings |
133 | * @ap: Port whose timings we are configuring | 103 | * @ap: Port whose timings we are configuring |
134 | * @adev: Device | 104 | * @adev: Device |
@@ -314,85 +284,23 @@ static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
314 | } | 284 | } |
315 | 285 | ||
316 | static struct scsi_host_template artop_sht = { | 286 | static struct scsi_host_template artop_sht = { |
317 | .module = THIS_MODULE, | 287 | ATA_BMDMA_SHT(DRV_NAME), |
318 | .name = DRV_NAME, | ||
319 | .ioctl = ata_scsi_ioctl, | ||
320 | .queuecommand = ata_scsi_queuecmd, | ||
321 | .can_queue = ATA_DEF_QUEUE, | ||
322 | .this_id = ATA_SHT_THIS_ID, | ||
323 | .sg_tablesize = LIBATA_MAX_PRD, | ||
324 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
325 | .emulated = ATA_SHT_EMULATED, | ||
326 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
327 | .proc_name = DRV_NAME, | ||
328 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
329 | .slave_configure = ata_scsi_slave_config, | ||
330 | .slave_destroy = ata_scsi_slave_destroy, | ||
331 | .bios_param = ata_std_bios_param, | ||
332 | }; | 288 | }; |
333 | 289 | ||
334 | static const struct ata_port_operations artop6210_ops = { | 290 | static struct ata_port_operations artop6210_ops = { |
291 | .inherits = &ata_bmdma_port_ops, | ||
292 | .cable_detect = ata_cable_40wire, | ||
335 | .set_piomode = artop6210_set_piomode, | 293 | .set_piomode = artop6210_set_piomode, |
336 | .set_dmamode = artop6210_set_dmamode, | 294 | .set_dmamode = artop6210_set_dmamode, |
337 | .mode_filter = ata_pci_default_filter, | 295 | .prereset = artop6210_pre_reset, |
338 | |||
339 | .tf_load = ata_tf_load, | ||
340 | .tf_read = ata_tf_read, | ||
341 | .check_status = ata_check_status, | ||
342 | .exec_command = ata_exec_command, | ||
343 | .dev_select = ata_std_dev_select, | ||
344 | |||
345 | .freeze = ata_bmdma_freeze, | ||
346 | .thaw = ata_bmdma_thaw, | ||
347 | .error_handler = artop6210_error_handler, | ||
348 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
349 | .cable_detect = ata_cable_40wire, | ||
350 | |||
351 | .bmdma_setup = ata_bmdma_setup, | ||
352 | .bmdma_start = ata_bmdma_start, | ||
353 | .bmdma_stop = ata_bmdma_stop, | ||
354 | .bmdma_status = ata_bmdma_status, | ||
355 | .qc_prep = ata_qc_prep, | ||
356 | .qc_issue = ata_qc_issue_prot, | ||
357 | |||
358 | .data_xfer = ata_data_xfer, | ||
359 | |||
360 | .irq_handler = ata_interrupt, | ||
361 | .irq_clear = ata_bmdma_irq_clear, | ||
362 | .irq_on = ata_irq_on, | ||
363 | |||
364 | .port_start = ata_sff_port_start, | ||
365 | }; | 296 | }; |
366 | 297 | ||
367 | static const struct ata_port_operations artop6260_ops = { | 298 | static struct ata_port_operations artop6260_ops = { |
299 | .inherits = &ata_bmdma_port_ops, | ||
300 | .cable_detect = artop6260_cable_detect, | ||
368 | .set_piomode = artop6260_set_piomode, | 301 | .set_piomode = artop6260_set_piomode, |
369 | .set_dmamode = artop6260_set_dmamode, | 302 | .set_dmamode = artop6260_set_dmamode, |
370 | 303 | .prereset = artop6260_pre_reset, | |
371 | .tf_load = ata_tf_load, | ||
372 | .tf_read = ata_tf_read, | ||
373 | .check_status = ata_check_status, | ||
374 | .exec_command = ata_exec_command, | ||
375 | .dev_select = ata_std_dev_select, | ||
376 | |||
377 | .freeze = ata_bmdma_freeze, | ||
378 | .thaw = ata_bmdma_thaw, | ||
379 | .error_handler = artop6260_error_handler, | ||
380 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
381 | .cable_detect = artop6260_cable_detect, | ||
382 | |||
383 | .bmdma_setup = ata_bmdma_setup, | ||
384 | .bmdma_start = ata_bmdma_start, | ||
385 | .bmdma_stop = ata_bmdma_stop, | ||
386 | .bmdma_status = ata_bmdma_status, | ||
387 | .qc_prep = ata_qc_prep, | ||
388 | .qc_issue = ata_qc_issue_prot, | ||
389 | .data_xfer = ata_data_xfer, | ||
390 | |||
391 | .irq_handler = ata_interrupt, | ||
392 | .irq_clear = ata_bmdma_irq_clear, | ||
393 | .irq_on = ata_irq_on, | ||
394 | |||
395 | .port_start = ata_sff_port_start, | ||
396 | }; | 304 | }; |
397 | 305 | ||
398 | 306 | ||
@@ -414,7 +322,6 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
414 | { | 322 | { |
415 | static int printed_version; | 323 | static int printed_version; |
416 | static const struct ata_port_info info_6210 = { | 324 | static const struct ata_port_info info_6210 = { |
417 | .sht = &artop_sht, | ||
418 | .flags = ATA_FLAG_SLAVE_POSS, | 325 | .flags = ATA_FLAG_SLAVE_POSS, |
419 | .pio_mask = 0x1f, /* pio0-4 */ | 326 | .pio_mask = 0x1f, /* pio0-4 */ |
420 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 327 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -422,7 +329,6 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
422 | .port_ops = &artop6210_ops, | 329 | .port_ops = &artop6210_ops, |
423 | }; | 330 | }; |
424 | static const struct ata_port_info info_626x = { | 331 | static const struct ata_port_info info_626x = { |
425 | .sht = &artop_sht, | ||
426 | .flags = ATA_FLAG_SLAVE_POSS, | 332 | .flags = ATA_FLAG_SLAVE_POSS, |
427 | .pio_mask = 0x1f, /* pio0-4 */ | 333 | .pio_mask = 0x1f, /* pio0-4 */ |
428 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 334 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -430,7 +336,6 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
430 | .port_ops = &artop6260_ops, | 336 | .port_ops = &artop6260_ops, |
431 | }; | 337 | }; |
432 | static const struct ata_port_info info_628x = { | 338 | static const struct ata_port_info info_628x = { |
433 | .sht = &artop_sht, | ||
434 | .flags = ATA_FLAG_SLAVE_POSS, | 339 | .flags = ATA_FLAG_SLAVE_POSS, |
435 | .pio_mask = 0x1f, /* pio0-4 */ | 340 | .pio_mask = 0x1f, /* pio0-4 */ |
436 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 341 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -438,7 +343,6 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
438 | .port_ops = &artop6260_ops, | 343 | .port_ops = &artop6260_ops, |
439 | }; | 344 | }; |
440 | static const struct ata_port_info info_628x_fast = { | 345 | static const struct ata_port_info info_628x_fast = { |
441 | .sht = &artop_sht, | ||
442 | .flags = ATA_FLAG_SLAVE_POSS, | 346 | .flags = ATA_FLAG_SLAVE_POSS, |
443 | .pio_mask = 0x1f, /* pio0-4 */ | 347 | .pio_mask = 0x1f, /* pio0-4 */ |
444 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 348 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -446,11 +350,16 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
446 | .port_ops = &artop6260_ops, | 350 | .port_ops = &artop6260_ops, |
447 | }; | 351 | }; |
448 | const struct ata_port_info *ppi[] = { NULL, NULL }; | 352 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
353 | int rc; | ||
449 | 354 | ||
450 | if (!printed_version++) | 355 | if (!printed_version++) |
451 | dev_printk(KERN_DEBUG, &pdev->dev, | 356 | dev_printk(KERN_DEBUG, &pdev->dev, |
452 | "version " DRV_VERSION "\n"); | 357 | "version " DRV_VERSION "\n"); |
453 | 358 | ||
359 | rc = pcim_enable_device(pdev); | ||
360 | if (rc) | ||
361 | return rc; | ||
362 | |||
454 | if (id->driver_data == 0) { /* 6210 variant */ | 363 | if (id->driver_data == 0) { /* 6210 variant */ |
455 | ppi[0] = &info_6210; | 364 | ppi[0] = &info_6210; |
456 | ppi[1] = &ata_dummy_port_info; | 365 | ppi[1] = &ata_dummy_port_info; |
@@ -491,7 +400,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
491 | 400 | ||
492 | BUG_ON(ppi[0] == NULL); | 401 | BUG_ON(ppi[0] == NULL); |
493 | 402 | ||
494 | return ata_pci_init_one(pdev, ppi); | 403 | return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL); |
495 | } | 404 | } |
496 | 405 | ||
497 | static const struct pci_device_id artop_pci_tbl[] = { | 406 | static const struct pci_device_id artop_pci_tbl[] = { |
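Both ARTOP variants above also drop their error_handler wrappers: instead of wrapping ata_bmdma_drive_eh() with the reset functions spelled out, the driver installs only its prereset hook and inherits the rest of the EH path from the base ops. Sketch of that shape (foo_* names are placeholders):

static int foo_pre_reset(struct ata_link *link, unsigned long deadline)
{
        /* driver-specific enable-bit or cable checks go here */
        return ata_sff_prereset(link, deadline);
}

static struct ata_port_operations foo_ops = {
        .inherits = &ata_bmdma_port_ops,
        .prereset = foo_pre_reset,      /* softreset/postreset come from the base */
};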
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c index db057b183d60..3e8651d78952 100644 --- a/drivers/ata/pata_at32.c +++ b/drivers/ata/pata_at32.c | |||
@@ -166,52 +166,14 @@ static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
166 | } | 166 | } |
167 | } | 167 | } |
168 | 168 | ||
169 | static void pata_at32_irq_clear(struct ata_port *ap) | ||
170 | { | ||
171 | /* No DMA controller yet */ | ||
172 | } | ||
173 | |||
174 | static struct scsi_host_template at32_sht = { | 169 | static struct scsi_host_template at32_sht = { |
175 | .module = THIS_MODULE, | 170 | ATA_PIO_SHT(DRV_NAME), |
176 | .name = DRV_NAME, | ||
177 | .ioctl = ata_scsi_ioctl, | ||
178 | .queuecommand = ata_scsi_queuecmd, | ||
179 | .can_queue = ATA_DEF_QUEUE, | ||
180 | .this_id = ATA_SHT_THIS_ID, | ||
181 | .sg_tablesize = LIBATA_MAX_PRD, | ||
182 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
183 | .emulated = ATA_SHT_EMULATED, | ||
184 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
185 | .proc_name = DRV_NAME, | ||
186 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
187 | .slave_configure = ata_scsi_slave_config, | ||
188 | .slave_destroy = ata_scsi_slave_destroy, | ||
189 | .bios_param = ata_std_bios_param, | ||
190 | }; | 171 | }; |
191 | 172 | ||
192 | static struct ata_port_operations at32_port_ops = { | 173 | static struct ata_port_operations at32_port_ops = { |
193 | .set_piomode = pata_at32_set_piomode, | 174 | .inherits = &ata_sff_port_ops, |
194 | .tf_load = ata_tf_load, | ||
195 | .tf_read = ata_tf_read, | ||
196 | .exec_command = ata_exec_command, | ||
197 | .check_status = ata_check_status, | ||
198 | .dev_select = ata_std_dev_select, | ||
199 | |||
200 | .freeze = ata_bmdma_freeze, | ||
201 | .thaw = ata_bmdma_thaw, | ||
202 | .error_handler = ata_bmdma_error_handler, | ||
203 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
204 | .cable_detect = ata_cable_40wire, | 175 | .cable_detect = ata_cable_40wire, |
205 | 176 | .set_piomode = pata_at32_set_piomode, | |
206 | .qc_prep = ata_qc_prep, | ||
207 | .qc_issue = ata_qc_issue_prot, | ||
208 | |||
209 | .data_xfer = ata_data_xfer, | ||
210 | |||
211 | .irq_clear = pata_at32_irq_clear, | ||
212 | .irq_on = ata_irq_on, | ||
213 | |||
214 | .port_start = ata_sff_port_start, | ||
215 | }; | 177 | }; |
216 | 178 | ||
217 | static int __init pata_at32_init_one(struct device *dev, | 179 | static int __init pata_at32_init_one(struct device *dev, |
@@ -261,7 +223,7 @@ static int __init pata_at32_init_one(struct device *dev, | |||
261 | host->private_data = info; | 223 | host->private_data = info; |
262 | 224 | ||
263 | /* Register ATA device and return */ | 225 | /* Register ATA device and return */ |
264 | return ata_host_activate(host, info->irq, ata_interrupt, | 226 | return ata_host_activate(host, info->irq, ata_sff_interrupt, |
265 | IRQF_SHARED | IRQF_TRIGGER_RISING, | 227 | IRQF_SHARED | IRQF_TRIGGER_RISING, |
266 | &at32_sht); | 228 | &at32_sht); |
267 | } | 229 | } |
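pata_at32 above shows the PIO-only flavour of the same conversion: ATA_PIO_SHT() instead of ATA_BMDMA_SHT(), ops inheriting from ata_sff_port_ops, and the shared ata_sff_interrupt handler passed to ata_host_activate(). Condensed sketch, foo_* names hypothetical:

static struct scsi_host_template foo_sht = {
        ATA_PIO_SHT("pata_foo"),                /* PIO-only template */
};

static struct ata_port_operations foo_port_ops = {
        .inherits     = &ata_sff_port_ops,      /* no BMDMA hooks needed */
        .cable_detect = ata_cable_40wire,
        .set_piomode  = foo_set_piomode,
};

/* the platform probe then registers with the shared SFF handler:
 *   ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED, &foo_sht);
 */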
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 408bdc1a9776..78738fb4223b 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c | |||
@@ -45,12 +45,7 @@ static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline) | |||
45 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) | 45 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) |
46 | return -ENOENT; | 46 | return -ENOENT; |
47 | 47 | ||
48 | return ata_std_prereset(link, deadline); | 48 | return ata_sff_prereset(link, deadline); |
49 | } | ||
50 | |||
51 | static void atiixp_error_handler(struct ata_port *ap) | ||
52 | { | ||
53 | ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
54 | } | 49 | } |
55 | 50 | ||
56 | static int atiixp_cable_detect(struct ata_port *ap) | 51 | static int atiixp_cable_detect(struct ata_port *ap) |
@@ -221,60 +216,26 @@ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) | |||
221 | } | 216 | } |
222 | 217 | ||
223 | static struct scsi_host_template atiixp_sht = { | 218 | static struct scsi_host_template atiixp_sht = { |
224 | .module = THIS_MODULE, | 219 | ATA_BMDMA_SHT(DRV_NAME), |
225 | .name = DRV_NAME, | ||
226 | .ioctl = ata_scsi_ioctl, | ||
227 | .queuecommand = ata_scsi_queuecmd, | ||
228 | .can_queue = ATA_DEF_QUEUE, | ||
229 | .this_id = ATA_SHT_THIS_ID, | ||
230 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, | 220 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, |
231 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
232 | .emulated = ATA_SHT_EMULATED, | ||
233 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
234 | .proc_name = DRV_NAME, | ||
235 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
236 | .slave_configure = ata_scsi_slave_config, | ||
237 | .slave_destroy = ata_scsi_slave_destroy, | ||
238 | .bios_param = ata_std_bios_param, | ||
239 | }; | 221 | }; |
240 | 222 | ||
241 | static struct ata_port_operations atiixp_port_ops = { | 223 | static struct ata_port_operations atiixp_port_ops = { |
242 | .set_piomode = atiixp_set_piomode, | 224 | .inherits = &ata_bmdma_port_ops, |
243 | .set_dmamode = atiixp_set_dmamode, | ||
244 | .mode_filter = ata_pci_default_filter, | ||
245 | .tf_load = ata_tf_load, | ||
246 | .tf_read = ata_tf_read, | ||
247 | .check_status = ata_check_status, | ||
248 | .exec_command = ata_exec_command, | ||
249 | .dev_select = ata_std_dev_select, | ||
250 | |||
251 | .freeze = ata_bmdma_freeze, | ||
252 | .thaw = ata_bmdma_thaw, | ||
253 | .error_handler = atiixp_error_handler, | ||
254 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
255 | .cable_detect = atiixp_cable_detect, | ||
256 | 225 | ||
257 | .bmdma_setup = ata_bmdma_setup, | 226 | .qc_prep = ata_sff_dumb_qc_prep, |
258 | .bmdma_start = atiixp_bmdma_start, | 227 | .bmdma_start = atiixp_bmdma_start, |
259 | .bmdma_stop = atiixp_bmdma_stop, | 228 | .bmdma_stop = atiixp_bmdma_stop, |
260 | .bmdma_status = ata_bmdma_status, | ||
261 | 229 | ||
262 | .qc_prep = ata_dumb_qc_prep, | 230 | .cable_detect = atiixp_cable_detect, |
263 | .qc_issue = ata_qc_issue_prot, | 231 | .set_piomode = atiixp_set_piomode, |
264 | 232 | .set_dmamode = atiixp_set_dmamode, | |
265 | .data_xfer = ata_data_xfer, | 233 | .prereset = atiixp_pre_reset, |
266 | |||
267 | .irq_handler = ata_interrupt, | ||
268 | .irq_clear = ata_bmdma_irq_clear, | ||
269 | .irq_on = ata_irq_on, | ||
270 | |||
271 | .port_start = ata_sff_port_start, | ||
272 | }; | 234 | }; |
273 | 235 | ||
274 | static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 236 | static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
275 | { | 237 | { |
276 | static const struct ata_port_info info = { | 238 | static const struct ata_port_info info = { |
277 | .sht = &atiixp_sht, | ||
278 | .flags = ATA_FLAG_SLAVE_POSS, | 239 | .flags = ATA_FLAG_SLAVE_POSS, |
279 | .pio_mask = 0x1f, | 240 | .pio_mask = 0x1f, |
280 | .mwdma_mask = 0x06, /* No MWDMA0 support */ | 241 | .mwdma_mask = 0x06, /* No MWDMA0 support */ |
@@ -282,7 +243,7 @@ static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
282 | .port_ops = &atiixp_port_ops | 243 | .port_ops = &atiixp_port_ops |
283 | }; | 244 | }; |
284 | const struct ata_port_info *ppi[] = { &info, NULL }; | 245 | const struct ata_port_info *ppi[] = { &info, NULL }; |
285 | return ata_pci_init_one(dev, ppi); | 246 | return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL); |
286 | } | 247 | } |
287 | 248 | ||
288 | static const struct pci_device_id atiixp[] = { | 249 | static const struct pci_device_id atiixp[] = { |
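For BMDMA-capable PCI drivers the same pattern extends to the SCSI host template and the probe helper: ATA_BMDMA_SHT() supplies the boilerplate fields, struct ata_port_info loses its .sht member, and the template is passed explicitly to ata_pci_sff_init_one(). A condensed sketch built from the atiixp hunk above (atiixp_port_ops and the mode-setting functions are the driver's; the UDMA mask and PCI-ID table are omitted here):

static struct scsi_host_template atiixp_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	/* the controller needs "dumb" PRD tables, so keep the reduced limit */
	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
};

static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.mwdma_mask	= 0x06,		/* no MWDMA0 */
		.port_ops	= &atiixp_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* the host template is now an explicit argument, not a port_info field */
	return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL);
}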
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 7f87f105c2f6..0a5ad98635b1 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c | |||
@@ -674,7 +674,7 @@ static void read_atapi_data(void __iomem *base, | |||
674 | * @ap: Port to which output is sent | 674 | * @ap: Port to which output is sent |
675 | * @tf: ATA taskfile register set | 675 | * @tf: ATA taskfile register set |
676 | * | 676 | * |
677 | * Note: Original code is ata_tf_load(). | 677 | * Note: Original code is ata_sff_tf_load(). |
678 | */ | 678 | */ |
679 | 679 | ||
680 | static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | 680 | static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
@@ -745,7 +745,7 @@ static u8 bfin_check_status(struct ata_port *ap) | |||
745 | * @ap: Port from which input is read | 745 | * @ap: Port from which input is read |
746 | * @tf: ATA taskfile register set for storing input | 746 | * @tf: ATA taskfile register set for storing input |
747 | * | 747 | * |
748 | * Note: Original code is ata_tf_read(). | 748 | * Note: Original code is ata_sff_tf_read(). |
749 | */ | 749 | */ |
750 | 750 | ||
751 | static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | 751 | static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
@@ -775,7 +775,7 @@ static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
775 | * @ap: port to which command is being issued | 775 | * @ap: port to which command is being issued |
776 | * @tf: ATA taskfile register set | 776 | * @tf: ATA taskfile register set |
777 | * | 777 | * |
778 | * Note: Original code is ata_exec_command(). | 778 | * Note: Original code is ata_sff_exec_command(). |
779 | */ | 779 | */ |
780 | 780 | ||
781 | static void bfin_exec_command(struct ata_port *ap, | 781 | static void bfin_exec_command(struct ata_port *ap, |
@@ -785,7 +785,7 @@ static void bfin_exec_command(struct ata_port *ap, | |||
785 | dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); | 785 | dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); |
786 | 786 | ||
787 | write_atapi_register(base, ATA_REG_CMD, tf->command); | 787 | write_atapi_register(base, ATA_REG_CMD, tf->command); |
788 | ata_pause(ap); | 788 | ata_sff_pause(ap); |
789 | } | 789 | } |
790 | 790 | ||
791 | /** | 791 | /** |
@@ -800,14 +800,14 @@ static u8 bfin_check_altstatus(struct ata_port *ap) | |||
800 | } | 800 | } |
801 | 801 | ||
802 | /** | 802 | /** |
803 | * bfin_std_dev_select - Select device 0/1 on ATA bus | 803 | * bfin_dev_select - Select device 0/1 on ATA bus |
804 | * @ap: ATA channel to manipulate | 804 | * @ap: ATA channel to manipulate |
805 | * @device: ATA device (numbered from zero) to select | 805 | * @device: ATA device (numbered from zero) to select |
806 | * | 806 | * |
807 | * Note: Original code is ata_std_dev_select(). | 807 | * Note: Original code is ata_sff_dev_select(). |
808 | */ | 808 | */ |
809 | 809 | ||
810 | static void bfin_std_dev_select(struct ata_port *ap, unsigned int device) | 810 | static void bfin_dev_select(struct ata_port *ap, unsigned int device) |
811 | { | 811 | { |
812 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | 812 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; |
813 | u8 tmp; | 813 | u8 tmp; |
@@ -818,7 +818,7 @@ static void bfin_std_dev_select(struct ata_port *ap, unsigned int device) | |||
818 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | 818 | tmp = ATA_DEVICE_OBS | ATA_DEV1; |
819 | 819 | ||
820 | write_atapi_register(base, ATA_REG_DEVICE, tmp); | 820 | write_atapi_register(base, ATA_REG_DEVICE, tmp); |
821 | ata_pause(ap); | 821 | ata_sff_pause(ap); |
822 | } | 822 | } |
823 | 823 | ||
824 | /** | 824 | /** |
@@ -977,7 +977,7 @@ static unsigned int bfin_devchk(struct ata_port *ap, | |||
977 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | 977 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; |
978 | u8 nsect, lbal; | 978 | u8 nsect, lbal; |
979 | 979 | ||
980 | bfin_std_dev_select(ap, device); | 980 | bfin_dev_select(ap, device); |
981 | 981 | ||
982 | write_atapi_register(base, ATA_REG_NSECT, 0x55); | 982 | write_atapi_register(base, ATA_REG_NSECT, 0x55); |
983 | write_atapi_register(base, ATA_REG_LBAL, 0xaa); | 983 | write_atapi_register(base, ATA_REG_LBAL, 0xaa); |
@@ -1014,7 +1014,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) | |||
1014 | * BSY bit to clear | 1014 | * BSY bit to clear |
1015 | */ | 1015 | */ |
1016 | if (dev0) | 1016 | if (dev0) |
1017 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | 1017 | ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); |
1018 | 1018 | ||
1019 | /* if device 1 was found in ata_devchk, wait for | 1019 | /* if device 1 was found in ata_devchk, wait for |
1020 | * register access, then wait for BSY to clear | 1020 | * register access, then wait for BSY to clear |
@@ -1023,7 +1023,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) | |||
1023 | while (dev1) { | 1023 | while (dev1) { |
1024 | u8 nsect, lbal; | 1024 | u8 nsect, lbal; |
1025 | 1025 | ||
1026 | bfin_std_dev_select(ap, 1); | 1026 | bfin_dev_select(ap, 1); |
1027 | nsect = read_atapi_register(base, ATA_REG_NSECT); | 1027 | nsect = read_atapi_register(base, ATA_REG_NSECT); |
1028 | lbal = read_atapi_register(base, ATA_REG_LBAL); | 1028 | lbal = read_atapi_register(base, ATA_REG_LBAL); |
1029 | if ((nsect == 1) && (lbal == 1)) | 1029 | if ((nsect == 1) && (lbal == 1)) |
@@ -1035,14 +1035,14 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) | |||
1035 | msleep(50); /* give drive a breather */ | 1035 | msleep(50); /* give drive a breather */ |
1036 | } | 1036 | } |
1037 | if (dev1) | 1037 | if (dev1) |
1038 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | 1038 | ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); |
1039 | 1039 | ||
1040 | /* is all this really necessary? */ | 1040 | /* is all this really necessary? */ |
1041 | bfin_std_dev_select(ap, 0); | 1041 | bfin_dev_select(ap, 0); |
1042 | if (dev1) | 1042 | if (dev1) |
1043 | bfin_std_dev_select(ap, 1); | 1043 | bfin_dev_select(ap, 1); |
1044 | if (dev0) | 1044 | if (dev0) |
1045 | bfin_std_dev_select(ap, 0); | 1045 | bfin_dev_select(ap, 0); |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | /** | 1048 | /** |
@@ -1088,26 +1088,21 @@ static unsigned int bfin_bus_softreset(struct ata_port *ap, | |||
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | /** | 1090 | /** |
1091 | * bfin_std_softreset - reset host port via ATA SRST | 1091 | * bfin_softreset - reset host port via ATA SRST |
1092 | * @ap: port to reset | 1092 | * @ap: port to reset |
1093 | * @classes: resulting classes of attached devices | 1093 | * @classes: resulting classes of attached devices |
1094 | * | 1094 | * |
1095 | * Note: Original code is ata_std_softreset(). | 1095 | * Note: Original code is ata_sff_softreset(). |
1096 | */ | 1096 | */ |
1097 | 1097 | ||
1098 | static int bfin_std_softreset(struct ata_link *link, unsigned int *classes, | 1098 | static int bfin_softreset(struct ata_link *link, unsigned int *classes, |
1099 | unsigned long deadline) | 1099 | unsigned long deadline) |
1100 | { | 1100 | { |
1101 | struct ata_port *ap = link->ap; | 1101 | struct ata_port *ap = link->ap; |
1102 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 1102 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
1103 | unsigned int devmask = 0, err_mask; | 1103 | unsigned int devmask = 0, err_mask; |
1104 | u8 err; | 1104 | u8 err; |
1105 | 1105 | ||
1106 | if (ata_link_offline(link)) { | ||
1107 | classes[0] = ATA_DEV_NONE; | ||
1108 | goto out; | ||
1109 | } | ||
1110 | |||
1111 | /* determine if device 0/1 are present */ | 1106 | /* determine if device 0/1 are present */ |
1112 | if (bfin_devchk(ap, 0)) | 1107 | if (bfin_devchk(ap, 0)) |
1113 | devmask |= (1 << 0); | 1108 | devmask |= (1 << 0); |
@@ -1115,7 +1110,7 @@ static int bfin_std_softreset(struct ata_link *link, unsigned int *classes, | |||
1115 | devmask |= (1 << 1); | 1110 | devmask |= (1 << 1); |
1116 | 1111 | ||
1117 | /* select device 0 again */ | 1112 | /* select device 0 again */ |
1118 | bfin_std_dev_select(ap, 0); | 1113 | bfin_dev_select(ap, 0); |
1119 | 1114 | ||
1120 | /* issue bus reset */ | 1115 | /* issue bus reset */ |
1121 | err_mask = bfin_bus_softreset(ap, devmask); | 1116 | err_mask = bfin_bus_softreset(ap, devmask); |
@@ -1126,13 +1121,12 @@ static int bfin_std_softreset(struct ata_link *link, unsigned int *classes, | |||
1126 | } | 1121 | } |
1127 | 1122 | ||
1128 | /* determine by signature whether we have ATA or ATAPI devices */ | 1123 | /* determine by signature whether we have ATA or ATAPI devices */ |
1129 | classes[0] = ata_dev_try_classify(&ap->link.device[0], | 1124 | classes[0] = ata_sff_dev_classify(&ap->link.device[0], |
1130 | devmask & (1 << 0), &err); | 1125 | devmask & (1 << 0), &err); |
1131 | if (slave_possible && err != 0x81) | 1126 | if (slave_possible && err != 0x81) |
1132 | classes[1] = ata_dev_try_classify(&ap->link.device[1], | 1127 | classes[1] = ata_sff_dev_classify(&ap->link.device[1], |
1133 | devmask & (1 << 1), &err); | 1128 | devmask & (1 << 1), &err); |
1134 | 1129 | ||
1135 | out: | ||
1136 | return 0; | 1130 | return 0; |
1137 | } | 1131 | } |
1138 | 1132 | ||
@@ -1167,7 +1161,7 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap) | |||
1167 | * @buflen: buffer length | 1161 | * @buflen: buffer length |
1168 | * @write_data: read/write | 1162 | * @write_data: read/write |
1169 | * | 1163 | * |
1170 | * Note: Original code is ata_data_xfer(). | 1164 | * Note: Original code is ata_sff_data_xfer(). |
1171 | */ | 1165 | */ |
1172 | 1166 | ||
1173 | static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, | 1167 | static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, |
@@ -1206,7 +1200,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, | |||
1206 | * bfin_irq_clear - Clear ATAPI interrupt. | 1200 | * bfin_irq_clear - Clear ATAPI interrupt. |
1207 | * @ap: Port associated with this ATA transaction. | 1201 | * @ap: Port associated with this ATA transaction. |
1208 | * | 1202 | * |
1209 | * Note: Original code is ata_bmdma_irq_clear(). | 1203 | * Note: Original code is ata_sff_irq_clear(). |
1210 | */ | 1204 | */ |
1211 | 1205 | ||
1212 | static void bfin_irq_clear(struct ata_port *ap) | 1206 | static void bfin_irq_clear(struct ata_port *ap) |
@@ -1223,7 +1217,7 @@ static void bfin_irq_clear(struct ata_port *ap) | |||
1223 | * bfin_irq_on - Enable interrupts on a port. | 1217 | * bfin_irq_on - Enable interrupts on a port. |
1224 | * @ap: Port on which interrupts are enabled. | 1218 | * @ap: Port on which interrupts are enabled. |
1225 | * | 1219 | * |
1226 | * Note: Original code is ata_irq_on(). | 1220 | * Note: Original code is ata_sff_irq_on(). |
1227 | */ | 1221 | */ |
1228 | 1222 | ||
1229 | static unsigned char bfin_irq_on(struct ata_port *ap) | 1223 | static unsigned char bfin_irq_on(struct ata_port *ap) |
@@ -1244,13 +1238,13 @@ static unsigned char bfin_irq_on(struct ata_port *ap) | |||
1244 | } | 1238 | } |
1245 | 1239 | ||
1246 | /** | 1240 | /** |
1247 | * bfin_bmdma_freeze - Freeze DMA controller port | 1241 | * bfin_freeze - Freeze DMA controller port |
1248 | * @ap: port to freeze | 1242 | * @ap: port to freeze |
1249 | * | 1243 | * |
1250 | * Note: Original code is ata_bmdma_freeze(). | 1244 | * Note: Original code is ata_sff_freeze(). |
1251 | */ | 1245 | */ |
1252 | 1246 | ||
1253 | static void bfin_bmdma_freeze(struct ata_port *ap) | 1247 | static void bfin_freeze(struct ata_port *ap) |
1254 | { | 1248 | { |
1255 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | 1249 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; |
1256 | 1250 | ||
@@ -1264,19 +1258,19 @@ static void bfin_bmdma_freeze(struct ata_port *ap) | |||
1264 | * ATA_NIEN manipulation. Also, many controllers fail to mask | 1258 | * ATA_NIEN manipulation. Also, many controllers fail to mask |
1265 | * previously pending IRQ on ATA_NIEN assertion. Clear it. | 1259 | * previously pending IRQ on ATA_NIEN assertion. Clear it. |
1266 | */ | 1260 | */ |
1267 | ata_chk_status(ap); | 1261 | ap->ops->sff_check_status(ap); |
1268 | 1262 | ||
1269 | bfin_irq_clear(ap); | 1263 | bfin_irq_clear(ap); |
1270 | } | 1264 | } |
1271 | 1265 | ||
1272 | /** | 1266 | /** |
1273 | * bfin_bmdma_thaw - Thaw DMA controller port | 1267 | * bfin_thaw - Thaw DMA controller port |
1274 | * @ap: port to thaw | 1268 | * @ap: port to thaw |
1275 | * | 1269 | * |
1276 | * Note: Original code is ata_bmdma_thaw(). | 1270 | * Note: Original code is ata_sff_thaw(). |
1277 | */ | 1271 | */ |
1278 | 1272 | ||
1279 | void bfin_bmdma_thaw(struct ata_port *ap) | 1273 | void bfin_thaw(struct ata_port *ap) |
1280 | { | 1274 | { |
1281 | bfin_check_status(ap); | 1275 | bfin_check_status(ap); |
1282 | bfin_irq_clear(ap); | 1276 | bfin_irq_clear(ap); |
@@ -1284,14 +1278,14 @@ void bfin_bmdma_thaw(struct ata_port *ap) | |||
1284 | } | 1278 | } |
1285 | 1279 | ||
1286 | /** | 1280 | /** |
1287 | * bfin_std_postreset - standard postreset callback | 1281 | * bfin_postreset - standard postreset callback |
1288 | * @ap: the target ata_port | 1282 | * @ap: the target ata_port |
1289 | * @classes: classes of attached devices | 1283 | * @classes: classes of attached devices |
1290 | * | 1284 | * |
1291 | * Note: Original code is ata_std_postreset(). | 1285 | * Note: Original code is ata_sff_postreset(). |
1292 | */ | 1286 | */ |
1293 | 1287 | ||
1294 | static void bfin_std_postreset(struct ata_link *link, unsigned int *classes) | 1288 | static void bfin_postreset(struct ata_link *link, unsigned int *classes) |
1295 | { | 1289 | { |
1296 | struct ata_port *ap = link->ap; | 1290 | struct ata_port *ap = link->ap; |
1297 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | 1291 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; |
@@ -1301,9 +1295,9 @@ static void bfin_std_postreset(struct ata_link *link, unsigned int *classes) | |||
1301 | 1295 | ||
1302 | /* is double-select really necessary? */ | 1296 | /* is double-select really necessary? */ |
1303 | if (classes[0] != ATA_DEV_NONE) | 1297 | if (classes[0] != ATA_DEV_NONE) |
1304 | bfin_std_dev_select(ap, 1); | 1298 | bfin_dev_select(ap, 1); |
1305 | if (classes[1] != ATA_DEV_NONE) | 1299 | if (classes[1] != ATA_DEV_NONE) |
1306 | bfin_std_dev_select(ap, 0); | 1300 | bfin_dev_select(ap, 0); |
1307 | 1301 | ||
1308 | /* bail out if no device is present */ | 1302 | /* bail out if no device is present */ |
1309 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | 1303 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { |
@@ -1314,17 +1308,6 @@ static void bfin_std_postreset(struct ata_link *link, unsigned int *classes) | |||
1314 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); | 1308 | write_atapi_register(base, ATA_REG_CTRL, ap->ctl); |
1315 | } | 1309 | } |
1316 | 1310 | ||
1317 | /** | ||
1318 | * bfin_error_handler - Stock error handler for DMA controller | ||
1319 | * @ap: port to handle error for | ||
1320 | */ | ||
1321 | |||
1322 | static void bfin_error_handler(struct ata_port *ap) | ||
1323 | { | ||
1324 | ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL, | ||
1325 | bfin_std_postreset); | ||
1326 | } | ||
1327 | |||
1328 | static void bfin_port_stop(struct ata_port *ap) | 1311 | static void bfin_port_stop(struct ata_port *ap) |
1329 | { | 1312 | { |
1330 | dev_dbg(ap->dev, "in atapi port stop\n"); | 1313 | dev_dbg(ap->dev, "in atapi port stop\n"); |
@@ -1357,51 +1340,40 @@ static int bfin_port_start(struct ata_port *ap) | |||
1357 | } | 1340 | } |
1358 | 1341 | ||
1359 | static struct scsi_host_template bfin_sht = { | 1342 | static struct scsi_host_template bfin_sht = { |
1360 | .module = THIS_MODULE, | 1343 | ATA_BASE_SHT(DRV_NAME), |
1361 | .name = DRV_NAME, | ||
1362 | .ioctl = ata_scsi_ioctl, | ||
1363 | .queuecommand = ata_scsi_queuecmd, | ||
1364 | .can_queue = ATA_DEF_QUEUE, | ||
1365 | .this_id = ATA_SHT_THIS_ID, | ||
1366 | .sg_tablesize = SG_NONE, | 1344 | .sg_tablesize = SG_NONE, |
1367 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
1368 | .emulated = ATA_SHT_EMULATED, | ||
1369 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
1370 | .proc_name = DRV_NAME, | ||
1371 | .dma_boundary = ATA_DMA_BOUNDARY, | 1345 | .dma_boundary = ATA_DMA_BOUNDARY, |
1372 | .slave_configure = ata_scsi_slave_config, | ||
1373 | .slave_destroy = ata_scsi_slave_destroy, | ||
1374 | .bios_param = ata_std_bios_param, | ||
1375 | }; | 1346 | }; |
1376 | 1347 | ||
1377 | static const struct ata_port_operations bfin_pata_ops = { | 1348 | static const struct ata_port_operations bfin_pata_ops = { |
1349 | .inherits = &ata_sff_port_ops, | ||
1350 | |||
1378 | .set_piomode = bfin_set_piomode, | 1351 | .set_piomode = bfin_set_piomode, |
1379 | .set_dmamode = bfin_set_dmamode, | 1352 | .set_dmamode = bfin_set_dmamode, |
1380 | 1353 | ||
1381 | .tf_load = bfin_tf_load, | 1354 | .sff_tf_load = bfin_tf_load, |
1382 | .tf_read = bfin_tf_read, | 1355 | .sff_tf_read = bfin_tf_read, |
1383 | .exec_command = bfin_exec_command, | 1356 | .sff_exec_command = bfin_exec_command, |
1384 | .check_status = bfin_check_status, | 1357 | .sff_check_status = bfin_check_status, |
1385 | .check_altstatus = bfin_check_altstatus, | 1358 | .sff_check_altstatus = bfin_check_altstatus, |
1386 | .dev_select = bfin_std_dev_select, | 1359 | .sff_dev_select = bfin_dev_select, |
1387 | 1360 | ||
1388 | .bmdma_setup = bfin_bmdma_setup, | 1361 | .bmdma_setup = bfin_bmdma_setup, |
1389 | .bmdma_start = bfin_bmdma_start, | 1362 | .bmdma_start = bfin_bmdma_start, |
1390 | .bmdma_stop = bfin_bmdma_stop, | 1363 | .bmdma_stop = bfin_bmdma_stop, |
1391 | .bmdma_status = bfin_bmdma_status, | 1364 | .bmdma_status = bfin_bmdma_status, |
1392 | .data_xfer = bfin_data_xfer, | 1365 | .sff_data_xfer = bfin_data_xfer, |
1393 | 1366 | ||
1394 | .qc_prep = ata_noop_qc_prep, | 1367 | .qc_prep = ata_noop_qc_prep, |
1395 | .qc_issue = ata_qc_issue_prot, | ||
1396 | 1368 | ||
1397 | .freeze = bfin_bmdma_freeze, | 1369 | .freeze = bfin_freeze, |
1398 | .thaw = bfin_bmdma_thaw, | 1370 | .thaw = bfin_thaw, |
1399 | .error_handler = bfin_error_handler, | 1371 | .softreset = bfin_softreset, |
1372 | .postreset = bfin_postreset, | ||
1400 | .post_internal_cmd = bfin_bmdma_stop, | 1373 | .post_internal_cmd = bfin_bmdma_stop, |
1401 | 1374 | ||
1402 | .irq_handler = ata_interrupt, | 1375 | .sff_irq_clear = bfin_irq_clear, |
1403 | .irq_clear = bfin_irq_clear, | 1376 | .sff_irq_on = bfin_irq_on, |
1404 | .irq_on = bfin_irq_on, | ||
1405 | 1377 | ||
1406 | .port_start = bfin_port_start, | 1378 | .port_start = bfin_port_start, |
1407 | .port_stop = bfin_port_stop, | 1379 | .port_stop = bfin_port_stop, |
@@ -1409,7 +1381,6 @@ static const struct ata_port_operations bfin_pata_ops = { | |||
1409 | 1381 | ||
1410 | static struct ata_port_info bfin_port_info[] = { | 1382 | static struct ata_port_info bfin_port_info[] = { |
1411 | { | 1383 | { |
1412 | .sht = &bfin_sht, | ||
1413 | .flags = ATA_FLAG_SLAVE_POSS | 1384 | .flags = ATA_FLAG_SLAVE_POSS |
1414 | | ATA_FLAG_MMIO | 1385 | | ATA_FLAG_MMIO |
1415 | | ATA_FLAG_NO_LEGACY, | 1386 | | ATA_FLAG_NO_LEGACY, |
@@ -1536,7 +1507,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev) | |||
1536 | } | 1507 | } |
1537 | 1508 | ||
1538 | if (ata_host_activate(host, platform_get_irq(pdev, 0), | 1509 | if (ata_host_activate(host, platform_get_irq(pdev, 0), |
1539 | ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) { | 1510 | ata_sff_interrupt, IRQF_SHARED, &bfin_sht) != 0) { |
1540 | peripheral_free_list(atapi_io_port); | 1511 | peripheral_free_list(atapi_io_port); |
1541 | dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); | 1512 | dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); |
1542 | return -ENODEV; | 1513 | return -ENODEV; |
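pata_bf54x shows the other half of the API rename: drivers that provide their own taskfile, status, and device-select methods now hook them through the sff_-prefixed fields, and the private ->error_handler built on ata_bmdma_drive_eh() disappears in favour of plain .softreset/.postreset callbacks. A trimmed sketch of the converted ops (the bfin_* functions are the driver's own; the bmdma and port start/stop hooks are elided here):

static const struct ata_port_operations bfin_pata_ops = {
	.inherits		= &ata_sff_port_ops,

	/* controller-specific register access goes into the sff_ slots */
	.sff_tf_load		= bfin_tf_load,
	.sff_tf_read		= bfin_tf_read,
	.sff_exec_command	= bfin_exec_command,
	.sff_check_status	= bfin_check_status,
	.sff_check_altstatus	= bfin_check_altstatus,
	.sff_dev_select		= bfin_dev_select,
	.sff_data_xfer		= bfin_data_xfer,
	.sff_irq_clear		= bfin_irq_clear,
	.sff_irq_on		= bfin_irq_on,

	/* reset methods plug straight into EH instead of a private handler */
	.softreset		= bfin_softreset,
	.postreset		= bfin_postreset,

	.set_piomode		= bfin_set_piomode,
	.set_dmamode		= bfin_set_dmamode,
};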
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index 43d198f90968..2de30b990278 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c | |||
@@ -107,8 +107,8 @@ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
107 | pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover); | 107 | pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover); |
108 | } else { | 108 | } else { |
109 | /* Save the shared timings for channel, they will be loaded | 109 | /* Save the shared timings for channel, they will be loaded |
110 | by qc_issue_prot. Reloading the setup time is expensive | 110 | by qc_issue. Reloading the setup time is expensive so we |
111 | so we keep a merged one loaded */ | 111 | keep a merged one loaded */ |
112 | pci_read_config_byte(pdev, ARTIM23, ®); | 112 | pci_read_config_byte(pdev, ARTIM23, ®); |
113 | reg &= 0x3F; | 113 | reg &= 0x3F; |
114 | reg |= t.setup; | 114 | reg |= t.setup; |
@@ -119,14 +119,14 @@ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
119 | 119 | ||
120 | 120 | ||
121 | /** | 121 | /** |
122 | * cmd640_qc_issue_prot - command preparation hook | 122 | * cmd640_qc_issue - command preparation hook |
123 | * @qc: Command to be issued | 123 | * @qc: Command to be issued |
124 | * | 124 | * |
125 | * Channel 1 has shared timings. We must reprogram the | 125 | * Channel 1 has shared timings. We must reprogram the |
126 | * clock each time we switch between drive 2 and drive 3. | 126 | * clock each time we switch between drive 2 and drive 3. |
127 | */ | 127 | */ |
128 | 128 | ||
129 | static unsigned int cmd640_qc_issue_prot(struct ata_queued_cmd *qc) | 129 | static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc) |
130 | { | 130 | { |
131 | struct ata_port *ap = qc->ap; | 131 | struct ata_port *ap = qc->ap; |
132 | struct ata_device *adev = qc->dev; | 132 | struct ata_device *adev = qc->dev; |
@@ -137,7 +137,7 @@ static unsigned int cmd640_qc_issue_prot(struct ata_queued_cmd *qc) | |||
137 | pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]); | 137 | pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]); |
138 | timing->last = adev->devno; | 138 | timing->last = adev->devno; |
139 | } | 139 | } |
140 | return ata_qc_issue_prot(qc); | 140 | return ata_sff_qc_issue(qc); |
141 | } | 141 | } |
142 | 142 | ||
143 | /** | 143 | /** |
@@ -166,53 +166,16 @@ static int cmd640_port_start(struct ata_port *ap) | |||
166 | } | 166 | } |
167 | 167 | ||
168 | static struct scsi_host_template cmd640_sht = { | 168 | static struct scsi_host_template cmd640_sht = { |
169 | .module = THIS_MODULE, | 169 | ATA_BMDMA_SHT(DRV_NAME), |
170 | .name = DRV_NAME, | ||
171 | .ioctl = ata_scsi_ioctl, | ||
172 | .queuecommand = ata_scsi_queuecmd, | ||
173 | .can_queue = ATA_DEF_QUEUE, | ||
174 | .this_id = ATA_SHT_THIS_ID, | ||
175 | .sg_tablesize = LIBATA_MAX_PRD, | ||
176 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
177 | .emulated = ATA_SHT_EMULATED, | ||
178 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
179 | .proc_name = DRV_NAME, | ||
180 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
181 | .slave_configure = ata_scsi_slave_config, | ||
182 | .slave_destroy = ata_scsi_slave_destroy, | ||
183 | .bios_param = ata_std_bios_param, | ||
184 | }; | 170 | }; |
185 | 171 | ||
186 | static struct ata_port_operations cmd640_port_ops = { | 172 | static struct ata_port_operations cmd640_port_ops = { |
187 | .set_piomode = cmd640_set_piomode, | 173 | .inherits = &ata_bmdma_port_ops, |
188 | .mode_filter = ata_pci_default_filter, | 174 | /* In theory xfer_noirq is not needed once we kill the prefetcher */ |
189 | .tf_load = ata_tf_load, | 175 | .sff_data_xfer = ata_sff_data_xfer_noirq, |
190 | .tf_read = ata_tf_read, | 176 | .qc_issue = cmd640_qc_issue, |
191 | .check_status = ata_check_status, | ||
192 | .exec_command = ata_exec_command, | ||
193 | .dev_select = ata_std_dev_select, | ||
194 | |||
195 | .freeze = ata_bmdma_freeze, | ||
196 | .thaw = ata_bmdma_thaw, | ||
197 | .error_handler = ata_bmdma_error_handler, | ||
198 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
199 | .cable_detect = ata_cable_40wire, | 177 | .cable_detect = ata_cable_40wire, |
200 | 178 | .set_piomode = cmd640_set_piomode, | |
201 | .bmdma_setup = ata_bmdma_setup, | ||
202 | .bmdma_start = ata_bmdma_start, | ||
203 | .bmdma_stop = ata_bmdma_stop, | ||
204 | .bmdma_status = ata_bmdma_status, | ||
205 | |||
206 | .qc_prep = ata_qc_prep, | ||
207 | .qc_issue = cmd640_qc_issue_prot, | ||
208 | |||
209 | /* In theory this is not needed once we kill the prefetcher */ | ||
210 | .data_xfer = ata_data_xfer_noirq, | ||
211 | |||
212 | .irq_handler = ata_interrupt, | ||
213 | .irq_clear = ata_bmdma_irq_clear, | ||
214 | .irq_on = ata_irq_on, | ||
215 | |||
216 | .port_start = cmd640_port_start, | 179 | .port_start = cmd640_port_start, |
217 | }; | 180 | }; |
218 | 181 | ||
@@ -248,26 +211,36 @@ static void cmd640_hardware_init(struct pci_dev *pdev) | |||
248 | static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 211 | static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
249 | { | 212 | { |
250 | static const struct ata_port_info info = { | 213 | static const struct ata_port_info info = { |
251 | .sht = &cmd640_sht, | ||
252 | .flags = ATA_FLAG_SLAVE_POSS, | 214 | .flags = ATA_FLAG_SLAVE_POSS, |
253 | .pio_mask = 0x1f, | 215 | .pio_mask = 0x1f, |
254 | .port_ops = &cmd640_port_ops | 216 | .port_ops = &cmd640_port_ops |
255 | }; | 217 | }; |
256 | const struct ata_port_info *ppi[] = { &info, NULL }; | 218 | const struct ata_port_info *ppi[] = { &info, NULL }; |
219 | int rc; | ||
220 | |||
221 | rc = pcim_enable_device(pdev); | ||
222 | if (rc) | ||
223 | return rc; | ||
257 | 224 | ||
258 | cmd640_hardware_init(pdev); | 225 | cmd640_hardware_init(pdev); |
259 | return ata_pci_init_one(pdev, ppi); | 226 | |
227 | return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL); | ||
260 | } | 228 | } |
261 | 229 | ||
230 | #ifdef CONFIG_PM | ||
262 | static int cmd640_reinit_one(struct pci_dev *pdev) | 231 | static int cmd640_reinit_one(struct pci_dev *pdev) |
263 | { | 232 | { |
233 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
234 | int rc; | ||
235 | |||
236 | rc = ata_pci_device_do_resume(pdev); | ||
237 | if (rc) | ||
238 | return rc; | ||
264 | cmd640_hardware_init(pdev); | 239 | cmd640_hardware_init(pdev); |
265 | #ifdef CONFIG_PM | 240 | ata_host_resume(host); |
266 | return ata_pci_device_resume(pdev); | ||
267 | #else | ||
268 | return 0; | 241 | return 0; |
269 | #endif | ||
270 | } | 242 | } |
243 | #endif | ||
271 | 244 | ||
272 | static const struct pci_device_id cmd640[] = { | 245 | static const struct pci_device_id cmd640[] = { |
273 | { PCI_VDEVICE(CMD, 0x640), 0 }, | 246 | { PCI_VDEVICE(CMD, 0x640), 0 }, |
@@ -281,8 +254,8 @@ static struct pci_driver cmd640_pci_driver = { | |||
281 | .remove = ata_pci_remove_one, | 254 | .remove = ata_pci_remove_one, |
282 | #ifdef CONFIG_PM | 255 | #ifdef CONFIG_PM |
283 | .suspend = ata_pci_device_suspend, | 256 | .suspend = ata_pci_device_suspend, |
284 | #endif | ||
285 | .resume = cmd640_reinit_one, | 257 | .resume = cmd640_reinit_one, |
258 | #endif | ||
286 | }; | 259 | }; |
287 | 260 | ||
288 | static int __init cmd640_init(void) | 261 | static int __init cmd640_init(void) |
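cmd640 also shows the new power-management split: the driver enables its PCI device itself with pcim_enable_device() before poking the chip, and resume becomes ata_pci_device_do_resume() plus chip reinit plus ata_host_resume(), instead of the old all-in-one ata_pci_device_resume(). The resume half, essentially as it appears in the hunk above:

#ifdef CONFIG_PM
static int cmd640_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);	/* restore PCI state, re-enable */
	if (rc)
		return rc;

	cmd640_hardware_init(pdev);		/* redo chip-specific setup */

	ata_host_resume(host);			/* let EH bring the ports back */
	return 0;
}
#endif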
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 7acbbd9ee469..ddd09b7d98c9 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c | |||
@@ -266,120 +266,30 @@ static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc) | |||
266 | } | 266 | } |
267 | 267 | ||
268 | static struct scsi_host_template cmd64x_sht = { | 268 | static struct scsi_host_template cmd64x_sht = { |
269 | .module = THIS_MODULE, | 269 | ATA_BMDMA_SHT(DRV_NAME), |
270 | .name = DRV_NAME, | ||
271 | .ioctl = ata_scsi_ioctl, | ||
272 | .queuecommand = ata_scsi_queuecmd, | ||
273 | .can_queue = ATA_DEF_QUEUE, | ||
274 | .this_id = ATA_SHT_THIS_ID, | ||
275 | .sg_tablesize = LIBATA_MAX_PRD, | ||
276 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
277 | .emulated = ATA_SHT_EMULATED, | ||
278 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
279 | .proc_name = DRV_NAME, | ||
280 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
281 | .slave_configure = ata_scsi_slave_config, | ||
282 | .slave_destroy = ata_scsi_slave_destroy, | ||
283 | .bios_param = ata_std_bios_param, | ||
284 | }; | 270 | }; |
285 | 271 | ||
286 | static struct ata_port_operations cmd64x_port_ops = { | 272 | static const struct ata_port_operations cmd64x_base_ops = { |
273 | .inherits = &ata_bmdma_port_ops, | ||
287 | .set_piomode = cmd64x_set_piomode, | 274 | .set_piomode = cmd64x_set_piomode, |
288 | .set_dmamode = cmd64x_set_dmamode, | 275 | .set_dmamode = cmd64x_set_dmamode, |
289 | .mode_filter = ata_pci_default_filter, | ||
290 | .tf_load = ata_tf_load, | ||
291 | .tf_read = ata_tf_read, | ||
292 | .check_status = ata_check_status, | ||
293 | .exec_command = ata_exec_command, | ||
294 | .dev_select = ata_std_dev_select, | ||
295 | |||
296 | .freeze = ata_bmdma_freeze, | ||
297 | .thaw = ata_bmdma_thaw, | ||
298 | .error_handler = ata_bmdma_error_handler, | ||
299 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
300 | .cable_detect = ata_cable_40wire, | ||
301 | |||
302 | .bmdma_setup = ata_bmdma_setup, | ||
303 | .bmdma_start = ata_bmdma_start, | ||
304 | .bmdma_stop = ata_bmdma_stop, | ||
305 | .bmdma_status = ata_bmdma_status, | ||
306 | |||
307 | .qc_prep = ata_qc_prep, | ||
308 | .qc_issue = ata_qc_issue_prot, | ||
309 | |||
310 | .data_xfer = ata_data_xfer, | ||
311 | |||
312 | .irq_handler = ata_interrupt, | ||
313 | .irq_clear = ata_bmdma_irq_clear, | ||
314 | .irq_on = ata_irq_on, | ||
315 | |||
316 | .port_start = ata_port_start, | ||
317 | }; | 276 | }; |
318 | 277 | ||
319 | static struct ata_port_operations cmd646r1_port_ops = { | 278 | static struct ata_port_operations cmd64x_port_ops = { |
320 | .set_piomode = cmd64x_set_piomode, | 279 | .inherits = &cmd64x_base_ops, |
321 | .set_dmamode = cmd64x_set_dmamode, | ||
322 | .mode_filter = ata_pci_default_filter, | ||
323 | .tf_load = ata_tf_load, | ||
324 | .tf_read = ata_tf_read, | ||
325 | .check_status = ata_check_status, | ||
326 | .exec_command = ata_exec_command, | ||
327 | .dev_select = ata_std_dev_select, | ||
328 | |||
329 | .freeze = ata_bmdma_freeze, | ||
330 | .thaw = ata_bmdma_thaw, | ||
331 | .error_handler = ata_bmdma_error_handler, | ||
332 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
333 | .cable_detect = ata_cable_40wire, | 280 | .cable_detect = ata_cable_40wire, |
281 | }; | ||
334 | 282 | ||
335 | .bmdma_setup = ata_bmdma_setup, | 283 | static struct ata_port_operations cmd646r1_port_ops = { |
336 | .bmdma_start = ata_bmdma_start, | 284 | .inherits = &cmd64x_base_ops, |
337 | .bmdma_stop = cmd646r1_bmdma_stop, | 285 | .bmdma_stop = cmd646r1_bmdma_stop, |
338 | .bmdma_status = ata_bmdma_status, | 286 | .cable_detect = ata_cable_40wire, |
339 | |||
340 | .qc_prep = ata_qc_prep, | ||
341 | .qc_issue = ata_qc_issue_prot, | ||
342 | |||
343 | .data_xfer = ata_data_xfer, | ||
344 | |||
345 | .irq_handler = ata_interrupt, | ||
346 | .irq_clear = ata_bmdma_irq_clear, | ||
347 | .irq_on = ata_irq_on, | ||
348 | |||
349 | .port_start = ata_port_start, | ||
350 | }; | 287 | }; |
351 | 288 | ||
352 | static struct ata_port_operations cmd648_port_ops = { | 289 | static struct ata_port_operations cmd648_port_ops = { |
353 | .set_piomode = cmd64x_set_piomode, | 290 | .inherits = &cmd64x_base_ops, |
354 | .set_dmamode = cmd64x_set_dmamode, | ||
355 | .mode_filter = ata_pci_default_filter, | ||
356 | .tf_load = ata_tf_load, | ||
357 | .tf_read = ata_tf_read, | ||
358 | .check_status = ata_check_status, | ||
359 | .exec_command = ata_exec_command, | ||
360 | .dev_select = ata_std_dev_select, | ||
361 | |||
362 | .freeze = ata_bmdma_freeze, | ||
363 | .thaw = ata_bmdma_thaw, | ||
364 | .error_handler = ata_bmdma_error_handler, | ||
365 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
366 | .cable_detect = cmd648_cable_detect, | ||
367 | |||
368 | .bmdma_setup = ata_bmdma_setup, | ||
369 | .bmdma_start = ata_bmdma_start, | ||
370 | .bmdma_stop = cmd648_bmdma_stop, | 291 | .bmdma_stop = cmd648_bmdma_stop, |
371 | .bmdma_status = ata_bmdma_status, | 292 | .cable_detect = cmd648_cable_detect, |
372 | |||
373 | .qc_prep = ata_qc_prep, | ||
374 | .qc_issue = ata_qc_issue_prot, | ||
375 | |||
376 | .data_xfer = ata_data_xfer, | ||
377 | |||
378 | .irq_handler = ata_interrupt, | ||
379 | .irq_clear = ata_bmdma_irq_clear, | ||
380 | .irq_on = ata_irq_on, | ||
381 | |||
382 | .port_start = ata_port_start, | ||
383 | }; | 293 | }; |
384 | 294 | ||
385 | static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 295 | static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
@@ -388,21 +298,18 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
388 | 298 | ||
389 | static const struct ata_port_info cmd_info[6] = { | 299 | static const struct ata_port_info cmd_info[6] = { |
390 | { /* CMD 643 - no UDMA */ | 300 | { /* CMD 643 - no UDMA */ |
391 | .sht = &cmd64x_sht, | ||
392 | .flags = ATA_FLAG_SLAVE_POSS, | 301 | .flags = ATA_FLAG_SLAVE_POSS, |
393 | .pio_mask = 0x1f, | 302 | .pio_mask = 0x1f, |
394 | .mwdma_mask = 0x07, | 303 | .mwdma_mask = 0x07, |
395 | .port_ops = &cmd64x_port_ops | 304 | .port_ops = &cmd64x_port_ops |
396 | }, | 305 | }, |
397 | { /* CMD 646 with broken UDMA */ | 306 | { /* CMD 646 with broken UDMA */ |
398 | .sht = &cmd64x_sht, | ||
399 | .flags = ATA_FLAG_SLAVE_POSS, | 307 | .flags = ATA_FLAG_SLAVE_POSS, |
400 | .pio_mask = 0x1f, | 308 | .pio_mask = 0x1f, |
401 | .mwdma_mask = 0x07, | 309 | .mwdma_mask = 0x07, |
402 | .port_ops = &cmd64x_port_ops | 310 | .port_ops = &cmd64x_port_ops |
403 | }, | 311 | }, |
404 | { /* CMD 646 with working UDMA */ | 312 | { /* CMD 646 with working UDMA */ |
405 | .sht = &cmd64x_sht, | ||
406 | .flags = ATA_FLAG_SLAVE_POSS, | 313 | .flags = ATA_FLAG_SLAVE_POSS, |
407 | .pio_mask = 0x1f, | 314 | .pio_mask = 0x1f, |
408 | .mwdma_mask = 0x07, | 315 | .mwdma_mask = 0x07, |
@@ -410,14 +317,12 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
410 | .port_ops = &cmd64x_port_ops | 317 | .port_ops = &cmd64x_port_ops |
411 | }, | 318 | }, |
412 | { /* CMD 646 rev 1 */ | 319 | { /* CMD 646 rev 1 */ |
413 | .sht = &cmd64x_sht, | ||
414 | .flags = ATA_FLAG_SLAVE_POSS, | 320 | .flags = ATA_FLAG_SLAVE_POSS, |
415 | .pio_mask = 0x1f, | 321 | .pio_mask = 0x1f, |
416 | .mwdma_mask = 0x07, | 322 | .mwdma_mask = 0x07, |
417 | .port_ops = &cmd646r1_port_ops | 323 | .port_ops = &cmd646r1_port_ops |
418 | }, | 324 | }, |
419 | { /* CMD 648 */ | 325 | { /* CMD 648 */ |
420 | .sht = &cmd64x_sht, | ||
421 | .flags = ATA_FLAG_SLAVE_POSS, | 326 | .flags = ATA_FLAG_SLAVE_POSS, |
422 | .pio_mask = 0x1f, | 327 | .pio_mask = 0x1f, |
423 | .mwdma_mask = 0x07, | 328 | .mwdma_mask = 0x07, |
@@ -425,7 +330,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
425 | .port_ops = &cmd648_port_ops | 330 | .port_ops = &cmd648_port_ops |
426 | }, | 331 | }, |
427 | { /* CMD 649 */ | 332 | { /* CMD 649 */ |
428 | .sht = &cmd64x_sht, | ||
429 | .flags = ATA_FLAG_SLAVE_POSS, | 333 | .flags = ATA_FLAG_SLAVE_POSS, |
430 | .pio_mask = 0x1f, | 334 | .pio_mask = 0x1f, |
431 | .mwdma_mask = 0x07, | 335 | .mwdma_mask = 0x07, |
@@ -435,12 +339,17 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
435 | }; | 339 | }; |
436 | const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL }; | 340 | const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL }; |
437 | u8 mrdmode; | 341 | u8 mrdmode; |
342 | int rc; | ||
343 | |||
344 | rc = pcim_enable_device(pdev); | ||
345 | if (rc) | ||
346 | return rc; | ||
438 | 347 | ||
439 | pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); | 348 | pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); |
440 | class_rev &= 0xFF; | 349 | class_rev &= 0xFF; |
441 | 350 | ||
442 | if (id->driver_data == 0) /* 643 */ | 351 | if (id->driver_data == 0) /* 643 */ |
443 | ata_pci_clear_simplex(pdev); | 352 | ata_pci_bmdma_clear_simplex(pdev); |
444 | 353 | ||
445 | if (pdev->device == PCI_DEVICE_ID_CMD_646) { | 354 | if (pdev->device == PCI_DEVICE_ID_CMD_646) { |
446 | /* Does UDMA work ? */ | 355 | /* Does UDMA work ? */ |
@@ -464,13 +373,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
464 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); | 373 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); |
465 | #endif | 374 | #endif |
466 | 375 | ||
467 | return ata_pci_init_one(pdev, ppi); | 376 | return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL); |
468 | } | 377 | } |
469 | 378 | ||
470 | #ifdef CONFIG_PM | 379 | #ifdef CONFIG_PM |
471 | static int cmd64x_reinit_one(struct pci_dev *pdev) | 380 | static int cmd64x_reinit_one(struct pci_dev *pdev) |
472 | { | 381 | { |
382 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
473 | u8 mrdmode; | 383 | u8 mrdmode; |
384 | int rc; | ||
385 | |||
386 | rc = ata_pci_device_do_resume(pdev); | ||
387 | if (rc) | ||
388 | return rc; | ||
389 | |||
474 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); | 390 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); |
475 | pci_read_config_byte(pdev, MRDMODE, &mrdmode); | 391 | pci_read_config_byte(pdev, MRDMODE, &mrdmode); |
476 | mrdmode &= ~ 0x30; /* IRQ set up */ | 392 | mrdmode &= ~ 0x30; /* IRQ set up */ |
@@ -479,7 +395,8 @@ static int cmd64x_reinit_one(struct pci_dev *pdev) | |||
479 | #ifdef CONFIG_PPC | 395 | #ifdef CONFIG_PPC |
480 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); | 396 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); |
481 | #endif | 397 | #endif |
482 | return ata_pci_device_resume(pdev); | 398 | ata_host_resume(host); |
399 | return 0; | ||
483 | } | 400 | } |
484 | #endif | 401 | #endif |
485 | 402 | ||
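cmd64x demonstrates that .inherits chains: a shared cmd64x_base_ops inherits from ata_bmdma_port_ops, and the per-chip variants inherit from that base, each overriding only a hook or two. Condensed from the hunk above:

static const struct ata_port_operations cmd64x_base_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.set_piomode	= cmd64x_set_piomode,
	.set_dmamode	= cmd64x_set_dmamode,
};

static struct ata_port_operations cmd646r1_port_ops = {
	.inherits	= &cmd64x_base_ops,	/* second level of inheritance */
	.bmdma_stop	= cmd646r1_bmdma_stop,	/* rev 1 needs its own stop */
	.cable_detect	= ata_cable_40wire,
};

static struct ata_port_operations cmd648_port_ops = {
	.inherits	= &cmd64x_base_ops,
	.bmdma_stop	= cmd648_bmdma_stop,
	.cable_detect	= cmd648_cable_detect,	/* 648 can report the cable type */
};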
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 7ed279b0a12e..1186bcd2781c 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c | |||
@@ -140,51 +140,16 @@ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
140 | } | 140 | } |
141 | 141 | ||
142 | static struct scsi_host_template cs5520_sht = { | 142 | static struct scsi_host_template cs5520_sht = { |
143 | .module = THIS_MODULE, | 143 | ATA_BMDMA_SHT(DRV_NAME), |
144 | .name = DRV_NAME, | ||
145 | .ioctl = ata_scsi_ioctl, | ||
146 | .queuecommand = ata_scsi_queuecmd, | ||
147 | .can_queue = ATA_DEF_QUEUE, | ||
148 | .this_id = ATA_SHT_THIS_ID, | ||
149 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, | 144 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, |
150 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
151 | .emulated = ATA_SHT_EMULATED, | ||
152 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
153 | .proc_name = DRV_NAME, | ||
154 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
155 | .slave_configure = ata_scsi_slave_config, | ||
156 | .slave_destroy = ata_scsi_slave_destroy, | ||
157 | .bios_param = ata_std_bios_param, | ||
158 | }; | 145 | }; |
159 | 146 | ||
160 | static struct ata_port_operations cs5520_port_ops = { | 147 | static struct ata_port_operations cs5520_port_ops = { |
148 | .inherits = &ata_bmdma_port_ops, | ||
149 | .qc_prep = ata_sff_dumb_qc_prep, | ||
150 | .cable_detect = ata_cable_40wire, | ||
161 | .set_piomode = cs5520_set_piomode, | 151 | .set_piomode = cs5520_set_piomode, |
162 | .set_dmamode = cs5520_set_dmamode, | 152 | .set_dmamode = cs5520_set_dmamode, |
163 | |||
164 | .tf_load = ata_tf_load, | ||
165 | .tf_read = ata_tf_read, | ||
166 | .check_status = ata_check_status, | ||
167 | .exec_command = ata_exec_command, | ||
168 | .dev_select = ata_std_dev_select, | ||
169 | |||
170 | .freeze = ata_bmdma_freeze, | ||
171 | .thaw = ata_bmdma_thaw, | ||
172 | .error_handler = ata_bmdma_error_handler, | ||
173 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
174 | .cable_detect = ata_cable_40wire, | ||
175 | |||
176 | .bmdma_setup = ata_bmdma_setup, | ||
177 | .bmdma_start = ata_bmdma_start, | ||
178 | .bmdma_stop = ata_bmdma_stop, | ||
179 | .bmdma_status = ata_bmdma_status, | ||
180 | .qc_prep = ata_dumb_qc_prep, | ||
181 | .qc_issue = ata_qc_issue_prot, | ||
182 | .data_xfer = ata_data_xfer, | ||
183 | |||
184 | .irq_clear = ata_bmdma_irq_clear, | ||
185 | .irq_on = ata_irq_on, | ||
186 | |||
187 | .port_start = ata_sff_port_start, | ||
188 | }; | 153 | }; |
189 | 154 | ||
190 | static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 155 | static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
@@ -203,6 +168,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
203 | struct ata_ioports *ioaddr; | 168 | struct ata_ioports *ioaddr; |
204 | int i, rc; | 169 | int i, rc; |
205 | 170 | ||
171 | rc = pcim_enable_device(pdev); | ||
172 | if (rc) | ||
173 | return rc; | ||
174 | |||
206 | /* IDE port enable bits */ | 175 | /* IDE port enable bits */ |
207 | pci_read_config_byte(pdev, 0x60, &pcicfg); | 176 | pci_read_config_byte(pdev, 0x60, &pcicfg); |
208 | 177 | ||
@@ -258,7 +227,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
258 | ioaddr->ctl_addr = iomap[1]; | 227 | ioaddr->ctl_addr = iomap[1]; |
259 | ioaddr->altstatus_addr = iomap[1]; | 228 | ioaddr->altstatus_addr = iomap[1]; |
260 | ioaddr->bmdma_addr = iomap[4]; | 229 | ioaddr->bmdma_addr = iomap[4]; |
261 | ata_std_ports(ioaddr); | 230 | ata_sff_std_ports(ioaddr); |
262 | 231 | ||
263 | ata_port_desc(host->ports[0], | 232 | ata_port_desc(host->ports[0], |
264 | "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); | 233 | "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); |
@@ -269,7 +238,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
269 | ioaddr->ctl_addr = iomap[3]; | 238 | ioaddr->ctl_addr = iomap[3]; |
270 | ioaddr->altstatus_addr = iomap[3]; | 239 | ioaddr->altstatus_addr = iomap[3]; |
271 | ioaddr->bmdma_addr = iomap[4] + 8; | 240 | ioaddr->bmdma_addr = iomap[4] + 8; |
272 | ata_std_ports(ioaddr); | 241 | ata_sff_std_ports(ioaddr); |
273 | 242 | ||
274 | ata_port_desc(host->ports[1], | 243 | ata_port_desc(host->ports[1], |
275 | "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); | 244 | "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); |
@@ -289,7 +258,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
289 | continue; | 258 | continue; |
290 | 259 | ||
291 | rc = devm_request_irq(&pdev->dev, irq[ap->port_no], | 260 | rc = devm_request_irq(&pdev->dev, irq[ap->port_no], |
292 | ata_interrupt, 0, DRV_NAME, host); | 261 | ata_sff_interrupt, 0, DRV_NAME, host); |
293 | if (rc) | 262 | if (rc) |
294 | return rc; | 263 | return rc; |
295 | 264 | ||
@@ -310,11 +279,20 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
310 | 279 | ||
311 | static int cs5520_reinit_one(struct pci_dev *pdev) | 280 | static int cs5520_reinit_one(struct pci_dev *pdev) |
312 | { | 281 | { |
282 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
313 | u8 pcicfg; | 283 | u8 pcicfg; |
284 | int rc; | ||
285 | |||
286 | rc = ata_pci_device_do_resume(pdev); | ||
287 | if (rc) | ||
288 | return rc; | ||
289 | |||
314 | pci_read_config_byte(pdev, 0x60, &pcicfg); | 290 | pci_read_config_byte(pdev, 0x60, &pcicfg); |
315 | if ((pcicfg & 0x40) == 0) | 291 | if ((pcicfg & 0x40) == 0) |
316 | pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); | 292 | pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); |
317 | return ata_pci_device_resume(pdev); | 293 | |
294 | ata_host_resume(host); | ||
295 | return 0; | ||
318 | } | 296 | } |
319 | 297 | ||
320 | /** | 298 | /** |
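cs5520 wires up its own interrupt lines rather than using the PCI helper, so its conversion is mostly a set of renames: ata_std_ports() becomes ata_sff_std_ports() and the handler passed to devm_request_irq() becomes ata_sff_interrupt(). A hypothetical helper sketching that per-port hookup (wire_port_irqs() is not in the driver; host, irq[] and DRV_NAME are assumed to exist as in the probe code above):

static int wire_port_irqs(struct pci_dev *pdev, struct ata_host *host,
			  const int *irq)
{
	int i, rc;

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		/* same shared SFF handler the library helpers would install */
		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_sff_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;
	}
	return 0;
}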
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index e1818fdd9159..744beebaaf49 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c | |||
@@ -133,7 +133,7 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /** | 135 | /** |
136 | * cs5530_qc_issue_prot - command issue | 136 | * cs5530_qc_issue - command issue |
137 | * @qc: command pending | 137 | * @qc: command pending |
138 | * | 138 | * |
139 | * Called when the libata layer is about to issue a command. We wrap | 139 | * Called when the libata layer is about to issue a command. We wrap |
@@ -142,7 +142,7 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
142 | * one MWDMA/UDMA bit. | 142 | * one MWDMA/UDMA bit. |
143 | */ | 143 | */ |
144 | 144 | ||
145 | static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc) | 145 | static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc) |
146 | { | 146 | { |
147 | struct ata_port *ap = qc->ap; | 147 | struct ata_port *ap = qc->ap; |
148 | struct ata_device *adev = qc->dev; | 148 | struct ata_device *adev = qc->dev; |
@@ -157,59 +157,23 @@ static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc) | |||
157 | cs5530_set_dmamode(ap, adev); | 157 | cs5530_set_dmamode(ap, adev); |
158 | } | 158 | } |
159 | 159 | ||
160 | return ata_qc_issue_prot(qc); | 160 | return ata_sff_qc_issue(qc); |
161 | } | 161 | } |
162 | 162 | ||
163 | static struct scsi_host_template cs5530_sht = { | 163 | static struct scsi_host_template cs5530_sht = { |
164 | .module = THIS_MODULE, | 164 | ATA_BMDMA_SHT(DRV_NAME), |
165 | .name = DRV_NAME, | 165 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, |
166 | .ioctl = ata_scsi_ioctl, | ||
167 | .queuecommand = ata_scsi_queuecmd, | ||
168 | .can_queue = ATA_DEF_QUEUE, | ||
169 | .this_id = ATA_SHT_THIS_ID, | ||
170 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, | ||
171 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
172 | .emulated = ATA_SHT_EMULATED, | ||
173 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
174 | .proc_name = DRV_NAME, | ||
175 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
176 | .slave_configure = ata_scsi_slave_config, | ||
177 | .slave_destroy = ata_scsi_slave_destroy, | ||
178 | .bios_param = ata_std_bios_param, | ||
179 | }; | 166 | }; |
180 | 167 | ||
181 | static struct ata_port_operations cs5530_port_ops = { | 168 | static struct ata_port_operations cs5530_port_ops = { |
182 | .set_piomode = cs5530_set_piomode, | 169 | .inherits = &ata_bmdma_port_ops, |
183 | .set_dmamode = cs5530_set_dmamode, | ||
184 | .mode_filter = ata_pci_default_filter, | ||
185 | |||
186 | .tf_load = ata_tf_load, | ||
187 | .tf_read = ata_tf_read, | ||
188 | .check_status = ata_check_status, | ||
189 | .exec_command = ata_exec_command, | ||
190 | .dev_select = ata_std_dev_select, | ||
191 | |||
192 | .bmdma_setup = ata_bmdma_setup, | ||
193 | .bmdma_start = ata_bmdma_start, | ||
194 | .bmdma_stop = ata_bmdma_stop, | ||
195 | .bmdma_status = ata_bmdma_status, | ||
196 | |||
197 | .freeze = ata_bmdma_freeze, | ||
198 | .thaw = ata_bmdma_thaw, | ||
199 | .error_handler = ata_bmdma_error_handler, | ||
200 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
201 | .cable_detect = ata_cable_40wire, | ||
202 | |||
203 | .qc_prep = ata_dumb_qc_prep, | ||
204 | .qc_issue = cs5530_qc_issue_prot, | ||
205 | 170 | ||
206 | .data_xfer = ata_data_xfer, | 171 | .qc_prep = ata_sff_dumb_qc_prep, |
172 | .qc_issue = cs5530_qc_issue, | ||
207 | 173 | ||
208 | .irq_handler = ata_interrupt, | 174 | .cable_detect = ata_cable_40wire, |
209 | .irq_clear = ata_bmdma_irq_clear, | 175 | .set_piomode = cs5530_set_piomode, |
210 | .irq_on = ata_irq_on, | 176 | .set_dmamode = cs5530_set_dmamode, |
211 | |||
212 | .port_start = ata_sff_port_start, | ||
213 | }; | 177 | }; |
214 | 178 | ||
215 | static const struct dmi_system_id palmax_dmi_table[] = { | 179 | static const struct dmi_system_id palmax_dmi_table[] = { |
@@ -334,7 +298,6 @@ fail_put: | |||
334 | static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 298 | static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
335 | { | 299 | { |
336 | static const struct ata_port_info info = { | 300 | static const struct ata_port_info info = { |
337 | .sht = &cs5530_sht, | ||
338 | .flags = ATA_FLAG_SLAVE_POSS, | 301 | .flags = ATA_FLAG_SLAVE_POSS, |
339 | .pio_mask = 0x1f, | 302 | .pio_mask = 0x1f, |
340 | .mwdma_mask = 0x07, | 303 | .mwdma_mask = 0x07, |
@@ -343,12 +306,16 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
343 | }; | 306 | }; |
344 | /* The docking connector doesn't do UDMA, and it seems not MWDMA */ | 307 | /* The docking connector doesn't do UDMA, and it seems not MWDMA */ |
345 | static const struct ata_port_info info_palmax_secondary = { | 308 | static const struct ata_port_info info_palmax_secondary = { |
346 | .sht = &cs5530_sht, | ||
347 | .flags = ATA_FLAG_SLAVE_POSS, | 309 | .flags = ATA_FLAG_SLAVE_POSS, |
348 | .pio_mask = 0x1f, | 310 | .pio_mask = 0x1f, |
349 | .port_ops = &cs5530_port_ops | 311 | .port_ops = &cs5530_port_ops |
350 | }; | 312 | }; |
351 | const struct ata_port_info *ppi[] = { &info, NULL }; | 313 | const struct ata_port_info *ppi[] = { &info, NULL }; |
314 | int rc; | ||
315 | |||
316 | rc = pcim_enable_device(pdev); | ||
317 | if (rc) | ||
318 | return rc; | ||
352 | 319 | ||
353 | /* Chip initialisation */ | 320 | /* Chip initialisation */ |
354 | if (cs5530_init_chip()) | 321 | if (cs5530_init_chip()) |
@@ -358,16 +325,25 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
358 | ppi[1] = &info_palmax_secondary; | 325 | ppi[1] = &info_palmax_secondary; |
359 | 326 | ||
360 | /* Now kick off ATA set up */ | 327 | /* Now kick off ATA set up */ |
361 | return ata_pci_init_one(pdev, ppi); | 328 | return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL); |
362 | } | 329 | } |
363 | 330 | ||
364 | #ifdef CONFIG_PM | 331 | #ifdef CONFIG_PM |
365 | static int cs5530_reinit_one(struct pci_dev *pdev) | 332 | static int cs5530_reinit_one(struct pci_dev *pdev) |
366 | { | 333 | { |
334 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
335 | int rc; | ||
336 | |||
337 | rc = ata_pci_device_do_resume(pdev); | ||
338 | if (rc) | ||
339 | return rc; | ||
340 | |||
367 | /* If we fail on resume we are doomed */ | 341 | /* If we fail on resume we are doomed */ |
368 | if (cs5530_init_chip()) | 342 | if (cs5530_init_chip()) |
369 | BUG(); | 343 | return -EIO; |
370 | return ata_pci_device_resume(pdev); | 344 | |
345 | ata_host_resume(host); | ||
346 | return 0; | ||
371 | } | 347 | } |
372 | #endif /* CONFIG_PM */ | 348 | #endif /* CONFIG_PM */ |
373 | 349 | ||
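Drivers that wrap command issue to reload shared timing registers, such as cs5530 and cmd640, keep their wrapper but now tail-call ata_sff_qc_issue() (formerly ata_qc_issue_prot()). A simplified, hypothetical wrapper showing the shape of the pattern; the body is illustrative, not cs5530's exact logic, though cs5530_set_piomode()/cs5530_set_dmamode() are the driver's real functions:

static unsigned int my_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* reprogram the shared timing registers if the active device changed */
	if (ap->private_data != adev) {
		cs5530_set_piomode(ap, adev);
		if (ata_dma_enabled(adev))
			cs5530_set_dmamode(ap, adev);
		ap->private_data = adev;
	}

	/* then defer to the generic SFF issue path */
	return ata_sff_qc_issue(qc);
}

The same cs5530 hunk also softens the resume path: a failed cs5530_init_chip() now returns -EIO instead of calling BUG().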
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index 01324530d052..f1b6556f0483 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c | |||
@@ -158,55 +158,14 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
158 | } | 158 | } |
159 | 159 | ||
160 | static struct scsi_host_template cs5535_sht = { | 160 | static struct scsi_host_template cs5535_sht = { |
161 | .module = THIS_MODULE, | 161 | ATA_BMDMA_SHT(DRV_NAME), |
162 | .name = DRV_NAME, | ||
163 | .ioctl = ata_scsi_ioctl, | ||
164 | .queuecommand = ata_scsi_queuecmd, | ||
165 | .can_queue = ATA_DEF_QUEUE, | ||
166 | .this_id = ATA_SHT_THIS_ID, | ||
167 | .sg_tablesize = LIBATA_MAX_PRD, | ||
168 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
169 | .emulated = ATA_SHT_EMULATED, | ||
170 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
171 | .proc_name = DRV_NAME, | ||
172 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
173 | .slave_configure = ata_scsi_slave_config, | ||
174 | .slave_destroy = ata_scsi_slave_destroy, | ||
175 | .bios_param = ata_std_bios_param, | ||
176 | }; | 162 | }; |
177 | 163 | ||
178 | static struct ata_port_operations cs5535_port_ops = { | 164 | static struct ata_port_operations cs5535_port_ops = { |
165 | .inherits = &ata_bmdma_port_ops, | ||
166 | .cable_detect = cs5535_cable_detect, | ||
179 | .set_piomode = cs5535_set_piomode, | 167 | .set_piomode = cs5535_set_piomode, |
180 | .set_dmamode = cs5535_set_dmamode, | 168 | .set_dmamode = cs5535_set_dmamode, |
181 | .mode_filter = ata_pci_default_filter, | ||
182 | |||
183 | .tf_load = ata_tf_load, | ||
184 | .tf_read = ata_tf_read, | ||
185 | .check_status = ata_check_status, | ||
186 | .exec_command = ata_exec_command, | ||
187 | .dev_select = ata_std_dev_select, | ||
188 | |||
189 | .freeze = ata_bmdma_freeze, | ||
190 | .thaw = ata_bmdma_thaw, | ||
191 | .error_handler = ata_bmdma_error_handler, | ||
192 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
193 | .cable_detect = cs5535_cable_detect, | ||
194 | |||
195 | .bmdma_setup = ata_bmdma_setup, | ||
196 | .bmdma_start = ata_bmdma_start, | ||
197 | .bmdma_stop = ata_bmdma_stop, | ||
198 | .bmdma_status = ata_bmdma_status, | ||
199 | |||
200 | .qc_prep = ata_qc_prep, | ||
201 | .qc_issue = ata_qc_issue_prot, | ||
202 | |||
203 | .data_xfer = ata_data_xfer, | ||
204 | |||
205 | .irq_handler = ata_interrupt, | ||
206 | .irq_clear = ata_bmdma_irq_clear, | ||
207 | .irq_on = ata_irq_on, | ||
208 | |||
209 | .port_start = ata_sff_port_start, | ||
210 | }; | 169 | }; |
211 | 170 | ||
212 | /** | 171 | /** |
@@ -222,7 +181,6 @@ static struct ata_port_operations cs5535_port_ops = { | |||
222 | static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 181 | static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
223 | { | 182 | { |
224 | static const struct ata_port_info info = { | 183 | static const struct ata_port_info info = { |
225 | .sht = &cs5535_sht, | ||
226 | .flags = ATA_FLAG_SLAVE_POSS, | 184 | .flags = ATA_FLAG_SLAVE_POSS, |
227 | .pio_mask = 0x1f, | 185 | .pio_mask = 0x1f, |
228 | .mwdma_mask = 0x07, | 186 | .mwdma_mask = 0x07, |
@@ -241,7 +199,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
241 | rdmsr(ATAC_CH0D1_PIO, timings, dummy); | 199 | rdmsr(ATAC_CH0D1_PIO, timings, dummy); |
242 | if (CS5535_BAD_PIO(timings)) | 200 | if (CS5535_BAD_PIO(timings)) |
243 | wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); | 201 | wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); |
244 | return ata_pci_init_one(dev, ppi); | 202 | return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL); |
245 | } | 203 | } |
246 | 204 | ||
247 | static const struct pci_device_id cs5535[] = { | 205 | static const struct pci_device_id cs5535[] = { |
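
[Editor's note] The cs5535 conversion above is the core mechanical pattern of this series, and it repeats essentially unchanged for pata_cs5536 and pata_cypress below: the hand-rolled scsi_host_template collapses into the ATA_BMDMA_SHT() macro, the port operations inherit from ata_bmdma_port_ops and list only the chip-specific hooks, and the SHT moves out of ata_port_info into the ata_pci_sff_init_one() call. A hedged sketch of what such a converted BMDMA driver skeleton looks like; "pata_example" and the example_* names are hypothetical stand-ins, not code from the patch:

#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

#define DRV_NAME "pata_example"		/* hypothetical driver name */

static struct scsi_host_template example_sht = {
	ATA_BMDMA_SHT(DRV_NAME),	/* replaces the old boilerplate fields */
};

/* Chip-specific timing hooks: empty stubs here, real register pokes in a driver. */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev) { }
static void example_set_dmamode(struct ata_port *ap, struct ata_device *adev) { }

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* taskfile, BMDMA and EH defaults */
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= example_set_piomode,
	.set_dmamode	= example_set_dmamode,
};

static int example_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.mwdma_mask	= 0x07,
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &example_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* .sht is no longer carried in ata_port_info; it is passed here instead */
	return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL);
}
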
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 1c4ff9b52b5c..73f8332cb679 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -221,55 +221,14 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | static struct scsi_host_template cs5536_sht = { | 223 | static struct scsi_host_template cs5536_sht = { |
224 | .module = THIS_MODULE, | 224 | ATA_BMDMA_SHT(DRV_NAME), |
225 | .name = DRV_NAME, | ||
226 | .ioctl = ata_scsi_ioctl, | ||
227 | .queuecommand = ata_scsi_queuecmd, | ||
228 | .can_queue = ATA_DEF_QUEUE, | ||
229 | .this_id = ATA_SHT_THIS_ID, | ||
230 | .sg_tablesize = LIBATA_MAX_PRD, | ||
231 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
232 | .emulated = ATA_SHT_EMULATED, | ||
233 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
234 | .proc_name = DRV_NAME, | ||
235 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
236 | .slave_configure = ata_scsi_slave_config, | ||
237 | .slave_destroy = ata_scsi_slave_destroy, | ||
238 | .bios_param = ata_std_bios_param, | ||
239 | }; | 225 | }; |
240 | 226 | ||
241 | static struct ata_port_operations cs5536_port_ops = { | 227 | static struct ata_port_operations cs5536_port_ops = { |
228 | .inherits = &ata_bmdma_port_ops, | ||
229 | .cable_detect = cs5536_cable_detect, | ||
242 | .set_piomode = cs5536_set_piomode, | 230 | .set_piomode = cs5536_set_piomode, |
243 | .set_dmamode = cs5536_set_dmamode, | 231 | .set_dmamode = cs5536_set_dmamode, |
244 | .mode_filter = ata_pci_default_filter, | ||
245 | |||
246 | .tf_load = ata_tf_load, | ||
247 | .tf_read = ata_tf_read, | ||
248 | .check_status = ata_check_status, | ||
249 | .exec_command = ata_exec_command, | ||
250 | .dev_select = ata_std_dev_select, | ||
251 | |||
252 | .freeze = ata_bmdma_freeze, | ||
253 | .thaw = ata_bmdma_thaw, | ||
254 | .error_handler = ata_bmdma_error_handler, | ||
255 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
256 | .cable_detect = cs5536_cable_detect, | ||
257 | |||
258 | .bmdma_setup = ata_bmdma_setup, | ||
259 | .bmdma_start = ata_bmdma_start, | ||
260 | .bmdma_stop = ata_bmdma_stop, | ||
261 | .bmdma_status = ata_bmdma_status, | ||
262 | |||
263 | .qc_prep = ata_qc_prep, | ||
264 | .qc_issue = ata_qc_issue_prot, | ||
265 | |||
266 | .data_xfer = ata_data_xfer, | ||
267 | |||
268 | .irq_handler = ata_interrupt, | ||
269 | .irq_clear = ata_bmdma_irq_clear, | ||
270 | .irq_on = ata_irq_on, | ||
271 | |||
272 | .port_start = ata_port_start, | ||
273 | }; | 232 | }; |
274 | 233 | ||
275 | /** | 234 | /** |
@@ -282,7 +241,6 @@ static struct ata_port_operations cs5536_port_ops = { | |||
282 | static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 241 | static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
283 | { | 242 | { |
284 | static const struct ata_port_info info = { | 243 | static const struct ata_port_info info = { |
285 | .sht = &cs5536_sht, | ||
286 | .flags = ATA_FLAG_SLAVE_POSS, | 244 | .flags = ATA_FLAG_SLAVE_POSS, |
287 | .pio_mask = 0x1f, | 245 | .pio_mask = 0x1f, |
288 | .mwdma_mask = 0x07, | 246 | .mwdma_mask = 0x07, |
@@ -303,7 +261,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
303 | return -ENODEV; | 261 | return -ENODEV; |
304 | } | 262 | } |
305 | 263 | ||
306 | return ata_pci_init_one(dev, ppi); | 264 | return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL); |
307 | } | 265 | } |
308 | 266 | ||
309 | static const struct pci_device_id cs5536[] = { | 267 | static const struct pci_device_id cs5536[] = { |
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index fc5f9c4e5d87..a9c3218e22fd 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -110,61 +110,19 @@ static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | static struct scsi_host_template cy82c693_sht = { | 112 | static struct scsi_host_template cy82c693_sht = { |
113 | .module = THIS_MODULE, | 113 | ATA_BMDMA_SHT(DRV_NAME), |
114 | .name = DRV_NAME, | ||
115 | .ioctl = ata_scsi_ioctl, | ||
116 | .queuecommand = ata_scsi_queuecmd, | ||
117 | .can_queue = ATA_DEF_QUEUE, | ||
118 | .this_id = ATA_SHT_THIS_ID, | ||
119 | .sg_tablesize = LIBATA_MAX_PRD, | ||
120 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
121 | .emulated = ATA_SHT_EMULATED, | ||
122 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
123 | .proc_name = DRV_NAME, | ||
124 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
125 | .slave_configure = ata_scsi_slave_config, | ||
126 | .slave_destroy = ata_scsi_slave_destroy, | ||
127 | .bios_param = ata_std_bios_param, | ||
128 | }; | 114 | }; |
129 | 115 | ||
130 | static struct ata_port_operations cy82c693_port_ops = { | 116 | static struct ata_port_operations cy82c693_port_ops = { |
117 | .inherits = &ata_bmdma_port_ops, | ||
118 | .cable_detect = ata_cable_40wire, | ||
131 | .set_piomode = cy82c693_set_piomode, | 119 | .set_piomode = cy82c693_set_piomode, |
132 | .set_dmamode = cy82c693_set_dmamode, | 120 | .set_dmamode = cy82c693_set_dmamode, |
133 | .mode_filter = ata_pci_default_filter, | ||
134 | |||
135 | .tf_load = ata_tf_load, | ||
136 | .tf_read = ata_tf_read, | ||
137 | .check_status = ata_check_status, | ||
138 | .exec_command = ata_exec_command, | ||
139 | .dev_select = ata_std_dev_select, | ||
140 | |||
141 | .freeze = ata_bmdma_freeze, | ||
142 | .thaw = ata_bmdma_thaw, | ||
143 | .error_handler = ata_bmdma_error_handler, | ||
144 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
145 | .cable_detect = ata_cable_40wire, | ||
146 | |||
147 | .bmdma_setup = ata_bmdma_setup, | ||
148 | .bmdma_start = ata_bmdma_start, | ||
149 | .bmdma_stop = ata_bmdma_stop, | ||
150 | .bmdma_status = ata_bmdma_status, | ||
151 | |||
152 | .qc_prep = ata_qc_prep, | ||
153 | .qc_issue = ata_qc_issue_prot, | ||
154 | |||
155 | .data_xfer = ata_data_xfer, | ||
156 | |||
157 | .irq_handler = ata_interrupt, | ||
158 | .irq_clear = ata_bmdma_irq_clear, | ||
159 | .irq_on = ata_irq_on, | ||
160 | |||
161 | .port_start = ata_sff_port_start, | ||
162 | }; | 121 | }; |
163 | 122 | ||
164 | static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 123 | static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
165 | { | 124 | { |
166 | static const struct ata_port_info info = { | 125 | static const struct ata_port_info info = { |
167 | .sht = &cy82c693_sht, | ||
168 | .flags = ATA_FLAG_SLAVE_POSS, | 126 | .flags = ATA_FLAG_SLAVE_POSS, |
169 | .pio_mask = 0x1f, | 127 | .pio_mask = 0x1f, |
170 | .mwdma_mask = 0x07, | 128 | .mwdma_mask = 0x07, |
@@ -178,7 +136,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i | |||
178 | if (PCI_FUNC(pdev->devfn) != 1) | 136 | if (PCI_FUNC(pdev->devfn) != 1) |
179 | return -ENODEV; | 137 | return -ENODEV; |
180 | 138 | ||
181 | return ata_pci_init_one(pdev, ppi); | 139 | return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL); |
182 | } | 140 | } |
183 | 141 | ||
184 | static const struct pci_device_id cy82c693[] = { | 142 | static const struct pci_device_id cy82c693[] = { |
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index dc33220fe5b2..9fba82976ba6 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -45,20 +45,7 @@ static int efar_pre_reset(struct ata_link *link, unsigned long deadline) | |||
45 | if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) | 45 | if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) |
46 | return -ENOENT; | 46 | return -ENOENT; |
47 | 47 | ||
48 | return ata_std_prereset(link, deadline); | 48 | return ata_sff_prereset(link, deadline); |
49 | } | ||
50 | |||
51 | /** | ||
52 | * efar_probe_reset - Probe specified port on PATA host controller | ||
53 | * @ap: Port to probe | ||
54 | * | ||
55 | * LOCKING: | ||
56 | * None (inherited from caller). | ||
57 | */ | ||
58 | |||
59 | static void efar_error_handler(struct ata_port *ap) | ||
60 | { | ||
61 | ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
62 | } | 49 | } |
63 | 50 | ||
64 | /** | 51 | /** |
@@ -233,53 +220,15 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
233 | } | 220 | } |
234 | 221 | ||
235 | static struct scsi_host_template efar_sht = { | 222 | static struct scsi_host_template efar_sht = { |
236 | .module = THIS_MODULE, | 223 | ATA_BMDMA_SHT(DRV_NAME), |
237 | .name = DRV_NAME, | ||
238 | .ioctl = ata_scsi_ioctl, | ||
239 | .queuecommand = ata_scsi_queuecmd, | ||
240 | .can_queue = ATA_DEF_QUEUE, | ||
241 | .this_id = ATA_SHT_THIS_ID, | ||
242 | .sg_tablesize = LIBATA_MAX_PRD, | ||
243 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
244 | .emulated = ATA_SHT_EMULATED, | ||
245 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
246 | .proc_name = DRV_NAME, | ||
247 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
248 | .slave_configure = ata_scsi_slave_config, | ||
249 | .slave_destroy = ata_scsi_slave_destroy, | ||
250 | .bios_param = ata_std_bios_param, | ||
251 | }; | 224 | }; |
252 | 225 | ||
253 | static const struct ata_port_operations efar_ops = { | 226 | static struct ata_port_operations efar_ops = { |
227 | .inherits = &ata_bmdma_port_ops, | ||
228 | .cable_detect = efar_cable_detect, | ||
254 | .set_piomode = efar_set_piomode, | 229 | .set_piomode = efar_set_piomode, |
255 | .set_dmamode = efar_set_dmamode, | 230 | .set_dmamode = efar_set_dmamode, |
256 | .mode_filter = ata_pci_default_filter, | 231 | .prereset = efar_pre_reset, |
257 | |||
258 | .tf_load = ata_tf_load, | ||
259 | .tf_read = ata_tf_read, | ||
260 | .check_status = ata_check_status, | ||
261 | .exec_command = ata_exec_command, | ||
262 | .dev_select = ata_std_dev_select, | ||
263 | |||
264 | .freeze = ata_bmdma_freeze, | ||
265 | .thaw = ata_bmdma_thaw, | ||
266 | .error_handler = efar_error_handler, | ||
267 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
268 | .cable_detect = efar_cable_detect, | ||
269 | |||
270 | .bmdma_setup = ata_bmdma_setup, | ||
271 | .bmdma_start = ata_bmdma_start, | ||
272 | .bmdma_stop = ata_bmdma_stop, | ||
273 | .bmdma_status = ata_bmdma_status, | ||
274 | .qc_prep = ata_qc_prep, | ||
275 | .qc_issue = ata_qc_issue_prot, | ||
276 | .data_xfer = ata_data_xfer, | ||
277 | |||
278 | .irq_handler = ata_interrupt, | ||
279 | .irq_clear = ata_bmdma_irq_clear, | ||
280 | .irq_on = ata_irq_on, | ||
281 | |||
282 | .port_start = ata_sff_port_start, | ||
283 | }; | 232 | }; |
284 | 233 | ||
285 | 234 | ||
@@ -301,7 +250,6 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
301 | { | 250 | { |
302 | static int printed_version; | 251 | static int printed_version; |
303 | static const struct ata_port_info info = { | 252 | static const struct ata_port_info info = { |
304 | .sht = &efar_sht, | ||
305 | .flags = ATA_FLAG_SLAVE_POSS, | 253 | .flags = ATA_FLAG_SLAVE_POSS, |
306 | .pio_mask = 0x1f, /* pio0-4 */ | 254 | .pio_mask = 0x1f, /* pio0-4 */ |
307 | .mwdma_mask = 0x07, /* mwdma1-2 */ | 255 | .mwdma_mask = 0x07, /* mwdma1-2 */ |
@@ -314,7 +262,7 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
314 | dev_printk(KERN_DEBUG, &pdev->dev, | 262 | dev_printk(KERN_DEBUG, &pdev->dev, |
315 | "version " DRV_VERSION "\n"); | 263 | "version " DRV_VERSION "\n"); |
316 | 264 | ||
317 | return ata_pci_init_one(pdev, ppi); | 265 | return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL); |
318 | } | 266 | } |
319 | 267 | ||
320 | static const struct pci_device_id efar_pci_tbl[] = { | 268 | static const struct pci_device_id efar_pci_tbl[] = { |
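
[Editor's note] In pata_efar (and the HPT drivers that follow) the per-driver error_handler wrapper around ata_bmdma_drive_eh() disappears: with inherited ops the driver only supplies a .prereset hook, and the inherited BMDMA error handler calls it together with the standard softreset/postreset. A sketch of that wiring in the new style; the example_* names and the register/bit values in example_enable_bits are assumptions for illustration, not EFAR values:

/* "Is this channel enabled?" bits -- placeholder values, driver-specific. */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },
	{ 0x43, 1, 0x80, 0x80 },
};

static int example_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &example_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);	/* was ata_std_prereset() */
}

static struct ata_port_operations example_eh_ops = {
	.inherits	= &ata_bmdma_port_ops,
	/* no .error_handler: ata_bmdma_error_handler is inherited and will
	 * invoke .prereset, the softreset and .postreset for us */
	.prereset	= example_pre_reset,
};
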
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index a742efa0da2b..f2b83eabc7c7 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -184,7 +184,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) | 184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) |
185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); | 185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); |
186 | } | 186 | } |
187 | return ata_pci_default_filter(adev, mask); | 187 | return ata_bmdma_mode_filter(adev, mask); |
188 | } | 188 | } |
189 | 189 | ||
190 | /** | 190 | /** |
@@ -290,21 +290,7 @@ static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
290 | } | 290 | } |
291 | 291 | ||
292 | static struct scsi_host_template hpt36x_sht = { | 292 | static struct scsi_host_template hpt36x_sht = { |
293 | .module = THIS_MODULE, | 293 | ATA_BMDMA_SHT(DRV_NAME), |
294 | .name = DRV_NAME, | ||
295 | .ioctl = ata_scsi_ioctl, | ||
296 | .queuecommand = ata_scsi_queuecmd, | ||
297 | .can_queue = ATA_DEF_QUEUE, | ||
298 | .this_id = ATA_SHT_THIS_ID, | ||
299 | .sg_tablesize = LIBATA_MAX_PRD, | ||
300 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
301 | .emulated = ATA_SHT_EMULATED, | ||
302 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
303 | .proc_name = DRV_NAME, | ||
304 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
305 | .slave_configure = ata_scsi_slave_config, | ||
306 | .slave_destroy = ata_scsi_slave_destroy, | ||
307 | .bios_param = ata_std_bios_param, | ||
308 | }; | 294 | }; |
309 | 295 | ||
310 | /* | 296 | /* |
@@ -312,37 +298,11 @@ static struct scsi_host_template hpt36x_sht = { | |||
312 | */ | 298 | */ |
313 | 299 | ||
314 | static struct ata_port_operations hpt366_port_ops = { | 300 | static struct ata_port_operations hpt366_port_ops = { |
301 | .inherits = &ata_bmdma_port_ops, | ||
302 | .cable_detect = hpt36x_cable_detect, | ||
303 | .mode_filter = hpt366_filter, | ||
315 | .set_piomode = hpt366_set_piomode, | 304 | .set_piomode = hpt366_set_piomode, |
316 | .set_dmamode = hpt366_set_dmamode, | 305 | .set_dmamode = hpt366_set_dmamode, |
317 | .mode_filter = hpt366_filter, | ||
318 | |||
319 | .tf_load = ata_tf_load, | ||
320 | .tf_read = ata_tf_read, | ||
321 | .check_status = ata_check_status, | ||
322 | .exec_command = ata_exec_command, | ||
323 | .dev_select = ata_std_dev_select, | ||
324 | |||
325 | .freeze = ata_bmdma_freeze, | ||
326 | .thaw = ata_bmdma_thaw, | ||
327 | .error_handler = ata_bmdma_error_handler, | ||
328 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
329 | .cable_detect = hpt36x_cable_detect, | ||
330 | |||
331 | .bmdma_setup = ata_bmdma_setup, | ||
332 | .bmdma_start = ata_bmdma_start, | ||
333 | .bmdma_stop = ata_bmdma_stop, | ||
334 | .bmdma_status = ata_bmdma_status, | ||
335 | |||
336 | .qc_prep = ata_qc_prep, | ||
337 | .qc_issue = ata_qc_issue_prot, | ||
338 | |||
339 | .data_xfer = ata_data_xfer, | ||
340 | |||
341 | .irq_handler = ata_interrupt, | ||
342 | .irq_clear = ata_bmdma_irq_clear, | ||
343 | .irq_on = ata_irq_on, | ||
344 | |||
345 | .port_start = ata_sff_port_start, | ||
346 | }; | 306 | }; |
347 | 307 | ||
348 | /** | 308 | /** |
@@ -390,18 +350,22 @@ static void hpt36x_init_chipset(struct pci_dev *dev) | |||
390 | static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 350 | static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
391 | { | 351 | { |
392 | static const struct ata_port_info info_hpt366 = { | 352 | static const struct ata_port_info info_hpt366 = { |
393 | .sht = &hpt36x_sht, | ||
394 | .flags = ATA_FLAG_SLAVE_POSS, | 353 | .flags = ATA_FLAG_SLAVE_POSS, |
395 | .pio_mask = 0x1f, | 354 | .pio_mask = 0x1f, |
396 | .mwdma_mask = 0x07, | 355 | .mwdma_mask = 0x07, |
397 | .udma_mask = ATA_UDMA4, | 356 | .udma_mask = ATA_UDMA4, |
398 | .port_ops = &hpt366_port_ops | 357 | .port_ops = &hpt366_port_ops |
399 | }; | 358 | }; |
400 | struct ata_port_info info = info_hpt366; | 359 | const struct ata_port_info *ppi[] = { &info_hpt366, NULL }; |
401 | const struct ata_port_info *ppi[] = { &info, NULL }; | ||
402 | 360 | ||
361 | void *hpriv = NULL; | ||
403 | u32 class_rev; | 362 | u32 class_rev; |
404 | u32 reg1; | 363 | u32 reg1; |
364 | int rc; | ||
365 | |||
366 | rc = pcim_enable_device(dev); | ||
367 | if (rc) | ||
368 | return rc; | ||
405 | 369 | ||
406 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); | 370 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); |
407 | class_rev &= 0xFF; | 371 | class_rev &= 0xFF; |
@@ -419,24 +383,31 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
419 | /* info_hpt366 is safe against re-entry so we can scribble on it */ | 383 | /* info_hpt366 is safe against re-entry so we can scribble on it */ |
420 | switch((reg1 & 0x700) >> 8) { | 384 | switch((reg1 & 0x700) >> 8) { |
421 | case 5: | 385 | case 5: |
422 | info.private_data = &hpt366_40; | 386 | hpriv = &hpt366_40; |
423 | break; | 387 | break; |
424 | case 9: | 388 | case 9: |
425 | info.private_data = &hpt366_25; | 389 | hpriv = &hpt366_25; |
426 | break; | 390 | break; |
427 | default: | 391 | default: |
428 | info.private_data = &hpt366_33; | 392 | hpriv = &hpt366_33; |
429 | break; | 393 | break; |
430 | } | 394 | } |
431 | /* Now kick off ATA set up */ | 395 | /* Now kick off ATA set up */ |
432 | return ata_pci_init_one(dev, ppi); | 396 | return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv); |
433 | } | 397 | } |
434 | 398 | ||
435 | #ifdef CONFIG_PM | 399 | #ifdef CONFIG_PM |
436 | static int hpt36x_reinit_one(struct pci_dev *dev) | 400 | static int hpt36x_reinit_one(struct pci_dev *dev) |
437 | { | 401 | { |
402 | struct ata_host *host = dev_get_drvdata(&dev->dev); | ||
403 | int rc; | ||
404 | |||
405 | rc = ata_pci_device_do_resume(dev); | ||
406 | if (rc) | ||
407 | return rc; | ||
438 | hpt36x_init_chipset(dev); | 408 | hpt36x_init_chipset(dev); |
439 | return ata_pci_device_resume(dev); | 409 | ata_host_resume(host); |
410 | return 0; | ||
440 | } | 411 | } |
441 | #endif | 412 | #endif |
442 | 413 | ||
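
[Editor's note] Two secondary aspects of the conversion show up in the hpt366 hunks: per-chip clock data that used to be written into a stack copy of ata_port_info (.private_data) is now handed to ata_pci_sff_init_one() as its host_priv argument, where it ends up in ata_host->private_data for the timing hooks to read, and because the new helpers no longer enable the PCI device implicitly, probe and resume call pcim_enable_device() / ata_pci_device_do_resume() themselves. A condensed sketch, reusing the example_sht and example_port_ops names from the earlier skeleton and a made-up example_clock_33 table:

/* Stand-in for the driver's real timing table (hpt366_33 etc. above). */
static u32 example_clock_33[] = { 0 };

static int example_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.mwdma_mask	= 0x07,
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &example_port_ops,	/* as sketched earlier */
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	void *hpriv = example_clock_33;		/* per-chip data, no info copy */
	int rc;

	rc = pcim_enable_device(dev);		/* helper no longer does this */
	if (rc)
		return rc;

	/* hpriv becomes host->private_data, visible to set_piomode/set_dmamode */
	return ata_pci_sff_init_one(dev, ppi, &example_sht, hpriv);
}
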
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9a10878b2ad8..42163998de9a 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -283,7 +283,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) | |||
283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
284 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); | 284 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
285 | } | 285 | } |
286 | return ata_pci_default_filter(adev, mask); | 286 | return ata_bmdma_mode_filter(adev, mask); |
287 | } | 287 | } |
288 | 288 | ||
289 | /** | 289 | /** |
@@ -299,7 +299,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) | |||
299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
300 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); | 300 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
301 | } | 301 | } |
302 | return ata_pci_default_filter(adev, mask); | 302 | return ata_bmdma_mode_filter(adev, mask); |
303 | } | 303 | } |
304 | 304 | ||
305 | /** | 305 | /** |
@@ -338,22 +338,10 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline) | |||
338 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 338 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
339 | udelay(100); | 339 | udelay(100); |
340 | 340 | ||
341 | return ata_std_prereset(link, deadline); | 341 | return ata_sff_prereset(link, deadline); |
342 | } | 342 | } |
343 | 343 | ||
344 | /** | 344 | static int hpt374_fn1_pre_reset(struct ata_link *link, unsigned long deadline) |
345 | * hpt37x_error_handler - reset the hpt374 | ||
346 | * @ap: ATA port to reset | ||
347 | * | ||
348 | * Perform probe for HPT37x, except for HPT374 channel 2 | ||
349 | */ | ||
350 | |||
351 | static void hpt37x_error_handler(struct ata_port *ap) | ||
352 | { | ||
353 | ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
354 | } | ||
355 | |||
356 | static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline) | ||
357 | { | 345 | { |
358 | static const struct pci_bits hpt37x_enable_bits[] = { | 346 | static const struct pci_bits hpt37x_enable_bits[] = { |
359 | { 0x50, 1, 0x04, 0x04 }, | 347 | { 0x50, 1, 0x04, 0x04 }, |
@@ -386,26 +374,7 @@ static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline) | |||
386 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 374 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
387 | udelay(100); | 375 | udelay(100); |
388 | 376 | ||
389 | return ata_std_prereset(link, deadline); | 377 | return ata_sff_prereset(link, deadline); |
390 | } | ||
391 | |||
392 | /** | ||
393 | * hpt374_error_handler - reset the hpt374 | ||
394 | * @classes: | ||
395 | * | ||
396 | * The 374 cable detect is a little different due to the extra | ||
397 | * channels. The function 0 channels work like usual but function 1 | ||
398 | * is special | ||
399 | */ | ||
400 | |||
401 | static void hpt374_error_handler(struct ata_port *ap) | ||
402 | { | ||
403 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
404 | |||
405 | if (!(PCI_FUNC(pdev->devfn) & 1)) | ||
406 | hpt37x_error_handler(ap); | ||
407 | else | ||
408 | ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
409 | } | 378 | } |
410 | 379 | ||
411 | /** | 380 | /** |
@@ -619,21 +588,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) | |||
619 | 588 | ||
620 | 589 | ||
621 | static struct scsi_host_template hpt37x_sht = { | 590 | static struct scsi_host_template hpt37x_sht = { |
622 | .module = THIS_MODULE, | 591 | ATA_BMDMA_SHT(DRV_NAME), |
623 | .name = DRV_NAME, | ||
624 | .ioctl = ata_scsi_ioctl, | ||
625 | .queuecommand = ata_scsi_queuecmd, | ||
626 | .can_queue = ATA_DEF_QUEUE, | ||
627 | .this_id = ATA_SHT_THIS_ID, | ||
628 | .sg_tablesize = LIBATA_MAX_PRD, | ||
629 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
630 | .emulated = ATA_SHT_EMULATED, | ||
631 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
632 | .proc_name = DRV_NAME, | ||
633 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
634 | .slave_configure = ata_scsi_slave_config, | ||
635 | .slave_destroy = ata_scsi_slave_destroy, | ||
636 | .bios_param = ata_std_bios_param, | ||
637 | }; | 592 | }; |
638 | 593 | ||
639 | /* | 594 | /* |
@@ -641,36 +596,15 @@ static struct scsi_host_template hpt37x_sht = { | |||
641 | */ | 596 | */ |
642 | 597 | ||
643 | static struct ata_port_operations hpt370_port_ops = { | 598 | static struct ata_port_operations hpt370_port_ops = { |
644 | .set_piomode = hpt370_set_piomode, | 599 | .inherits = &ata_bmdma_port_ops, |
645 | .set_dmamode = hpt370_set_dmamode, | ||
646 | .mode_filter = hpt370_filter, | ||
647 | |||
648 | .tf_load = ata_tf_load, | ||
649 | .tf_read = ata_tf_read, | ||
650 | .check_status = ata_check_status, | ||
651 | .exec_command = ata_exec_command, | ||
652 | .dev_select = ata_std_dev_select, | ||
653 | 600 | ||
654 | .freeze = ata_bmdma_freeze, | ||
655 | .thaw = ata_bmdma_thaw, | ||
656 | .error_handler = hpt37x_error_handler, | ||
657 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
658 | |||
659 | .bmdma_setup = ata_bmdma_setup, | ||
660 | .bmdma_start = hpt370_bmdma_start, | 601 | .bmdma_start = hpt370_bmdma_start, |
661 | .bmdma_stop = hpt370_bmdma_stop, | 602 | .bmdma_stop = hpt370_bmdma_stop, |
662 | .bmdma_status = ata_bmdma_status, | ||
663 | |||
664 | .qc_prep = ata_qc_prep, | ||
665 | .qc_issue = ata_qc_issue_prot, | ||
666 | |||
667 | .data_xfer = ata_data_xfer, | ||
668 | |||
669 | .irq_handler = ata_interrupt, | ||
670 | .irq_clear = ata_bmdma_irq_clear, | ||
671 | .irq_on = ata_irq_on, | ||
672 | 603 | ||
673 | .port_start = ata_sff_port_start, | 604 | .mode_filter = hpt370_filter, |
605 | .set_piomode = hpt370_set_piomode, | ||
606 | .set_dmamode = hpt370_set_dmamode, | ||
607 | .prereset = hpt37x_pre_reset, | ||
674 | }; | 608 | }; |
675 | 609 | ||
676 | /* | 610 | /* |
@@ -678,36 +612,8 @@ static struct ata_port_operations hpt370_port_ops = { | |||
678 | */ | 612 | */ |
679 | 613 | ||
680 | static struct ata_port_operations hpt370a_port_ops = { | 614 | static struct ata_port_operations hpt370a_port_ops = { |
681 | .set_piomode = hpt370_set_piomode, | 615 | .inherits = &hpt370_port_ops, |
682 | .set_dmamode = hpt370_set_dmamode, | ||
683 | .mode_filter = hpt370a_filter, | 616 | .mode_filter = hpt370a_filter, |
684 | |||
685 | .tf_load = ata_tf_load, | ||
686 | .tf_read = ata_tf_read, | ||
687 | .check_status = ata_check_status, | ||
688 | .exec_command = ata_exec_command, | ||
689 | .dev_select = ata_std_dev_select, | ||
690 | |||
691 | .freeze = ata_bmdma_freeze, | ||
692 | .thaw = ata_bmdma_thaw, | ||
693 | .error_handler = hpt37x_error_handler, | ||
694 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
695 | |||
696 | .bmdma_setup = ata_bmdma_setup, | ||
697 | .bmdma_start = hpt370_bmdma_start, | ||
698 | .bmdma_stop = hpt370_bmdma_stop, | ||
699 | .bmdma_status = ata_bmdma_status, | ||
700 | |||
701 | .qc_prep = ata_qc_prep, | ||
702 | .qc_issue = ata_qc_issue_prot, | ||
703 | |||
704 | .data_xfer = ata_data_xfer, | ||
705 | |||
706 | .irq_handler = ata_interrupt, | ||
707 | .irq_clear = ata_bmdma_irq_clear, | ||
708 | .irq_on = ata_irq_on, | ||
709 | |||
710 | .port_start = ata_sff_port_start, | ||
711 | }; | 617 | }; |
712 | 618 | ||
713 | /* | 619 | /* |
@@ -716,74 +622,23 @@ static struct ata_port_operations hpt370a_port_ops = { | |||
716 | */ | 622 | */ |
717 | 623 | ||
718 | static struct ata_port_operations hpt372_port_ops = { | 624 | static struct ata_port_operations hpt372_port_ops = { |
719 | .set_piomode = hpt372_set_piomode, | 625 | .inherits = &ata_bmdma_port_ops, |
720 | .set_dmamode = hpt372_set_dmamode, | ||
721 | .mode_filter = ata_pci_default_filter, | ||
722 | |||
723 | .tf_load = ata_tf_load, | ||
724 | .tf_read = ata_tf_read, | ||
725 | .check_status = ata_check_status, | ||
726 | .exec_command = ata_exec_command, | ||
727 | .dev_select = ata_std_dev_select, | ||
728 | 626 | ||
729 | .freeze = ata_bmdma_freeze, | ||
730 | .thaw = ata_bmdma_thaw, | ||
731 | .error_handler = hpt37x_error_handler, | ||
732 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
733 | |||
734 | .bmdma_setup = ata_bmdma_setup, | ||
735 | .bmdma_start = ata_bmdma_start, | ||
736 | .bmdma_stop = hpt37x_bmdma_stop, | 627 | .bmdma_stop = hpt37x_bmdma_stop, |
737 | .bmdma_status = ata_bmdma_status, | ||
738 | |||
739 | .qc_prep = ata_qc_prep, | ||
740 | .qc_issue = ata_qc_issue_prot, | ||
741 | |||
742 | .data_xfer = ata_data_xfer, | ||
743 | |||
744 | .irq_handler = ata_interrupt, | ||
745 | .irq_clear = ata_bmdma_irq_clear, | ||
746 | .irq_on = ata_irq_on, | ||
747 | 628 | ||
748 | .port_start = ata_sff_port_start, | 629 | .set_piomode = hpt372_set_piomode, |
630 | .set_dmamode = hpt372_set_dmamode, | ||
631 | .prereset = hpt37x_pre_reset, | ||
749 | }; | 632 | }; |
750 | 633 | ||
751 | /* | 634 | /* |
752 | * Configuration for HPT374. Mode setting works like 372 and friends | 635 | * Configuration for HPT374. Mode setting works like 372 and friends |
753 | * but we have a different cable detection procedure. | 636 | * but we have a different cable detection procedure for function 1. |
754 | */ | 637 | */ |
755 | 638 | ||
756 | static struct ata_port_operations hpt374_port_ops = { | 639 | static struct ata_port_operations hpt374_fn1_port_ops = { |
757 | .set_piomode = hpt372_set_piomode, | 640 | .inherits = &hpt372_port_ops, |
758 | .set_dmamode = hpt372_set_dmamode, | 641 | .prereset = hpt374_fn1_pre_reset, |
759 | .mode_filter = ata_pci_default_filter, | ||
760 | |||
761 | .tf_load = ata_tf_load, | ||
762 | .tf_read = ata_tf_read, | ||
763 | .check_status = ata_check_status, | ||
764 | .exec_command = ata_exec_command, | ||
765 | .dev_select = ata_std_dev_select, | ||
766 | |||
767 | .freeze = ata_bmdma_freeze, | ||
768 | .thaw = ata_bmdma_thaw, | ||
769 | .error_handler = hpt374_error_handler, | ||
770 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
771 | |||
772 | .bmdma_setup = ata_bmdma_setup, | ||
773 | .bmdma_start = ata_bmdma_start, | ||
774 | .bmdma_stop = hpt37x_bmdma_stop, | ||
775 | .bmdma_status = ata_bmdma_status, | ||
776 | |||
777 | .qc_prep = ata_qc_prep, | ||
778 | .qc_issue = ata_qc_issue_prot, | ||
779 | |||
780 | .data_xfer = ata_data_xfer, | ||
781 | |||
782 | .irq_handler = ata_interrupt, | ||
783 | .irq_clear = ata_bmdma_irq_clear, | ||
784 | .irq_on = ata_irq_on, | ||
785 | |||
786 | .port_start = ata_sff_port_start, | ||
787 | }; | 642 | }; |
788 | 643 | ||
789 | /** | 644 | /** |
@@ -897,7 +752,6 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
897 | { | 752 | { |
898 | /* HPT370 - UDMA100 */ | 753 | /* HPT370 - UDMA100 */ |
899 | static const struct ata_port_info info_hpt370 = { | 754 | static const struct ata_port_info info_hpt370 = { |
900 | .sht = &hpt37x_sht, | ||
901 | .flags = ATA_FLAG_SLAVE_POSS, | 755 | .flags = ATA_FLAG_SLAVE_POSS, |
902 | .pio_mask = 0x1f, | 756 | .pio_mask = 0x1f, |
903 | .mwdma_mask = 0x07, | 757 | .mwdma_mask = 0x07, |
@@ -906,7 +760,6 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
906 | }; | 760 | }; |
907 | /* HPT370A - UDMA100 */ | 761 | /* HPT370A - UDMA100 */ |
908 | static const struct ata_port_info info_hpt370a = { | 762 | static const struct ata_port_info info_hpt370a = { |
909 | .sht = &hpt37x_sht, | ||
910 | .flags = ATA_FLAG_SLAVE_POSS, | 763 | .flags = ATA_FLAG_SLAVE_POSS, |
911 | .pio_mask = 0x1f, | 764 | .pio_mask = 0x1f, |
912 | .mwdma_mask = 0x07, | 765 | .mwdma_mask = 0x07, |
@@ -915,7 +768,6 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
915 | }; | 768 | }; |
916 | /* HPT370 - UDMA100 */ | 769 | /* HPT370 - UDMA100 */ |
917 | static const struct ata_port_info info_hpt370_33 = { | 770 | static const struct ata_port_info info_hpt370_33 = { |
918 | .sht = &hpt37x_sht, | ||
919 | .flags = ATA_FLAG_SLAVE_POSS, | 771 | .flags = ATA_FLAG_SLAVE_POSS, |
920 | .pio_mask = 0x1f, | 772 | .pio_mask = 0x1f, |
921 | .mwdma_mask = 0x07, | 773 | .mwdma_mask = 0x07, |
@@ -924,7 +776,6 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
924 | }; | 776 | }; |
925 | /* HPT370A - UDMA100 */ | 777 | /* HPT370A - UDMA100 */ |
926 | static const struct ata_port_info info_hpt370a_33 = { | 778 | static const struct ata_port_info info_hpt370a_33 = { |
927 | .sht = &hpt37x_sht, | ||
928 | .flags = ATA_FLAG_SLAVE_POSS, | 779 | .flags = ATA_FLAG_SLAVE_POSS, |
929 | .pio_mask = 0x1f, | 780 | .pio_mask = 0x1f, |
930 | .mwdma_mask = 0x07, | 781 | .mwdma_mask = 0x07, |
@@ -933,28 +784,31 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
933 | }; | 784 | }; |
934 | /* HPT371, 372 and friends - UDMA133 */ | 785 | /* HPT371, 372 and friends - UDMA133 */ |
935 | static const struct ata_port_info info_hpt372 = { | 786 | static const struct ata_port_info info_hpt372 = { |
936 | .sht = &hpt37x_sht, | ||
937 | .flags = ATA_FLAG_SLAVE_POSS, | 787 | .flags = ATA_FLAG_SLAVE_POSS, |
938 | .pio_mask = 0x1f, | 788 | .pio_mask = 0x1f, |
939 | .mwdma_mask = 0x07, | 789 | .mwdma_mask = 0x07, |
940 | .udma_mask = ATA_UDMA6, | 790 | .udma_mask = ATA_UDMA6, |
941 | .port_ops = &hpt372_port_ops | 791 | .port_ops = &hpt372_port_ops |
942 | }; | 792 | }; |
943 | /* HPT374 - UDMA100 */ | 793 | /* HPT374 - UDMA100, function 1 uses different prereset method */ |
944 | static const struct ata_port_info info_hpt374 = { | 794 | static const struct ata_port_info info_hpt374_fn0 = { |
945 | .sht = &hpt37x_sht, | ||
946 | .flags = ATA_FLAG_SLAVE_POSS, | 795 | .flags = ATA_FLAG_SLAVE_POSS, |
947 | .pio_mask = 0x1f, | 796 | .pio_mask = 0x1f, |
948 | .mwdma_mask = 0x07, | 797 | .mwdma_mask = 0x07, |
949 | .udma_mask = ATA_UDMA5, | 798 | .udma_mask = ATA_UDMA5, |
950 | .port_ops = &hpt374_port_ops | 799 | .port_ops = &hpt372_port_ops |
800 | }; | ||
801 | static const struct ata_port_info info_hpt374_fn1 = { | ||
802 | .flags = ATA_FLAG_SLAVE_POSS, | ||
803 | .pio_mask = 0x1f, | ||
804 | .mwdma_mask = 0x07, | ||
805 | .udma_mask = ATA_UDMA5, | ||
806 | .port_ops = &hpt374_fn1_port_ops | ||
951 | }; | 807 | }; |
952 | 808 | ||
953 | static const int MHz[4] = { 33, 40, 50, 66 }; | 809 | static const int MHz[4] = { 33, 40, 50, 66 }; |
954 | const struct ata_port_info *port; | ||
955 | void *private_data = NULL; | 810 | void *private_data = NULL; |
956 | struct ata_port_info port_info; | 811 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
957 | const struct ata_port_info *ppi[] = { &port_info, NULL }; | ||
958 | 812 | ||
959 | u8 irqmask; | 813 | u8 irqmask; |
960 | u32 class_rev; | 814 | u32 class_rev; |
@@ -966,6 +820,11 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
966 | 820 | ||
967 | const struct hpt_chip *chip_table; | 821 | const struct hpt_chip *chip_table; |
968 | int clock_slot; | 822 | int clock_slot; |
823 | int rc; | ||
824 | |||
825 | rc = pcim_enable_device(dev); | ||
826 | if (rc) | ||
827 | return rc; | ||
969 | 828 | ||
970 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); | 829 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); |
971 | class_rev &= 0xFF; | 830 | class_rev &= 0xFF; |
@@ -981,17 +840,17 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
981 | 840 | ||
982 | switch(class_rev) { | 841 | switch(class_rev) { |
983 | case 3: | 842 | case 3: |
984 | port = &info_hpt370; | 843 | ppi[0] = &info_hpt370; |
985 | chip_table = &hpt370; | 844 | chip_table = &hpt370; |
986 | prefer_dpll = 0; | 845 | prefer_dpll = 0; |
987 | break; | 846 | break; |
988 | case 4: | 847 | case 4: |
989 | port = &info_hpt370a; | 848 | ppi[0] = &info_hpt370a; |
990 | chip_table = &hpt370a; | 849 | chip_table = &hpt370a; |
991 | prefer_dpll = 0; | 850 | prefer_dpll = 0; |
992 | break; | 851 | break; |
993 | case 5: | 852 | case 5: |
994 | port = &info_hpt372; | 853 | ppi[0] = &info_hpt372; |
995 | chip_table = &hpt372; | 854 | chip_table = &hpt372; |
996 | break; | 855 | break; |
997 | default: | 856 | default: |
@@ -1004,21 +863,21 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
1004 | /* 372N if rev >= 2*/ | 863 | /* 372N if rev >= 2*/ |
1005 | if (class_rev >= 2) | 864 | if (class_rev >= 2) |
1006 | return -ENODEV; | 865 | return -ENODEV; |
1007 | port = &info_hpt372; | 866 | ppi[0] = &info_hpt372; |
1008 | chip_table = &hpt372a; | 867 | chip_table = &hpt372a; |
1009 | break; | 868 | break; |
1010 | case PCI_DEVICE_ID_TTI_HPT302: | 869 | case PCI_DEVICE_ID_TTI_HPT302: |
1011 | /* 302N if rev > 1 */ | 870 | /* 302N if rev > 1 */ |
1012 | if (class_rev > 1) | 871 | if (class_rev > 1) |
1013 | return -ENODEV; | 872 | return -ENODEV; |
1014 | port = &info_hpt372; | 873 | ppi[0] = &info_hpt372; |
1015 | /* Check this */ | 874 | /* Check this */ |
1016 | chip_table = &hpt302; | 875 | chip_table = &hpt302; |
1017 | break; | 876 | break; |
1018 | case PCI_DEVICE_ID_TTI_HPT371: | 877 | case PCI_DEVICE_ID_TTI_HPT371: |
1019 | if (class_rev > 1) | 878 | if (class_rev > 1) |
1020 | return -ENODEV; | 879 | return -ENODEV; |
1021 | port = &info_hpt372; | 880 | ppi[0] = &info_hpt372; |
1022 | chip_table = &hpt371; | 881 | chip_table = &hpt371; |
1023 | /* Single channel device, master is not present | 882 | /* Single channel device, master is not present |
1024 | but the BIOS (or us for non x86) must mark it | 883 | but the BIOS (or us for non x86) must mark it |
@@ -1029,7 +888,10 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
1029 | break; | 888 | break; |
1030 | case PCI_DEVICE_ID_TTI_HPT374: | 889 | case PCI_DEVICE_ID_TTI_HPT374: |
1031 | chip_table = &hpt374; | 890 | chip_table = &hpt374; |
1032 | port = &info_hpt374; | 891 | if (!(PCI_FUNC(dev->devfn) & 1)) |
892 | *ppi = &info_hpt374_fn0; | ||
893 | else | ||
894 | *ppi = &info_hpt374_fn1; | ||
1033 | break; | 895 | break; |
1034 | default: | 896 | default: |
1035 | printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device); | 897 | printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device); |
@@ -1108,7 +970,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
1108 | int dpll, adjust; | 970 | int dpll, adjust; |
1109 | 971 | ||
1110 | /* Compute DPLL */ | 972 | /* Compute DPLL */ |
1111 | dpll = (port->udma_mask & 0xC0) ? 3 : 2; | 973 | dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2; |
1112 | 974 | ||
1113 | f_low = (MHz[clock_slot] * 48) / MHz[dpll]; | 975 | f_low = (MHz[clock_slot] * 48) / MHz[dpll]; |
1114 | f_high = f_low + 2; | 976 | f_high = f_low + 2; |
@@ -1148,19 +1010,16 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
1148 | * about lack of UDMA133 support on lower clocks | 1010 | * about lack of UDMA133 support on lower clocks |
1149 | */ | 1011 | */ |
1150 | 1012 | ||
1151 | if (clock_slot < 2 && port == &info_hpt370) | 1013 | if (clock_slot < 2 && ppi[0] == &info_hpt370) |
1152 | port = &info_hpt370_33; | 1014 | ppi[0] = &info_hpt370_33; |
1153 | if (clock_slot < 2 && port == &info_hpt370a) | 1015 | if (clock_slot < 2 && ppi[0] == &info_hpt370a) |
1154 | port = &info_hpt370a_33; | 1016 | ppi[0] = &info_hpt370a_33; |
1155 | printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n", | 1017 | printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n", |
1156 | chip_table->name, MHz[clock_slot]); | 1018 | chip_table->name, MHz[clock_slot]); |
1157 | } | 1019 | } |
1158 | 1020 | ||
1159 | /* Now kick off ATA set up */ | 1021 | /* Now kick off ATA set up */ |
1160 | port_info = *port; | 1022 | return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data); |
1161 | port_info.private_data = private_data; | ||
1162 | |||
1163 | return ata_pci_init_one(dev, ppi); | ||
1164 | } | 1023 | } |
1165 | 1024 | ||
1166 | static const struct pci_device_id hpt37x[] = { | 1025 | static const struct pci_device_id hpt37x[] = { |
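
[Editor's note] pata_hpt37x shows the other benefit of .inherits: whole ata_port_operations tables can be layered, so hpt370a only overrides .mode_filter on top of hpt370_port_ops, HPT374 function 1 only overrides .prereset on top of hpt372_port_ops, and probe simply picks the right ata_port_info per PCI function. A small sketch of that layering with hypothetical example_* names; example_pre_reset is the hook sketched earlier:

/* Base ops for a chip family: everything else comes from ata_bmdma_port_ops. */
static struct ata_port_operations example_base_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.prereset	= example_pre_reset,
};

/* Variant: same as base, different mode filter (stands in for hpt370a_filter). */
static struct ata_port_operations example_variant_ops = {
	.inherits	= &example_base_ops,
	.mode_filter	= ata_bmdma_mode_filter,
};

static void example_pick_info(struct pci_dev *dev, const struct ata_port_info **ppi)
{
	static const struct ata_port_info info_fn0 = {
		.flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f,
		.mwdma_mask = 0x07, .udma_mask = ATA_UDMA5,
		.port_ops = &example_base_ops,
	};
	static const struct ata_port_info info_fn1 = {
		.flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f,
		.mwdma_mask = 0x07, .udma_mask = ATA_UDMA5,
		.port_ops = &example_variant_ops,
	};

	/* like HPT374: the odd PCI function needs the variant reset path */
	ppi[0] = (PCI_FUNC(dev->devfn) & 1) ? &info_fn1 : &info_fn0;
}
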
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 9f1c084f846f..d5c9fd7b82bb 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -148,7 +148,7 @@ static int hpt3x2n_cable_detect(struct ata_port *ap) | |||
148 | * Reset the hardware and state machine, | 148 | * Reset the hardware and state machine, |
149 | */ | 149 | */ |
150 | 150 | ||
151 | static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline) | 151 | static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline) |
152 | { | 152 | { |
153 | struct ata_port *ap = link->ap; | 153 | struct ata_port *ap = link->ap; |
154 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 154 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
@@ -156,19 +156,7 @@ static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline) | |||
156 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); | 156 | pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
157 | udelay(100); | 157 | udelay(100); |
158 | 158 | ||
159 | return ata_std_prereset(link, deadline); | 159 | return ata_sff_prereset(link, deadline); |
160 | } | ||
161 | |||
162 | /** | ||
163 | * hpt3x2n_error_handler - probe the hpt3x2n bus | ||
164 | * @ap: ATA port to reset | ||
165 | * | ||
166 | * Perform the probe reset handling for the 3x2N | ||
167 | */ | ||
168 | |||
169 | static void hpt3x2n_error_handler(struct ata_port *ap) | ||
170 | { | ||
171 | ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
172 | } | 160 | } |
173 | 161 | ||
174 | /** | 162 | /** |
@@ -320,7 +308,7 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) | |||
320 | return 0; | 308 | return 0; |
321 | } | 309 | } |
322 | 310 | ||
323 | static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc) | 311 | static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) |
324 | { | 312 | { |
325 | struct ata_taskfile *tf = &qc->tf; | 313 | struct ata_taskfile *tf = &qc->tf; |
326 | struct ata_port *ap = qc->ap; | 314 | struct ata_port *ap = qc->ap; |
@@ -335,25 +323,11 @@ static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc) | |||
335 | hpt3x2n_set_clock(ap, 0x23); | 323 | hpt3x2n_set_clock(ap, 0x23); |
336 | } | 324 | } |
337 | } | 325 | } |
338 | return ata_qc_issue_prot(qc); | 326 | return ata_sff_qc_issue(qc); |
339 | } | 327 | } |
340 | 328 | ||
341 | static struct scsi_host_template hpt3x2n_sht = { | 329 | static struct scsi_host_template hpt3x2n_sht = { |
342 | .module = THIS_MODULE, | 330 | ATA_BMDMA_SHT(DRV_NAME), |
343 | .name = DRV_NAME, | ||
344 | .ioctl = ata_scsi_ioctl, | ||
345 | .queuecommand = ata_scsi_queuecmd, | ||
346 | .can_queue = ATA_DEF_QUEUE, | ||
347 | .this_id = ATA_SHT_THIS_ID, | ||
348 | .sg_tablesize = LIBATA_MAX_PRD, | ||
349 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
350 | .emulated = ATA_SHT_EMULATED, | ||
351 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
352 | .proc_name = DRV_NAME, | ||
353 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
354 | .slave_configure = ata_scsi_slave_config, | ||
355 | .slave_destroy = ata_scsi_slave_destroy, | ||
356 | .bios_param = ata_std_bios_param, | ||
357 | }; | 331 | }; |
358 | 332 | ||
359 | /* | 333 | /* |
@@ -361,37 +335,15 @@ static struct scsi_host_template hpt3x2n_sht = { | |||
361 | */ | 335 | */ |
362 | 336 | ||
363 | static struct ata_port_operations hpt3x2n_port_ops = { | 337 | static struct ata_port_operations hpt3x2n_port_ops = { |
364 | .set_piomode = hpt3x2n_set_piomode, | 338 | .inherits = &ata_bmdma_port_ops, |
365 | .set_dmamode = hpt3x2n_set_dmamode, | ||
366 | .mode_filter = ata_pci_default_filter, | ||
367 | |||
368 | .tf_load = ata_tf_load, | ||
369 | .tf_read = ata_tf_read, | ||
370 | .check_status = ata_check_status, | ||
371 | .exec_command = ata_exec_command, | ||
372 | .dev_select = ata_std_dev_select, | ||
373 | |||
374 | .freeze = ata_bmdma_freeze, | ||
375 | .thaw = ata_bmdma_thaw, | ||
376 | .error_handler = hpt3x2n_error_handler, | ||
377 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
378 | .cable_detect = hpt3x2n_cable_detect, | ||
379 | 339 | ||
380 | .bmdma_setup = ata_bmdma_setup, | ||
381 | .bmdma_start = ata_bmdma_start, | ||
382 | .bmdma_stop = hpt3x2n_bmdma_stop, | 340 | .bmdma_stop = hpt3x2n_bmdma_stop, |
383 | .bmdma_status = ata_bmdma_status, | 341 | .qc_issue = hpt3x2n_qc_issue, |
384 | |||
385 | .qc_prep = ata_qc_prep, | ||
386 | .qc_issue = hpt3x2n_qc_issue_prot, | ||
387 | |||
388 | .data_xfer = ata_data_xfer, | ||
389 | |||
390 | .irq_handler = ata_interrupt, | ||
391 | .irq_clear = ata_bmdma_irq_clear, | ||
392 | .irq_on = ata_irq_on, | ||
393 | 342 | ||
394 | .port_start = ata_sff_port_start, | 343 | .cable_detect = hpt3x2n_cable_detect, |
344 | .set_piomode = hpt3x2n_set_piomode, | ||
345 | .set_dmamode = hpt3x2n_set_dmamode, | ||
346 | .prereset = hpt3x2n_pre_reset, | ||
395 | }; | 347 | }; |
396 | 348 | ||
397 | /** | 349 | /** |
@@ -488,15 +440,13 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
488 | { | 440 | { |
489 | /* HPT372N and friends - UDMA133 */ | 441 | /* HPT372N and friends - UDMA133 */ |
490 | static const struct ata_port_info info = { | 442 | static const struct ata_port_info info = { |
491 | .sht = &hpt3x2n_sht, | ||
492 | .flags = ATA_FLAG_SLAVE_POSS, | 443 | .flags = ATA_FLAG_SLAVE_POSS, |
493 | .pio_mask = 0x1f, | 444 | .pio_mask = 0x1f, |
494 | .mwdma_mask = 0x07, | 445 | .mwdma_mask = 0x07, |
495 | .udma_mask = ATA_UDMA6, | 446 | .udma_mask = ATA_UDMA6, |
496 | .port_ops = &hpt3x2n_port_ops | 447 | .port_ops = &hpt3x2n_port_ops |
497 | }; | 448 | }; |
498 | struct ata_port_info port = info; | 449 | const struct ata_port_info *ppi[] = { &info, NULL }; |
499 | const struct ata_port_info *ppi[] = { &port, NULL }; | ||
500 | 450 | ||
501 | u8 irqmask; | 451 | u8 irqmask; |
502 | u32 class_rev; | 452 | u32 class_rev; |
@@ -505,6 +455,12 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
505 | unsigned int f_low, f_high; | 455 | unsigned int f_low, f_high; |
506 | int adjust; | 456 | int adjust; |
507 | unsigned long iobase = pci_resource_start(dev, 4); | 457 | unsigned long iobase = pci_resource_start(dev, 4); |
458 | void *hpriv = NULL; | ||
459 | int rc; | ||
460 | |||
461 | rc = pcim_enable_device(dev); | ||
462 | if (rc) | ||
463 | return rc; | ||
508 | 464 | ||
509 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); | 465 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); |
510 | class_rev &= 0xFF; | 466 | class_rev &= 0xFF; |
@@ -586,9 +542,8 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
586 | pci_mhz); | 542 | pci_mhz); |
587 | /* Set our private data up. We only need a few flags so we use | 543 | /* Set our private data up. We only need a few flags so we use |
588 | it directly */ | 544 | it directly */ |
589 | port.private_data = NULL; | ||
590 | if (pci_mhz > 60) { | 545 | if (pci_mhz > 60) { |
591 | port.private_data = (void *)PCI66; | 546 | hpriv = (void *)PCI66; |
592 | /* | 547 | /* |
593 | * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in | 548 | * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in |
594 | * the MISC. register to stretch the UltraDMA Tss timing. | 549 | * the MISC. register to stretch the UltraDMA Tss timing. |
@@ -599,7 +554,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
599 | } | 554 | } |
600 | 555 | ||
601 | /* Now kick off ATA set up */ | 556 | /* Now kick off ATA set up */ |
602 | return ata_pci_init_one(dev, ppi); | 557 | return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv); |
603 | } | 558 | } |
604 | 559 | ||
605 | static const struct pci_device_id hpt3x2n[] = { | 560 | static const struct pci_device_id hpt3x2n[] = { |
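
[Editor's note] The hpt3x2n hunk keeps a driver-private qc_issue, but it now wraps the renamed ata_sff_qc_issue() instead of ata_qc_issue_prot(); everything else falls through to the inherited BMDMA ops. A sketch of that wrapper shape, with the DPLL clock juggling reduced to a hypothetical example_fixup():

/* Hypothetical per-command fixup standing in for hpt3x2n's clock switch. */
static void example_fixup(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* chip-specific work before the command is actually issued */
}

static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
	example_fixup(qc->ap, qc);
	return ata_sff_qc_issue(qc);	/* renamed from ata_qc_issue_prot() */
}

static struct ata_port_operations example_qc_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_issue	= example_qc_issue,	/* only this hook is overridden */
};
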
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index cb8bdb6887de..f11a320337c0 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -102,58 +102,17 @@ static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | static struct scsi_host_template hpt3x3_sht = { | 104 | static struct scsi_host_template hpt3x3_sht = { |
105 | .module = THIS_MODULE, | 105 | ATA_BMDMA_SHT(DRV_NAME), |
106 | .name = DRV_NAME, | ||
107 | .ioctl = ata_scsi_ioctl, | ||
108 | .queuecommand = ata_scsi_queuecmd, | ||
109 | .can_queue = ATA_DEF_QUEUE, | ||
110 | .this_id = ATA_SHT_THIS_ID, | ||
111 | .sg_tablesize = LIBATA_MAX_PRD, | ||
112 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
113 | .emulated = ATA_SHT_EMULATED, | ||
114 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
115 | .proc_name = DRV_NAME, | ||
116 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
117 | .slave_configure = ata_scsi_slave_config, | ||
118 | .slave_destroy = ata_scsi_slave_destroy, | ||
119 | .bios_param = ata_std_bios_param, | ||
120 | }; | 106 | }; |
121 | 107 | ||
122 | static struct ata_port_operations hpt3x3_port_ops = { | 108 | static struct ata_port_operations hpt3x3_port_ops = { |
109 | .inherits = &ata_bmdma_port_ops, | ||
110 | .check_atapi_dma= hpt3x3_atapi_dma, | ||
111 | .cable_detect = ata_cable_40wire, | ||
123 | .set_piomode = hpt3x3_set_piomode, | 112 | .set_piomode = hpt3x3_set_piomode, |
124 | #if defined(CONFIG_PATA_HPT3X3_DMA) | 113 | #if defined(CONFIG_PATA_HPT3X3_DMA) |
125 | .set_dmamode = hpt3x3_set_dmamode, | 114 | .set_dmamode = hpt3x3_set_dmamode, |
126 | #endif | 115 | #endif |
127 | .mode_filter = ata_pci_default_filter, | ||
128 | |||
129 | .tf_load = ata_tf_load, | ||
130 | .tf_read = ata_tf_read, | ||
131 | .check_status = ata_check_status, | ||
132 | .exec_command = ata_exec_command, | ||
133 | .dev_select = ata_std_dev_select, | ||
134 | |||
135 | .freeze = ata_bmdma_freeze, | ||
136 | .thaw = ata_bmdma_thaw, | ||
137 | .error_handler = ata_bmdma_error_handler, | ||
138 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
139 | .cable_detect = ata_cable_40wire, | ||
140 | |||
141 | .bmdma_setup = ata_bmdma_setup, | ||
142 | .bmdma_start = ata_bmdma_start, | ||
143 | .bmdma_stop = ata_bmdma_stop, | ||
144 | .bmdma_status = ata_bmdma_status, | ||
145 | .check_atapi_dma= hpt3x3_atapi_dma, | ||
146 | |||
147 | .qc_prep = ata_qc_prep, | ||
148 | .qc_issue = ata_qc_issue_prot, | ||
149 | |||
150 | .data_xfer = ata_data_xfer, | ||
151 | |||
152 | .irq_handler = ata_interrupt, | ||
153 | .irq_clear = ata_bmdma_irq_clear, | ||
154 | .irq_on = ata_irq_on, | ||
155 | |||
156 | .port_start = ata_sff_port_start, | ||
157 | }; | 116 | }; |
158 | 117 | ||
159 | /** | 118 | /** |
@@ -189,7 +148,6 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
189 | { | 148 | { |
190 | static int printed_version; | 149 | static int printed_version; |
191 | static const struct ata_port_info info = { | 150 | static const struct ata_port_info info = { |
192 | .sht = &hpt3x3_sht, | ||
193 | .flags = ATA_FLAG_SLAVE_POSS, | 151 | .flags = ATA_FLAG_SLAVE_POSS, |
194 | .pio_mask = 0x1f, | 152 | .pio_mask = 0x1f, |
195 | #if defined(CONFIG_PATA_HPT3X3_DMA) | 153 | #if defined(CONFIG_PATA_HPT3X3_DMA) |
@@ -244,15 +202,15 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
244 | ioaddr->altstatus_addr = | 202 | ioaddr->altstatus_addr = |
245 | ioaddr->ctl_addr = base + offset_ctl[i]; | 203 | ioaddr->ctl_addr = base + offset_ctl[i]; |
246 | ioaddr->scr_addr = NULL; | 204 | ioaddr->scr_addr = NULL; |
247 | ata_std_ports(ioaddr); | 205 | ata_sff_std_ports(ioaddr); |
248 | ioaddr->bmdma_addr = base + 8 * i; | 206 | ioaddr->bmdma_addr = base + 8 * i; |
249 | 207 | ||
250 | ata_port_pbar_desc(ap, 4, -1, "ioport"); | 208 | ata_port_pbar_desc(ap, 4, -1, "ioport"); |
251 | ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); | 209 | ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); |
252 | } | 210 | } |
253 | pci_set_master(pdev); | 211 | pci_set_master(pdev); |
254 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 212 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
255 | &hpt3x3_sht); | 213 | IRQF_SHARED, &hpt3x3_sht); |
256 | } | 214 | } |
257 | 215 | ||
258 | #ifdef CONFIG_PM | 216 | #ifdef CONFIG_PM |
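
[Editor's note] pata_hpt3x3 cannot use ata_pci_sff_init_one() because of its unusual BAR layout, so it keeps the manual setup path; the conversion there is just the ata_std_ports() to ata_sff_std_ports() and ata_interrupt to ata_sff_interrupt renames. A reduced sketch of that manual path, assuming host came from ata_host_alloc_pinfo(), base is an ioremapped BAR, example_sht is the template from the earlier sketch, and the 0x20/0x30 offsets are placeholders rather than real HPT3x3 values:

static int example_activate(struct pci_dev *pdev, struct ata_host *host,
			    void __iomem *base)
{
	struct ata_ioports *ioaddr = &host->ports[0]->ioaddr;

	ioaddr->cmd_addr = base;			/* device-specific offsets */
	ioaddr->altstatus_addr = ioaddr->ctl_addr = base + 0x20;	/* assumed */
	ata_sff_std_ports(ioaddr);	/* derive data/error/.../command from cmd_addr */
	ioaddr->bmdma_addr = base + 0x30;		/* assumed */

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &example_sht);
}
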
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index f97068be2d79..17138436423d 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -250,7 +250,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc) | |||
250 | set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ); | 250 | set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ); |
251 | 251 | ||
252 | /* issue r/w command */ | 252 | /* issue r/w command */ |
253 | ap->ops->exec_command(ap, &qc->tf); | 253 | ap->ops->sff_exec_command(ap, &qc->tf); |
254 | } | 254 | } |
255 | 255 | ||
256 | static void pata_icside_bmdma_start(struct ata_queued_cmd *qc) | 256 | static void pata_icside_bmdma_start(struct ata_queued_cmd *qc) |
@@ -270,7 +270,7 @@ static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc) | |||
270 | disable_dma(state->dma); | 270 | disable_dma(state->dma); |
271 | 271 | ||
272 | /* see ata_bmdma_stop */ | 272 | /* see ata_bmdma_stop */ |
273 | ata_altstatus(ap); | 273 | ata_sff_altstatus(ap); |
274 | } | 274 | } |
275 | 275 | ||
276 | static u8 pata_icside_bmdma_status(struct ata_port *ap) | 276 | static u8 pata_icside_bmdma_status(struct ata_port *ap) |
@@ -305,35 +305,18 @@ static int icside_dma_init(struct pata_icside_info *info) | |||
305 | 305 | ||
306 | 306 | ||
307 | static struct scsi_host_template pata_icside_sht = { | 307 | static struct scsi_host_template pata_icside_sht = { |
308 | .module = THIS_MODULE, | 308 | ATA_BASE_SHT(DRV_NAME), |
309 | .name = DRV_NAME, | ||
310 | .ioctl = ata_scsi_ioctl, | ||
311 | .queuecommand = ata_scsi_queuecmd, | ||
312 | .can_queue = ATA_DEF_QUEUE, | ||
313 | .this_id = ATA_SHT_THIS_ID, | ||
314 | .sg_tablesize = PATA_ICSIDE_MAX_SG, | 309 | .sg_tablesize = PATA_ICSIDE_MAX_SG, |
315 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
316 | .emulated = ATA_SHT_EMULATED, | ||
317 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
318 | .proc_name = DRV_NAME, | ||
319 | .dma_boundary = ~0, /* no dma boundaries */ | 310 | .dma_boundary = ~0, /* no dma boundaries */ |
320 | .slave_configure = ata_scsi_slave_config, | ||
321 | .slave_destroy = ata_scsi_slave_destroy, | ||
322 | .bios_param = ata_std_bios_param, | ||
323 | }; | 311 | }; |
324 | 312 | ||
325 | /* wish this was exported from libata-core */ | ||
326 | static void ata_dummy_noret(struct ata_port *port) | ||
327 | { | ||
328 | } | ||
329 | |||
330 | static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) | 313 | static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) |
331 | { | 314 | { |
332 | struct ata_port *ap = link->ap; | 315 | struct ata_port *ap = link->ap; |
333 | struct pata_icside_state *state = ap->host->private_data; | 316 | struct pata_icside_state *state = ap->host->private_data; |
334 | 317 | ||
335 | if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) | 318 | if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) |
336 | return ata_std_postreset(link, classes); | 319 | return ata_sff_postreset(link, classes); |
337 | 320 | ||
338 | state->port[ap->port_no].disabled = 1; | 321 | state->port[ap->port_no].disabled = 1; |
339 | 322 | ||
@@ -349,42 +332,20 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) | |||
349 | } | 332 | } |
350 | } | 333 | } |
351 | 334 | ||
352 | static void pata_icside_error_handler(struct ata_port *ap) | ||
353 | { | ||
354 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL, | ||
355 | pata_icside_postreset); | ||
356 | } | ||
357 | |||
358 | static struct ata_port_operations pata_icside_port_ops = { | 335 | static struct ata_port_operations pata_icside_port_ops = { |
359 | .set_dmamode = pata_icside_set_dmamode, | 336 | .inherits = &ata_sff_port_ops, |
360 | |||
361 | .tf_load = ata_tf_load, | ||
362 | .tf_read = ata_tf_read, | ||
363 | .exec_command = ata_exec_command, | ||
364 | .check_status = ata_check_status, | ||
365 | .dev_select = ata_std_dev_select, | ||
366 | |||
367 | .cable_detect = ata_cable_40wire, | ||
368 | |||
369 | .bmdma_setup = pata_icside_bmdma_setup, | ||
370 | .bmdma_start = pata_icside_bmdma_start, | ||
371 | |||
372 | .data_xfer = ata_data_xfer_noirq, | ||
373 | |||
374 | /* no need to build any PRD tables for DMA */ | 337 | /* no need to build any PRD tables for DMA */ |
375 | .qc_prep = ata_noop_qc_prep, | 338 | .qc_prep = ata_noop_qc_prep, |
376 | .qc_issue = ata_qc_issue_prot, | 339 | .sff_data_xfer = ata_sff_data_xfer_noirq, |
377 | 340 | .bmdma_setup = pata_icside_bmdma_setup, | |
378 | .freeze = ata_bmdma_freeze, | 341 | .bmdma_start = pata_icside_bmdma_start, |
379 | .thaw = ata_bmdma_thaw, | ||
380 | .error_handler = pata_icside_error_handler, | ||
381 | .post_internal_cmd = pata_icside_bmdma_stop, | ||
382 | |||
383 | .irq_clear = ata_dummy_noret, | ||
384 | .irq_on = ata_irq_on, | ||
385 | |||
386 | .bmdma_stop = pata_icside_bmdma_stop, | 342 | .bmdma_stop = pata_icside_bmdma_stop, |
387 | .bmdma_status = pata_icside_bmdma_status, | 343 | .bmdma_status = pata_icside_bmdma_status, |
344 | |||
345 | .cable_detect = ata_cable_40wire, | ||
346 | .set_dmamode = pata_icside_set_dmamode, | ||
347 | .postreset = pata_icside_postreset, | ||
348 | .post_internal_cmd = pata_icside_bmdma_stop, | ||
388 | }; | 349 | }; |
389 | 350 | ||
390 | static void __devinit | 351 | static void __devinit |
@@ -520,7 +481,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) | |||
520 | pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); | 481 | pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); |
521 | } | 482 | } |
522 | 483 | ||
523 | return ata_host_activate(host, ec->irq, ata_interrupt, 0, | 484 | return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, |
524 | &pata_icside_sht); | 485 | &pata_icside_sht); |
525 | } | 486 | } |
526 | 487 | ||
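Note: the pata_icside hunk above is representative of the whole series. Instead of spelling out every taskfile, BMDMA and error-handling hook, an ops table now names .inherits and overrides only what the hardware needs, and the hand-rolled error_handler wrapper disappears because .postreset (and the other reset hooks) plug straight into the inherited EH path. A minimal sketch of the shape, with example_* names purely illustrative and not part of the patch:

	static struct ata_port_operations example_port_ops = {
		/* pull in the stock SFF hook set ... */
		.inherits	= &ata_sff_port_ops,

		/* ... and override only the controller-specific pieces */
		.cable_detect	= ata_cable_40wire,
		.set_dmamode	= example_set_dmamode,	/* hypothetical per-chip hook */
		.postreset	= example_postreset,	/* hypothetical per-chip hook */
	};

Unset slots are filled in from the inherited table at registration time, which appears to be why several ops tables that used to be const become plain structs in these hunks.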
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c index 4320e7986321..6a111baab523 100644 --- a/drivers/ata/pata_isapnp.c +++ b/drivers/ata/pata_isapnp.c | |||
@@ -20,45 +20,12 @@ | |||
20 | #define DRV_VERSION "0.2.2" | 20 | #define DRV_VERSION "0.2.2" |
21 | 21 | ||
22 | static struct scsi_host_template isapnp_sht = { | 22 | static struct scsi_host_template isapnp_sht = { |
23 | .module = THIS_MODULE, | 23 | ATA_PIO_SHT(DRV_NAME), |
24 | .name = DRV_NAME, | ||
25 | .ioctl = ata_scsi_ioctl, | ||
26 | .queuecommand = ata_scsi_queuecmd, | ||
27 | .can_queue = ATA_DEF_QUEUE, | ||
28 | .this_id = ATA_SHT_THIS_ID, | ||
29 | .sg_tablesize = LIBATA_MAX_PRD, | ||
30 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
31 | .emulated = ATA_SHT_EMULATED, | ||
32 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
33 | .proc_name = DRV_NAME, | ||
34 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
35 | .slave_configure = ata_scsi_slave_config, | ||
36 | .slave_destroy = ata_scsi_slave_destroy, | ||
37 | .bios_param = ata_std_bios_param, | ||
38 | }; | 24 | }; |
39 | 25 | ||
40 | static struct ata_port_operations isapnp_port_ops = { | 26 | static struct ata_port_operations isapnp_port_ops = { |
41 | .tf_load = ata_tf_load, | 27 | .inherits = &ata_sff_port_ops, |
42 | .tf_read = ata_tf_read, | ||
43 | .check_status = ata_check_status, | ||
44 | .exec_command = ata_exec_command, | ||
45 | .dev_select = ata_std_dev_select, | ||
46 | |||
47 | .freeze = ata_bmdma_freeze, | ||
48 | .thaw = ata_bmdma_thaw, | ||
49 | .error_handler = ata_bmdma_error_handler, | ||
50 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
51 | .cable_detect = ata_cable_40wire, | 28 | .cable_detect = ata_cable_40wire, |
52 | |||
53 | .qc_prep = ata_qc_prep, | ||
54 | .qc_issue = ata_qc_issue_prot, | ||
55 | |||
56 | .data_xfer = ata_data_xfer, | ||
57 | |||
58 | .irq_clear = ata_bmdma_irq_clear, | ||
59 | .irq_on = ata_irq_on, | ||
60 | |||
61 | .port_start = ata_sff_port_start, | ||
62 | }; | 29 | }; |
63 | 30 | ||
64 | /** | 31 | /** |
@@ -83,7 +50,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev | |||
83 | 50 | ||
84 | if (pnp_irq_valid(idev, 0)) { | 51 | if (pnp_irq_valid(idev, 0)) { |
85 | irq = pnp_irq(idev, 0); | 52 | irq = pnp_irq(idev, 0); |
86 | handler = ata_interrupt; | 53 | handler = ata_sff_interrupt; |
87 | } | 54 | } |
88 | 55 | ||
89 | /* allocate host */ | 56 | /* allocate host */ |
@@ -111,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev | |||
111 | ap->ioaddr.ctl_addr = ctl_addr; | 78 | ap->ioaddr.ctl_addr = ctl_addr; |
112 | } | 79 | } |
113 | 80 | ||
114 | ata_std_ports(&ap->ioaddr); | 81 | ata_sff_std_ports(&ap->ioaddr); |
115 | 82 | ||
116 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | 83 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", |
117 | (unsigned long long)pnp_port_start(idev, 0), | 84 | (unsigned long long)pnp_port_start(idev, 0), |
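Note: for a pure PIO driver like pata_isapnp the conversion reduces to the stock template macro plus the shared helpers. Roughly (illustrative sketch, example_* names are not from the patch; assumes the usual libata driver includes):

	static struct scsi_host_template example_sht = {
		ATA_PIO_SHT(DRV_NAME),		/* stock SFF/PIO SCSI host template */
	};

	static struct ata_port_operations example_port_ops = {
		.inherits	= &ata_sff_port_ops,
		.cable_detect	= ata_cable_40wire,
	};

ata_sff_std_ports() then derives the individual taskfile register addresses from cmd_addr, and the shared ata_sff_interrupt() is what gets handed to ata_host_activate(), as the hunk above shows.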
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index e0c2cc29d0ca..c113d7c079c8 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c | |||
@@ -40,20 +40,7 @@ static int it8213_pre_reset(struct ata_link *link, unsigned long deadline) | |||
40 | if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) | 40 | if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) |
41 | return -ENOENT; | 41 | return -ENOENT; |
42 | 42 | ||
43 | return ata_std_prereset(link, deadline); | 43 | return ata_sff_prereset(link, deadline); |
44 | } | ||
45 | |||
46 | /** | ||
47 | * it8213_error_handler - Probe specified port on PATA host controller | ||
48 | * @ap: Port to probe | ||
49 | * | ||
50 | * LOCKING: | ||
51 | * None (inherited from caller). | ||
52 | */ | ||
53 | |||
54 | static void it8213_error_handler(struct ata_port *ap) | ||
55 | { | ||
56 | ata_bmdma_drive_eh(ap, it8213_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
57 | } | 44 | } |
58 | 45 | ||
59 | /** | 46 | /** |
@@ -243,53 +230,16 @@ static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
243 | } | 230 | } |
244 | 231 | ||
245 | static struct scsi_host_template it8213_sht = { | 232 | static struct scsi_host_template it8213_sht = { |
246 | .module = THIS_MODULE, | 233 | ATA_BMDMA_SHT(DRV_NAME), |
247 | .name = DRV_NAME, | ||
248 | .ioctl = ata_scsi_ioctl, | ||
249 | .queuecommand = ata_scsi_queuecmd, | ||
250 | .can_queue = ATA_DEF_QUEUE, | ||
251 | .this_id = ATA_SHT_THIS_ID, | ||
252 | .sg_tablesize = LIBATA_MAX_PRD, | ||
253 | .max_sectors = ATA_MAX_SECTORS, | ||
254 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
255 | .emulated = ATA_SHT_EMULATED, | ||
256 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
257 | .proc_name = DRV_NAME, | ||
258 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
259 | .slave_configure = ata_scsi_slave_config, | ||
260 | .bios_param = ata_std_bios_param, | ||
261 | }; | 234 | }; |
262 | 235 | ||
263 | static const struct ata_port_operations it8213_ops = { | 236 | |
237 | static struct ata_port_operations it8213_ops = { | ||
238 | .inherits = &ata_bmdma_port_ops, | ||
239 | .cable_detect = it8213_cable_detect, | ||
264 | .set_piomode = it8213_set_piomode, | 240 | .set_piomode = it8213_set_piomode, |
265 | .set_dmamode = it8213_set_dmamode, | 241 | .set_dmamode = it8213_set_dmamode, |
266 | .mode_filter = ata_pci_default_filter, | 242 | .prereset = it8213_pre_reset, |
267 | |||
268 | .tf_load = ata_tf_load, | ||
269 | .tf_read = ata_tf_read, | ||
270 | .check_status = ata_check_status, | ||
271 | .exec_command = ata_exec_command, | ||
272 | .dev_select = ata_std_dev_select, | ||
273 | |||
274 | .freeze = ata_bmdma_freeze, | ||
275 | .thaw = ata_bmdma_thaw, | ||
276 | .error_handler = it8213_error_handler, | ||
277 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
278 | .cable_detect = it8213_cable_detect, | ||
279 | |||
280 | .bmdma_setup = ata_bmdma_setup, | ||
281 | .bmdma_start = ata_bmdma_start, | ||
282 | .bmdma_stop = ata_bmdma_stop, | ||
283 | .bmdma_status = ata_bmdma_status, | ||
284 | .qc_prep = ata_qc_prep, | ||
285 | .qc_issue = ata_qc_issue_prot, | ||
286 | .data_xfer = ata_data_xfer, | ||
287 | |||
288 | .irq_handler = ata_interrupt, | ||
289 | .irq_clear = ata_bmdma_irq_clear, | ||
290 | .irq_on = ata_irq_on, | ||
291 | |||
292 | .port_start = ata_sff_port_start, | ||
293 | }; | 243 | }; |
294 | 244 | ||
295 | 245 | ||
@@ -311,7 +261,6 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
311 | { | 261 | { |
312 | static int printed_version; | 262 | static int printed_version; |
313 | static const struct ata_port_info info = { | 263 | static const struct ata_port_info info = { |
314 | .sht = &it8213_sht, | ||
315 | .flags = ATA_FLAG_SLAVE_POSS, | 264 | .flags = ATA_FLAG_SLAVE_POSS, |
316 | .pio_mask = 0x1f, /* pio0-4 */ | 265 | .pio_mask = 0x1f, /* pio0-4 */ |
317 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 266 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -325,7 +274,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
325 | dev_printk(KERN_DEBUG, &pdev->dev, | 274 | dev_printk(KERN_DEBUG, &pdev->dev, |
326 | "version " DRV_VERSION "\n"); | 275 | "version " DRV_VERSION "\n"); |
327 | 276 | ||
328 | return ata_pci_init_one(pdev, ppi); | 277 | return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL); |
329 | } | 278 | } |
330 | 279 | ||
331 | static const struct pci_device_id it8213_pci_tbl[] = { | 280 | static const struct pci_device_id it8213_pci_tbl[] = { |
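Note: the error-handling side of the conversion follows one pattern throughout. Drivers used to wrap ata_bmdma_drive_eh() in a private error_handler just to pass a custom prereset; now the hook is installed directly and the inherited BMDMA error handler picks it up. Sketch only, example_* names are illustrative:

	/* old style, removed by these hunks */
	static void example_error_handler(struct ata_port *ap)
	{
		ata_bmdma_drive_eh(ap, example_pre_reset, ata_std_softreset, NULL,
				   ata_std_postreset);
	}

	/* new style: the reset hook slots into the inherited EH path */
	static struct ata_port_operations example_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.prereset	= example_pre_reset,
	};

Probe changes to match: the scsi_host_template moves out of ata_port_info and is passed to ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL) instead.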
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 257951d03dbb..e10816931b2f 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c | |||
@@ -395,11 +395,11 @@ static void it821x_passthru_dev_select(struct ata_port *ap, | |||
395 | it821x_program(ap, adev, itdev->pio[adev->devno]); | 395 | it821x_program(ap, adev, itdev->pio[adev->devno]); |
396 | itdev->last_device = device; | 396 | itdev->last_device = device; |
397 | } | 397 | } |
398 | ata_std_dev_select(ap, device); | 398 | ata_sff_dev_select(ap, device); |
399 | } | 399 | } |
400 | 400 | ||
401 | /** | 401 | /** |
402 | * it821x_smart_qc_issue_prot - wrap qc issue prot | 402 | * it821x_smart_qc_issue - wrap qc issue prot |
403 | * @qc: command | 403 | * @qc: command |
404 | * | 404 | * |
405 | * Wrap the command issue sequence for the IT821x. We need to | 405 | * Wrap the command issue sequence for the IT821x. We need to |
@@ -407,7 +407,7 @@ static void it821x_passthru_dev_select(struct ata_port *ap, | |||
407 | * usual happenings kick off | 407 | * usual happenings kick off |
408 | */ | 408 | */ |
409 | 409 | ||
410 | static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) | 410 | static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) |
411 | { | 411 | { |
412 | switch(qc->tf.command) | 412 | switch(qc->tf.command) |
413 | { | 413 | { |
@@ -427,14 +427,14 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) | |||
427 | case ATA_CMD_ID_ATA: | 427 | case ATA_CMD_ID_ATA: |
428 | /* Arguably should just no-op this one */ | 428 | /* Arguably should just no-op this one */ |
429 | case ATA_CMD_SET_FEATURES: | 429 | case ATA_CMD_SET_FEATURES: |
430 | return ata_qc_issue_prot(qc); | 430 | return ata_sff_qc_issue(qc); |
431 | } | 431 | } |
432 | printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); | 432 | printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); |
433 | return AC_ERR_DEV; | 433 | return AC_ERR_DEV; |
434 | } | 434 | } |
435 | 435 | ||
436 | /** | 436 | /** |
437 | * it821x_passthru_qc_issue_prot - wrap qc issue prot | 437 | * it821x_passthru_qc_issue - wrap qc issue prot |
438 | * @qc: command | 438 | * @qc: command |
439 | * | 439 | * |
440 | * Wrap the command issue sequence for the IT821x. We need to | 440 | * Wrap the command issue sequence for the IT821x. We need to |
@@ -442,10 +442,10 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) | |||
442 | * usual happenings kick off | 442 | * usual happenings kick off |
443 | */ | 443 | */ |
444 | 444 | ||
445 | static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) | 445 | static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) |
446 | { | 446 | { |
447 | it821x_passthru_dev_select(qc->ap, qc->dev->devno); | 447 | it821x_passthru_dev_select(qc->ap, qc->dev->devno); |
448 | return ata_qc_issue_prot(qc); | 448 | return ata_sff_qc_issue(qc); |
449 | } | 449 | } |
450 | 450 | ||
451 | /** | 451 | /** |
@@ -632,89 +632,34 @@ static int it821x_port_start(struct ata_port *ap) | |||
632 | } | 632 | } |
633 | 633 | ||
634 | static struct scsi_host_template it821x_sht = { | 634 | static struct scsi_host_template it821x_sht = { |
635 | .module = THIS_MODULE, | 635 | ATA_BMDMA_SHT(DRV_NAME), |
636 | .name = DRV_NAME, | ||
637 | .ioctl = ata_scsi_ioctl, | ||
638 | .queuecommand = ata_scsi_queuecmd, | ||
639 | .can_queue = ATA_DEF_QUEUE, | ||
640 | .this_id = ATA_SHT_THIS_ID, | ||
641 | .sg_tablesize = LIBATA_MAX_PRD, | ||
642 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
643 | .emulated = ATA_SHT_EMULATED, | ||
644 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
645 | .proc_name = DRV_NAME, | ||
646 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
647 | .slave_configure = ata_scsi_slave_config, | ||
648 | .slave_destroy = ata_scsi_slave_destroy, | ||
649 | .bios_param = ata_std_bios_param, | ||
650 | }; | 636 | }; |
651 | 637 | ||
652 | static struct ata_port_operations it821x_smart_port_ops = { | 638 | static struct ata_port_operations it821x_smart_port_ops = { |
653 | .set_mode = it821x_smart_set_mode, | 639 | .inherits = &ata_bmdma_port_ops, |
654 | .tf_load = ata_tf_load, | ||
655 | .tf_read = ata_tf_read, | ||
656 | .mode_filter = ata_pci_default_filter, | ||
657 | 640 | ||
658 | .check_status = ata_check_status, | ||
659 | .check_atapi_dma= it821x_check_atapi_dma, | 641 | .check_atapi_dma= it821x_check_atapi_dma, |
660 | .exec_command = ata_exec_command, | 642 | .qc_issue = it821x_smart_qc_issue, |
661 | .dev_select = ata_std_dev_select, | ||
662 | .dev_config = it821x_dev_config, | ||
663 | 643 | ||
664 | .freeze = ata_bmdma_freeze, | ||
665 | .thaw = ata_bmdma_thaw, | ||
666 | .error_handler = ata_bmdma_error_handler, | ||
667 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
668 | .cable_detect = it821x_ident_hack, | 644 | .cable_detect = it821x_ident_hack, |
669 | 645 | .set_mode = it821x_smart_set_mode, | |
670 | .bmdma_setup = ata_bmdma_setup, | 646 | .dev_config = it821x_dev_config, |
671 | .bmdma_start = ata_bmdma_start, | ||
672 | .bmdma_stop = ata_bmdma_stop, | ||
673 | .bmdma_status = ata_bmdma_status, | ||
674 | |||
675 | .qc_prep = ata_qc_prep, | ||
676 | .qc_issue = it821x_smart_qc_issue_prot, | ||
677 | |||
678 | .data_xfer = ata_data_xfer, | ||
679 | |||
680 | .irq_handler = ata_interrupt, | ||
681 | .irq_clear = ata_bmdma_irq_clear, | ||
682 | .irq_on = ata_irq_on, | ||
683 | 647 | ||
684 | .port_start = it821x_port_start, | 648 | .port_start = it821x_port_start, |
685 | }; | 649 | }; |
686 | 650 | ||
687 | static struct ata_port_operations it821x_passthru_port_ops = { | 651 | static struct ata_port_operations it821x_passthru_port_ops = { |
688 | .set_piomode = it821x_passthru_set_piomode, | 652 | .inherits = &ata_bmdma_port_ops, |
689 | .set_dmamode = it821x_passthru_set_dmamode, | ||
690 | .mode_filter = ata_pci_default_filter, | ||
691 | 653 | ||
692 | .tf_load = ata_tf_load, | ||
693 | .tf_read = ata_tf_read, | ||
694 | .check_status = ata_check_status, | ||
695 | .exec_command = ata_exec_command, | ||
696 | .check_atapi_dma= it821x_check_atapi_dma, | 654 | .check_atapi_dma= it821x_check_atapi_dma, |
697 | .dev_select = it821x_passthru_dev_select, | 655 | .sff_dev_select = it821x_passthru_dev_select, |
698 | |||
699 | .freeze = ata_bmdma_freeze, | ||
700 | .thaw = ata_bmdma_thaw, | ||
701 | .error_handler = ata_bmdma_error_handler, | ||
702 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
703 | .cable_detect = ata_cable_unknown, | ||
704 | |||
705 | .bmdma_setup = ata_bmdma_setup, | ||
706 | .bmdma_start = it821x_passthru_bmdma_start, | 656 | .bmdma_start = it821x_passthru_bmdma_start, |
707 | .bmdma_stop = it821x_passthru_bmdma_stop, | 657 | .bmdma_stop = it821x_passthru_bmdma_stop, |
708 | .bmdma_status = ata_bmdma_status, | 658 | .qc_issue = it821x_passthru_qc_issue, |
709 | |||
710 | .qc_prep = ata_qc_prep, | ||
711 | .qc_issue = it821x_passthru_qc_issue_prot, | ||
712 | |||
713 | .data_xfer = ata_data_xfer, | ||
714 | 659 | ||
715 | .irq_clear = ata_bmdma_irq_clear, | 660 | .cable_detect = ata_cable_unknown, |
716 | .irq_handler = ata_interrupt, | 661 | .set_piomode = it821x_passthru_set_piomode, |
717 | .irq_on = ata_irq_on, | 662 | .set_dmamode = it821x_passthru_set_dmamode, |
718 | 663 | ||
719 | .port_start = it821x_port_start, | 664 | .port_start = it821x_port_start, |
720 | }; | 665 | }; |
@@ -742,14 +687,12 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
742 | u8 conf; | 687 | u8 conf; |
743 | 688 | ||
744 | static const struct ata_port_info info_smart = { | 689 | static const struct ata_port_info info_smart = { |
745 | .sht = &it821x_sht, | ||
746 | .flags = ATA_FLAG_SLAVE_POSS, | 690 | .flags = ATA_FLAG_SLAVE_POSS, |
747 | .pio_mask = 0x1f, | 691 | .pio_mask = 0x1f, |
748 | .mwdma_mask = 0x07, | 692 | .mwdma_mask = 0x07, |
749 | .port_ops = &it821x_smart_port_ops | 693 | .port_ops = &it821x_smart_port_ops |
750 | }; | 694 | }; |
751 | static const struct ata_port_info info_passthru = { | 695 | static const struct ata_port_info info_passthru = { |
752 | .sht = &it821x_sht, | ||
753 | .flags = ATA_FLAG_SLAVE_POSS, | 696 | .flags = ATA_FLAG_SLAVE_POSS, |
754 | .pio_mask = 0x1f, | 697 | .pio_mask = 0x1f, |
755 | .mwdma_mask = 0x07, | 698 | .mwdma_mask = 0x07, |
@@ -759,6 +702,11 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
759 | 702 | ||
760 | const struct ata_port_info *ppi[] = { NULL, NULL }; | 703 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
761 | static char *mode[2] = { "pass through", "smart" }; | 704 | static char *mode[2] = { "pass through", "smart" }; |
705 | int rc; | ||
706 | |||
707 | rc = pcim_enable_device(pdev); | ||
708 | if (rc) | ||
709 | return rc; | ||
762 | 710 | ||
763 | /* Force the card into bypass mode if so requested */ | 711 | /* Force the card into bypass mode if so requested */ |
764 | if (it8212_noraid) { | 712 | if (it8212_noraid) { |
@@ -774,16 +722,23 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
774 | else | 722 | else |
775 | ppi[0] = &info_smart; | 723 | ppi[0] = &info_smart; |
776 | 724 | ||
777 | return ata_pci_init_one(pdev, ppi); | 725 | return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL); |
778 | } | 726 | } |
779 | 727 | ||
780 | #ifdef CONFIG_PM | 728 | #ifdef CONFIG_PM |
781 | static int it821x_reinit_one(struct pci_dev *pdev) | 729 | static int it821x_reinit_one(struct pci_dev *pdev) |
782 | { | 730 | { |
731 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
732 | int rc; | ||
733 | |||
734 | rc = ata_pci_device_do_resume(pdev); | ||
735 | if (rc) | ||
736 | return rc; | ||
783 | /* Resume - turn raid back off if need be */ | 737 | /* Resume - turn raid back off if need be */ |
784 | if (it8212_noraid) | 738 | if (it8212_noraid) |
785 | it821x_disable_raid(pdev); | 739 | it821x_disable_raid(pdev); |
786 | return ata_pci_device_resume(pdev); | 740 | ata_host_resume(host); |
741 | return rc; | ||
787 | } | 742 | } |
788 | #endif | 743 | #endif |
789 | 744 | ||
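Note: the it821x hunks also show the new suspend/resume split. Rather than finishing with a single ata_pci_device_resume(), the driver wakes the PCI device, redoes its chip-specific setup, and only then restarts libata. A sketch of that ordering (example_* is illustrative):

	#ifdef CONFIG_PM
	static int example_reinit_one(struct pci_dev *pdev)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_pci_device_do_resume(pdev);	/* wake the PCI device */
		if (rc)
			return rc;

		/* chip-specific reprogramming goes here */

		ata_host_resume(host);			/* then let libata EH run again */
		return 0;
	}
	#endif

Probe-side, the hunk also adds an explicit pcim_enable_device() call before the device is touched.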
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 030878fedeb5..8a175f23b907 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c | |||
@@ -88,48 +88,14 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev, | |||
88 | } | 88 | } |
89 | 89 | ||
90 | static struct scsi_host_template ixp4xx_sht = { | 90 | static struct scsi_host_template ixp4xx_sht = { |
91 | .module = THIS_MODULE, | 91 | ATA_PIO_SHT(DRV_NAME), |
92 | .name = DRV_NAME, | ||
93 | .ioctl = ata_scsi_ioctl, | ||
94 | .queuecommand = ata_scsi_queuecmd, | ||
95 | .can_queue = ATA_DEF_QUEUE, | ||
96 | .this_id = ATA_SHT_THIS_ID, | ||
97 | .sg_tablesize = LIBATA_MAX_PRD, | ||
98 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
99 | .emulated = ATA_SHT_EMULATED, | ||
100 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
101 | .proc_name = DRV_NAME, | ||
102 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
103 | .slave_configure = ata_scsi_slave_config, | ||
104 | .slave_destroy = ata_scsi_slave_destroy, | ||
105 | .bios_param = ata_std_bios_param, | ||
106 | }; | 92 | }; |
107 | 93 | ||
108 | static struct ata_port_operations ixp4xx_port_ops = { | 94 | static struct ata_port_operations ixp4xx_port_ops = { |
109 | .set_mode = ixp4xx_set_mode, | 95 | .inherits = &ata_sff_port_ops, |
110 | .mode_filter = ata_pci_default_filter, | 96 | .sff_data_xfer = ixp4xx_mmio_data_xfer, |
111 | |||
112 | .tf_load = ata_tf_load, | ||
113 | .tf_read = ata_tf_read, | ||
114 | .exec_command = ata_exec_command, | ||
115 | .check_status = ata_check_status, | ||
116 | .dev_select = ata_std_dev_select, | ||
117 | |||
118 | .freeze = ata_bmdma_freeze, | ||
119 | .thaw = ata_bmdma_thaw, | ||
120 | .error_handler = ata_bmdma_error_handler, | ||
121 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
122 | |||
123 | .qc_prep = ata_qc_prep, | ||
124 | .qc_issue = ata_qc_issue_prot, | ||
125 | .data_xfer = ixp4xx_mmio_data_xfer, | ||
126 | .cable_detect = ata_cable_40wire, | 97 | .cable_detect = ata_cable_40wire, |
127 | 98 | .set_mode = ixp4xx_set_mode, | |
128 | .irq_handler = ata_interrupt, | ||
129 | .irq_clear = ata_bmdma_irq_clear, | ||
130 | .irq_on = ata_irq_on, | ||
131 | |||
132 | .port_start = ata_port_start, | ||
133 | }; | 99 | }; |
134 | 100 | ||
135 | static void ixp4xx_setup_port(struct ata_port *ap, | 101 | static void ixp4xx_setup_port(struct ata_port *ap, |
@@ -144,7 +110,7 @@ static void ixp4xx_setup_port(struct ata_port *ap, | |||
144 | ioaddr->altstatus_addr = data->cs1 + 0x06; | 110 | ioaddr->altstatus_addr = data->cs1 + 0x06; |
145 | ioaddr->ctl_addr = data->cs1 + 0x06; | 111 | ioaddr->ctl_addr = data->cs1 + 0x06; |
146 | 112 | ||
147 | ata_std_ports(ioaddr); | 113 | ata_sff_std_ports(ioaddr); |
148 | 114 | ||
149 | #ifndef __ARMEB__ | 115 | #ifndef __ARMEB__ |
150 | 116 | ||
@@ -220,7 +186,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) | |||
220 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | 186 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); |
221 | 187 | ||
222 | /* activate host */ | 188 | /* activate host */ |
223 | return ata_host_activate(host, irq, ata_interrupt, 0, &ixp4xx_sht); | 189 | return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht); |
224 | } | 190 | } |
225 | 191 | ||
226 | static __devexit int ixp4xx_pata_remove(struct platform_device *dev) | 192 | static __devexit int ixp4xx_pata_remove(struct platform_device *dev) |
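Note: pata_ixp4xx_cf is the simple case of a driver that only needs to replace the PIO data transfer routine; the hook is now called sff_data_xfer and everything else is inherited. A hypothetical override that falls back to the stock helper would look like this (sketch; example_data_xfer is not in the patch):

	static unsigned int example_data_xfer(struct ata_device *dev, unsigned char *buf,
					      unsigned int buflen, int rw)
	{
		/* controller-specific fast path (e.g. 32-bit or MMIO access) ... */

		/* ... otherwise fall back to the standard SFF PIO transfer */
		return ata_sff_data_xfer(dev, buf, buflen, rw);
	}

The pdc_data_xfer_vlb and vlb32_data_xfer hunks in pata_legacy further down use exactly this fall-through structure.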
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 00bbbbd50e97..73b7596816b4 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c | |||
@@ -102,73 +102,18 @@ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline) | |||
102 | ap->cbl = ATA_CBL_SATA; | 102 | ap->cbl = ATA_CBL_SATA; |
103 | break; | 103 | break; |
104 | } | 104 | } |
105 | return ata_std_prereset(link, deadline); | 105 | return ata_sff_prereset(link, deadline); |
106 | } | ||
107 | |||
108 | /** | ||
109 | * jmicron_error_handler - Setup and error handler | ||
110 | * @ap: Port to handle | ||
111 | * | ||
112 | * LOCKING: | ||
113 | * None (inherited from caller). | ||
114 | */ | ||
115 | |||
116 | static void jmicron_error_handler(struct ata_port *ap) | ||
117 | { | ||
118 | ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, | ||
119 | ata_std_postreset); | ||
120 | } | 106 | } |
121 | 107 | ||
122 | /* No PIO or DMA methods needed for this device */ | 108 | /* No PIO or DMA methods needed for this device */ |
123 | 109 | ||
124 | static struct scsi_host_template jmicron_sht = { | 110 | static struct scsi_host_template jmicron_sht = { |
125 | .module = THIS_MODULE, | 111 | ATA_BMDMA_SHT(DRV_NAME), |
126 | .name = DRV_NAME, | ||
127 | .ioctl = ata_scsi_ioctl, | ||
128 | .queuecommand = ata_scsi_queuecmd, | ||
129 | .can_queue = ATA_DEF_QUEUE, | ||
130 | .this_id = ATA_SHT_THIS_ID, | ||
131 | .sg_tablesize = LIBATA_MAX_PRD, | ||
132 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
133 | .emulated = ATA_SHT_EMULATED, | ||
134 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
135 | .proc_name = DRV_NAME, | ||
136 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
137 | .slave_configure = ata_scsi_slave_config, | ||
138 | .slave_destroy = ata_scsi_slave_destroy, | ||
139 | /* Use standard CHS mapping rules */ | ||
140 | .bios_param = ata_std_bios_param, | ||
141 | }; | 112 | }; |
142 | 113 | ||
143 | static const struct ata_port_operations jmicron_ops = { | 114 | static struct ata_port_operations jmicron_ops = { |
144 | /* Task file is PCI ATA format, use helpers */ | 115 | .inherits = &ata_bmdma_port_ops, |
145 | .tf_load = ata_tf_load, | 116 | .prereset = jmicron_pre_reset, |
146 | .tf_read = ata_tf_read, | ||
147 | .check_status = ata_check_status, | ||
148 | .exec_command = ata_exec_command, | ||
149 | .dev_select = ata_std_dev_select, | ||
150 | |||
151 | .freeze = ata_bmdma_freeze, | ||
152 | .thaw = ata_bmdma_thaw, | ||
153 | .error_handler = jmicron_error_handler, | ||
154 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
155 | |||
156 | /* BMDMA handling is PCI ATA format, use helpers */ | ||
157 | .bmdma_setup = ata_bmdma_setup, | ||
158 | .bmdma_start = ata_bmdma_start, | ||
159 | .bmdma_stop = ata_bmdma_stop, | ||
160 | .bmdma_status = ata_bmdma_status, | ||
161 | .qc_prep = ata_qc_prep, | ||
162 | .qc_issue = ata_qc_issue_prot, | ||
163 | .data_xfer = ata_data_xfer, | ||
164 | |||
165 | /* IRQ-related hooks */ | ||
166 | .irq_handler = ata_interrupt, | ||
167 | .irq_clear = ata_bmdma_irq_clear, | ||
168 | .irq_on = ata_irq_on, | ||
169 | |||
170 | /* Generic PATA PCI ATA helpers */ | ||
171 | .port_start = ata_port_start, | ||
172 | }; | 117 | }; |
173 | 118 | ||
174 | 119 | ||
@@ -189,7 +134,6 @@ static const struct ata_port_operations jmicron_ops = { | |||
189 | static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | 134 | static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id) |
190 | { | 135 | { |
191 | static const struct ata_port_info info = { | 136 | static const struct ata_port_info info = { |
192 | .sht = &jmicron_sht, | ||
193 | .flags = ATA_FLAG_SLAVE_POSS, | 137 | .flags = ATA_FLAG_SLAVE_POSS, |
194 | 138 | ||
195 | .pio_mask = 0x1f, | 139 | .pio_mask = 0x1f, |
@@ -200,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
200 | }; | 144 | }; |
201 | const struct ata_port_info *ppi[] = { &info, NULL }; | 145 | const struct ata_port_info *ppi[] = { &info, NULL }; |
202 | 146 | ||
203 | return ata_pci_init_one(pdev, ppi); | 147 | return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL); |
204 | } | 148 | } |
205 | 149 | ||
206 | static const struct pci_device_id jmicron_pci_tbl[] = { | 150 | static const struct pci_device_id jmicron_pci_tbl[] = { |
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 50fe08ebe23c..7af4b29cc422 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c | |||
@@ -208,21 +208,12 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused) | |||
208 | } | 208 | } |
209 | 209 | ||
210 | static struct scsi_host_template legacy_sht = { | 210 | static struct scsi_host_template legacy_sht = { |
211 | .module = THIS_MODULE, | 211 | ATA_PIO_SHT(DRV_NAME), |
212 | .name = DRV_NAME, | 212 | }; |
213 | .ioctl = ata_scsi_ioctl, | 213 | |
214 | .queuecommand = ata_scsi_queuecmd, | 214 | static const struct ata_port_operations legacy_base_port_ops = { |
215 | .can_queue = ATA_DEF_QUEUE, | 215 | .inherits = &ata_sff_port_ops, |
216 | .this_id = ATA_SHT_THIS_ID, | 216 | .cable_detect = ata_cable_40wire, |
217 | .sg_tablesize = LIBATA_MAX_PRD, | ||
218 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
219 | .emulated = ATA_SHT_EMULATED, | ||
220 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
221 | .proc_name = DRV_NAME, | ||
222 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
223 | .slave_configure = ata_scsi_slave_config, | ||
224 | .slave_destroy = ata_scsi_slave_destroy, | ||
225 | .bios_param = ata_std_bios_param, | ||
226 | }; | 217 | }; |
227 | 218 | ||
228 | /* | 219 | /* |
@@ -234,55 +225,14 @@ static struct scsi_host_template legacy_sht = { | |||
234 | */ | 225 | */ |
235 | 226 | ||
236 | static struct ata_port_operations simple_port_ops = { | 227 | static struct ata_port_operations simple_port_ops = { |
237 | .tf_load = ata_tf_load, | 228 | .inherits = &legacy_base_port_ops, |
238 | .tf_read = ata_tf_read, | 229 | .sff_data_xfer = ata_sff_data_xfer_noirq, |
239 | .check_status = ata_check_status, | ||
240 | .exec_command = ata_exec_command, | ||
241 | .dev_select = ata_std_dev_select, | ||
242 | |||
243 | .freeze = ata_bmdma_freeze, | ||
244 | .thaw = ata_bmdma_thaw, | ||
245 | .error_handler = ata_bmdma_error_handler, | ||
246 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
247 | .cable_detect = ata_cable_40wire, | ||
248 | |||
249 | .qc_prep = ata_qc_prep, | ||
250 | .qc_issue = ata_qc_issue_prot, | ||
251 | |||
252 | .data_xfer = ata_data_xfer_noirq, | ||
253 | |||
254 | .irq_handler = ata_interrupt, | ||
255 | .irq_clear = ata_bmdma_irq_clear, | ||
256 | .irq_on = ata_irq_on, | ||
257 | |||
258 | .port_start = ata_sff_port_start, | ||
259 | }; | 230 | }; |
260 | 231 | ||
261 | static struct ata_port_operations legacy_port_ops = { | 232 | static struct ata_port_operations legacy_port_ops = { |
233 | .inherits = &legacy_base_port_ops, | ||
234 | .sff_data_xfer = ata_sff_data_xfer_noirq, | ||
262 | .set_mode = legacy_set_mode, | 235 | .set_mode = legacy_set_mode, |
263 | |||
264 | .tf_load = ata_tf_load, | ||
265 | .tf_read = ata_tf_read, | ||
266 | .check_status = ata_check_status, | ||
267 | .exec_command = ata_exec_command, | ||
268 | .dev_select = ata_std_dev_select, | ||
269 | .cable_detect = ata_cable_40wire, | ||
270 | |||
271 | .freeze = ata_bmdma_freeze, | ||
272 | .thaw = ata_bmdma_thaw, | ||
273 | .error_handler = ata_bmdma_error_handler, | ||
274 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
275 | |||
276 | .qc_prep = ata_qc_prep, | ||
277 | .qc_issue = ata_qc_issue_prot, | ||
278 | |||
279 | .data_xfer = ata_data_xfer_noirq, | ||
280 | |||
281 | .irq_handler = ata_interrupt, | ||
282 | .irq_clear = ata_bmdma_irq_clear, | ||
283 | .irq_on = ata_irq_on, | ||
284 | |||
285 | .port_start = ata_sff_port_start, | ||
286 | }; | 236 | }; |
287 | 237 | ||
288 | /* | 238 | /* |
@@ -367,36 +317,15 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, | |||
367 | } | 317 | } |
368 | local_irq_restore(flags); | 318 | local_irq_restore(flags); |
369 | } else | 319 | } else |
370 | buflen = ata_data_xfer_noirq(dev, buf, buflen, rw); | 320 | buflen = ata_sff_data_xfer_noirq(dev, buf, buflen, rw); |
371 | 321 | ||
372 | return buflen; | 322 | return buflen; |
373 | } | 323 | } |
374 | 324 | ||
375 | static struct ata_port_operations pdc20230_port_ops = { | 325 | static struct ata_port_operations pdc20230_port_ops = { |
326 | .inherits = &legacy_base_port_ops, | ||
376 | .set_piomode = pdc20230_set_piomode, | 327 | .set_piomode = pdc20230_set_piomode, |
377 | 328 | .sff_data_xfer = pdc_data_xfer_vlb, | |
378 | .tf_load = ata_tf_load, | ||
379 | .tf_read = ata_tf_read, | ||
380 | .check_status = ata_check_status, | ||
381 | .exec_command = ata_exec_command, | ||
382 | .dev_select = ata_std_dev_select, | ||
383 | |||
384 | .freeze = ata_bmdma_freeze, | ||
385 | .thaw = ata_bmdma_thaw, | ||
386 | .error_handler = ata_bmdma_error_handler, | ||
387 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
388 | .cable_detect = ata_cable_40wire, | ||
389 | |||
390 | .qc_prep = ata_qc_prep, | ||
391 | .qc_issue = ata_qc_issue_prot, | ||
392 | |||
393 | .data_xfer = pdc_data_xfer_vlb, | ||
394 | |||
395 | .irq_handler = ata_interrupt, | ||
396 | .irq_clear = ata_bmdma_irq_clear, | ||
397 | .irq_on = ata_irq_on, | ||
398 | |||
399 | .port_start = ata_sff_port_start, | ||
400 | }; | 329 | }; |
401 | 330 | ||
402 | /* | 331 | /* |
@@ -427,30 +356,8 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
427 | } | 356 | } |
428 | 357 | ||
429 | static struct ata_port_operations ht6560a_port_ops = { | 358 | static struct ata_port_operations ht6560a_port_ops = { |
359 | .inherits = &legacy_base_port_ops, | ||
430 | .set_piomode = ht6560a_set_piomode, | 360 | .set_piomode = ht6560a_set_piomode, |
431 | |||
432 | .tf_load = ata_tf_load, | ||
433 | .tf_read = ata_tf_read, | ||
434 | .check_status = ata_check_status, | ||
435 | .exec_command = ata_exec_command, | ||
436 | .dev_select = ata_std_dev_select, | ||
437 | |||
438 | .freeze = ata_bmdma_freeze, | ||
439 | .thaw = ata_bmdma_thaw, | ||
440 | .error_handler = ata_bmdma_error_handler, | ||
441 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
442 | .cable_detect = ata_cable_40wire, | ||
443 | |||
444 | .qc_prep = ata_qc_prep, | ||
445 | .qc_issue = ata_qc_issue_prot, | ||
446 | |||
447 | .data_xfer = ata_data_xfer, /* Check vlb/noirq */ | ||
448 | |||
449 | .irq_handler = ata_interrupt, | ||
450 | .irq_clear = ata_bmdma_irq_clear, | ||
451 | .irq_on = ata_irq_on, | ||
452 | |||
453 | .port_start = ata_sff_port_start, | ||
454 | }; | 361 | }; |
455 | 362 | ||
456 | /* | 363 | /* |
@@ -492,30 +399,8 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
492 | } | 399 | } |
493 | 400 | ||
494 | static struct ata_port_operations ht6560b_port_ops = { | 401 | static struct ata_port_operations ht6560b_port_ops = { |
402 | .inherits = &legacy_base_port_ops, | ||
495 | .set_piomode = ht6560b_set_piomode, | 403 | .set_piomode = ht6560b_set_piomode, |
496 | |||
497 | .tf_load = ata_tf_load, | ||
498 | .tf_read = ata_tf_read, | ||
499 | .check_status = ata_check_status, | ||
500 | .exec_command = ata_exec_command, | ||
501 | .dev_select = ata_std_dev_select, | ||
502 | |||
503 | .freeze = ata_bmdma_freeze, | ||
504 | .thaw = ata_bmdma_thaw, | ||
505 | .error_handler = ata_bmdma_error_handler, | ||
506 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
507 | .cable_detect = ata_cable_40wire, | ||
508 | |||
509 | .qc_prep = ata_qc_prep, | ||
510 | .qc_issue = ata_qc_issue_prot, | ||
511 | |||
512 | .data_xfer = ata_data_xfer, /* FIXME: Check 32bit and noirq */ | ||
513 | |||
514 | .irq_handler = ata_interrupt, | ||
515 | .irq_clear = ata_bmdma_irq_clear, | ||
516 | .irq_on = ata_irq_on, | ||
517 | |||
518 | .port_start = ata_sff_port_start, | ||
519 | }; | 404 | }; |
520 | 405 | ||
521 | /* | 406 | /* |
@@ -613,30 +498,8 @@ static void opti82c611a_set_piomode(struct ata_port *ap, | |||
613 | 498 | ||
614 | 499 | ||
615 | static struct ata_port_operations opti82c611a_port_ops = { | 500 | static struct ata_port_operations opti82c611a_port_ops = { |
501 | .inherits = &legacy_base_port_ops, | ||
616 | .set_piomode = opti82c611a_set_piomode, | 502 | .set_piomode = opti82c611a_set_piomode, |
617 | |||
618 | .tf_load = ata_tf_load, | ||
619 | .tf_read = ata_tf_read, | ||
620 | .check_status = ata_check_status, | ||
621 | .exec_command = ata_exec_command, | ||
622 | .dev_select = ata_std_dev_select, | ||
623 | |||
624 | .freeze = ata_bmdma_freeze, | ||
625 | .thaw = ata_bmdma_thaw, | ||
626 | .error_handler = ata_bmdma_error_handler, | ||
627 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
628 | .cable_detect = ata_cable_40wire, | ||
629 | |||
630 | .qc_prep = ata_qc_prep, | ||
631 | .qc_issue = ata_qc_issue_prot, | ||
632 | |||
633 | .data_xfer = ata_data_xfer, | ||
634 | |||
635 | .irq_handler = ata_interrupt, | ||
636 | .irq_clear = ata_bmdma_irq_clear, | ||
637 | .irq_on = ata_irq_on, | ||
638 | |||
639 | .port_start = ata_sff_port_start, | ||
640 | }; | 503 | }; |
641 | 504 | ||
642 | /* | 505 | /* |
@@ -716,7 +579,7 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
716 | } | 579 | } |
717 | 580 | ||
718 | /** | 581 | /** |
719 | * opt82c465mv_qc_issue_prot - command issue | 582 | * opt82c465mv_qc_issue - command issue |
720 | * @qc: command pending | 583 | * @qc: command pending |
721 | * | 584 | * |
722 | * Called when the libata layer is about to issue a command. We wrap | 585 | * Called when the libata layer is about to issue a command. We wrap |
@@ -730,7 +593,7 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
730 | * FIXME: dual channel needs ->serialize support | 593 | * FIXME: dual channel needs ->serialize support |
731 | */ | 594 | */ |
732 | 595 | ||
733 | static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc) | 596 | static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc) |
734 | { | 597 | { |
735 | struct ata_port *ap = qc->ap; | 598 | struct ata_port *ap = qc->ap; |
736 | struct ata_device *adev = qc->dev; | 599 | struct ata_device *adev = qc->dev; |
@@ -741,34 +604,13 @@ static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc) | |||
741 | && ap->host->private_data != NULL) | 604 | && ap->host->private_data != NULL) |
742 | opti82c46x_set_piomode(ap, adev); | 605 | opti82c46x_set_piomode(ap, adev); |
743 | 606 | ||
744 | return ata_qc_issue_prot(qc); | 607 | return ata_sff_qc_issue(qc); |
745 | } | 608 | } |
746 | 609 | ||
747 | static struct ata_port_operations opti82c46x_port_ops = { | 610 | static struct ata_port_operations opti82c46x_port_ops = { |
611 | .inherits = &legacy_base_port_ops, | ||
748 | .set_piomode = opti82c46x_set_piomode, | 612 | .set_piomode = opti82c46x_set_piomode, |
749 | 613 | .qc_issue = opti82c46x_qc_issue, | |
750 | .tf_load = ata_tf_load, | ||
751 | .tf_read = ata_tf_read, | ||
752 | .check_status = ata_check_status, | ||
753 | .exec_command = ata_exec_command, | ||
754 | .dev_select = ata_std_dev_select, | ||
755 | |||
756 | .freeze = ata_bmdma_freeze, | ||
757 | .thaw = ata_bmdma_thaw, | ||
758 | .error_handler = ata_bmdma_error_handler, | ||
759 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
760 | .cable_detect = ata_cable_40wire, | ||
761 | |||
762 | .qc_prep = ata_qc_prep, | ||
763 | .qc_issue = opti82c46x_qc_issue_prot, | ||
764 | |||
765 | .data_xfer = ata_data_xfer, | ||
766 | |||
767 | .irq_handler = ata_interrupt, | ||
768 | .irq_clear = ata_bmdma_irq_clear, | ||
769 | .irq_on = ata_irq_on, | ||
770 | |||
771 | .port_start = ata_sff_port_start, | ||
772 | }; | 614 | }; |
773 | 615 | ||
774 | static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) | 616 | static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) |
@@ -802,7 +644,7 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
802 | * @irq: interrupt line | 644 | * @irq: interrupt line |
803 | * | 645 | * |
804 | * In dual channel mode the 6580 has one clock per channel and we have | 646 | * In dual channel mode the 6580 has one clock per channel and we have |
805 | * to software clockswitch in qc_issue_prot. | 647 | * to software clockswitch in qc_issue. |
806 | */ | 648 | */ |
807 | 649 | ||
808 | static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev) | 650 | static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev) |
@@ -868,14 +710,14 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
868 | } | 710 | } |
869 | 711 | ||
870 | /** | 712 | /** |
871 | * qdi_qc_issue_prot - command issue | 713 | * qdi_qc_issue - command issue |
872 | * @qc: command pending | 714 | * @qc: command pending |
873 | * | 715 | * |
874 | * Called when the libata layer is about to issue a command. We wrap | 716 | * Called when the libata layer is about to issue a command. We wrap |
875 | * this interface so that we can load the correct ATA timings. | 717 | * this interface so that we can load the correct ATA timings. |
876 | */ | 718 | */ |
877 | 719 | ||
878 | static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) | 720 | static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc) |
879 | { | 721 | { |
880 | struct ata_port *ap = qc->ap; | 722 | struct ata_port *ap = qc->ap; |
881 | struct ata_device *adev = qc->dev; | 723 | struct ata_device *adev = qc->dev; |
@@ -888,7 +730,7 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) | |||
888 | 2 * ap->port_no); | 730 | 2 * ap->port_no); |
889 | } | 731 | } |
890 | } | 732 | } |
891 | return ata_qc_issue_prot(qc); | 733 | return ata_sff_qc_issue(qc); |
892 | } | 734 | } |
893 | 735 | ||
894 | static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, | 736 | static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, |
@@ -917,7 +759,7 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, | |||
917 | } | 759 | } |
918 | return (buflen + 3) & ~3; | 760 | return (buflen + 3) & ~3; |
919 | } else | 761 | } else |
920 | return ata_data_xfer(adev, buf, buflen, rw); | 762 | return ata_sff_data_xfer(adev, buf, buflen, rw); |
921 | } | 763 | } |
922 | 764 | ||
923 | static int qdi_port(struct platform_device *dev, | 765 | static int qdi_port(struct platform_device *dev, |
@@ -930,84 +772,22 @@ static int qdi_port(struct platform_device *dev, | |||
930 | } | 772 | } |
931 | 773 | ||
932 | static struct ata_port_operations qdi6500_port_ops = { | 774 | static struct ata_port_operations qdi6500_port_ops = { |
775 | .inherits = &legacy_base_port_ops, | ||
933 | .set_piomode = qdi6500_set_piomode, | 776 | .set_piomode = qdi6500_set_piomode, |
934 | 777 | .qc_issue = qdi_qc_issue, | |
935 | .tf_load = ata_tf_load, | 778 | .sff_data_xfer = vlb32_data_xfer, |
936 | .tf_read = ata_tf_read, | ||
937 | .check_status = ata_check_status, | ||
938 | .exec_command = ata_exec_command, | ||
939 | .dev_select = ata_std_dev_select, | ||
940 | |||
941 | .freeze = ata_bmdma_freeze, | ||
942 | .thaw = ata_bmdma_thaw, | ||
943 | .error_handler = ata_bmdma_error_handler, | ||
944 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
945 | .cable_detect = ata_cable_40wire, | ||
946 | |||
947 | .qc_prep = ata_qc_prep, | ||
948 | .qc_issue = qdi_qc_issue_prot, | ||
949 | |||
950 | .data_xfer = vlb32_data_xfer, | ||
951 | |||
952 | .irq_handler = ata_interrupt, | ||
953 | .irq_clear = ata_bmdma_irq_clear, | ||
954 | .irq_on = ata_irq_on, | ||
955 | |||
956 | .port_start = ata_sff_port_start, | ||
957 | }; | 779 | }; |
958 | 780 | ||
959 | static struct ata_port_operations qdi6580_port_ops = { | 781 | static struct ata_port_operations qdi6580_port_ops = { |
782 | .inherits = &legacy_base_port_ops, | ||
960 | .set_piomode = qdi6580_set_piomode, | 783 | .set_piomode = qdi6580_set_piomode, |
961 | 784 | .sff_data_xfer = vlb32_data_xfer, | |
962 | .tf_load = ata_tf_load, | ||
963 | .tf_read = ata_tf_read, | ||
964 | .check_status = ata_check_status, | ||
965 | .exec_command = ata_exec_command, | ||
966 | .dev_select = ata_std_dev_select, | ||
967 | |||
968 | .freeze = ata_bmdma_freeze, | ||
969 | .thaw = ata_bmdma_thaw, | ||
970 | .error_handler = ata_bmdma_error_handler, | ||
971 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
972 | .cable_detect = ata_cable_40wire, | ||
973 | |||
974 | .qc_prep = ata_qc_prep, | ||
975 | .qc_issue = ata_qc_issue_prot, | ||
976 | |||
977 | .data_xfer = vlb32_data_xfer, | ||
978 | |||
979 | .irq_handler = ata_interrupt, | ||
980 | .irq_clear = ata_bmdma_irq_clear, | ||
981 | .irq_on = ata_irq_on, | ||
982 | |||
983 | .port_start = ata_sff_port_start, | ||
984 | }; | 785 | }; |
985 | 786 | ||
986 | static struct ata_port_operations qdi6580dp_port_ops = { | 787 | static struct ata_port_operations qdi6580dp_port_ops = { |
788 | .inherits = &legacy_base_port_ops, | ||
987 | .set_piomode = qdi6580dp_set_piomode, | 789 | .set_piomode = qdi6580dp_set_piomode, |
988 | 790 | .sff_data_xfer = vlb32_data_xfer, | |
989 | .tf_load = ata_tf_load, | ||
990 | .tf_read = ata_tf_read, | ||
991 | .check_status = ata_check_status, | ||
992 | .exec_command = ata_exec_command, | ||
993 | .dev_select = ata_std_dev_select, | ||
994 | |||
995 | .freeze = ata_bmdma_freeze, | ||
996 | .thaw = ata_bmdma_thaw, | ||
997 | .error_handler = ata_bmdma_error_handler, | ||
998 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
999 | .cable_detect = ata_cable_40wire, | ||
1000 | |||
1001 | .qc_prep = ata_qc_prep, | ||
1002 | .qc_issue = qdi_qc_issue_prot, | ||
1003 | |||
1004 | .data_xfer = vlb32_data_xfer, | ||
1005 | |||
1006 | .irq_handler = ata_interrupt, | ||
1007 | .irq_clear = ata_bmdma_irq_clear, | ||
1008 | .irq_on = ata_irq_on, | ||
1009 | |||
1010 | .port_start = ata_sff_port_start, | ||
1011 | }; | 791 | }; |
1012 | 792 | ||
1013 | static DEFINE_SPINLOCK(winbond_lock); | 793 | static DEFINE_SPINLOCK(winbond_lock); |
@@ -1076,29 +856,9 @@ static int winbond_port(struct platform_device *dev, | |||
1076 | } | 856 | } |
1077 | 857 | ||
1078 | static struct ata_port_operations winbond_port_ops = { | 858 | static struct ata_port_operations winbond_port_ops = { |
859 | .inherits = &legacy_base_port_ops, | ||
1079 | .set_piomode = winbond_set_piomode, | 860 | .set_piomode = winbond_set_piomode, |
1080 | 861 | .sff_data_xfer = vlb32_data_xfer, | |
1081 | .tf_load = ata_tf_load, | ||
1082 | .tf_read = ata_tf_read, | ||
1083 | .check_status = ata_check_status, | ||
1084 | .exec_command = ata_exec_command, | ||
1085 | .dev_select = ata_std_dev_select, | ||
1086 | |||
1087 | .freeze = ata_bmdma_freeze, | ||
1088 | .thaw = ata_bmdma_thaw, | ||
1089 | .error_handler = ata_bmdma_error_handler, | ||
1090 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
1091 | .cable_detect = ata_cable_40wire, | ||
1092 | |||
1093 | .qc_prep = ata_qc_prep, | ||
1094 | .qc_issue = ata_qc_issue_prot, | ||
1095 | |||
1096 | .data_xfer = vlb32_data_xfer, | ||
1097 | |||
1098 | .irq_clear = ata_bmdma_irq_clear, | ||
1099 | .irq_on = ata_irq_on, | ||
1100 | |||
1101 | .port_start = ata_sff_port_start, | ||
1102 | }; | 862 | }; |
1103 | 863 | ||
1104 | static struct legacy_controller controllers[] = { | 864 | static struct legacy_controller controllers[] = { |
@@ -1256,13 +1016,13 @@ static __init int legacy_init_one(struct legacy_probe *probe) | |||
1256 | ap->ioaddr.cmd_addr = io_addr; | 1016 | ap->ioaddr.cmd_addr = io_addr; |
1257 | ap->ioaddr.altstatus_addr = ctrl_addr; | 1017 | ap->ioaddr.altstatus_addr = ctrl_addr; |
1258 | ap->ioaddr.ctl_addr = ctrl_addr; | 1018 | ap->ioaddr.ctl_addr = ctrl_addr; |
1259 | ata_std_ports(&ap->ioaddr); | 1019 | ata_sff_std_ports(&ap->ioaddr); |
1260 | ap->host->private_data = ld; | 1020 | ap->host->private_data = ld; |
1261 | 1021 | ||
1262 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206); | 1022 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206); |
1263 | 1023 | ||
1264 | ret = ata_host_activate(host, probe->irq, ata_interrupt, 0, | 1024 | ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0, |
1265 | &legacy_sht); | 1025 | &legacy_sht); |
1266 | if (ret) | 1026 | if (ret) |
1267 | goto fail; | 1027 | goto fail; |
1268 | ld->platform_dev = pdev; | 1028 | ld->platform_dev = pdev; |
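Note: pata_legacy shows that inheritance chains. A driver-local legacy_base_port_ops inherits from ata_sff_port_ops and carries the bits common to all the VLB chips, and each per-chip table then inherits from that base. The shape, with example_* names standing in for the real ones:

	static const struct ata_port_operations example_base_ops = {
		.inherits	= &ata_sff_port_ops,
		.cable_detect	= ata_cable_40wire,
	};

	static struct ata_port_operations example_chip_ops = {
		.inherits	= &example_base_ops,
		.set_piomode	= example_set_piomode,	/* hypothetical chip-specific hook */
		.sff_data_xfer	= example_data_xfer,	/* hypothetical, see sketch above */
	};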
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index a81f25d87235..24a011b25024 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c | |||
@@ -55,7 +55,7 @@ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) | |||
55 | (!(devices & 0x10))) /* PATA enable ? */ | 55 | (!(devices & 0x10))) /* PATA enable ? */ |
56 | return -ENOENT; | 56 | return -ENOENT; |
57 | 57 | ||
58 | return ata_std_prereset(link, deadline); | 58 | return ata_sff_prereset(link, deadline); |
59 | } | 59 | } |
60 | 60 | ||
61 | static int marvell_cable_detect(struct ata_port *ap) | 61 | static int marvell_cable_detect(struct ata_port *ap) |
@@ -75,71 +75,16 @@ static int marvell_cable_detect(struct ata_port *ap) | |||
75 | return 0; /* Our BUG macro needs the right markup */ | 75 | return 0; /* Our BUG macro needs the right markup */ |
76 | } | 76 | } |
77 | 77 | ||
78 | /** | ||
79 | * marvell_error_handler - Setup and error handler | ||
80 | * @ap: Port to handle | ||
81 | * | ||
82 | * LOCKING: | ||
83 | * None (inherited from caller). | ||
84 | */ | ||
85 | |||
86 | static void marvell_error_handler(struct ata_port *ap) | ||
87 | { | ||
88 | ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset, NULL, | ||
89 | ata_std_postreset); | ||
90 | } | ||
91 | |||
92 | /* No PIO or DMA methods needed for this device */ | 78 | /* No PIO or DMA methods needed for this device */ |
93 | 79 | ||
94 | static struct scsi_host_template marvell_sht = { | 80 | static struct scsi_host_template marvell_sht = { |
95 | .module = THIS_MODULE, | 81 | ATA_BMDMA_SHT(DRV_NAME), |
96 | .name = DRV_NAME, | ||
97 | .ioctl = ata_scsi_ioctl, | ||
98 | .queuecommand = ata_scsi_queuecmd, | ||
99 | .can_queue = ATA_DEF_QUEUE, | ||
100 | .this_id = ATA_SHT_THIS_ID, | ||
101 | .sg_tablesize = LIBATA_MAX_PRD, | ||
102 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
103 | .emulated = ATA_SHT_EMULATED, | ||
104 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
105 | .proc_name = DRV_NAME, | ||
106 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
107 | .slave_configure = ata_scsi_slave_config, | ||
108 | .slave_destroy = ata_scsi_slave_destroy, | ||
109 | /* Use standard CHS mapping rules */ | ||
110 | .bios_param = ata_std_bios_param, | ||
111 | }; | 82 | }; |
112 | 83 | ||
113 | static const struct ata_port_operations marvell_ops = { | 84 | static struct ata_port_operations marvell_ops = { |
114 | /* Task file is PCI ATA format, use helpers */ | 85 | .inherits = &ata_bmdma_port_ops, |
115 | .tf_load = ata_tf_load, | ||
116 | .tf_read = ata_tf_read, | ||
117 | .check_status = ata_check_status, | ||
118 | .exec_command = ata_exec_command, | ||
119 | .dev_select = ata_std_dev_select, | ||
120 | |||
121 | .freeze = ata_bmdma_freeze, | ||
122 | .thaw = ata_bmdma_thaw, | ||
123 | .error_handler = marvell_error_handler, | ||
124 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
125 | .cable_detect = marvell_cable_detect, | 86 | .cable_detect = marvell_cable_detect, |
126 | 87 | .prereset = marvell_pre_reset, | |
127 | /* BMDMA handling is PCI ATA format, use helpers */ | ||
128 | .bmdma_setup = ata_bmdma_setup, | ||
129 | .bmdma_start = ata_bmdma_start, | ||
130 | .bmdma_stop = ata_bmdma_stop, | ||
131 | .bmdma_status = ata_bmdma_status, | ||
132 | .qc_prep = ata_qc_prep, | ||
133 | .qc_issue = ata_qc_issue_prot, | ||
134 | .data_xfer = ata_data_xfer, | ||
135 | |||
136 | /* Timeout handling */ | ||
137 | .irq_handler = ata_interrupt, | ||
138 | .irq_clear = ata_bmdma_irq_clear, | ||
139 | .irq_on = ata_irq_on, | ||
140 | |||
141 | /* Generic PATA PCI ATA helpers */ | ||
142 | .port_start = ata_sff_port_start, | ||
143 | }; | 88 | }; |
144 | 89 | ||
145 | 90 | ||
@@ -160,7 +105,6 @@ static const struct ata_port_operations marvell_ops = { | |||
160 | static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | 105 | static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id) |
161 | { | 106 | { |
162 | static const struct ata_port_info info = { | 107 | static const struct ata_port_info info = { |
163 | .sht = &marvell_sht, | ||
164 | .flags = ATA_FLAG_SLAVE_POSS, | 108 | .flags = ATA_FLAG_SLAVE_POSS, |
165 | 109 | ||
166 | .pio_mask = 0x1f, | 110 | .pio_mask = 0x1f, |
@@ -170,7 +114,6 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
170 | .port_ops = &marvell_ops, | 114 | .port_ops = &marvell_ops, |
171 | }; | 115 | }; |
172 | static const struct ata_port_info info_sata = { | 116 | static const struct ata_port_info info_sata = { |
173 | .sht = &marvell_sht, | ||
174 | /* Slave possible as its magically mapped not real */ | 117 | /* Slave possible as its magically mapped not real */ |
175 | .flags = ATA_FLAG_SLAVE_POSS, | 118 | .flags = ATA_FLAG_SLAVE_POSS, |
176 | 119 | ||
@@ -185,7 +128,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
185 | if (pdev->device == 0x6101) | 128 | if (pdev->device == 0x6101) |
186 | ppi[1] = &ata_dummy_port_info; | 129 | ppi[1] = &ata_dummy_port_info; |
187 | 130 | ||
188 | return ata_pci_init_one(pdev, ppi); | 131 | return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL); |
189 | } | 132 | } |
190 | 133 | ||
191 | static const struct pci_device_id marvell_pci_tbl[] = { | 134 | static const struct pci_device_id marvell_pci_tbl[] = { |
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 5413ebfa72e5..bc79df6e7cb0 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c | |||
@@ -252,53 +252,19 @@ mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device) | |||
252 | if (device != priv->csel) | 252 | if (device != priv->csel) |
253 | mpc52xx_ata_apply_timings(priv, device); | 253 | mpc52xx_ata_apply_timings(priv, device); |
254 | 254 | ||
255 | ata_std_dev_select(ap,device); | 255 | ata_sff_dev_select(ap,device); |
256 | } | 256 | } |
257 | 257 | ||
258 | static void | ||
259 | mpc52xx_ata_error_handler(struct ata_port *ap) | ||
260 | { | ||
261 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL, | ||
262 | ata_std_postreset); | ||
263 | } | ||
264 | |||
265 | |||
266 | |||
267 | static struct scsi_host_template mpc52xx_ata_sht = { | 258 | static struct scsi_host_template mpc52xx_ata_sht = { |
268 | .module = THIS_MODULE, | 259 | ATA_PIO_SHT(DRV_NAME), |
269 | .name = DRV_NAME, | ||
270 | .ioctl = ata_scsi_ioctl, | ||
271 | .queuecommand = ata_scsi_queuecmd, | ||
272 | .can_queue = ATA_DEF_QUEUE, | ||
273 | .this_id = ATA_SHT_THIS_ID, | ||
274 | .sg_tablesize = LIBATA_MAX_PRD, | ||
275 | .max_sectors = ATA_MAX_SECTORS, | ||
276 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
277 | .emulated = ATA_SHT_EMULATED, | ||
278 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
279 | .proc_name = DRV_NAME, | ||
280 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
281 | .slave_configure = ata_scsi_slave_config, | ||
282 | .bios_param = ata_std_bios_param, | ||
283 | }; | 260 | }; |
284 | 261 | ||
285 | static struct ata_port_operations mpc52xx_ata_port_ops = { | 262 | static struct ata_port_operations mpc52xx_ata_port_ops = { |
286 | .set_piomode = mpc52xx_ata_set_piomode, | 263 | .inherits = &ata_sff_port_ops, |
287 | .dev_select = mpc52xx_ata_dev_select, | 264 | .sff_dev_select = mpc52xx_ata_dev_select, |
288 | .tf_load = ata_tf_load, | ||
289 | .tf_read = ata_tf_read, | ||
290 | .check_status = ata_check_status, | ||
291 | .exec_command = ata_exec_command, | ||
292 | .freeze = ata_bmdma_freeze, | ||
293 | .thaw = ata_bmdma_thaw, | ||
294 | .error_handler = mpc52xx_ata_error_handler, | ||
295 | .cable_detect = ata_cable_40wire, | 265 | .cable_detect = ata_cable_40wire, |
296 | .qc_prep = ata_qc_prep, | 266 | .set_piomode = mpc52xx_ata_set_piomode, |
297 | .qc_issue = ata_qc_issue_prot, | 267 | .post_internal_cmd = ATA_OP_NULL, |
298 | .data_xfer = ata_data_xfer, | ||
299 | .irq_clear = ata_bmdma_irq_clear, | ||
300 | .irq_on = ata_irq_on, | ||
301 | .port_start = ata_port_start, | ||
302 | }; | 268 | }; |
303 | 269 | ||
304 | static int __devinit | 270 | static int __devinit |
@@ -339,7 +305,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, | |||
339 | ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); | 305 | ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); |
340 | 306 | ||
341 | /* activate host */ | 307 | /* activate host */ |
342 | return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0, | 308 | return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, |
343 | &mpc52xx_ata_sht); | 309 | &mpc52xx_ata_sht); |
344 | } | 310 | } |
345 | 311 | ||
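Note: the mpc52xx hunk adds one more idiom, ATA_OP_NULL, which pins an inherited slot to "deliberately empty" so that inheritance does not fill it in. Sketch (example_* illustrative):

	static struct ata_port_operations example_port_ops = {
		.inherits		= &ata_sff_port_ops,
		.sff_dev_select		= example_dev_select,	/* hypothetical override */
		/* keep this hook empty instead of taking the inherited default */
		.post_internal_cmd	= ATA_OP_NULL,
	};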
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index c0d9e0cf208c..7d7e3fdab71f 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c | |||
@@ -55,21 +55,7 @@ static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline) | |||
55 | if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) | 55 | if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) |
56 | return -ENOENT; | 56 | return -ENOENT; |
57 | 57 | ||
58 | return ata_std_prereset(link, deadline); | 58 | return ata_sff_prereset(link, deadline); |
59 | } | ||
60 | |||
61 | /** | ||
62 | * mpiix_error_handler - probe reset | ||
63 | * @ap: ATA port | ||
64 | * | ||
65 | * Perform the ATA probe and bus reset sequence plus specific handling | ||
66 | * for this hardware. The MPIIX has the enable bits in a different place | ||
67 | * to PIIX4 and friends. As a pure PIO device it has no cable detect | ||
68 | */ | ||
69 | |||
70 | static void mpiix_error_handler(struct ata_port *ap) | ||
71 | { | ||
72 | ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
73 | } | 59 | } |
74 | 60 | ||
75 | /** | 61 | /** |
@@ -83,8 +69,8 @@ static void mpiix_error_handler(struct ata_port *ap) | |||
83 | * | 69 | * |
84 | * This would get very ugly because we can only program timing for one | 70 | * This would get very ugly because we can only program timing for one |
85 | * device at a time, the other gets PIO0. Fortunately libata calls | 71 | * device at a time, the other gets PIO0. Fortunately libata calls |
86 | * our qc_issue_prot command before a command is issued so we can | 72 | * our qc_issue command before a command is issued so we can flip the |
87 | * flip the timings back and forth to reduce the pain. | 73 | * timings back and forth to reduce the pain. |
88 | */ | 74 | */ |
89 | 75 | ||
90 | static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) | 76 | static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) |
@@ -124,7 +110,7 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
124 | } | 110 | } |
125 | 111 | ||
126 | /** | 112 | /** |
127 | * mpiix_qc_issue_prot - command issue | 113 | * mpiix_qc_issue - command issue |
128 | * @qc: command pending | 114 | * @qc: command pending |
129 | * | 115 | * |
130 | * Called when the libata layer is about to issue a command. We wrap | 116 | * Called when the libata layer is about to issue a command. We wrap |
@@ -134,7 +120,7 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
134 | * be made PIO0. | 120 | * be made PIO0. |
135 | */ | 121 | */ |
136 | 122 | ||
137 | static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc) | 123 | static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc) |
138 | { | 124 | { |
139 | struct ata_port *ap = qc->ap; | 125 | struct ata_port *ap = qc->ap; |
140 | struct ata_device *adev = qc->dev; | 126 | struct ata_device *adev = qc->dev; |
@@ -147,50 +133,19 @@ static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc) | |||
147 | if (adev->pio_mode && adev != ap->private_data) | 133 | if (adev->pio_mode && adev != ap->private_data) |
148 | mpiix_set_piomode(ap, adev); | 134 | mpiix_set_piomode(ap, adev); |
149 | 135 | ||
150 | return ata_qc_issue_prot(qc); | 136 | return ata_sff_qc_issue(qc); |
151 | } | 137 | } |
152 | 138 | ||
153 | static struct scsi_host_template mpiix_sht = { | 139 | static struct scsi_host_template mpiix_sht = { |
154 | .module = THIS_MODULE, | 140 | ATA_PIO_SHT(DRV_NAME), |
155 | .name = DRV_NAME, | ||
156 | .ioctl = ata_scsi_ioctl, | ||
157 | .queuecommand = ata_scsi_queuecmd, | ||
158 | .can_queue = ATA_DEF_QUEUE, | ||
159 | .this_id = ATA_SHT_THIS_ID, | ||
160 | .sg_tablesize = LIBATA_MAX_PRD, | ||
161 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
162 | .emulated = ATA_SHT_EMULATED, | ||
163 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
164 | .proc_name = DRV_NAME, | ||
165 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
166 | .slave_configure = ata_scsi_slave_config, | ||
167 | .slave_destroy = ata_scsi_slave_destroy, | ||
168 | .bios_param = ata_std_bios_param, | ||
169 | }; | 141 | }; |
170 | 142 | ||
171 | static struct ata_port_operations mpiix_port_ops = { | 143 | static struct ata_port_operations mpiix_port_ops = { |
172 | .set_piomode = mpiix_set_piomode, | 144 | .inherits = &ata_sff_port_ops, |
173 | 145 | .qc_issue = mpiix_qc_issue, | |
174 | .tf_load = ata_tf_load, | ||
175 | .tf_read = ata_tf_read, | ||
176 | .check_status = ata_check_status, | ||
177 | .exec_command = ata_exec_command, | ||
178 | .dev_select = ata_std_dev_select, | ||
179 | |||
180 | .freeze = ata_bmdma_freeze, | ||
181 | .thaw = ata_bmdma_thaw, | ||
182 | .error_handler = mpiix_error_handler, | ||
183 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
184 | .cable_detect = ata_cable_40wire, | 146 | .cable_detect = ata_cable_40wire, |
185 | 147 | .set_piomode = mpiix_set_piomode, | |
186 | .qc_prep = ata_qc_prep, | 148 | .prereset = mpiix_pre_reset, |
187 | .qc_issue = mpiix_qc_issue_prot, | ||
188 | .data_xfer = ata_data_xfer, | ||
189 | |||
190 | .irq_clear = ata_bmdma_irq_clear, | ||
191 | .irq_on = ata_irq_on, | ||
192 | |||
193 | .port_start = ata_sff_port_start, | ||
194 | }; | 149 | }; |
195 | 150 | ||
196 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 151 | static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
@@ -252,10 +207,10 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
252 | ap->ioaddr.altstatus_addr = ctl_addr; | 207 | ap->ioaddr.altstatus_addr = ctl_addr; |
253 | 208 | ||
254 | /* Let libata fill in the port details */ | 209 | /* Let libata fill in the port details */ |
255 | ata_std_ports(&ap->ioaddr); | 210 | ata_sff_std_ports(&ap->ioaddr); |
256 | 211 | ||
257 | /* activate host */ | 212 | /* activate host */ |
258 | return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED, | 213 | return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED, |
259 | &mpiix_sht); | 214 | &mpiix_sht); |
260 | } | 215 | } |
261 | 216 | ||
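mpiix (like ns87410 and oldpiix further down) keeps one piece of custom logic: the chip can only hold timings for one device, so the driver wraps command issue, reloads the PIO timings whenever the other device takes over, and then falls through to the generic path. The shared idiom, sketched with illustrative foo_* names:

static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* timings are per-channel, not per-device: reload them if the
	   device we last programmed is not the one issuing this command */
	if (adev->pio_mode && adev != ap->private_data)
		foo_set_piomode(ap, adev);

	return ata_sff_qc_issue(qc);	/* hand off to the generic SFF issue path */
}

Only the name changes in this patch (the _prot suffix goes away along with ata_qc_issue_prot); the hook is wired up as .qc_issue in the inherits-based ops table.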
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 25c922abd554..d9719c8b9dbe 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c | |||
@@ -21,54 +21,12 @@ | |||
21 | /* No PIO or DMA methods needed for this device */ | 21 | /* No PIO or DMA methods needed for this device */ |
22 | 22 | ||
23 | static struct scsi_host_template netcell_sht = { | 23 | static struct scsi_host_template netcell_sht = { |
24 | .module = THIS_MODULE, | 24 | ATA_BMDMA_SHT(DRV_NAME), |
25 | .name = DRV_NAME, | ||
26 | .ioctl = ata_scsi_ioctl, | ||
27 | .queuecommand = ata_scsi_queuecmd, | ||
28 | .can_queue = ATA_DEF_QUEUE, | ||
29 | .this_id = ATA_SHT_THIS_ID, | ||
30 | .sg_tablesize = LIBATA_MAX_PRD, | ||
31 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
32 | .emulated = ATA_SHT_EMULATED, | ||
33 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
34 | .proc_name = DRV_NAME, | ||
35 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
36 | .slave_configure = ata_scsi_slave_config, | ||
37 | .slave_destroy = ata_scsi_slave_destroy, | ||
38 | /* Use standard CHS mapping rules */ | ||
39 | .bios_param = ata_std_bios_param, | ||
40 | }; | 25 | }; |
41 | 26 | ||
42 | static const struct ata_port_operations netcell_ops = { | 27 | static struct ata_port_operations netcell_ops = { |
43 | /* Task file is PCI ATA format, use helpers */ | 28 | .inherits = &ata_bmdma_port_ops, |
44 | .tf_load = ata_tf_load, | ||
45 | .tf_read = ata_tf_read, | ||
46 | .check_status = ata_check_status, | ||
47 | .exec_command = ata_exec_command, | ||
48 | .dev_select = ata_std_dev_select, | ||
49 | |||
50 | .freeze = ata_bmdma_freeze, | ||
51 | .thaw = ata_bmdma_thaw, | ||
52 | .error_handler = ata_bmdma_error_handler, | ||
53 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
54 | .cable_detect = ata_cable_80wire, | 29 | .cable_detect = ata_cable_80wire, |
55 | |||
56 | /* BMDMA handling is PCI ATA format, use helpers */ | ||
57 | .bmdma_setup = ata_bmdma_setup, | ||
58 | .bmdma_start = ata_bmdma_start, | ||
59 | .bmdma_stop = ata_bmdma_stop, | ||
60 | .bmdma_status = ata_bmdma_status, | ||
61 | .qc_prep = ata_qc_prep, | ||
62 | .qc_issue = ata_qc_issue_prot, | ||
63 | .data_xfer = ata_data_xfer, | ||
64 | |||
65 | /* IRQ-related hooks */ | ||
66 | .irq_handler = ata_interrupt, | ||
67 | .irq_clear = ata_bmdma_irq_clear, | ||
68 | .irq_on = ata_irq_on, | ||
69 | |||
70 | /* Generic PATA PCI ATA helpers */ | ||
71 | .port_start = ata_sff_port_start, | ||
72 | }; | 30 | }; |
73 | 31 | ||
74 | 32 | ||
@@ -90,7 +48,6 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
90 | { | 48 | { |
91 | static int printed_version; | 49 | static int printed_version; |
92 | static const struct ata_port_info info = { | 50 | static const struct ata_port_info info = { |
93 | .sht = &netcell_sht, | ||
94 | .flags = ATA_FLAG_SLAVE_POSS, | 51 | .flags = ATA_FLAG_SLAVE_POSS, |
95 | /* Actually we don't really care about these as the | 52 | /* Actually we don't really care about these as the |
96 | firmware deals with it */ | 53 | firmware deals with it */ |
@@ -100,16 +57,21 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
100 | .port_ops = &netcell_ops, | 57 | .port_ops = &netcell_ops, |
101 | }; | 58 | }; |
102 | const struct ata_port_info *port_info[] = { &info, NULL }; | 59 | const struct ata_port_info *port_info[] = { &info, NULL }; |
60 | int rc; | ||
103 | 61 | ||
104 | if (!printed_version++) | 62 | if (!printed_version++) |
105 | dev_printk(KERN_DEBUG, &pdev->dev, | 63 | dev_printk(KERN_DEBUG, &pdev->dev, |
106 | "version " DRV_VERSION "\n"); | 64 | "version " DRV_VERSION "\n"); |
107 | 65 | ||
66 | rc = pcim_enable_device(pdev); | ||
67 | if (rc) | ||
68 | return rc; | ||
69 | |||
108 | /* Any chip specific setup/optimisation/messages here */ | 70 | /* Any chip specific setup/optimisation/messages here */ |
109 | ata_pci_clear_simplex(pdev); | 71 | ata_pci_bmdma_clear_simplex(pdev); |
110 | 72 | ||
111 | /* And let the library code do the work */ | 73 | /* And let the library code do the work */ |
112 | return ata_pci_init_one(pdev, port_info); | 74 | return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL); |
113 | } | 75 | } |
114 | 76 | ||
115 | static const struct pci_device_id netcell_pci_tbl[] = { | 77 | static const struct pci_device_id netcell_pci_tbl[] = { |
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index 15dd649f89ee..565e67cd13fa 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c | |||
@@ -73,60 +73,20 @@ static void ninja32_dev_select(struct ata_port *ap, unsigned int device) | |||
73 | struct ata_device *adev = &ap->link.device[device]; | 73 | struct ata_device *adev = &ap->link.device[device]; |
74 | if (ap->private_data != adev) { | 74 | if (ap->private_data != adev) { |
75 | iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); | 75 | iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); |
76 | ata_std_dev_select(ap, device); | 76 | ata_sff_dev_select(ap, device); |
77 | ninja32_set_piomode(ap, adev); | 77 | ninja32_set_piomode(ap, adev); |
78 | } | 78 | } |
79 | } | 79 | } |
80 | 80 | ||
81 | static struct scsi_host_template ninja32_sht = { | 81 | static struct scsi_host_template ninja32_sht = { |
82 | .module = THIS_MODULE, | 82 | ATA_BMDMA_SHT(DRV_NAME), |
83 | .name = DRV_NAME, | ||
84 | .ioctl = ata_scsi_ioctl, | ||
85 | .queuecommand = ata_scsi_queuecmd, | ||
86 | .can_queue = ATA_DEF_QUEUE, | ||
87 | .this_id = ATA_SHT_THIS_ID, | ||
88 | .sg_tablesize = LIBATA_MAX_PRD, | ||
89 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
90 | .emulated = ATA_SHT_EMULATED, | ||
91 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
92 | .proc_name = DRV_NAME, | ||
93 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
94 | .slave_configure = ata_scsi_slave_config, | ||
95 | .slave_destroy = ata_scsi_slave_destroy, | ||
96 | .bios_param = ata_std_bios_param, | ||
97 | }; | 83 | }; |
98 | 84 | ||
99 | static struct ata_port_operations ninja32_port_ops = { | 85 | static struct ata_port_operations ninja32_port_ops = { |
100 | .set_piomode = ninja32_set_piomode, | 86 | .inherits = &ata_bmdma_port_ops, |
101 | .mode_filter = ata_pci_default_filter, | 87 | .sff_dev_select = ninja32_dev_select, |
102 | |||
103 | .tf_load = ata_tf_load, | ||
104 | .tf_read = ata_tf_read, | ||
105 | .check_status = ata_check_status, | ||
106 | .exec_command = ata_exec_command, | ||
107 | .dev_select = ninja32_dev_select, | ||
108 | |||
109 | .freeze = ata_bmdma_freeze, | ||
110 | .thaw = ata_bmdma_thaw, | ||
111 | .error_handler = ata_bmdma_error_handler, | ||
112 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
113 | .cable_detect = ata_cable_40wire, | 88 | .cable_detect = ata_cable_40wire, |
114 | 89 | .set_piomode = ninja32_set_piomode, | |
115 | .bmdma_setup = ata_bmdma_setup, | ||
116 | .bmdma_start = ata_bmdma_start, | ||
117 | .bmdma_stop = ata_bmdma_stop, | ||
118 | .bmdma_status = ata_bmdma_status, | ||
119 | |||
120 | .qc_prep = ata_qc_prep, | ||
121 | .qc_issue = ata_qc_issue_prot, | ||
122 | |||
123 | .data_xfer = ata_data_xfer, | ||
124 | |||
125 | .irq_handler = ata_interrupt, | ||
126 | .irq_clear = ata_bmdma_irq_clear, | ||
127 | .irq_on = ata_irq_on, | ||
128 | |||
129 | .port_start = ata_sff_port_start, | ||
130 | }; | 90 | }; |
131 | 91 | ||
132 | static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 92 | static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
@@ -172,7 +132,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
172 | ap->ioaddr.ctl_addr = base + 0x1E; | 132 | ap->ioaddr.ctl_addr = base + 0x1E; |
173 | ap->ioaddr.altstatus_addr = base + 0x1E; | 133 | ap->ioaddr.altstatus_addr = base + 0x1E; |
174 | ap->ioaddr.bmdma_addr = base; | 134 | ap->ioaddr.bmdma_addr = base; |
175 | ata_std_ports(&ap->ioaddr); | 135 | ata_sff_std_ports(&ap->ioaddr); |
176 | 136 | ||
177 | iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ | 137 | iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ |
178 | iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ | 138 | iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ |
@@ -182,7 +142,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
182 | iowrite8(0xa4, base + 0x1c); /* Unknown */ | 142 | iowrite8(0xa4, base + 0x1c); /* Unknown */ |
183 | iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ | 143 | iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ |
184 | /* FIXME: Should we disable them at remove ? */ | 144 | /* FIXME: Should we disable them at remove ? */ |
185 | return ata_host_activate(host, dev->irq, ata_interrupt, | 145 | return ata_host_activate(host, dev->irq, ata_sff_interrupt, |
186 | IRQF_SHARED, &ninja32_sht); | 146 | IRQF_SHARED, &ninja32_sht); |
187 | } | 147 | } |
188 | 148 | ||
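The same collapse happens on the SCSI side in every file of this patch: the hand-rolled scsi_host_template is replaced by a single initializer macro, ATA_BMDMA_SHT() for BMDMA-capable drivers such as ninja32 and netcell, or ATA_PIO_SHT() for PIO-only ones such as mpiix. Sketch:

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT(DRV_NAME),	/* ATA_PIO_SHT(DRV_NAME) for PIO-only hardware */
};

DRV_NAME is the per-driver name string these files already define; the macro supplies the standard fields (queuecommand, ioctl, slave_configure, DMA boundary and so on) that used to be spelled out by hand.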
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 9fe66fd75017..76d2455bc453 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c | |||
@@ -50,21 +50,7 @@ static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline) | |||
50 | if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) | 50 | if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) |
51 | return -ENOENT; | 51 | return -ENOENT; |
52 | 52 | ||
53 | return ata_std_prereset(link, deadline); | 53 | return ata_sff_prereset(link, deadline); |
54 | } | ||
55 | |||
56 | /** | ||
57 | * ns87410_error_handler - probe reset | ||
58 | * @ap: ATA port | ||
59 | * | ||
60 | * Perform the ATA probe and bus reset sequence plus specific handling | ||
61 | * for this hardware. The MPIIX has the enable bits in a different place | ||
62 | * to PIIX4 and friends. As a pure PIO device it has no cable detect | ||
63 | */ | ||
64 | |||
65 | static void ns87410_error_handler(struct ata_port *ap) | ||
66 | { | ||
67 | ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
68 | } | 54 | } |
69 | 55 | ||
70 | /** | 56 | /** |
@@ -119,7 +105,7 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
119 | } | 105 | } |
120 | 106 | ||
121 | /** | 107 | /** |
122 | * ns87410_qc_issue_prot - command issue | 108 | * ns87410_qc_issue - command issue |
123 | * @qc: command pending | 109 | * @qc: command pending |
124 | * | 110 | * |
125 | * Called when the libata layer is about to issue a command. We wrap | 111 | * Called when the libata layer is about to issue a command. We wrap |
@@ -127,7 +113,7 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
127 | * necessary. | 113 | * necessary. |
128 | */ | 114 | */ |
129 | 115 | ||
130 | static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc) | 116 | static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc) |
131 | { | 117 | { |
132 | struct ata_port *ap = qc->ap; | 118 | struct ata_port *ap = qc->ap; |
133 | struct ata_device *adev = qc->dev; | 119 | struct ata_device *adev = qc->dev; |
@@ -140,64 +126,30 @@ static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc) | |||
140 | if (adev->pio_mode && adev != ap->private_data) | 126 | if (adev->pio_mode && adev != ap->private_data) |
141 | ns87410_set_piomode(ap, adev); | 127 | ns87410_set_piomode(ap, adev); |
142 | 128 | ||
143 | return ata_qc_issue_prot(qc); | 129 | return ata_sff_qc_issue(qc); |
144 | } | 130 | } |
145 | 131 | ||
146 | static struct scsi_host_template ns87410_sht = { | 132 | static struct scsi_host_template ns87410_sht = { |
147 | .module = THIS_MODULE, | 133 | ATA_PIO_SHT(DRV_NAME), |
148 | .name = DRV_NAME, | ||
149 | .ioctl = ata_scsi_ioctl, | ||
150 | .queuecommand = ata_scsi_queuecmd, | ||
151 | .can_queue = ATA_DEF_QUEUE, | ||
152 | .this_id = ATA_SHT_THIS_ID, | ||
153 | .sg_tablesize = LIBATA_MAX_PRD, | ||
154 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
155 | .emulated = ATA_SHT_EMULATED, | ||
156 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
157 | .proc_name = DRV_NAME, | ||
158 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
159 | .slave_configure = ata_scsi_slave_config, | ||
160 | .slave_destroy = ata_scsi_slave_destroy, | ||
161 | .bios_param = ata_std_bios_param, | ||
162 | }; | 134 | }; |
163 | 135 | ||
164 | static struct ata_port_operations ns87410_port_ops = { | 136 | static struct ata_port_operations ns87410_port_ops = { |
165 | .set_piomode = ns87410_set_piomode, | 137 | .inherits = &ata_sff_port_ops, |
166 | 138 | .qc_issue = ns87410_qc_issue, | |
167 | .tf_load = ata_tf_load, | ||
168 | .tf_read = ata_tf_read, | ||
169 | .check_status = ata_check_status, | ||
170 | .exec_command = ata_exec_command, | ||
171 | .dev_select = ata_std_dev_select, | ||
172 | |||
173 | .freeze = ata_bmdma_freeze, | ||
174 | .thaw = ata_bmdma_thaw, | ||
175 | .error_handler = ns87410_error_handler, | ||
176 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
177 | .cable_detect = ata_cable_40wire, | 139 | .cable_detect = ata_cable_40wire, |
178 | 140 | .set_piomode = ns87410_set_piomode, | |
179 | .qc_prep = ata_qc_prep, | 141 | .prereset = ns87410_pre_reset, |
180 | .qc_issue = ns87410_qc_issue_prot, | ||
181 | |||
182 | .data_xfer = ata_data_xfer, | ||
183 | |||
184 | .irq_handler = ata_interrupt, | ||
185 | .irq_clear = ata_bmdma_irq_clear, | ||
186 | .irq_on = ata_irq_on, | ||
187 | |||
188 | .port_start = ata_sff_port_start, | ||
189 | }; | 142 | }; |
190 | 143 | ||
191 | static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 144 | static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
192 | { | 145 | { |
193 | static const struct ata_port_info info = { | 146 | static const struct ata_port_info info = { |
194 | .sht = &ns87410_sht, | ||
195 | .flags = ATA_FLAG_SLAVE_POSS, | 147 | .flags = ATA_FLAG_SLAVE_POSS, |
196 | .pio_mask = 0x0F, | 148 | .pio_mask = 0x0F, |
197 | .port_ops = &ns87410_port_ops | 149 | .port_ops = &ns87410_port_ops |
198 | }; | 150 | }; |
199 | const struct ata_port_info *ppi[] = { &info, NULL }; | 151 | const struct ata_port_info *ppi[] = { &info, NULL }; |
200 | return ata_pci_init_one(dev, ppi); | 152 | return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL); |
201 | } | 153 | } |
202 | 154 | ||
203 | static const struct pci_device_id ns87410[] = { | 155 | static const struct pci_device_id ns87410[] = { |
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index d0e2e50823b1..ae92b0049bd5 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c | |||
@@ -138,7 +138,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc) | |||
138 | dmactl |= ATA_DMA_WR; | 138 | dmactl |= ATA_DMA_WR; |
139 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 139 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
140 | /* issue r/w command */ | 140 | /* issue r/w command */ |
141 | ap->ops->exec_command(ap, &qc->tf); | 141 | ap->ops->sff_exec_command(ap, &qc->tf); |
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
@@ -172,14 +172,14 @@ static void ns87415_bmdma_stop(struct ata_queued_cmd *qc) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | /** | 174 | /** |
175 | * ns87415_bmdma_irq_clear - Clear interrupt | 175 | * ns87415_irq_clear - Clear interrupt |
176 | * @ap: Channel to clear | 176 | * @ap: Channel to clear |
177 | * | 177 | * |
178 | * Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the | 178 | * Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the |
179 | * error bits) are reset by writing to register 00 or 08. | 179 | * error bits) are reset by writing to register 00 or 08. |
180 | */ | 180 | */ |
181 | 181 | ||
182 | static void ns87415_bmdma_irq_clear(struct ata_port *ap) | 182 | static void ns87415_irq_clear(struct ata_port *ap) |
183 | { | 183 | { |
184 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 184 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
185 | 185 | ||
@@ -297,90 +297,32 @@ static u8 ns87560_bmdma_status(struct ata_port *ap) | |||
297 | { | 297 | { |
298 | return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 298 | return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
299 | } | 299 | } |
300 | |||
301 | static const struct ata_port_operations ns87560_pata_ops = { | ||
302 | .set_piomode = ns87415_set_piomode, | ||
303 | .mode_filter = ata_pci_default_filter, | ||
304 | |||
305 | .tf_load = ata_tf_load, | ||
306 | .tf_read = ns87560_tf_read, | ||
307 | .check_status = ns87560_check_status, | ||
308 | .check_atapi_dma = ns87415_check_atapi_dma, | ||
309 | .exec_command = ata_exec_command, | ||
310 | .dev_select = ata_std_dev_select, | ||
311 | |||
312 | .freeze = ata_bmdma_freeze, | ||
313 | .thaw = ata_bmdma_thaw, | ||
314 | .error_handler = ata_bmdma_error_handler, | ||
315 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
316 | .cable_detect = ata_cable_40wire, | ||
317 | |||
318 | .bmdma_setup = ns87415_bmdma_setup, | ||
319 | .bmdma_start = ns87415_bmdma_start, | ||
320 | .bmdma_stop = ns87415_bmdma_stop, | ||
321 | .bmdma_status = ns87560_bmdma_status, | ||
322 | .qc_prep = ata_qc_prep, | ||
323 | .qc_issue = ata_qc_issue_prot, | ||
324 | .data_xfer = ata_data_xfer, | ||
325 | |||
326 | .irq_handler = ata_interrupt, | ||
327 | .irq_clear = ns87415_bmdma_irq_clear, | ||
328 | .irq_on = ata_irq_on, | ||
329 | |||
330 | .port_start = ata_sff_port_start, | ||
331 | }; | ||
332 | |||
333 | #endif /* 87560 SuperIO Support */ | 300 | #endif /* 87560 SuperIO Support */ |
334 | 301 | ||
302 | static struct ata_port_operations ns87415_pata_ops = { | ||
303 | .inherits = &ata_bmdma_port_ops, | ||
335 | 304 | ||
336 | static const struct ata_port_operations ns87415_pata_ops = { | ||
337 | .set_piomode = ns87415_set_piomode, | ||
338 | .mode_filter = ata_pci_default_filter, | ||
339 | |||
340 | .tf_load = ata_tf_load, | ||
341 | .tf_read = ata_tf_read, | ||
342 | .check_status = ata_check_status, | ||
343 | .check_atapi_dma = ns87415_check_atapi_dma, | 305 | .check_atapi_dma = ns87415_check_atapi_dma, |
344 | .exec_command = ata_exec_command, | ||
345 | .dev_select = ata_std_dev_select, | ||
346 | |||
347 | .freeze = ata_bmdma_freeze, | ||
348 | .thaw = ata_bmdma_thaw, | ||
349 | .error_handler = ata_bmdma_error_handler, | ||
350 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
351 | .cable_detect = ata_cable_40wire, | ||
352 | |||
353 | .bmdma_setup = ns87415_bmdma_setup, | 306 | .bmdma_setup = ns87415_bmdma_setup, |
354 | .bmdma_start = ns87415_bmdma_start, | 307 | .bmdma_start = ns87415_bmdma_start, |
355 | .bmdma_stop = ns87415_bmdma_stop, | 308 | .bmdma_stop = ns87415_bmdma_stop, |
356 | .bmdma_status = ata_bmdma_status, | 309 | .sff_irq_clear = ns87415_irq_clear, |
357 | .qc_prep = ata_qc_prep, | ||
358 | .qc_issue = ata_qc_issue_prot, | ||
359 | .data_xfer = ata_data_xfer, | ||
360 | 310 | ||
361 | .irq_handler = ata_interrupt, | 311 | .cable_detect = ata_cable_40wire, |
362 | .irq_clear = ns87415_bmdma_irq_clear, | 312 | .set_piomode = ns87415_set_piomode, |
363 | .irq_on = ata_irq_on, | 313 | }; |
364 | 314 | ||
365 | .port_start = ata_sff_port_start, | 315 | #if defined(CONFIG_SUPERIO) |
316 | static struct ata_port_operations ns87560_pata_ops = { | ||
317 | .inherits = &ns87415_pata_ops, | ||
318 | .sff_tf_read = ns87560_tf_read, | ||
319 | .sff_check_status = ns87560_check_status, | ||
320 | .bmdma_status = ns87560_bmdma_status, | ||
366 | }; | 321 | }; |
322 | #endif | ||
367 | 323 | ||
368 | static struct scsi_host_template ns87415_sht = { | 324 | static struct scsi_host_template ns87415_sht = { |
369 | .module = THIS_MODULE, | 325 | ATA_BMDMA_SHT(DRV_NAME), |
370 | .name = DRV_NAME, | ||
371 | .ioctl = ata_scsi_ioctl, | ||
372 | .queuecommand = ata_scsi_queuecmd, | ||
373 | .can_queue = ATA_DEF_QUEUE, | ||
374 | .this_id = ATA_SHT_THIS_ID, | ||
375 | .sg_tablesize = LIBATA_MAX_PRD, | ||
376 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
377 | .emulated = ATA_SHT_EMULATED, | ||
378 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
379 | .proc_name = DRV_NAME, | ||
380 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
381 | .slave_configure = ata_scsi_slave_config, | ||
382 | .slave_destroy = ata_scsi_slave_destroy, | ||
383 | .bios_param = ata_std_bios_param, | ||
384 | }; | 326 | }; |
385 | 327 | ||
386 | 328 | ||
@@ -403,16 +345,15 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
403 | { | 345 | { |
404 | static int printed_version; | 346 | static int printed_version; |
405 | static const struct ata_port_info info = { | 347 | static const struct ata_port_info info = { |
406 | .sht = &ns87415_sht, | ||
407 | .flags = ATA_FLAG_SLAVE_POSS, | 348 | .flags = ATA_FLAG_SLAVE_POSS, |
408 | .pio_mask = 0x1f, /* pio0-4 */ | 349 | .pio_mask = 0x1f, /* pio0-4 */ |
409 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 350 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
410 | .port_ops = &ns87415_pata_ops, | 351 | .port_ops = &ns87415_pata_ops, |
411 | }; | 352 | }; |
412 | const struct ata_port_info *ppi[] = { &info, NULL }; | 353 | const struct ata_port_info *ppi[] = { &info, NULL }; |
354 | int rc; | ||
413 | #if defined(CONFIG_SUPERIO) | 355 | #if defined(CONFIG_SUPERIO) |
414 | static const struct ata_port_info info87560 = { | 356 | static const struct ata_port_info info87560 = { |
415 | .sht = &ns87415_sht, | ||
416 | .flags = ATA_FLAG_SLAVE_POSS, | 357 | .flags = ATA_FLAG_SLAVE_POSS, |
417 | .pio_mask = 0x1f, /* pio0-4 */ | 358 | .pio_mask = 0x1f, /* pio0-4 */ |
418 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 359 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
@@ -425,11 +366,16 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
425 | if (!printed_version++) | 366 | if (!printed_version++) |
426 | dev_printk(KERN_DEBUG, &pdev->dev, | 367 | dev_printk(KERN_DEBUG, &pdev->dev, |
427 | "version " DRV_VERSION "\n"); | 368 | "version " DRV_VERSION "\n"); |
369 | |||
370 | rc = pcim_enable_device(pdev); | ||
371 | if (rc) | ||
372 | return rc; | ||
373 | |||
428 | /* Select 512 byte sectors */ | 374 | /* Select 512 byte sectors */ |
429 | pci_write_config_byte(pdev, 0x55, 0xEE); | 375 | pci_write_config_byte(pdev, 0x55, 0xEE); |
430 | /* Select PIO0 8bit clocking */ | 376 | /* Select PIO0 8bit clocking */ |
431 | pci_write_config_byte(pdev, 0x54, 0xB7); | 377 | pci_write_config_byte(pdev, 0x54, 0xB7); |
432 | return ata_pci_init_one(pdev, ppi); | 378 | return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL); |
433 | } | 379 | } |
434 | 380 | ||
435 | static const struct pci_device_id ns87415_pci_tbl[] = { | 381 | static const struct pci_device_id ns87415_pci_tbl[] = { |
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 44da09ace52c..e678af383d13 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c | |||
@@ -47,21 +47,7 @@ static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline) | |||
47 | if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) | 47 | if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) |
48 | return -ENOENT; | 48 | return -ENOENT; |
49 | 49 | ||
50 | return ata_std_prereset(link, deadline); | 50 | return ata_sff_prereset(link, deadline); |
51 | } | ||
52 | |||
53 | /** | ||
54 | * oldpiix_pata_error_handler - Probe specified port on PATA host controller | ||
55 | * @ap: Port to probe | ||
56 | * @classes: | ||
57 | * | ||
58 | * LOCKING: | ||
59 | * None (inherited from caller). | ||
60 | */ | ||
61 | |||
62 | static void oldpiix_pata_error_handler(struct ata_port *ap) | ||
63 | { | ||
64 | ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
65 | } | 51 | } |
66 | 52 | ||
67 | /** | 53 | /** |
@@ -195,7 +181,7 @@ static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
195 | } | 181 | } |
196 | 182 | ||
197 | /** | 183 | /** |
198 | * oldpiix_qc_issue_prot - command issue | 184 | * oldpiix_qc_issue - command issue |
199 | * @qc: command pending | 185 | * @qc: command pending |
200 | * | 186 | * |
201 | * Called when the libata layer is about to issue a command. We wrap | 187 | * Called when the libata layer is about to issue a command. We wrap |
@@ -205,7 +191,7 @@ static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
205 | * be made PIO0. | 191 | * be made PIO0. |
206 | */ | 192 | */ |
207 | 193 | ||
208 | static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc) | 194 | static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc) |
209 | { | 195 | { |
210 | struct ata_port *ap = qc->ap; | 196 | struct ata_port *ap = qc->ap; |
211 | struct ata_device *adev = qc->dev; | 197 | struct ata_device *adev = qc->dev; |
@@ -215,58 +201,21 @@ static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc) | |||
215 | if (adev->dma_mode) | 201 | if (adev->dma_mode) |
216 | oldpiix_set_dmamode(ap, adev); | 202 | oldpiix_set_dmamode(ap, adev); |
217 | } | 203 | } |
218 | return ata_qc_issue_prot(qc); | 204 | return ata_sff_qc_issue(qc); |
219 | } | 205 | } |
220 | 206 | ||
221 | 207 | ||
222 | static struct scsi_host_template oldpiix_sht = { | 208 | static struct scsi_host_template oldpiix_sht = { |
223 | .module = THIS_MODULE, | 209 | ATA_BMDMA_SHT(DRV_NAME), |
224 | .name = DRV_NAME, | ||
225 | .ioctl = ata_scsi_ioctl, | ||
226 | .queuecommand = ata_scsi_queuecmd, | ||
227 | .can_queue = ATA_DEF_QUEUE, | ||
228 | .this_id = ATA_SHT_THIS_ID, | ||
229 | .sg_tablesize = LIBATA_MAX_PRD, | ||
230 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
231 | .emulated = ATA_SHT_EMULATED, | ||
232 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
233 | .proc_name = DRV_NAME, | ||
234 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
235 | .slave_configure = ata_scsi_slave_config, | ||
236 | .slave_destroy = ata_scsi_slave_destroy, | ||
237 | .bios_param = ata_std_bios_param, | ||
238 | }; | 210 | }; |
239 | 211 | ||
240 | static const struct ata_port_operations oldpiix_pata_ops = { | 212 | static struct ata_port_operations oldpiix_pata_ops = { |
213 | .inherits = &ata_bmdma_port_ops, | ||
214 | .qc_issue = oldpiix_qc_issue, | ||
215 | .cable_detect = ata_cable_40wire, | ||
241 | .set_piomode = oldpiix_set_piomode, | 216 | .set_piomode = oldpiix_set_piomode, |
242 | .set_dmamode = oldpiix_set_dmamode, | 217 | .set_dmamode = oldpiix_set_dmamode, |
243 | .mode_filter = ata_pci_default_filter, | 218 | .prereset = oldpiix_pre_reset, |
244 | |||
245 | .tf_load = ata_tf_load, | ||
246 | .tf_read = ata_tf_read, | ||
247 | .check_status = ata_check_status, | ||
248 | .exec_command = ata_exec_command, | ||
249 | .dev_select = ata_std_dev_select, | ||
250 | |||
251 | .freeze = ata_bmdma_freeze, | ||
252 | .thaw = ata_bmdma_thaw, | ||
253 | .error_handler = oldpiix_pata_error_handler, | ||
254 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
255 | .cable_detect = ata_cable_40wire, | ||
256 | |||
257 | .bmdma_setup = ata_bmdma_setup, | ||
258 | .bmdma_start = ata_bmdma_start, | ||
259 | .bmdma_stop = ata_bmdma_stop, | ||
260 | .bmdma_status = ata_bmdma_status, | ||
261 | .qc_prep = ata_qc_prep, | ||
262 | .qc_issue = oldpiix_qc_issue_prot, | ||
263 | .data_xfer = ata_data_xfer, | ||
264 | |||
265 | .irq_handler = ata_interrupt, | ||
266 | .irq_clear = ata_bmdma_irq_clear, | ||
267 | .irq_on = ata_irq_on, | ||
268 | |||
269 | .port_start = ata_sff_port_start, | ||
270 | }; | 219 | }; |
271 | 220 | ||
272 | 221 | ||
@@ -289,7 +238,6 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
289 | { | 238 | { |
290 | static int printed_version; | 239 | static int printed_version; |
291 | static const struct ata_port_info info = { | 240 | static const struct ata_port_info info = { |
292 | .sht = &oldpiix_sht, | ||
293 | .flags = ATA_FLAG_SLAVE_POSS, | 241 | .flags = ATA_FLAG_SLAVE_POSS, |
294 | .pio_mask = 0x1f, /* pio0-4 */ | 242 | .pio_mask = 0x1f, /* pio0-4 */ |
295 | .mwdma_mask = 0x07, /* mwdma1-2 */ | 243 | .mwdma_mask = 0x07, /* mwdma1-2 */ |
@@ -301,7 +249,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
301 | dev_printk(KERN_DEBUG, &pdev->dev, | 249 | dev_printk(KERN_DEBUG, &pdev->dev, |
302 | "version " DRV_VERSION "\n"); | 250 | "version " DRV_VERSION "\n"); |
303 | 251 | ||
304 | return ata_pci_init_one(pdev, ppi); | 252 | return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL); |
305 | } | 253 | } |
306 | 254 | ||
307 | static const struct pci_device_id oldpiix_pci_tbl[] = { | 255 | static const struct pci_device_id oldpiix_pci_tbl[] = { |
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 8f79447b6151..fb2cf661b0e8 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c | |||
@@ -64,22 +64,7 @@ static int opti_pre_reset(struct ata_link *link, unsigned long deadline) | |||
64 | if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) | 64 | if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) |
65 | return -ENOENT; | 65 | return -ENOENT; |
66 | 66 | ||
67 | return ata_std_prereset(link, deadline); | 67 | return ata_sff_prereset(link, deadline); |
68 | } | ||
69 | |||
70 | /** | ||
71 | * opti_probe_reset - probe reset | ||
72 | * @ap: ATA port | ||
73 | * | ||
74 | * Perform the ATA probe and bus reset sequence plus specific handling | ||
75 | * for this hardware. The Opti needs little handling - we have no UDMA66 | ||
76 | * capability that needs cable detection. All we must do is check the port | ||
77 | * is enabled. | ||
78 | */ | ||
79 | |||
80 | static void opti_error_handler(struct ata_port *ap) | ||
81 | { | ||
82 | ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
83 | } | 68 | } |
84 | 69 | ||
85 | /** | 70 | /** |
@@ -165,58 +150,19 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
165 | } | 150 | } |
166 | 151 | ||
167 | static struct scsi_host_template opti_sht = { | 152 | static struct scsi_host_template opti_sht = { |
168 | .module = THIS_MODULE, | 153 | ATA_PIO_SHT(DRV_NAME), |
169 | .name = DRV_NAME, | ||
170 | .ioctl = ata_scsi_ioctl, | ||
171 | .queuecommand = ata_scsi_queuecmd, | ||
172 | .can_queue = ATA_DEF_QUEUE, | ||
173 | .this_id = ATA_SHT_THIS_ID, | ||
174 | .sg_tablesize = LIBATA_MAX_PRD, | ||
175 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
176 | .emulated = ATA_SHT_EMULATED, | ||
177 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
178 | .proc_name = DRV_NAME, | ||
179 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
180 | .slave_configure = ata_scsi_slave_config, | ||
181 | .slave_destroy = ata_scsi_slave_destroy, | ||
182 | .bios_param = ata_std_bios_param, | ||
183 | }; | 154 | }; |
184 | 155 | ||
185 | static struct ata_port_operations opti_port_ops = { | 156 | static struct ata_port_operations opti_port_ops = { |
186 | .set_piomode = opti_set_piomode, | 157 | .inherits = &ata_sff_port_ops, |
187 | .tf_load = ata_tf_load, | ||
188 | .tf_read = ata_tf_read, | ||
189 | .check_status = ata_check_status, | ||
190 | .exec_command = ata_exec_command, | ||
191 | .dev_select = ata_std_dev_select, | ||
192 | |||
193 | .freeze = ata_bmdma_freeze, | ||
194 | .thaw = ata_bmdma_thaw, | ||
195 | .error_handler = opti_error_handler, | ||
196 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
197 | .cable_detect = ata_cable_40wire, | 158 | .cable_detect = ata_cable_40wire, |
198 | 159 | .set_piomode = opti_set_piomode, | |
199 | .bmdma_setup = ata_bmdma_setup, | 160 | .prereset = opti_pre_reset, |
200 | .bmdma_start = ata_bmdma_start, | ||
201 | .bmdma_stop = ata_bmdma_stop, | ||
202 | .bmdma_status = ata_bmdma_status, | ||
203 | |||
204 | .qc_prep = ata_qc_prep, | ||
205 | .qc_issue = ata_qc_issue_prot, | ||
206 | |||
207 | .data_xfer = ata_data_xfer, | ||
208 | |||
209 | .irq_handler = ata_interrupt, | ||
210 | .irq_clear = ata_bmdma_irq_clear, | ||
211 | .irq_on = ata_irq_on, | ||
212 | |||
213 | .port_start = ata_sff_port_start, | ||
214 | }; | 161 | }; |
215 | 162 | ||
216 | static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 163 | static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
217 | { | 164 | { |
218 | static const struct ata_port_info info = { | 165 | static const struct ata_port_info info = { |
219 | .sht = &opti_sht, | ||
220 | .flags = ATA_FLAG_SLAVE_POSS, | 166 | .flags = ATA_FLAG_SLAVE_POSS, |
221 | .pio_mask = 0x1f, | 167 | .pio_mask = 0x1f, |
222 | .port_ops = &opti_port_ops | 168 | .port_ops = &opti_port_ops |
@@ -227,7 +173,7 @@ static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
227 | if (!printed_version++) | 173 | if (!printed_version++) |
228 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); | 174 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); |
229 | 175 | ||
230 | return ata_pci_init_one(dev, ppi); | 176 | return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL); |
231 | } | 177 | } |
232 | 178 | ||
233 | static const struct pci_device_id opti[] = { | 179 | static const struct pci_device_id opti[] = { |
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index f9b485a487ae..4cd744456313 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c | |||
@@ -64,22 +64,7 @@ static int optidma_pre_reset(struct ata_link *link, unsigned long deadline) | |||
64 | if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) | 64 | if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) |
65 | return -ENOENT; | 65 | return -ENOENT; |
66 | 66 | ||
67 | return ata_std_prereset(link, deadline); | 67 | return ata_sff_prereset(link, deadline); |
68 | } | ||
69 | |||
70 | /** | ||
71 | * optidma_probe_reset - probe reset | ||
72 | * @ap: ATA port | ||
73 | * | ||
74 | * Perform the ATA probe and bus reset sequence plus specific handling | ||
75 | * for this hardware. The Opti needs little handling - we have no UDMA66 | ||
76 | * capability that needs cable detection. All we must do is check the port | ||
77 | * is enabled. | ||
78 | */ | ||
79 | |||
80 | static void optidma_error_handler(struct ata_port *ap) | ||
81 | { | ||
82 | ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
83 | } | 68 | } |
84 | 69 | ||
85 | /** | 70 | /** |
@@ -350,89 +335,22 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed) | |||
350 | } | 335 | } |
351 | 336 | ||
352 | static struct scsi_host_template optidma_sht = { | 337 | static struct scsi_host_template optidma_sht = { |
353 | .module = THIS_MODULE, | 338 | ATA_BMDMA_SHT(DRV_NAME), |
354 | .name = DRV_NAME, | ||
355 | .ioctl = ata_scsi_ioctl, | ||
356 | .queuecommand = ata_scsi_queuecmd, | ||
357 | .can_queue = ATA_DEF_QUEUE, | ||
358 | .this_id = ATA_SHT_THIS_ID, | ||
359 | .sg_tablesize = LIBATA_MAX_PRD, | ||
360 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
361 | .emulated = ATA_SHT_EMULATED, | ||
362 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
363 | .proc_name = DRV_NAME, | ||
364 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
365 | .slave_configure = ata_scsi_slave_config, | ||
366 | .slave_destroy = ata_scsi_slave_destroy, | ||
367 | .bios_param = ata_std_bios_param, | ||
368 | }; | 339 | }; |
369 | 340 | ||
370 | static struct ata_port_operations optidma_port_ops = { | 341 | static struct ata_port_operations optidma_port_ops = { |
342 | .inherits = &ata_bmdma_port_ops, | ||
343 | .cable_detect = ata_cable_40wire, | ||
371 | .set_piomode = optidma_set_pio_mode, | 344 | .set_piomode = optidma_set_pio_mode, |
372 | .set_dmamode = optidma_set_dma_mode, | 345 | .set_dmamode = optidma_set_dma_mode, |
373 | |||
374 | .tf_load = ata_tf_load, | ||
375 | .tf_read = ata_tf_read, | ||
376 | .check_status = ata_check_status, | ||
377 | .exec_command = ata_exec_command, | ||
378 | .dev_select = ata_std_dev_select, | ||
379 | |||
380 | .freeze = ata_bmdma_freeze, | ||
381 | .thaw = ata_bmdma_thaw, | ||
382 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
383 | .error_handler = optidma_error_handler, | ||
384 | .set_mode = optidma_set_mode, | 346 | .set_mode = optidma_set_mode, |
385 | .cable_detect = ata_cable_40wire, | 347 | .prereset = optidma_pre_reset, |
386 | |||
387 | .bmdma_setup = ata_bmdma_setup, | ||
388 | .bmdma_start = ata_bmdma_start, | ||
389 | .bmdma_stop = ata_bmdma_stop, | ||
390 | .bmdma_status = ata_bmdma_status, | ||
391 | |||
392 | .qc_prep = ata_qc_prep, | ||
393 | .qc_issue = ata_qc_issue_prot, | ||
394 | |||
395 | .data_xfer = ata_data_xfer, | ||
396 | |||
397 | .irq_handler = ata_interrupt, | ||
398 | .irq_clear = ata_bmdma_irq_clear, | ||
399 | .irq_on = ata_irq_on, | ||
400 | |||
401 | .port_start = ata_sff_port_start, | ||
402 | }; | 348 | }; |
403 | 349 | ||
404 | static struct ata_port_operations optiplus_port_ops = { | 350 | static struct ata_port_operations optiplus_port_ops = { |
351 | .inherits = &optidma_port_ops, | ||
405 | .set_piomode = optiplus_set_pio_mode, | 352 | .set_piomode = optiplus_set_pio_mode, |
406 | .set_dmamode = optiplus_set_dma_mode, | 353 | .set_dmamode = optiplus_set_dma_mode, |
407 | |||
408 | .tf_load = ata_tf_load, | ||
409 | .tf_read = ata_tf_read, | ||
410 | .check_status = ata_check_status, | ||
411 | .exec_command = ata_exec_command, | ||
412 | .dev_select = ata_std_dev_select, | ||
413 | |||
414 | .freeze = ata_bmdma_freeze, | ||
415 | .thaw = ata_bmdma_thaw, | ||
416 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
417 | .error_handler = optidma_error_handler, | ||
418 | .set_mode = optidma_set_mode, | ||
419 | .cable_detect = ata_cable_40wire, | ||
420 | |||
421 | .bmdma_setup = ata_bmdma_setup, | ||
422 | .bmdma_start = ata_bmdma_start, | ||
423 | .bmdma_stop = ata_bmdma_stop, | ||
424 | .bmdma_status = ata_bmdma_status, | ||
425 | |||
426 | .qc_prep = ata_qc_prep, | ||
427 | .qc_issue = ata_qc_issue_prot, | ||
428 | |||
429 | .data_xfer = ata_data_xfer, | ||
430 | |||
431 | .irq_handler = ata_interrupt, | ||
432 | .irq_clear = ata_bmdma_irq_clear, | ||
433 | .irq_on = ata_irq_on, | ||
434 | |||
435 | .port_start = ata_sff_port_start, | ||
436 | }; | 354 | }; |
437 | 355 | ||
438 | /** | 356 | /** |
@@ -481,14 +399,12 @@ done_nomsg: /* Wrong chip revision */ | |||
481 | static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 399 | static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
482 | { | 400 | { |
483 | static const struct ata_port_info info_82c700 = { | 401 | static const struct ata_port_info info_82c700 = { |
484 | .sht = &optidma_sht, | ||
485 | .flags = ATA_FLAG_SLAVE_POSS, | 402 | .flags = ATA_FLAG_SLAVE_POSS, |
486 | .pio_mask = 0x1f, | 403 | .pio_mask = 0x1f, |
487 | .mwdma_mask = 0x07, | 404 | .mwdma_mask = 0x07, |
488 | .port_ops = &optidma_port_ops | 405 | .port_ops = &optidma_port_ops |
489 | }; | 406 | }; |
490 | static const struct ata_port_info info_82c700_udma = { | 407 | static const struct ata_port_info info_82c700_udma = { |
491 | .sht = &optidma_sht, | ||
492 | .flags = ATA_FLAG_SLAVE_POSS, | 408 | .flags = ATA_FLAG_SLAVE_POSS, |
493 | .pio_mask = 0x1f, | 409 | .pio_mask = 0x1f, |
494 | .mwdma_mask = 0x07, | 410 | .mwdma_mask = 0x07, |
@@ -497,10 +413,15 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
497 | }; | 413 | }; |
498 | const struct ata_port_info *ppi[] = { &info_82c700, NULL }; | 414 | const struct ata_port_info *ppi[] = { &info_82c700, NULL }; |
499 | static int printed_version; | 415 | static int printed_version; |
416 | int rc; | ||
500 | 417 | ||
501 | if (!printed_version++) | 418 | if (!printed_version++) |
502 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); | 419 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); |
503 | 420 | ||
421 | rc = pcim_enable_device(dev); | ||
422 | if (rc) | ||
423 | return rc; | ||
424 | |||
504 | /* Fixed location chipset magic */ | 425 | /* Fixed location chipset magic */ |
505 | inw(0x1F1); | 426 | inw(0x1F1); |
506 | inw(0x1F1); | 427 | inw(0x1F1); |
@@ -509,7 +430,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
509 | if (optiplus_with_udma(dev)) | 430 | if (optiplus_with_udma(dev)) |
510 | ppi[0] = &info_82c700_udma; | 431 | ppi[0] = &info_82c700_udma; |
511 | 432 | ||
512 | return ata_pci_init_one(dev, ppi); | 433 | return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL); |
513 | } | 434 | } |
514 | 435 | ||
515 | static const struct pci_device_id optidma[] = { | 436 | static const struct pci_device_id optidma[] = { |
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 3e7f6a9da28b..3d39f9dfec5a 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -128,71 +128,21 @@ static unsigned int ata_data_xfer_8bit(struct ata_device *dev, | |||
128 | 128 | ||
129 | 129 | ||
130 | static struct scsi_host_template pcmcia_sht = { | 130 | static struct scsi_host_template pcmcia_sht = { |
131 | .module = THIS_MODULE, | 131 | ATA_PIO_SHT(DRV_NAME), |
132 | .name = DRV_NAME, | ||
133 | .ioctl = ata_scsi_ioctl, | ||
134 | .queuecommand = ata_scsi_queuecmd, | ||
135 | .can_queue = ATA_DEF_QUEUE, | ||
136 | .this_id = ATA_SHT_THIS_ID, | ||
137 | .sg_tablesize = LIBATA_MAX_PRD, | ||
138 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
139 | .emulated = ATA_SHT_EMULATED, | ||
140 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
141 | .proc_name = DRV_NAME, | ||
142 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
143 | .slave_configure = ata_scsi_slave_config, | ||
144 | .slave_destroy = ata_scsi_slave_destroy, | ||
145 | .bios_param = ata_std_bios_param, | ||
146 | }; | 132 | }; |
147 | 133 | ||
148 | static struct ata_port_operations pcmcia_port_ops = { | 134 | static struct ata_port_operations pcmcia_port_ops = { |
149 | .set_mode = pcmcia_set_mode, | 135 | .inherits = &ata_sff_port_ops, |
150 | .tf_load = ata_tf_load, | 136 | .sff_data_xfer = ata_sff_data_xfer_noirq, |
151 | .tf_read = ata_tf_read, | ||
152 | .check_status = ata_check_status, | ||
153 | .exec_command = ata_exec_command, | ||
154 | .dev_select = ata_std_dev_select, | ||
155 | |||
156 | .freeze = ata_bmdma_freeze, | ||
157 | .thaw = ata_bmdma_thaw, | ||
158 | .error_handler = ata_bmdma_error_handler, | ||
159 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
160 | .cable_detect = ata_cable_40wire, | 137 | .cable_detect = ata_cable_40wire, |
161 | 138 | .set_mode = pcmcia_set_mode, | |
162 | .qc_prep = ata_qc_prep, | ||
163 | .qc_issue = ata_qc_issue_prot, | ||
164 | |||
165 | .data_xfer = ata_data_xfer_noirq, | ||
166 | |||
167 | .irq_clear = ata_bmdma_irq_clear, | ||
168 | .irq_on = ata_irq_on, | ||
169 | |||
170 | .port_start = ata_sff_port_start, | ||
171 | }; | 139 | }; |
172 | 140 | ||
173 | static struct ata_port_operations pcmcia_8bit_port_ops = { | 141 | static struct ata_port_operations pcmcia_8bit_port_ops = { |
174 | .set_mode = pcmcia_set_mode_8bit, | 142 | .inherits = &ata_sff_port_ops, |
175 | .tf_load = ata_tf_load, | 143 | .sff_data_xfer = ata_data_xfer_8bit, |
176 | .tf_read = ata_tf_read, | ||
177 | .check_status = ata_check_status, | ||
178 | .exec_command = ata_exec_command, | ||
179 | .dev_select = ata_std_dev_select, | ||
180 | |||
181 | .freeze = ata_bmdma_freeze, | ||
182 | .thaw = ata_bmdma_thaw, | ||
183 | .error_handler = ata_bmdma_error_handler, | ||
184 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
185 | .cable_detect = ata_cable_40wire, | 144 | .cable_detect = ata_cable_40wire, |
186 | 145 | .set_mode = pcmcia_set_mode_8bit, | |
187 | .qc_prep = ata_qc_prep, | ||
188 | .qc_issue = ata_qc_issue_prot, | ||
189 | |||
190 | .data_xfer = ata_data_xfer_8bit, | ||
191 | |||
192 | .irq_clear = ata_bmdma_irq_clear, | ||
193 | .irq_on = ata_irq_on, | ||
194 | |||
195 | .port_start = ata_sff_port_start, | ||
196 | }; | 146 | }; |
197 | 147 | ||
198 | #define CS_CHECK(fn, ret) \ | 148 | #define CS_CHECK(fn, ret) \ |
@@ -373,13 +323,13 @@ next_entry: | |||
373 | ap->ioaddr.cmd_addr = io_addr + 0x10 * p; | 323 | ap->ioaddr.cmd_addr = io_addr + 0x10 * p; |
374 | ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; | 324 | ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; |
375 | ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; | 325 | ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; |
376 | ata_std_ports(&ap->ioaddr); | 326 | ata_sff_std_ports(&ap->ioaddr); |
377 | 327 | ||
378 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); | 328 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); |
379 | } | 329 | } |
380 | 330 | ||
381 | /* activate */ | 331 | /* activate */ |
382 | ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt, | 332 | ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt, |
383 | IRQF_SHARED, &pcmcia_sht); | 333 | IRQF_SHARED, &pcmcia_sht); |
384 | if (ret) | 334 | if (ret) |
385 | goto failed; | 335 | goto failed; |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 511c89b9bae8..0e1c2c1134d3 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -63,7 +63,7 @@ enum { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 65 | static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
66 | static void pdc2027x_error_handler(struct ata_port *ap); | 66 | static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline); |
67 | static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); | 67 | static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); |
68 | static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); | 68 | static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); |
69 | static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); | 69 | static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); |
@@ -129,84 +129,22 @@ static struct pci_driver pdc2027x_pci_driver = { | |||
129 | }; | 129 | }; |
130 | 130 | ||
131 | static struct scsi_host_template pdc2027x_sht = { | 131 | static struct scsi_host_template pdc2027x_sht = { |
132 | .module = THIS_MODULE, | 132 | ATA_BMDMA_SHT(DRV_NAME), |
133 | .name = DRV_NAME, | ||
134 | .ioctl = ata_scsi_ioctl, | ||
135 | .queuecommand = ata_scsi_queuecmd, | ||
136 | .can_queue = ATA_DEF_QUEUE, | ||
137 | .this_id = ATA_SHT_THIS_ID, | ||
138 | .sg_tablesize = LIBATA_MAX_PRD, | ||
139 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
140 | .emulated = ATA_SHT_EMULATED, | ||
141 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
142 | .proc_name = DRV_NAME, | ||
143 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
144 | .slave_configure = ata_scsi_slave_config, | ||
145 | .slave_destroy = ata_scsi_slave_destroy, | ||
146 | .bios_param = ata_std_bios_param, | ||
147 | }; | 133 | }; |
148 | 134 | ||
149 | static struct ata_port_operations pdc2027x_pata100_ops = { | 135 | static struct ata_port_operations pdc2027x_pata100_ops = { |
150 | .mode_filter = ata_pci_default_filter, | 136 | .inherits = &ata_bmdma_port_ops, |
151 | |||
152 | .tf_load = ata_tf_load, | ||
153 | .tf_read = ata_tf_read, | ||
154 | .check_status = ata_check_status, | ||
155 | .exec_command = ata_exec_command, | ||
156 | .dev_select = ata_std_dev_select, | ||
157 | |||
158 | .check_atapi_dma = pdc2027x_check_atapi_dma, | 137 | .check_atapi_dma = pdc2027x_check_atapi_dma, |
159 | .bmdma_setup = ata_bmdma_setup, | ||
160 | .bmdma_start = ata_bmdma_start, | ||
161 | .bmdma_stop = ata_bmdma_stop, | ||
162 | .bmdma_status = ata_bmdma_status, | ||
163 | .qc_prep = ata_qc_prep, | ||
164 | .qc_issue = ata_qc_issue_prot, | ||
165 | .data_xfer = ata_data_xfer, | ||
166 | |||
167 | .freeze = ata_bmdma_freeze, | ||
168 | .thaw = ata_bmdma_thaw, | ||
169 | .error_handler = pdc2027x_error_handler, | ||
170 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
171 | .cable_detect = pdc2027x_cable_detect, | 138 | .cable_detect = pdc2027x_cable_detect, |
172 | 139 | .prereset = pdc2027x_prereset, | |
173 | .irq_clear = ata_bmdma_irq_clear, | ||
174 | .irq_on = ata_irq_on, | ||
175 | |||
176 | .port_start = ata_sff_port_start, | ||
177 | }; | 140 | }; |
178 | 141 | ||
179 | static struct ata_port_operations pdc2027x_pata133_ops = { | 142 | static struct ata_port_operations pdc2027x_pata133_ops = { |
143 | .inherits = &pdc2027x_pata100_ops, | ||
144 | .mode_filter = pdc2027x_mode_filter, | ||
180 | .set_piomode = pdc2027x_set_piomode, | 145 | .set_piomode = pdc2027x_set_piomode, |
181 | .set_dmamode = pdc2027x_set_dmamode, | 146 | .set_dmamode = pdc2027x_set_dmamode, |
182 | .set_mode = pdc2027x_set_mode, | 147 | .set_mode = pdc2027x_set_mode, |
183 | .mode_filter = pdc2027x_mode_filter, | ||
184 | |||
185 | .tf_load = ata_tf_load, | ||
186 | .tf_read = ata_tf_read, | ||
187 | .check_status = ata_check_status, | ||
188 | .exec_command = ata_exec_command, | ||
189 | .dev_select = ata_std_dev_select, | ||
190 | |||
191 | .check_atapi_dma = pdc2027x_check_atapi_dma, | ||
192 | .bmdma_setup = ata_bmdma_setup, | ||
193 | .bmdma_start = ata_bmdma_start, | ||
194 | .bmdma_stop = ata_bmdma_stop, | ||
195 | .bmdma_status = ata_bmdma_status, | ||
196 | .qc_prep = ata_qc_prep, | ||
197 | .qc_issue = ata_qc_issue_prot, | ||
198 | .data_xfer = ata_data_xfer, | ||
199 | |||
200 | .freeze = ata_bmdma_freeze, | ||
201 | .thaw = ata_bmdma_thaw, | ||
202 | .error_handler = pdc2027x_error_handler, | ||
203 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
204 | .cable_detect = pdc2027x_cable_detect, | ||
205 | |||
206 | .irq_clear = ata_bmdma_irq_clear, | ||
207 | .irq_on = ata_irq_on, | ||
208 | |||
209 | .port_start = ata_sff_port_start, | ||
210 | }; | 148 | }; |
211 | 149 | ||
212 | static struct ata_port_info pdc2027x_port_info[] = { | 150 | static struct ata_port_info pdc2027x_port_info[] = { |
@@ -310,22 +248,7 @@ static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline) | |||
310 | /* Check whether port enabled */ | 248 | /* Check whether port enabled */ |
311 | if (!pdc2027x_port_enabled(link->ap)) | 249 | if (!pdc2027x_port_enabled(link->ap)) |
312 | return -ENOENT; | 250 | return -ENOENT; |
313 | return ata_std_prereset(link, deadline); | 251 | return ata_sff_prereset(link, deadline); |
314 | } | ||
315 | |||
316 | /** | ||
317 | * pdc2027x_error_handler - Perform reset on PATA port and classify | ||
318 | * @ap: Port to reset | ||
319 | * | ||
320 | * Reset PATA phy and classify attached devices. | ||
321 | * | ||
322 | * LOCKING: | ||
323 | * None (inherited from caller). | ||
324 | */ | ||
325 | |||
326 | static void pdc2027x_error_handler(struct ata_port *ap) | ||
327 | { | ||
328 | ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset); | ||
329 | } | 252 | } |
330 | 253 | ||
331 | /** | 254 | /** |
@@ -342,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long | |||
342 | struct ata_device *pair = ata_dev_pair(adev); | 265 | struct ata_device *pair = ata_dev_pair(adev); |
343 | 266 | ||
344 | if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL) | 267 | if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL) |
345 | return ata_pci_default_filter(adev, mask); | 268 | return ata_bmdma_mode_filter(adev, mask); |
346 | 269 | ||
347 | /* Check for slave of a Maxtor at UDMA6 */ | 270 | /* Check for slave of a Maxtor at UDMA6 */ |
348 | ata_id_c_string(pair->id, model_num, ATA_ID_PROD, | 271 | ata_id_c_string(pair->id, model_num, ATA_ID_PROD, |
@@ -351,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long | |||
351 | if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6) | 274 | if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6) |
352 | mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); | 275 | mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); |
353 | 276 | ||
354 | return ata_pci_default_filter(adev, mask); | 277 | return ata_bmdma_mode_filter(adev, mask); |
355 | } | 278 | } |
356 | 279 | ||
357 | /** | 280 | /** |
@@ -836,8 +759,8 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de | |||
836 | return -EIO; | 759 | return -EIO; |
837 | 760 | ||
838 | pci_set_master(pdev); | 761 | pci_set_master(pdev); |
839 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 762 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
840 | &pdc2027x_sht); | 763 | IRQF_SHARED, &pdc2027x_sht); |
841 | } | 764 | } |
842 | 765 | ||
843 | /** | 766 | /** |
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 3ed866723e0c..d2673060bc8d 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c | |||
@@ -262,94 +262,34 @@ static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | static struct scsi_host_template pdc202xx_sht = { | 264 | static struct scsi_host_template pdc202xx_sht = { |
265 | .module = THIS_MODULE, | 265 | ATA_BMDMA_SHT(DRV_NAME), |
266 | .name = DRV_NAME, | ||
267 | .ioctl = ata_scsi_ioctl, | ||
268 | .queuecommand = ata_scsi_queuecmd, | ||
269 | .can_queue = ATA_DEF_QUEUE, | ||
270 | .this_id = ATA_SHT_THIS_ID, | ||
271 | .sg_tablesize = LIBATA_MAX_PRD, | ||
272 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
273 | .emulated = ATA_SHT_EMULATED, | ||
274 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
275 | .proc_name = DRV_NAME, | ||
276 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
277 | .slave_configure = ata_scsi_slave_config, | ||
278 | .slave_destroy = ata_scsi_slave_destroy, | ||
279 | .bios_param = ata_std_bios_param, | ||
280 | }; | 266 | }; |
281 | 267 | ||
282 | static struct ata_port_operations pdc2024x_port_ops = { | 268 | static struct ata_port_operations pdc2024x_port_ops = { |
283 | .set_piomode = pdc202xx_set_piomode, | 269 | .inherits = &ata_bmdma_port_ops, |
284 | .set_dmamode = pdc202xx_set_dmamode, | 270 | |
285 | .mode_filter = ata_pci_default_filter, | 271 | .cable_detect = ata_cable_40wire, |
286 | .tf_load = ata_tf_load, | 272 | .set_piomode = pdc202xx_set_piomode, |
287 | .tf_read = ata_tf_read, | 273 | .set_dmamode = pdc202xx_set_dmamode, |
288 | .check_status = ata_check_status, | ||
289 | .exec_command = ata_exec_command, | ||
290 | .dev_select = ata_std_dev_select, | ||
291 | |||
292 | .freeze = ata_bmdma_freeze, | ||
293 | .thaw = ata_bmdma_thaw, | ||
294 | .error_handler = ata_bmdma_error_handler, | ||
295 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
296 | .cable_detect = ata_cable_40wire, | ||
297 | |||
298 | .bmdma_setup = ata_bmdma_setup, | ||
299 | .bmdma_start = ata_bmdma_start, | ||
300 | .bmdma_stop = ata_bmdma_stop, | ||
301 | .bmdma_status = ata_bmdma_status, | ||
302 | |||
303 | .qc_prep = ata_qc_prep, | ||
304 | .qc_issue = ata_qc_issue_prot, | ||
305 | .data_xfer = ata_data_xfer, | ||
306 | |||
307 | .irq_handler = ata_interrupt, | ||
308 | .irq_clear = ata_bmdma_irq_clear, | ||
309 | .irq_on = ata_irq_on, | ||
310 | |||
311 | .port_start = ata_sff_port_start, | ||
312 | }; | 274 | }; |
313 | 275 | ||
314 | static struct ata_port_operations pdc2026x_port_ops = { | 276 | static struct ata_port_operations pdc2026x_port_ops = { |
315 | .set_piomode = pdc202xx_set_piomode, | 277 | .inherits = &pdc2024x_port_ops, |
316 | .set_dmamode = pdc202xx_set_dmamode, | 278 | |
317 | .mode_filter = ata_pci_default_filter, | 279 | .check_atapi_dma = pdc2026x_check_atapi_dma, |
318 | .tf_load = ata_tf_load, | 280 | .bmdma_start = pdc2026x_bmdma_start, |
319 | .tf_read = ata_tf_read, | 281 | .bmdma_stop = pdc2026x_bmdma_stop, |
320 | .check_status = ata_check_status, | 282 | |
321 | .exec_command = ata_exec_command, | 283 | .cable_detect = pdc2026x_cable_detect, |
322 | .dev_select = ata_std_dev_select, | 284 | .dev_config = pdc2026x_dev_config, |
323 | .dev_config = pdc2026x_dev_config, | 285 | |
324 | 286 | .port_start = pdc2026x_port_start, | |
325 | .freeze = ata_bmdma_freeze, | ||
326 | .thaw = ata_bmdma_thaw, | ||
327 | .error_handler = ata_bmdma_error_handler, | ||
328 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
329 | .cable_detect = pdc2026x_cable_detect, | ||
330 | |||
331 | .check_atapi_dma= pdc2026x_check_atapi_dma, | ||
332 | .bmdma_setup = ata_bmdma_setup, | ||
333 | .bmdma_start = pdc2026x_bmdma_start, | ||
334 | .bmdma_stop = pdc2026x_bmdma_stop, | ||
335 | .bmdma_status = ata_bmdma_status, | ||
336 | |||
337 | .qc_prep = ata_qc_prep, | ||
338 | .qc_issue = ata_qc_issue_prot, | ||
339 | .data_xfer = ata_data_xfer, | ||
340 | |||
341 | .irq_handler = ata_interrupt, | ||
342 | .irq_clear = ata_bmdma_irq_clear, | ||
343 | .irq_on = ata_irq_on, | ||
344 | |||
345 | .port_start = pdc2026x_port_start, | ||
346 | }; | 287 | }; |
347 | 288 | ||
348 | static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 289 | static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
349 | { | 290 | { |
350 | static const struct ata_port_info info[3] = { | 291 | static const struct ata_port_info info[3] = { |
351 | { | 292 | { |
352 | .sht = &pdc202xx_sht, | ||
353 | .flags = ATA_FLAG_SLAVE_POSS, | 293 | .flags = ATA_FLAG_SLAVE_POSS, |
354 | .pio_mask = 0x1f, | 294 | .pio_mask = 0x1f, |
355 | .mwdma_mask = 0x07, | 295 | .mwdma_mask = 0x07, |
@@ -357,7 +297,6 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
357 | .port_ops = &pdc2024x_port_ops | 297 | .port_ops = &pdc2024x_port_ops |
358 | }, | 298 | }, |
359 | { | 299 | { |
360 | .sht = &pdc202xx_sht, | ||
361 | .flags = ATA_FLAG_SLAVE_POSS, | 300 | .flags = ATA_FLAG_SLAVE_POSS, |
362 | .pio_mask = 0x1f, | 301 | .pio_mask = 0x1f, |
363 | .mwdma_mask = 0x07, | 302 | .mwdma_mask = 0x07, |
@@ -365,7 +304,6 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
365 | .port_ops = &pdc2026x_port_ops | 304 | .port_ops = &pdc2026x_port_ops |
366 | }, | 305 | }, |
367 | { | 306 | { |
368 | .sht = &pdc202xx_sht, | ||
369 | .flags = ATA_FLAG_SLAVE_POSS, | 307 | .flags = ATA_FLAG_SLAVE_POSS, |
370 | .pio_mask = 0x1f, | 308 | .pio_mask = 0x1f, |
371 | .mwdma_mask = 0x07, | 309 | .mwdma_mask = 0x07, |
@@ -386,7 +324,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
386 | return -ENODEV; | 324 | return -ENODEV; |
387 | } | 325 | } |
388 | } | 326 | } |
389 | return ata_pci_init_one(dev, ppi); | 327 | return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL); |
390 | } | 328 | } |
391 | 329 | ||
392 | static const struct pci_device_id pdc202xx[] = { | 330 | static const struct pci_device_id pdc202xx[] = { |
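
The pdc202xx_old hunks introduce the other recurring element: ops objects now chain through .inherits (pdc2026x_port_ops builds on pdc2024x_port_ops, which builds on ata_bmdma_port_ops), and the SHT moves out of ata_port_info into the ata_pci_sff_init_one() call. The inheritance itself amounts to member-wise fill-in of unset hooks; the following standalone C program is an illustrative model of that idea, not the libata implementation:

#include <stdio.h>

struct ops {
	const struct ops *inherits;
	void (*set_piomode)(void);
	void (*bmdma_start)(void);
	void (*cable_detect)(void);
};

static void generic_piomode(void) { puts("generic set_piomode"); }
static void generic_bmdma(void)   { puts("generic bmdma_start"); }
static void pdc2026x_bmdma(void)  { puts("pdc2026x bmdma_start"); }
static void pdc2026x_cable(void)  { puts("pdc2026x cable_detect"); }

static const struct ops base_ops = {
	.set_piomode = generic_piomode,
	.bmdma_start = generic_bmdma,
};

static struct ops pdc2026x_ops = {
	.inherits     = &base_ops,
	.bmdma_start  = pdc2026x_bmdma,		/* overridden */
	.cable_detect = pdc2026x_cable,		/* added */
};

/* crude "finalize": copy any hook still unset from the parent chain */
static void finalize(struct ops *o)
{
	const struct ops *p;

	for (p = o->inherits; p; p = p->inherits) {
		if (!o->set_piomode)
			o->set_piomode = p->set_piomode;
		if (!o->bmdma_start)
			o->bmdma_start = p->bmdma_start;
		if (!o->cable_detect)
			o->cable_detect = p->cable_detect;
	}
}

int main(void)
{
	finalize(&pdc2026x_ops);
	pdc2026x_ops.set_piomode();	/* inherited from base_ops */
	pdc2026x_ops.bmdma_start();	/* the override wins */
	pdc2026x_ops.cable_detect();
	return 0;
}

In libata the equivalent fill-in is performed by the core when ports are brought up, which is why the converted drivers can leave almost every field untouched.
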
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index aad7adc6ea56..6527c56c34a3 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c | |||
@@ -46,50 +46,16 @@ static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unu | |||
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int ata_dummy_ret0(struct ata_port *ap) { return 0; } | ||
50 | |||
51 | static struct scsi_host_template pata_platform_sht = { | 49 | static struct scsi_host_template pata_platform_sht = { |
52 | .module = THIS_MODULE, | 50 | ATA_PIO_SHT(DRV_NAME), |
53 | .name = DRV_NAME, | ||
54 | .ioctl = ata_scsi_ioctl, | ||
55 | .queuecommand = ata_scsi_queuecmd, | ||
56 | .can_queue = ATA_DEF_QUEUE, | ||
57 | .this_id = ATA_SHT_THIS_ID, | ||
58 | .sg_tablesize = LIBATA_MAX_PRD, | ||
59 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
60 | .emulated = ATA_SHT_EMULATED, | ||
61 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
62 | .proc_name = DRV_NAME, | ||
63 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
64 | .slave_configure = ata_scsi_slave_config, | ||
65 | .slave_destroy = ata_scsi_slave_destroy, | ||
66 | .bios_param = ata_std_bios_param, | ||
67 | }; | 51 | }; |
68 | 52 | ||
69 | static struct ata_port_operations pata_platform_port_ops = { | 53 | static struct ata_port_operations pata_platform_port_ops = { |
70 | .set_mode = pata_platform_set_mode, | 54 | .inherits = &ata_sff_port_ops, |
71 | 55 | .sff_data_xfer = ata_sff_data_xfer_noirq, | |
72 | .tf_load = ata_tf_load, | ||
73 | .tf_read = ata_tf_read, | ||
74 | .check_status = ata_check_status, | ||
75 | .exec_command = ata_exec_command, | ||
76 | .dev_select = ata_std_dev_select, | ||
77 | |||
78 | .freeze = ata_bmdma_freeze, | ||
79 | .thaw = ata_bmdma_thaw, | ||
80 | .error_handler = ata_bmdma_error_handler, | ||
81 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
82 | .cable_detect = ata_cable_unknown, | 56 | .cable_detect = ata_cable_unknown, |
83 | 57 | .set_mode = pata_platform_set_mode, | |
84 | .qc_prep = ata_qc_prep, | 58 | .port_start = ATA_OP_NULL, |
85 | .qc_issue = ata_qc_issue_prot, | ||
86 | |||
87 | .data_xfer = ata_data_xfer_noirq, | ||
88 | |||
89 | .irq_clear = ata_bmdma_irq_clear, | ||
90 | .irq_on = ata_irq_on, | ||
91 | |||
92 | .port_start = ata_dummy_ret0, | ||
93 | }; | 59 | }; |
94 | 60 | ||
95 | static void pata_platform_setup_port(struct ata_ioports *ioaddr, | 61 | static void pata_platform_setup_port(struct ata_ioports *ioaddr, |
@@ -210,7 +176,7 @@ int __devinit __pata_platform_probe(struct device *dev, | |||
210 | (unsigned long long)ctl_res->start); | 176 | (unsigned long long)ctl_res->start); |
211 | 177 | ||
212 | /* activate */ | 178 | /* activate */ |
213 | return ata_host_activate(host, irq, irq ? ata_interrupt : NULL, | 179 | return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL, |
214 | irq_flags, &pata_platform_sht); | 180 | irq_flags, &pata_platform_sht); |
215 | } | 181 | } |
216 | EXPORT_SYMBOL_GPL(__pata_platform_probe); | 182 | EXPORT_SYMBOL_GPL(__pata_platform_probe); |
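
pata_platform is the simplest case: a PIO-only driver reduced to ATA_PIO_SHT(DRV_NAME), an ops struct that inherits ata_sff_port_ops, and .port_start = ATA_OP_NULL, which marks the hook as deliberately absent and effectively replaces the deleted ata_dummy_ret0() stub. The SHT macros package exactly the boilerplate being deleted in these hunks; judging from those deleted fields they expand roughly as follows (an approximation reconstructed from this diff, the authoritative definitions live in include/linux/libata.h):

#define ATA_BASE_SHT(drv_name)				\
	.module		 = THIS_MODULE,			\
	.name		 = drv_name,			\
	.ioctl		 = ata_scsi_ioctl,		\
	.queuecommand	 = ata_scsi_queuecmd,		\
	.can_queue	 = ATA_DEF_QUEUE,		\
	.this_id	 = ATA_SHT_THIS_ID,		\
	.cmd_per_lun	 = ATA_SHT_CMD_PER_LUN,		\
	.emulated	 = ATA_SHT_EMULATED,		\
	.use_clustering	 = ATA_SHT_USE_CLUSTERING,	\
	.proc_name	 = drv_name,			\
	.slave_configure = ata_scsi_slave_config,	\
	.slave_destroy	 = ata_scsi_slave_destroy,	\
	.bios_param	 = ata_std_bios_param

#define ATA_PIO_SHT(drv_name)				\
	ATA_BASE_SHT(drv_name),				\
	.sg_tablesize	 = LIBATA_MAX_PRD,		\
	.dma_boundary	 = ATA_DMA_BOUNDARY

ATA_BMDMA_SHT(drv_name), used by the DMA-capable drivers in this series, follows the same pattern.
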
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c index 9f308ed76cc8..bf45cf017753 100644 --- a/drivers/ata/pata_qdi.c +++ b/drivers/ata/pata_qdi.c | |||
@@ -102,14 +102,14 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | /** | 104 | /** |
105 | * qdi_qc_issue_prot - command issue | 105 | * qdi_qc_issue - command issue |
106 | * @qc: command pending | 106 | * @qc: command pending |
107 | * | 107 | * |
108 | * Called when the libata layer is about to issue a command. We wrap | 108 | * Called when the libata layer is about to issue a command. We wrap |
109 | * this interface so that we can load the correct ATA timings. | 109 | * this interface so that we can load the correct ATA timings. |
110 | */ | 110 | */ |
111 | 111 | ||
112 | static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) | 112 | static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc) |
113 | { | 113 | { |
114 | struct ata_port *ap = qc->ap; | 114 | struct ata_port *ap = qc->ap; |
115 | struct ata_device *adev = qc->dev; | 115 | struct ata_device *adev = qc->dev; |
@@ -121,7 +121,7 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) | |||
121 | outb(qdi->clock[adev->devno], qdi->timing); | 121 | outb(qdi->clock[adev->devno], qdi->timing); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | return ata_qc_issue_prot(qc); | 124 | return ata_sff_qc_issue(qc); |
125 | } | 125 | } |
126 | 126 | ||
127 | static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf, | 127 | static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf, |
@@ -148,79 +148,26 @@ static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf, | |||
148 | buflen += 4 - slop; | 148 | buflen += 4 - slop; |
149 | } | 149 | } |
150 | } else | 150 | } else |
151 | buflen = ata_data_xfer(dev, buf, buflen, rw); | 151 | buflen = ata_sff_data_xfer(dev, buf, buflen, rw); |
152 | 152 | ||
153 | return buflen; | 153 | return buflen; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct scsi_host_template qdi_sht = { | 156 | static struct scsi_host_template qdi_sht = { |
157 | .module = THIS_MODULE, | 157 | ATA_PIO_SHT(DRV_NAME), |
158 | .name = DRV_NAME, | ||
159 | .ioctl = ata_scsi_ioctl, | ||
160 | .queuecommand = ata_scsi_queuecmd, | ||
161 | .can_queue = ATA_DEF_QUEUE, | ||
162 | .this_id = ATA_SHT_THIS_ID, | ||
163 | .sg_tablesize = LIBATA_MAX_PRD, | ||
164 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
165 | .emulated = ATA_SHT_EMULATED, | ||
166 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
167 | .proc_name = DRV_NAME, | ||
168 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
169 | .slave_configure = ata_scsi_slave_config, | ||
170 | .slave_destroy = ata_scsi_slave_destroy, | ||
171 | .bios_param = ata_std_bios_param, | ||
172 | }; | 158 | }; |
173 | 159 | ||
174 | static struct ata_port_operations qdi6500_port_ops = { | 160 | static struct ata_port_operations qdi6500_port_ops = { |
175 | .set_piomode = qdi6500_set_piomode, | 161 | .inherits = &ata_sff_port_ops, |
176 | 162 | .qc_issue = qdi_qc_issue, | |
177 | .tf_load = ata_tf_load, | 163 | .sff_data_xfer = qdi_data_xfer, |
178 | .tf_read = ata_tf_read, | ||
179 | .check_status = ata_check_status, | ||
180 | .exec_command = ata_exec_command, | ||
181 | .dev_select = ata_std_dev_select, | ||
182 | |||
183 | .freeze = ata_bmdma_freeze, | ||
184 | .thaw = ata_bmdma_thaw, | ||
185 | .error_handler = ata_bmdma_error_handler, | ||
186 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
187 | .cable_detect = ata_cable_40wire, | 164 | .cable_detect = ata_cable_40wire, |
188 | 165 | .set_piomode = qdi6500_set_piomode, | |
189 | .qc_prep = ata_qc_prep, | ||
190 | .qc_issue = qdi_qc_issue_prot, | ||
191 | |||
192 | .data_xfer = qdi_data_xfer, | ||
193 | |||
194 | .irq_clear = ata_bmdma_irq_clear, | ||
195 | .irq_on = ata_irq_on, | ||
196 | |||
197 | .port_start = ata_sff_port_start, | ||
198 | }; | 166 | }; |
199 | 167 | ||
200 | static struct ata_port_operations qdi6580_port_ops = { | 168 | static struct ata_port_operations qdi6580_port_ops = { |
169 | .inherits = &qdi6500_port_ops, | ||
201 | .set_piomode = qdi6580_set_piomode, | 170 | .set_piomode = qdi6580_set_piomode, |
202 | |||
203 | .tf_load = ata_tf_load, | ||
204 | .tf_read = ata_tf_read, | ||
205 | .check_status = ata_check_status, | ||
206 | .exec_command = ata_exec_command, | ||
207 | .dev_select = ata_std_dev_select, | ||
208 | |||
209 | .freeze = ata_bmdma_freeze, | ||
210 | .thaw = ata_bmdma_thaw, | ||
211 | .error_handler = ata_bmdma_error_handler, | ||
212 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
213 | .cable_detect = ata_cable_40wire, | ||
214 | |||
215 | .qc_prep = ata_qc_prep, | ||
216 | .qc_issue = qdi_qc_issue_prot, | ||
217 | |||
218 | .data_xfer = qdi_data_xfer, | ||
219 | |||
220 | .irq_clear = ata_bmdma_irq_clear, | ||
221 | .irq_on = ata_irq_on, | ||
222 | |||
223 | .port_start = ata_sff_port_start, | ||
224 | }; | 171 | }; |
225 | 172 | ||
226 | /** | 173 | /** |
@@ -276,7 +223,7 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i | |||
276 | ap->ioaddr.cmd_addr = io_addr; | 223 | ap->ioaddr.cmd_addr = io_addr; |
277 | ap->ioaddr.altstatus_addr = ctl_addr; | 224 | ap->ioaddr.altstatus_addr = ctl_addr; |
278 | ap->ioaddr.ctl_addr = ctl_addr; | 225 | ap->ioaddr.ctl_addr = ctl_addr; |
279 | ata_std_ports(&ap->ioaddr); | 226 | ata_sff_std_ports(&ap->ioaddr); |
280 | 227 | ||
281 | ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl); | 228 | ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl); |
282 | 229 | ||
@@ -292,7 +239,7 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i | |||
292 | printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io); | 239 | printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io); |
293 | 240 | ||
294 | /* activate */ | 241 | /* activate */ |
295 | ret = ata_host_activate(host, irq, ata_interrupt, 0, &qdi_sht); | 242 | ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht); |
296 | if (ret) | 243 | if (ret) |
297 | goto fail; | 244 | goto fail; |
298 | 245 | ||
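
pata_qdi keeps its per-command timing reload by wrapping the renamed generic issue path (qdi_qc_issue() ends in ata_sff_qc_issue()), and its legacy probe now calls ata_sff_std_ports() to derive the taskfile register addresses from cmd_addr. What that helper does can be sketched as fixed offsets from the command block, using the ATA_REG_* constants (an illustrative approximation of the libata helper, not its source):

static void sff_std_ports_sketch(struct ata_ioports *ioaddr)
{
	/* error/feature and status/command are the read/write views of
	 * the same register offsets */
	ioaddr->data_addr    = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr   = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr   = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr    = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr    = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr    = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr  = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr  = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}

The caller still fills cmd_addr, ctl_addr and altstatus_addr itself, as the qdi hunk does immediately before this call.
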
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index 8109b08fc024..1c0d9fa7ee54 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c | |||
@@ -156,7 +156,7 @@ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
156 | } | 156 | } |
157 | 157 | ||
158 | /** | 158 | /** |
159 | * radisys_qc_issue_prot - command issue | 159 | * radisys_qc_issue - command issue |
160 | * @qc: command pending | 160 | * @qc: command pending |
161 | * | 161 | * |
162 | * Called when the libata layer is about to issue a command. We wrap | 162 | * Called when the libata layer is about to issue a command. We wrap |
@@ -166,7 +166,7 @@ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
166 | * be made PIO0. | 166 | * be made PIO0. |
167 | */ | 167 | */ |
168 | 168 | ||
169 | static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc) | 169 | static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc) |
170 | { | 170 | { |
171 | struct ata_port *ap = qc->ap; | 171 | struct ata_port *ap = qc->ap; |
172 | struct ata_device *adev = qc->dev; | 172 | struct ata_device *adev = qc->dev; |
@@ -180,58 +180,20 @@ static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc) | |||
180 | radisys_set_piomode(ap, adev); | 180 | radisys_set_piomode(ap, adev); |
181 | } | 181 | } |
182 | } | 182 | } |
183 | return ata_qc_issue_prot(qc); | 183 | return ata_sff_qc_issue(qc); |
184 | } | 184 | } |
185 | 185 | ||
186 | 186 | ||
187 | static struct scsi_host_template radisys_sht = { | 187 | static struct scsi_host_template radisys_sht = { |
188 | .module = THIS_MODULE, | 188 | ATA_BMDMA_SHT(DRV_NAME), |
189 | .name = DRV_NAME, | ||
190 | .ioctl = ata_scsi_ioctl, | ||
191 | .queuecommand = ata_scsi_queuecmd, | ||
192 | .can_queue = ATA_DEF_QUEUE, | ||
193 | .this_id = ATA_SHT_THIS_ID, | ||
194 | .sg_tablesize = LIBATA_MAX_PRD, | ||
195 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
196 | .emulated = ATA_SHT_EMULATED, | ||
197 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
198 | .proc_name = DRV_NAME, | ||
199 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
200 | .slave_configure = ata_scsi_slave_config, | ||
201 | .slave_destroy = ata_scsi_slave_destroy, | ||
202 | .bios_param = ata_std_bios_param, | ||
203 | }; | 189 | }; |
204 | 190 | ||
205 | static const struct ata_port_operations radisys_pata_ops = { | 191 | static struct ata_port_operations radisys_pata_ops = { |
192 | .inherits = &ata_bmdma_port_ops, | ||
193 | .qc_issue = radisys_qc_issue, | ||
194 | .cable_detect = ata_cable_unknown, | ||
206 | .set_piomode = radisys_set_piomode, | 195 | .set_piomode = radisys_set_piomode, |
207 | .set_dmamode = radisys_set_dmamode, | 196 | .set_dmamode = radisys_set_dmamode, |
208 | .mode_filter = ata_pci_default_filter, | ||
209 | |||
210 | .tf_load = ata_tf_load, | ||
211 | .tf_read = ata_tf_read, | ||
212 | .check_status = ata_check_status, | ||
213 | .exec_command = ata_exec_command, | ||
214 | .dev_select = ata_std_dev_select, | ||
215 | |||
216 | .freeze = ata_bmdma_freeze, | ||
217 | .thaw = ata_bmdma_thaw, | ||
218 | .error_handler = ata_bmdma_error_handler, | ||
219 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
220 | .cable_detect = ata_cable_unknown, | ||
221 | |||
222 | .bmdma_setup = ata_bmdma_setup, | ||
223 | .bmdma_start = ata_bmdma_start, | ||
224 | .bmdma_stop = ata_bmdma_stop, | ||
225 | .bmdma_status = ata_bmdma_status, | ||
226 | .qc_prep = ata_qc_prep, | ||
227 | .qc_issue = radisys_qc_issue_prot, | ||
228 | .data_xfer = ata_data_xfer, | ||
229 | |||
230 | .irq_handler = ata_interrupt, | ||
231 | .irq_clear = ata_bmdma_irq_clear, | ||
232 | .irq_on = ata_irq_on, | ||
233 | |||
234 | .port_start = ata_sff_port_start, | ||
235 | }; | 197 | }; |
236 | 198 | ||
237 | 199 | ||
@@ -254,7 +216,6 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
254 | { | 216 | { |
255 | static int printed_version; | 217 | static int printed_version; |
256 | static const struct ata_port_info info = { | 218 | static const struct ata_port_info info = { |
257 | .sht = &radisys_sht, | ||
258 | .flags = ATA_FLAG_SLAVE_POSS, | 219 | .flags = ATA_FLAG_SLAVE_POSS, |
259 | .pio_mask = 0x1f, /* pio0-4 */ | 220 | .pio_mask = 0x1f, /* pio0-4 */ |
260 | .mwdma_mask = 0x07, /* mwdma1-2 */ | 221 | .mwdma_mask = 0x07, /* mwdma1-2 */ |
@@ -267,7 +228,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
267 | dev_printk(KERN_DEBUG, &pdev->dev, | 228 | dev_printk(KERN_DEBUG, &pdev->dev, |
268 | "version " DRV_VERSION "\n"); | 229 | "version " DRV_VERSION "\n"); |
269 | 230 | ||
270 | return ata_pci_init_one(pdev, ppi); | 231 | return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL); |
271 | } | 232 | } |
272 | 233 | ||
273 | static const struct pci_device_id radisys_pci_tbl[] = { | 234 | static const struct pci_device_id radisys_pci_tbl[] = { |
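
Put together, the pieces visible in the radisys hunks give the full skeleton of a converted PCI BMDMA driver. The following sketch is assembled from those pieces; the pata_example identifiers and the transfer masks are hypothetical, and a real driver adds its PCI ID table and module boilerplate:

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

#define DRV_NAME "pata_example"		/* hypothetical */

static struct scsi_host_template example_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_unknown,
	/* .set_piomode / .set_dmamode / .qc_issue overrides go here */
};

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &example_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* the SHT now travels with the probe call instead of port_info */
	return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL);
}
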
diff --git a/drivers/ata/pata_rb500_cf.c b/drivers/ata/pata_rb500_cf.c index 4ce9b03fe6c8..800ae4601f44 100644 --- a/drivers/ata/pata_rb500_cf.c +++ b/drivers/ata/pata_rb500_cf.c | |||
@@ -57,7 +57,7 @@ static inline void rb500_pata_finish_io(struct ata_port *ap) | |||
57 | struct ata_host *ah = ap->host; | 57 | struct ata_host *ah = ap->host; |
58 | struct rb500_cf_info *info = ah->private_data; | 58 | struct rb500_cf_info *info = ah->private_data; |
59 | 59 | ||
60 | ata_altstatus(ap); | 60 | ata_sff_altstatus(ap); |
61 | ndelay(RB500_CF_IO_DELAY); | 61 | ndelay(RB500_CF_IO_DELAY); |
62 | 62 | ||
63 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | 63 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); |
@@ -109,7 +109,7 @@ static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance) | |||
109 | if (gpio_get_value(info->gpio_line)) { | 109 | if (gpio_get_value(info->gpio_line)) { |
110 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); | 110 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); |
111 | if (!info->frozen) | 111 | if (!info->frozen) |
112 | ata_interrupt(info->irq, dev_instance); | 112 | ata_sff_interrupt(info->irq, dev_instance); |
113 | } else { | 113 | } else { |
114 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | 114 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); |
115 | } | 115 | } |
@@ -117,58 +117,18 @@ static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance) | |||
117 | return IRQ_HANDLED; | 117 | return IRQ_HANDLED; |
118 | } | 118 | } |
119 | 119 | ||
120 | static void rb500_pata_irq_clear(struct ata_port *ap) | ||
121 | { | ||
122 | } | ||
123 | |||
124 | static int rb500_pata_port_start(struct ata_port *ap) | ||
125 | { | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static struct ata_port_operations rb500_pata_port_ops = { | 120 | static struct ata_port_operations rb500_pata_port_ops = { |
130 | .tf_load = ata_tf_load, | 121 | .inherits = &ata_sff_port_ops, |
131 | .tf_read = ata_tf_read, | 122 | .sff_exec_command = rb500_pata_exec_command, |
132 | 123 | .sff_data_xfer = rb500_pata_data_xfer, | |
133 | .exec_command = rb500_pata_exec_command, | ||
134 | .check_status = ata_check_status, | ||
135 | .dev_select = ata_std_dev_select, | ||
136 | |||
137 | .data_xfer = rb500_pata_data_xfer, | ||
138 | |||
139 | .qc_prep = ata_qc_prep, | ||
140 | .qc_issue = ata_qc_issue_prot, | ||
141 | |||
142 | .freeze = rb500_pata_freeze, | 124 | .freeze = rb500_pata_freeze, |
143 | .thaw = rb500_pata_thaw, | 125 | .thaw = rb500_pata_thaw, |
144 | .error_handler = ata_bmdma_error_handler, | ||
145 | |||
146 | .irq_handler = rb500_pata_irq_handler, | ||
147 | .irq_clear = rb500_pata_irq_clear, | ||
148 | .irq_on = ata_irq_on, | ||
149 | |||
150 | .port_start = rb500_pata_port_start, | ||
151 | }; | 126 | }; |
152 | 127 | ||
153 | /* ------------------------------------------------------------------------ */ | 128 | /* ------------------------------------------------------------------------ */ |
154 | 129 | ||
155 | static struct scsi_host_template rb500_pata_sht = { | 130 | static struct scsi_host_template rb500_pata_sht = { |
156 | .module = THIS_MODULE, | 131 | ATA_PIO_SHT(DRV_NAME), |
157 | .name = DRV_NAME, | ||
158 | .ioctl = ata_scsi_ioctl, | ||
159 | .queuecommand = ata_scsi_queuecmd, | ||
160 | .slave_configure = ata_scsi_slave_config, | ||
161 | .slave_destroy = ata_scsi_slave_destroy, | ||
162 | .bios_param = ata_std_bios_param, | ||
163 | .proc_name = DRV_NAME, | ||
164 | |||
165 | .can_queue = ATA_DEF_QUEUE, | ||
166 | .this_id = ATA_SHT_THIS_ID, | ||
167 | .sg_tablesize = LIBATA_MAX_PRD, | ||
168 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
169 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
170 | .emulated = ATA_SHT_EMULATED, | ||
171 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
172 | }; | 132 | }; |
173 | 133 | ||
174 | /* ------------------------------------------------------------------------ */ | 134 | /* ------------------------------------------------------------------------ */ |
@@ -188,7 +148,7 @@ static void rb500_pata_setup_ports(struct ata_host *ah) | |||
188 | ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; | 148 | ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; |
189 | ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; | 149 | ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; |
190 | 150 | ||
191 | ata_std_ports(&ap->ioaddr); | 151 | ata_sff_std_ports(&ap->ioaddr); |
192 | 152 | ||
193 | ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA; | 153 | ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA; |
194 | } | 154 | } |
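
The rb500 CompactFlash driver shows the pattern for boards with their own interrupt wiring: the GPIO-aware handler stays, and only the call into libata is renamed to ata_sff_interrupt(). Shown below with its opening reconstructed (the first two assignments are not part of the hunk) and the registration indicated only as a comment, since the probe path is outside this section:

static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance)
{
	struct ata_host *ah = dev_instance;		/* reconstructed */
	struct rb500_cf_info *info = ah->private_data;	/* reconstructed */

	if (gpio_get_value(info->gpio_line)) {
		/* drive is interrupting: rearm for the falling level and
		 * let libata's generic SFF handler service the port */
		set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
		if (!info->frozen)
			ata_sff_interrupt(info->irq, dev_instance);
	} else {
		set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
	}

	return IRQ_HANDLED;
}

/* in the probe path (not shown here), presumably something like:
 *	ata_host_activate(ah, irq, rb500_pata_irq_handler, 0, &rb500_pata_sht);
 */
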
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index ba8a31c55edb..7dfd1f3f6f3a 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c | |||
@@ -53,53 +53,13 @@ static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused) | |||
53 | 53 | ||
54 | 54 | ||
55 | static struct scsi_host_template rz1000_sht = { | 55 | static struct scsi_host_template rz1000_sht = { |
56 | .module = THIS_MODULE, | 56 | ATA_PIO_SHT(DRV_NAME), |
57 | .name = DRV_NAME, | ||
58 | .ioctl = ata_scsi_ioctl, | ||
59 | .queuecommand = ata_scsi_queuecmd, | ||
60 | .can_queue = ATA_DEF_QUEUE, | ||
61 | .this_id = ATA_SHT_THIS_ID, | ||
62 | .sg_tablesize = LIBATA_MAX_PRD, | ||
63 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
64 | .emulated = ATA_SHT_EMULATED, | ||
65 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
66 | .proc_name = DRV_NAME, | ||
67 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
68 | .slave_configure = ata_scsi_slave_config, | ||
69 | .slave_destroy = ata_scsi_slave_destroy, | ||
70 | .bios_param = ata_std_bios_param, | ||
71 | }; | 57 | }; |
72 | 58 | ||
73 | static struct ata_port_operations rz1000_port_ops = { | 59 | static struct ata_port_operations rz1000_port_ops = { |
74 | .set_mode = rz1000_set_mode, | 60 | .inherits = &ata_sff_port_ops, |
75 | |||
76 | .tf_load = ata_tf_load, | ||
77 | .tf_read = ata_tf_read, | ||
78 | .check_status = ata_check_status, | ||
79 | .exec_command = ata_exec_command, | ||
80 | .dev_select = ata_std_dev_select, | ||
81 | |||
82 | .bmdma_setup = ata_bmdma_setup, | ||
83 | .bmdma_start = ata_bmdma_start, | ||
84 | .bmdma_stop = ata_bmdma_stop, | ||
85 | .bmdma_status = ata_bmdma_status, | ||
86 | |||
87 | .qc_prep = ata_qc_prep, | ||
88 | .qc_issue = ata_qc_issue_prot, | ||
89 | |||
90 | .data_xfer = ata_data_xfer, | ||
91 | |||
92 | .freeze = ata_bmdma_freeze, | ||
93 | .thaw = ata_bmdma_thaw, | ||
94 | .error_handler = ata_bmdma_error_handler, | ||
95 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
96 | .cable_detect = ata_cable_40wire, | 61 | .cable_detect = ata_cable_40wire, |
97 | 62 | .set_mode = rz1000_set_mode, | |
98 | .irq_handler = ata_interrupt, | ||
99 | .irq_clear = ata_bmdma_irq_clear, | ||
100 | .irq_on = ata_irq_on, | ||
101 | |||
102 | .port_start = ata_sff_port_start, | ||
103 | }; | 63 | }; |
104 | 64 | ||
105 | static int rz1000_fifo_disable(struct pci_dev *pdev) | 65 | static int rz1000_fifo_disable(struct pci_dev *pdev) |
@@ -129,7 +89,6 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
129 | { | 89 | { |
130 | static int printed_version; | 90 | static int printed_version; |
131 | static const struct ata_port_info info = { | 91 | static const struct ata_port_info info = { |
132 | .sht = &rz1000_sht, | ||
133 | .flags = ATA_FLAG_SLAVE_POSS, | 92 | .flags = ATA_FLAG_SLAVE_POSS, |
134 | .pio_mask = 0x1f, | 93 | .pio_mask = 0x1f, |
135 | .port_ops = &rz1000_port_ops | 94 | .port_ops = &rz1000_port_ops |
@@ -140,7 +99,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
140 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); | 99 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); |
141 | 100 | ||
142 | if (rz1000_fifo_disable(pdev) == 0) | 101 | if (rz1000_fifo_disable(pdev) == 0) |
143 | return ata_pci_init_one(pdev, ppi); | 102 | return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL); |
144 | 103 | ||
145 | printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); | 104 | printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); |
146 | /* Not safe to use so skip */ | 105 | /* Not safe to use so skip */ |
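
pata_rz1000 only binds if its read-ahead FIFO can be switched off; rz1000_fifo_disable() itself sits outside these hunks. For reference, a fix-up of that kind is a plain PCI config-space read-modify-write along these lines (the register offset and bit below are placeholders, not verified RZ1000 values):

static int example_fifo_disable(struct pci_dev *pdev)
{
	u16 reg;

	/* offset and mask are illustrative assumptions */
	if (pci_read_config_word(pdev, 0x40, &reg))
		return -1;
	reg &= ~0x2000;			/* clear the read-ahead enable bit */
	if (pci_write_config_word(pdev, 0x40, reg))
		return -1;
	return 0;
}
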
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 725a8586cd6e..cbab397e3db7 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c | |||
@@ -151,7 +151,7 @@ static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * sc1200_qc_issue_prot - command issue | 154 | * sc1200_qc_issue - command issue |
155 | * @qc: command pending | 155 | * @qc: command pending |
156 | * | 156 | * |
157 | * Called when the libata layer is about to issue a command. We wrap | 157 | * Called when the libata layer is about to issue a command. We wrap |
@@ -160,7 +160,7 @@ static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
160 | * one MWDMA/UDMA bit. | 160 | * one MWDMA/UDMA bit. |
161 | */ | 161 | */ |
162 | 162 | ||
163 | static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc) | 163 | static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc) |
164 | { | 164 | { |
165 | struct ata_port *ap = qc->ap; | 165 | struct ata_port *ap = qc->ap; |
166 | struct ata_device *adev = qc->dev; | 166 | struct ata_device *adev = qc->dev; |
@@ -175,59 +175,21 @@ static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc) | |||
175 | sc1200_set_dmamode(ap, adev); | 175 | sc1200_set_dmamode(ap, adev); |
176 | } | 176 | } |
177 | 177 | ||
178 | return ata_qc_issue_prot(qc); | 178 | return ata_sff_qc_issue(qc); |
179 | } | 179 | } |
180 | 180 | ||
181 | static struct scsi_host_template sc1200_sht = { | 181 | static struct scsi_host_template sc1200_sht = { |
182 | .module = THIS_MODULE, | 182 | ATA_BMDMA_SHT(DRV_NAME), |
183 | .name = DRV_NAME, | 183 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, |
184 | .ioctl = ata_scsi_ioctl, | ||
185 | .queuecommand = ata_scsi_queuecmd, | ||
186 | .can_queue = ATA_DEF_QUEUE, | ||
187 | .this_id = ATA_SHT_THIS_ID, | ||
188 | .sg_tablesize = LIBATA_DUMB_MAX_PRD, | ||
189 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
190 | .emulated = ATA_SHT_EMULATED, | ||
191 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
192 | .proc_name = DRV_NAME, | ||
193 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
194 | .slave_configure = ata_scsi_slave_config, | ||
195 | .slave_destroy = ata_scsi_slave_destroy, | ||
196 | .bios_param = ata_std_bios_param, | ||
197 | }; | 184 | }; |
198 | 185 | ||
199 | static struct ata_port_operations sc1200_port_ops = { | 186 | static struct ata_port_operations sc1200_port_ops = { |
187 | .inherits = &ata_bmdma_port_ops, | ||
188 | .qc_prep = ata_sff_dumb_qc_prep, | ||
189 | .qc_issue = sc1200_qc_issue, | ||
190 | .cable_detect = ata_cable_40wire, | ||
200 | .set_piomode = sc1200_set_piomode, | 191 | .set_piomode = sc1200_set_piomode, |
201 | .set_dmamode = sc1200_set_dmamode, | 192 | .set_dmamode = sc1200_set_dmamode, |
202 | .mode_filter = ata_pci_default_filter, | ||
203 | |||
204 | .tf_load = ata_tf_load, | ||
205 | .tf_read = ata_tf_read, | ||
206 | .check_status = ata_check_status, | ||
207 | .exec_command = ata_exec_command, | ||
208 | .dev_select = ata_std_dev_select, | ||
209 | |||
210 | .freeze = ata_bmdma_freeze, | ||
211 | .thaw = ata_bmdma_thaw, | ||
212 | .error_handler = ata_bmdma_error_handler, | ||
213 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
214 | .cable_detect = ata_cable_40wire, | ||
215 | |||
216 | .bmdma_setup = ata_bmdma_setup, | ||
217 | .bmdma_start = ata_bmdma_start, | ||
218 | .bmdma_stop = ata_bmdma_stop, | ||
219 | .bmdma_status = ata_bmdma_status, | ||
220 | |||
221 | .qc_prep = ata_dumb_qc_prep, | ||
222 | .qc_issue = sc1200_qc_issue_prot, | ||
223 | |||
224 | .data_xfer = ata_data_xfer, | ||
225 | |||
226 | .irq_handler = ata_interrupt, | ||
227 | .irq_clear = ata_bmdma_irq_clear, | ||
228 | .irq_on = ata_irq_on, | ||
229 | |||
230 | .port_start = ata_sff_port_start, | ||
231 | }; | 193 | }; |
232 | 194 | ||
233 | /** | 195 | /** |
@@ -242,7 +204,6 @@ static struct ata_port_operations sc1200_port_ops = { | |||
242 | static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 204 | static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
243 | { | 205 | { |
244 | static const struct ata_port_info info = { | 206 | static const struct ata_port_info info = { |
245 | .sht = &sc1200_sht, | ||
246 | .flags = ATA_FLAG_SLAVE_POSS, | 207 | .flags = ATA_FLAG_SLAVE_POSS, |
247 | .pio_mask = 0x1f, | 208 | .pio_mask = 0x1f, |
248 | .mwdma_mask = 0x07, | 209 | .mwdma_mask = 0x07, |
@@ -252,7 +213,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
252 | /* Can't enable port 2 yet, see top comments */ | 213 | /* Can't enable port 2 yet, see top comments */ |
253 | const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; | 214 | const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; |
254 | 215 | ||
255 | return ata_pci_init_one(dev, ppi); | 216 | return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL); |
256 | } | 217 | } |
257 | 218 | ||
258 | static const struct pci_device_id sc1200[] = { | 219 | static const struct pci_device_id sc1200[] = { |
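
The sc1200 SHT shows how a driver tweaks one field on top of the macro: ATA_BMDMA_SHT() already supplies .sg_tablesize, and the explicit LIBATA_DUMB_MAX_PRD entry that follows overrides it, because with C99 designated initializers the last initializer for a member wins (some compilers warn about the duplicate; the net effect is simply that the later value is used). The smaller table pairs with ata_sff_dumb_qc_prep, which builds a more conservative PRD list and may need extra entries per scatter element. The resulting pair, as in the hunks above:

static struct scsi_host_template sc1200_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,	/* overrides the macro's default */
};

static struct ata_port_operations sc1200_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_prep	= ata_sff_dumb_qc_prep,	/* the reason for the smaller table */
	.qc_issue	= sc1200_qc_issue,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= sc1200_set_piomode,
	.set_dmamode	= sc1200_set_dmamode,
};
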
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 6c016deeaed8..e965b251ca24 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
@@ -266,7 +266,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) | |||
266 | printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); | 266 | printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); |
267 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); | 267 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
268 | } | 268 | } |
269 | return ata_pci_default_filter(adev, mask); | 269 | return ata_bmdma_mode_filter(adev, mask); |
270 | } | 270 | } |
271 | 271 | ||
272 | /** | 272 | /** |
@@ -274,7 +274,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) | |||
274 | * @ap: Port to which output is sent | 274 | * @ap: Port to which output is sent |
275 | * @tf: ATA taskfile register set | 275 | * @tf: ATA taskfile register set |
276 | * | 276 | * |
277 | * Note: Original code is ata_tf_load(). | 277 | * Note: Original code is ata_sff_tf_load(). |
278 | */ | 278 | */ |
279 | 279 | ||
280 | static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) | 280 | static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) |
@@ -341,7 +341,7 @@ static u8 scc_check_status (struct ata_port *ap) | |||
341 | * @ap: Port from which input is read | 341 | * @ap: Port from which input is read |
342 | * @tf: ATA taskfile register set for storing input | 342 | * @tf: ATA taskfile register set for storing input |
343 | * | 343 | * |
344 | * Note: Original code is ata_tf_read(). | 344 | * Note: Original code is ata_sff_tf_read(). |
345 | */ | 345 | */ |
346 | 346 | ||
347 | static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) | 347 | static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) |
@@ -373,7 +373,7 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) | |||
373 | * @ap: port to which command is being issued | 373 | * @ap: port to which command is being issued |
374 | * @tf: ATA taskfile register set | 374 | * @tf: ATA taskfile register set |
375 | * | 375 | * |
376 | * Note: Original code is ata_exec_command(). | 376 | * Note: Original code is ata_sff_exec_command(). |
377 | */ | 377 | */ |
378 | 378 | ||
379 | static void scc_exec_command (struct ata_port *ap, | 379 | static void scc_exec_command (struct ata_port *ap, |
@@ -382,7 +382,7 @@ static void scc_exec_command (struct ata_port *ap, | |||
382 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); | 382 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); |
383 | 383 | ||
384 | out_be32(ap->ioaddr.command_addr, tf->command); | 384 | out_be32(ap->ioaddr.command_addr, tf->command); |
385 | ata_pause(ap); | 385 | ata_sff_pause(ap); |
386 | } | 386 | } |
387 | 387 | ||
388 | /** | 388 | /** |
@@ -396,14 +396,14 @@ static u8 scc_check_altstatus (struct ata_port *ap) | |||
396 | } | 396 | } |
397 | 397 | ||
398 | /** | 398 | /** |
399 | * scc_std_dev_select - Select device 0/1 on ATA bus | 399 | * scc_dev_select - Select device 0/1 on ATA bus |
400 | * @ap: ATA channel to manipulate | 400 | * @ap: ATA channel to manipulate |
401 | * @device: ATA device (numbered from zero) to select | 401 | * @device: ATA device (numbered from zero) to select |
402 | * | 402 | * |
403 | * Note: Original code is ata_std_dev_select(). | 403 | * Note: Original code is ata_sff_dev_select(). |
404 | */ | 404 | */ |
405 | 405 | ||
406 | static void scc_std_dev_select (struct ata_port *ap, unsigned int device) | 406 | static void scc_dev_select (struct ata_port *ap, unsigned int device) |
407 | { | 407 | { |
408 | u8 tmp; | 408 | u8 tmp; |
409 | 409 | ||
@@ -413,7 +413,7 @@ static void scc_std_dev_select (struct ata_port *ap, unsigned int device) | |||
413 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | 413 | tmp = ATA_DEVICE_OBS | ATA_DEV1; |
414 | 414 | ||
415 | out_be32(ap->ioaddr.device_addr, tmp); | 415 | out_be32(ap->ioaddr.device_addr, tmp); |
416 | ata_pause(ap); | 416 | ata_sff_pause(ap); |
417 | } | 417 | } |
418 | 418 | ||
419 | /** | 419 | /** |
@@ -441,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc) | |||
441 | out_be32(mmio + SCC_DMA_CMD, dmactl); | 441 | out_be32(mmio + SCC_DMA_CMD, dmactl); |
442 | 442 | ||
443 | /* issue r/w command */ | 443 | /* issue r/w command */ |
444 | ap->ops->exec_command(ap, &qc->tf); | 444 | ap->ops->sff_exec_command(ap, &qc->tf); |
445 | } | 445 | } |
446 | 446 | ||
447 | /** | 447 | /** |
@@ -476,7 +476,7 @@ static unsigned int scc_devchk (struct ata_port *ap, | |||
476 | struct ata_ioports *ioaddr = &ap->ioaddr; | 476 | struct ata_ioports *ioaddr = &ap->ioaddr; |
477 | u8 nsect, lbal; | 477 | u8 nsect, lbal; |
478 | 478 | ||
479 | ap->ops->dev_select(ap, device); | 479 | ap->ops->sff_dev_select(ap, device); |
480 | 480 | ||
481 | out_be32(ioaddr->nsect_addr, 0x55); | 481 | out_be32(ioaddr->nsect_addr, 0x55); |
482 | out_be32(ioaddr->lbal_addr, 0xaa); | 482 | out_be32(ioaddr->lbal_addr, 0xaa); |
@@ -497,57 +497,78 @@ static unsigned int scc_devchk (struct ata_port *ap, | |||
497 | } | 497 | } |
498 | 498 | ||
499 | /** | 499 | /** |
500 | * scc_bus_post_reset - PATA device post reset | 500 | * scc_wait_after_reset - wait for devices to become ready after reset |
501 | * | 501 | * |
502 | * Note: Original code is ata_bus_post_reset(). | 502 | * Note: Original code is ata_sff_wait_after_reset |
503 | */ | 503 | */ |
504 | 504 | ||
505 | static int scc_bus_post_reset(struct ata_port *ap, unsigned int devmask, | 505 | int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, |
506 | unsigned long deadline) | 506 | unsigned long deadline) |
507 | { | 507 | { |
508 | struct ata_port *ap = link->ap; | ||
508 | struct ata_ioports *ioaddr = &ap->ioaddr; | 509 | struct ata_ioports *ioaddr = &ap->ioaddr; |
509 | unsigned int dev0 = devmask & (1 << 0); | 510 | unsigned int dev0 = devmask & (1 << 0); |
510 | unsigned int dev1 = devmask & (1 << 1); | 511 | unsigned int dev1 = devmask & (1 << 1); |
511 | int rc; | 512 | int rc, ret = 0; |
512 | 513 | ||
513 | /* if device 0 was found in ata_devchk, wait for its | 514 | /* Spec mandates ">= 2ms" before checking status. We wait |
514 | * BSY bit to clear | 515 | * 150ms, because that was the magic delay used for ATAPI |
516 | * devices in Hale Landis's ATADRVR, for the period of time | ||
517 | * between when the ATA command register is written, and then | ||
518 | * status is checked. Because waiting for "a while" before | ||
519 | * checking status is fine, post SRST, we perform this magic | ||
520 | * delay here as well. | ||
521 | * | ||
522 | * Old drivers/ide uses the 2mS rule and then waits for ready. | ||
515 | */ | 523 | */ |
516 | if (dev0) { | 524 | msleep(150); |
517 | rc = ata_wait_ready(ap, deadline); | ||
518 | if (rc && rc != -ENODEV) | ||
519 | return rc; | ||
520 | } | ||
521 | 525 | ||
522 | /* if device 1 was found in ata_devchk, wait for | 526 | /* always check readiness of the master device */ |
523 | * register access, then wait for BSY to clear | 527 | rc = ata_sff_wait_ready(link, deadline); |
528 | /* -ENODEV means the odd clown forgot the D7 pulldown resistor | ||
529 | * and TF status is 0xff, bail out on it too. | ||
524 | */ | 530 | */ |
525 | while (dev1) { | 531 | if (rc) |
526 | u8 nsect, lbal; | 532 | return rc; |
527 | 533 | ||
528 | ap->ops->dev_select(ap, 1); | 534 | /* if device 1 was found in ata_devchk, wait for register |
529 | nsect = in_be32(ioaddr->nsect_addr); | 535 | * access briefly, then wait for BSY to clear. |
530 | lbal = in_be32(ioaddr->lbal_addr); | 536 | */ |
531 | if ((nsect == 1) && (lbal == 1)) | ||
532 | break; | ||
533 | if (time_after(jiffies, deadline)) | ||
534 | return -EBUSY; | ||
535 | msleep(50); /* give drive a breather */ | ||
536 | } | ||
537 | if (dev1) { | 537 | if (dev1) { |
538 | rc = ata_wait_ready(ap, deadline); | 538 | int i; |
539 | if (rc && rc != -ENODEV) | 539 | |
540 | return rc; | 540 | ap->ops->sff_dev_select(ap, 1); |
541 | |||
542 | /* Wait for register access. Some ATAPI devices fail | ||
543 | * to set nsect/lbal after reset, so don't waste too | ||
544 | * much time on it. We're gonna wait for !BSY anyway. | ||
545 | */ | ||
546 | for (i = 0; i < 2; i++) { | ||
547 | u8 nsect, lbal; | ||
548 | |||
549 | nsect = in_be32(ioaddr->nsect_addr); | ||
550 | lbal = in_be32(ioaddr->lbal_addr); | ||
551 | if ((nsect == 1) && (lbal == 1)) | ||
552 | break; | ||
553 | msleep(50); /* give drive a breather */ | ||
554 | } | ||
555 | |||
556 | rc = ata_sff_wait_ready(link, deadline); | ||
557 | if (rc) { | ||
558 | if (rc != -ENODEV) | ||
559 | return rc; | ||
560 | ret = rc; | ||
561 | } | ||
541 | } | 562 | } |
542 | 563 | ||
543 | /* is all this really necessary? */ | 564 | /* is all this really necessary? */ |
544 | ap->ops->dev_select(ap, 0); | 565 | ap->ops->sff_dev_select(ap, 0); |
545 | if (dev1) | 566 | if (dev1) |
546 | ap->ops->dev_select(ap, 1); | 567 | ap->ops->sff_dev_select(ap, 1); |
547 | if (dev0) | 568 | if (dev0) |
548 | ap->ops->dev_select(ap, 0); | 569 | ap->ops->sff_dev_select(ap, 0); |
549 | 570 | ||
550 | return 0; | 571 | return ret; |
551 | } | 572 | } |
552 | 573 | ||
553 | /** | 574 | /** |
@@ -570,32 +591,22 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
570 | udelay(20); | 591 | udelay(20); |
571 | out_be32(ioaddr->ctl_addr, ap->ctl); | 592 | out_be32(ioaddr->ctl_addr, ap->ctl); |
572 | 593 | ||
573 | /* wait a while before checking status */ | 594 | scc_wait_after_reset(&ap->link, devmask, deadline); |
574 | ata_wait_after_reset(ap, deadline); | ||
575 | |||
576 | /* Before we perform post reset processing we want to see if | ||
577 | * the bus shows 0xFF because the odd clown forgets the D7 | ||
578 | * pulldown resistor. | ||
579 | */ | ||
580 | if (scc_check_status(ap) == 0xFF) | ||
581 | return 0; | ||
582 | |||
583 | scc_bus_post_reset(ap, devmask, deadline); | ||
584 | 595 | ||
585 | return 0; | 596 | return 0; |
586 | } | 597 | } |
587 | 598 | ||
588 | /** | 599 | /** |
589 | * scc_std_softreset - reset host port via ATA SRST | 600 | * scc_softreset - reset host port via ATA SRST |
590 | * @ap: port to reset | 601 | * @ap: port to reset |
591 | * @classes: resulting classes of attached devices | 602 | * @classes: resulting classes of attached devices |
592 | * @deadline: deadline jiffies for the operation | 603 | * @deadline: deadline jiffies for the operation |
593 | * | 604 | * |
594 | * Note: Original code is ata_std_softreset(). | 605 | * Note: Original code is ata_sff_softreset(). |
595 | */ | 606 | */ |
596 | 607 | ||
597 | static int scc_std_softreset(struct ata_link *link, unsigned int *classes, | 608 | static int scc_softreset(struct ata_link *link, unsigned int *classes, |
598 | unsigned long deadline) | 609 | unsigned long deadline) |
599 | { | 610 | { |
600 | struct ata_port *ap = link->ap; | 611 | struct ata_port *ap = link->ap; |
601 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 612 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
@@ -604,11 +615,6 @@ static int scc_std_softreset(struct ata_link *link, unsigned int *classes, | |||
604 | 615 | ||
605 | DPRINTK("ENTER\n"); | 616 | DPRINTK("ENTER\n"); |
606 | 617 | ||
607 | if (ata_link_offline(link)) { | ||
608 | classes[0] = ATA_DEV_NONE; | ||
609 | goto out; | ||
610 | } | ||
611 | |||
612 | /* determine if device 0/1 are present */ | 618 | /* determine if device 0/1 are present */ |
613 | if (scc_devchk(ap, 0)) | 619 | if (scc_devchk(ap, 0)) |
614 | devmask |= (1 << 0); | 620 | devmask |= (1 << 0); |
@@ -616,7 +622,7 @@ static int scc_std_softreset(struct ata_link *link, unsigned int *classes, | |||
616 | devmask |= (1 << 1); | 622 | devmask |= (1 << 1); |
617 | 623 | ||
618 | /* select device 0 again */ | 624 | /* select device 0 again */ |
619 | ap->ops->dev_select(ap, 0); | 625 | ap->ops->sff_dev_select(ap, 0); |
620 | 626 | ||
621 | /* issue bus reset */ | 627 | /* issue bus reset */ |
622 | DPRINTK("about to softreset, devmask=%x\n", devmask); | 628 | DPRINTK("about to softreset, devmask=%x\n", devmask); |
@@ -628,13 +634,12 @@ static int scc_std_softreset(struct ata_link *link, unsigned int *classes, | |||
628 | } | 634 | } |
629 | 635 | ||
630 | /* determine by signature whether we have ATA or ATAPI devices */ | 636 | /* determine by signature whether we have ATA or ATAPI devices */ |
631 | classes[0] = ata_dev_try_classify(&ap->link.device[0], | 637 | classes[0] = ata_sff_dev_classify(&ap->link.device[0], |
632 | devmask & (1 << 0), &err); | 638 | devmask & (1 << 0), &err); |
633 | if (slave_possible && err != 0x81) | 639 | if (slave_possible && err != 0x81) |
634 | classes[1] = ata_dev_try_classify(&ap->link.device[1], | 640 | classes[1] = ata_sff_dev_classify(&ap->link.device[1], |
635 | devmask & (1 << 1), &err); | 641 | devmask & (1 << 1), &err); |
636 | 642 | ||
637 | out: | ||
638 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | 643 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); |
639 | return 0; | 644 | return 0; |
640 | } | 645 | } |
@@ -695,7 +700,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) | |||
695 | printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); | 700 | printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); |
696 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); | 701 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); |
697 | /* TBD: SW reset */ | 702 | /* TBD: SW reset */ |
698 | scc_std_softreset(&ap->link, &classes, deadline); | 703 | scc_softreset(&ap->link, &classes, deadline); |
699 | continue; | 704 | continue; |
700 | } | 705 | } |
701 | 706 | ||
@@ -721,7 +726,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) | |||
721 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | 726 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); |
722 | 727 | ||
723 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | 728 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
724 | ata_altstatus(ap); /* dummy read */ | 729 | ata_sff_altstatus(ap); /* dummy read */ |
725 | } | 730 | } |
726 | 731 | ||
727 | /** | 732 | /** |
@@ -742,7 +747,7 @@ static u8 scc_bmdma_status (struct ata_port *ap) | |||
742 | return host_stat; | 747 | return host_stat; |
743 | 748 | ||
744 | /* errata A252,A308 workaround: Step4 */ | 749 | /* errata A252,A308 workaround: Step4 */ |
745 | if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ)) | 750 | if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ)) |
746 | return (host_stat | ATA_DMA_INTR); | 751 | return (host_stat | ATA_DMA_INTR); |
747 | 752 | ||
748 | /* errata A308 workaround Step5 */ | 753 | /* errata A308 workaround Step5 */ |
@@ -773,7 +778,7 @@ static u8 scc_bmdma_status (struct ata_port *ap) | |||
773 | * @buflen: buffer length | 778 | * @buflen: buffer length |
774 | * @rw: read/write | 779 | * @rw: read/write |
775 | * | 780 | * |
776 | * Note: Original code is ata_data_xfer(). | 781 | * Note: Original code is ata_sff_data_xfer(). |
777 | */ | 782 | */ |
778 | 783 | ||
779 | static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, | 784 | static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, |
@@ -782,28 +787,28 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, | |||
782 | struct ata_port *ap = dev->link->ap; | 787 | struct ata_port *ap = dev->link->ap; |
783 | unsigned int words = buflen >> 1; | 788 | unsigned int words = buflen >> 1; |
784 | unsigned int i; | 789 | unsigned int i; |
785 | u16 *buf16 = (u16 *) buf; | 790 | __le16 *buf16 = (__le16 *) buf; |
786 | void __iomem *mmio = ap->ioaddr.data_addr; | 791 | void __iomem *mmio = ap->ioaddr.data_addr; |
787 | 792 | ||
788 | /* Transfer multiple of 2 bytes */ | 793 | /* Transfer multiple of 2 bytes */ |
789 | if (rw == READ) | 794 | if (rw == READ) |
790 | for (i = 0; i < words; i++) | 795 | for (i = 0; i < words; i++) |
791 | buf16[i] = le16_to_cpu(in_be32(mmio)); | 796 | buf16[i] = cpu_to_le16(in_be32(mmio)); |
792 | else | 797 | else |
793 | for (i = 0; i < words; i++) | 798 | for (i = 0; i < words; i++) |
794 | out_be32(mmio, cpu_to_le16(buf16[i])); | 799 | out_be32(mmio, le16_to_cpu(buf16[i])); |
795 | 800 | ||
796 | /* Transfer trailing 1 byte, if any. */ | 801 | /* Transfer trailing 1 byte, if any. */ |
797 | if (unlikely(buflen & 0x01)) { | 802 | if (unlikely(buflen & 0x01)) { |
798 | u16 align_buf[1] = { 0 }; | 803 | __le16 align_buf[1] = { 0 }; |
799 | unsigned char *trailing_buf = buf + buflen - 1; | 804 | unsigned char *trailing_buf = buf + buflen - 1; |
800 | 805 | ||
801 | if (rw == READ) { | 806 | if (rw == READ) { |
802 | align_buf[0] = le16_to_cpu(in_be32(mmio)); | 807 | align_buf[0] = cpu_to_le16(in_be32(mmio)); |
803 | memcpy(trailing_buf, align_buf, 1); | 808 | memcpy(trailing_buf, align_buf, 1); |
804 | } else { | 809 | } else { |
805 | memcpy(align_buf, trailing_buf, 1); | 810 | memcpy(align_buf, trailing_buf, 1); |
806 | out_be32(mmio, cpu_to_le16(align_buf[0])); | 811 | out_be32(mmio, le16_to_cpu(align_buf[0])); |
807 | } | 812 | } |
808 | words++; | 813 | words++; |
809 | } | 814 | } |
@@ -815,7 +820,7 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, | |||
815 | * scc_irq_on - Enable interrupts on a port. | 820 | * scc_irq_on - Enable interrupts on a port. |
816 | * @ap: Port on which interrupts are enabled. | 821 | * @ap: Port on which interrupts are enabled. |
817 | * | 822 | * |
818 | * Note: Original code is ata_irq_on(). | 823 | * Note: Original code is ata_sff_irq_on(). |
819 | */ | 824 | */ |
820 | 825 | ||
821 | static u8 scc_irq_on (struct ata_port *ap) | 826 | static u8 scc_irq_on (struct ata_port *ap) |
@@ -829,19 +834,19 @@ static u8 scc_irq_on (struct ata_port *ap) | |||
829 | out_be32(ioaddr->ctl_addr, ap->ctl); | 834 | out_be32(ioaddr->ctl_addr, ap->ctl); |
830 | tmp = ata_wait_idle(ap); | 835 | tmp = ata_wait_idle(ap); |
831 | 836 | ||
832 | ap->ops->irq_clear(ap); | 837 | ap->ops->sff_irq_clear(ap); |
833 | 838 | ||
834 | return tmp; | 839 | return tmp; |
835 | } | 840 | } |
836 | 841 | ||
837 | /** | 842 | /** |
838 | * scc_bmdma_freeze - Freeze BMDMA controller port | 843 | * scc_freeze - Freeze BMDMA controller port |
839 | * @ap: port to freeze | 844 | * @ap: port to freeze |
840 | * | 845 | * |
841 | * Note: Original code is ata_bmdma_freeze(). | 846 | * Note: Original code is ata_sff_freeze(). |
842 | */ | 847 | */ |
843 | 848 | ||
844 | static void scc_bmdma_freeze (struct ata_port *ap) | 849 | static void scc_freeze (struct ata_port *ap) |
845 | { | 850 | { |
846 | struct ata_ioports *ioaddr = &ap->ioaddr; | 851 | struct ata_ioports *ioaddr = &ap->ioaddr; |
847 | 852 | ||
@@ -854,9 +859,9 @@ static void scc_bmdma_freeze (struct ata_port *ap) | |||
854 | * ATA_NIEN manipulation. Also, many controllers fail to mask | 859 | * ATA_NIEN manipulation. Also, many controllers fail to mask |
855 | * previously pending IRQ on ATA_NIEN assertion. Clear it. | 860 | * previously pending IRQ on ATA_NIEN assertion. Clear it. |
856 | */ | 861 | */ |
857 | ata_chk_status(ap); | 862 | ap->ops->sff_check_status(ap); |
858 | 863 | ||
859 | ap->ops->irq_clear(ap); | 864 | ap->ops->sff_irq_clear(ap); |
860 | } | 865 | } |
861 | 866 | ||
862 | /** | 867 | /** |
@@ -868,18 +873,18 @@ static void scc_bmdma_freeze (struct ata_port *ap) | |||
868 | static int scc_pata_prereset(struct ata_link *link, unsigned long deadline) | 873 | static int scc_pata_prereset(struct ata_link *link, unsigned long deadline) |
869 | { | 874 | { |
870 | link->ap->cbl = ATA_CBL_PATA80; | 875 | link->ap->cbl = ATA_CBL_PATA80; |
871 | return ata_std_prereset(link, deadline); | 876 | return ata_sff_prereset(link, deadline); |
872 | } | 877 | } |
873 | 878 | ||
874 | /** | 879 | /** |
875 | * scc_std_postreset - standard postreset callback | 880 | * scc_postreset - standard postreset callback |
876 | * @ap: the target ata_port | 881 | * @ap: the target ata_port |
877 | * @classes: classes of attached devices | 882 | * @classes: classes of attached devices |
878 | * | 883 | * |
879 | * Note: Original code is ata_std_postreset(). | 884 | * Note: Original code is ata_sff_postreset(). |
880 | */ | 885 | */ |
881 | 886 | ||
882 | static void scc_std_postreset(struct ata_link *link, unsigned int *classes) | 887 | static void scc_postreset(struct ata_link *link, unsigned int *classes) |
883 | { | 888 | { |
884 | struct ata_port *ap = link->ap; | 889 | struct ata_port *ap = link->ap; |
885 | 890 | ||
@@ -887,9 +892,9 @@ static void scc_std_postreset(struct ata_link *link, unsigned int *classes) | |||
887 | 892 | ||
888 | /* is double-select really necessary? */ | 893 | /* is double-select really necessary? */ |
889 | if (classes[0] != ATA_DEV_NONE) | 894 | if (classes[0] != ATA_DEV_NONE) |
890 | ap->ops->dev_select(ap, 1); | 895 | ap->ops->sff_dev_select(ap, 1); |
891 | if (classes[1] != ATA_DEV_NONE) | 896 | if (classes[1] != ATA_DEV_NONE) |
892 | ap->ops->dev_select(ap, 0); | 897 | ap->ops->sff_dev_select(ap, 0); |
893 | 898 | ||
894 | /* bail out if no device is present */ | 899 | /* bail out if no device is present */ |
895 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | 900 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { |
@@ -905,24 +910,13 @@ static void scc_std_postreset(struct ata_link *link, unsigned int *classes) | |||
905 | } | 910 | } |
906 | 911 | ||
907 | /** | 912 | /** |
908 | * scc_error_handler - Stock error handler for BMDMA controller | 913 | * scc_irq_clear - Clear PCI IDE BMDMA interrupt. |
909 | * @ap: port to handle error for | ||
910 | */ | ||
911 | |||
912 | static void scc_error_handler (struct ata_port *ap) | ||
913 | { | ||
914 | ata_bmdma_drive_eh(ap, scc_pata_prereset, scc_std_softreset, NULL, | ||
915 | scc_std_postreset); | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * scc_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
920 | * @ap: Port associated with this ATA transaction. | 914 | * @ap: Port associated with this ATA transaction. |
921 | * | 915 | * |
922 | * Note: Original code is ata_bmdma_irq_clear(). | 916 | * Note: Original code is ata_sff_irq_clear(). |
923 | */ | 917 | */ |
924 | 918 | ||
925 | static void scc_bmdma_irq_clear (struct ata_port *ap) | 919 | static void scc_irq_clear (struct ata_port *ap) |
926 | { | 920 | { |
927 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 921 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
928 | 922 | ||
@@ -968,52 +962,37 @@ static void scc_port_stop (struct ata_port *ap) | |||
968 | } | 962 | } |
969 | 963 | ||
970 | static struct scsi_host_template scc_sht = { | 964 | static struct scsi_host_template scc_sht = { |
971 | .module = THIS_MODULE, | 965 | ATA_BMDMA_SHT(DRV_NAME), |
972 | .name = DRV_NAME, | ||
973 | .ioctl = ata_scsi_ioctl, | ||
974 | .queuecommand = ata_scsi_queuecmd, | ||
975 | .can_queue = ATA_DEF_QUEUE, | ||
976 | .this_id = ATA_SHT_THIS_ID, | ||
977 | .sg_tablesize = LIBATA_MAX_PRD, | ||
978 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
979 | .emulated = ATA_SHT_EMULATED, | ||
980 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
981 | .proc_name = DRV_NAME, | ||
982 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
983 | .slave_configure = ata_scsi_slave_config, | ||
984 | .slave_destroy = ata_scsi_slave_destroy, | ||
985 | .bios_param = ata_std_bios_param, | ||
986 | }; | 966 | }; |
987 | 967 | ||
988 | static const struct ata_port_operations scc_pata_ops = { | 968 | static struct ata_port_operations scc_pata_ops = { |
969 | .inherits = &ata_bmdma_port_ops, | ||
970 | |||
989 | .set_piomode = scc_set_piomode, | 971 | .set_piomode = scc_set_piomode, |
990 | .set_dmamode = scc_set_dmamode, | 972 | .set_dmamode = scc_set_dmamode, |
991 | .mode_filter = scc_mode_filter, | 973 | .mode_filter = scc_mode_filter, |
992 | 974 | ||
993 | .tf_load = scc_tf_load, | 975 | .sff_tf_load = scc_tf_load, |
994 | .tf_read = scc_tf_read, | 976 | .sff_tf_read = scc_tf_read, |
995 | .exec_command = scc_exec_command, | 977 | .sff_exec_command = scc_exec_command, |
996 | .check_status = scc_check_status, | 978 | .sff_check_status = scc_check_status, |
997 | .check_altstatus = scc_check_altstatus, | 979 | .sff_check_altstatus = scc_check_altstatus, |
998 | .dev_select = scc_std_dev_select, | 980 | .sff_dev_select = scc_dev_select, |
999 | 981 | ||
1000 | .bmdma_setup = scc_bmdma_setup, | 982 | .bmdma_setup = scc_bmdma_setup, |
1001 | .bmdma_start = scc_bmdma_start, | 983 | .bmdma_start = scc_bmdma_start, |
1002 | .bmdma_stop = scc_bmdma_stop, | 984 | .bmdma_stop = scc_bmdma_stop, |
1003 | .bmdma_status = scc_bmdma_status, | 985 | .bmdma_status = scc_bmdma_status, |
1004 | .data_xfer = scc_data_xfer, | 986 | .sff_data_xfer = scc_data_xfer, |
1005 | |||
1006 | .qc_prep = ata_qc_prep, | ||
1007 | .qc_issue = ata_qc_issue_prot, | ||
1008 | |||
1009 | .freeze = scc_bmdma_freeze, | ||
1010 | .thaw = ata_bmdma_thaw, | ||
1011 | 987 | ||
1012 | .error_handler = scc_error_handler, | 988 | .freeze = scc_freeze, |
989 | .prereset = scc_pata_prereset, | ||
990 | .softreset = scc_softreset, | ||
991 | .postreset = scc_postreset, | ||
1013 | .post_internal_cmd = scc_bmdma_stop, | 992 | .post_internal_cmd = scc_bmdma_stop, |
1014 | 993 | ||
1015 | .irq_clear = scc_bmdma_irq_clear, | 994 | .sff_irq_clear = scc_irq_clear, |
1016 | .irq_on = scc_irq_on, | 995 | .sff_irq_on = scc_irq_on, |
1017 | 996 | ||
1018 | .port_start = scc_port_start, | 997 | .port_start = scc_port_start, |
1019 | .port_stop = scc_port_stop, | 998 | .port_stop = scc_port_stop, |
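
With the conversion the hand-rolled scsi_host_template collapses into the ATA_BMDMA_SHT() initializer, and every taskfile-access method gains an sff_ prefix to mark it as SFF-only. A hedged sketch of the resulting template; LIBATA_DUMB_MAX_PRD is only an example of a field a driver might still override after the macro, not something this driver needs:

	static struct scsi_host_template foo_sht = {
		ATA_BMDMA_SHT(DRV_NAME),
		/* a later designated initializer may still override a default */
		.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
	};
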
@@ -1166,8 +1145,8 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1166 | if (rc) | 1145 | if (rc) |
1167 | return rc; | 1146 | return rc; |
1168 | 1147 | ||
1169 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 1148 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
1170 | &scc_sht); | 1149 | IRQF_SHARED, &scc_sht); |
1171 | } | 1150 | } |
1172 | 1151 | ||
1173 | static struct pci_driver scc_pci_driver = { | 1152 | static struct pci_driver scc_pci_driver = { |
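
Drivers such as this one, which build the ata_host by hand instead of going through the PCI helper, now pass the renamed ata_sff_interrupt to ata_host_activate(). A sketch of the final activation step, assuming host, foo_sht and the port I/O addresses are already set up:

	/* register the host and request the (possibly shared) IRQ */
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &foo_sht);
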
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index a589c0fa0dbb..ffd26d0dc50d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c | |||
@@ -199,7 +199,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l | |||
199 | { | 199 | { |
200 | if (adev->class == ATA_DEV_ATA) | 200 | if (adev->class == ATA_DEV_ATA) |
201 | mask &= ~ATA_MASK_UDMA; | 201 | mask &= ~ATA_MASK_UDMA; |
202 | return ata_pci_default_filter(adev, mask); | 202 | return ata_bmdma_mode_filter(adev, mask); |
203 | } | 203 | } |
204 | 204 | ||
205 | 205 | ||
@@ -219,7 +219,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo | |||
219 | 219 | ||
220 | /* Disk, UDMA */ | 220 | /* Disk, UDMA */ |
221 | if (adev->class != ATA_DEV_ATA) | 221 | if (adev->class != ATA_DEV_ATA) |
222 | return ata_pci_default_filter(adev, mask); | 222 | return ata_bmdma_mode_filter(adev, mask); |
223 | 223 | ||
224 | /* Actually do need to check */ | 224 | /* Actually do need to check */ |
225 | ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); | 225 | ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); |
@@ -228,7 +228,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo | |||
228 | if (!strcmp(p, model_num)) | 228 | if (!strcmp(p, model_num)) |
229 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); | 229 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
230 | } | 230 | } |
231 | return ata_pci_default_filter(adev, mask); | 231 | return ata_bmdma_mode_filter(adev, mask); |
232 | } | 232 | } |
233 | 233 | ||
234 | /** | 234 | /** |
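
The filter callbacks keep their structure and simply chain to the renamed library helper. A minimal sketch of the pattern, mirroring the OSB4 case above with a hypothetical foo_ prefix:

	static unsigned long foo_mode_filter(struct ata_device *adev,
					     unsigned long mask)
	{
		/* example policy: no UDMA for plain ATA disks on this chip */
		if (adev->class == ATA_DEV_ATA)
			mask &= ~ATA_MASK_UDMA;
		/* always finish with the generic BMDMA filter */
		return ata_bmdma_mode_filter(adev, mask);
	}
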
@@ -298,89 +298,20 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev | |||
298 | } | 298 | } |
299 | 299 | ||
300 | static struct scsi_host_template serverworks_sht = { | 300 | static struct scsi_host_template serverworks_sht = { |
301 | .module = THIS_MODULE, | 301 | ATA_BMDMA_SHT(DRV_NAME), |
302 | .name = DRV_NAME, | ||
303 | .ioctl = ata_scsi_ioctl, | ||
304 | .queuecommand = ata_scsi_queuecmd, | ||
305 | .can_queue = ATA_DEF_QUEUE, | ||
306 | .this_id = ATA_SHT_THIS_ID, | ||
307 | .sg_tablesize = LIBATA_MAX_PRD, | ||
308 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
309 | .emulated = ATA_SHT_EMULATED, | ||
310 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
311 | .proc_name = DRV_NAME, | ||
312 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
313 | .slave_configure = ata_scsi_slave_config, | ||
314 | .slave_destroy = ata_scsi_slave_destroy, | ||
315 | .bios_param = ata_std_bios_param, | ||
316 | }; | 302 | }; |
317 | 303 | ||
318 | static struct ata_port_operations serverworks_osb4_port_ops = { | 304 | static struct ata_port_operations serverworks_osb4_port_ops = { |
305 | .inherits = &ata_bmdma_port_ops, | ||
306 | .cable_detect = serverworks_cable_detect, | ||
307 | .mode_filter = serverworks_osb4_filter, | ||
319 | .set_piomode = serverworks_set_piomode, | 308 | .set_piomode = serverworks_set_piomode, |
320 | .set_dmamode = serverworks_set_dmamode, | 309 | .set_dmamode = serverworks_set_dmamode, |
321 | .mode_filter = serverworks_osb4_filter, | ||
322 | |||
323 | .tf_load = ata_tf_load, | ||
324 | .tf_read = ata_tf_read, | ||
325 | .check_status = ata_check_status, | ||
326 | .exec_command = ata_exec_command, | ||
327 | .dev_select = ata_std_dev_select, | ||
328 | |||
329 | .freeze = ata_bmdma_freeze, | ||
330 | .thaw = ata_bmdma_thaw, | ||
331 | .error_handler = ata_bmdma_error_handler, | ||
332 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
333 | .cable_detect = serverworks_cable_detect, | ||
334 | |||
335 | .bmdma_setup = ata_bmdma_setup, | ||
336 | .bmdma_start = ata_bmdma_start, | ||
337 | .bmdma_stop = ata_bmdma_stop, | ||
338 | .bmdma_status = ata_bmdma_status, | ||
339 | |||
340 | .qc_prep = ata_qc_prep, | ||
341 | .qc_issue = ata_qc_issue_prot, | ||
342 | |||
343 | .data_xfer = ata_data_xfer, | ||
344 | |||
345 | .irq_handler = ata_interrupt, | ||
346 | .irq_clear = ata_bmdma_irq_clear, | ||
347 | .irq_on = ata_irq_on, | ||
348 | |||
349 | .port_start = ata_sff_port_start, | ||
350 | }; | 310 | }; |
351 | 311 | ||
352 | static struct ata_port_operations serverworks_csb_port_ops = { | 312 | static struct ata_port_operations serverworks_csb_port_ops = { |
353 | .set_piomode = serverworks_set_piomode, | 313 | .inherits = &serverworks_osb4_port_ops, |
354 | .set_dmamode = serverworks_set_dmamode, | ||
355 | .mode_filter = serverworks_csb_filter, | 314 | .mode_filter = serverworks_csb_filter, |
356 | |||
357 | .tf_load = ata_tf_load, | ||
358 | .tf_read = ata_tf_read, | ||
359 | .check_status = ata_check_status, | ||
360 | .exec_command = ata_exec_command, | ||
361 | .dev_select = ata_std_dev_select, | ||
362 | |||
363 | .freeze = ata_bmdma_freeze, | ||
364 | .thaw = ata_bmdma_thaw, | ||
365 | .error_handler = ata_bmdma_error_handler, | ||
366 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
367 | .cable_detect = serverworks_cable_detect, | ||
368 | |||
369 | .bmdma_setup = ata_bmdma_setup, | ||
370 | .bmdma_start = ata_bmdma_start, | ||
371 | .bmdma_stop = ata_bmdma_stop, | ||
372 | .bmdma_status = ata_bmdma_status, | ||
373 | |||
374 | .qc_prep = ata_qc_prep, | ||
375 | .qc_issue = ata_qc_issue_prot, | ||
376 | |||
377 | .data_xfer = ata_data_xfer, | ||
378 | |||
379 | .irq_handler = ata_interrupt, | ||
380 | .irq_clear = ata_bmdma_irq_clear, | ||
381 | .irq_on = ata_irq_on, | ||
382 | |||
383 | .port_start = ata_sff_port_start, | ||
384 | }; | 315 | }; |
385 | 316 | ||
386 | static int serverworks_fixup_osb4(struct pci_dev *pdev) | 317 | static int serverworks_fixup_osb4(struct pci_dev *pdev) |
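
Note how serverworks_csb_port_ops now inherits from serverworks_osb4_port_ops rather than from ata_bmdma_port_ops directly, so the CSB variant only spells out the one method that differs. The same layering can be sketched with placeholder names (foo_set_piomode and foo_variant_filter are assumed driver functions):

	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev);
	static unsigned long foo_variant_filter(struct ata_device *adev,
						unsigned long mask);

	static struct ata_port_operations foo_base_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.cable_detect	= ata_cable_40wire,
		.set_piomode	= foo_set_piomode,
	};

	static struct ata_port_operations foo_variant_ops = {
		/* resolved at host registration: variant -> base -> bmdma */
		.inherits	= &foo_base_ops,
		.mode_filter	= foo_variant_filter,
	};
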
@@ -468,28 +399,24 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id | |||
468 | { | 399 | { |
469 | static const struct ata_port_info info[4] = { | 400 | static const struct ata_port_info info[4] = { |
470 | { /* OSB4 */ | 401 | { /* OSB4 */ |
471 | .sht = &serverworks_sht, | ||
472 | .flags = ATA_FLAG_SLAVE_POSS, | 402 | .flags = ATA_FLAG_SLAVE_POSS, |
473 | .pio_mask = 0x1f, | 403 | .pio_mask = 0x1f, |
474 | .mwdma_mask = 0x07, | 404 | .mwdma_mask = 0x07, |
475 | .udma_mask = 0x07, | 405 | .udma_mask = 0x07, |
476 | .port_ops = &serverworks_osb4_port_ops | 406 | .port_ops = &serverworks_osb4_port_ops |
477 | }, { /* OSB4 no UDMA */ | 407 | }, { /* OSB4 no UDMA */ |
478 | .sht = &serverworks_sht, | ||
479 | .flags = ATA_FLAG_SLAVE_POSS, | 408 | .flags = ATA_FLAG_SLAVE_POSS, |
480 | .pio_mask = 0x1f, | 409 | .pio_mask = 0x1f, |
481 | .mwdma_mask = 0x07, | 410 | .mwdma_mask = 0x07, |
482 | .udma_mask = 0x00, | 411 | .udma_mask = 0x00, |
483 | .port_ops = &serverworks_osb4_port_ops | 412 | .port_ops = &serverworks_osb4_port_ops |
484 | }, { /* CSB5 */ | 413 | }, { /* CSB5 */ |
485 | .sht = &serverworks_sht, | ||
486 | .flags = ATA_FLAG_SLAVE_POSS, | 414 | .flags = ATA_FLAG_SLAVE_POSS, |
487 | .pio_mask = 0x1f, | 415 | .pio_mask = 0x1f, |
488 | .mwdma_mask = 0x07, | 416 | .mwdma_mask = 0x07, |
489 | .udma_mask = ATA_UDMA4, | 417 | .udma_mask = ATA_UDMA4, |
490 | .port_ops = &serverworks_csb_port_ops | 418 | .port_ops = &serverworks_csb_port_ops |
491 | }, { /* CSB5 - later revisions*/ | 419 | }, { /* CSB5 - later revisions*/ |
492 | .sht = &serverworks_sht, | ||
493 | .flags = ATA_FLAG_SLAVE_POSS, | 420 | .flags = ATA_FLAG_SLAVE_POSS, |
494 | .pio_mask = 0x1f, | 421 | .pio_mask = 0x1f, |
495 | .mwdma_mask = 0x07, | 422 | .mwdma_mask = 0x07, |
@@ -498,6 +425,11 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id | |||
498 | } | 425 | } |
499 | }; | 426 | }; |
500 | const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; | 427 | const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; |
428 | int rc; | ||
429 | |||
430 | rc = pcim_enable_device(pdev); | ||
431 | if (rc) | ||
432 | return rc; | ||
501 | 433 | ||
502 | /* Force master latency timer to 64 PCI clocks */ | 434 | /* Force master latency timer to 64 PCI clocks */ |
503 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); | 435 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); |
@@ -527,24 +459,30 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id | |||
527 | serverworks_fixup_ht1000(pdev); | 459 | serverworks_fixup_ht1000(pdev); |
528 | 460 | ||
529 | if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) | 461 | if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) |
530 | ata_pci_clear_simplex(pdev); | 462 | ata_pci_bmdma_clear_simplex(pdev); |
531 | 463 | ||
532 | return ata_pci_init_one(pdev, ppi); | 464 | return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL); |
533 | } | 465 | } |
534 | 466 | ||
535 | #ifdef CONFIG_PM | 467 | #ifdef CONFIG_PM |
536 | static int serverworks_reinit_one(struct pci_dev *pdev) | 468 | static int serverworks_reinit_one(struct pci_dev *pdev) |
537 | { | 469 | { |
470 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
471 | int rc; | ||
472 | |||
473 | rc = ata_pci_device_do_resume(pdev); | ||
474 | if (rc) | ||
475 | return rc; | ||
476 | |||
538 | /* Force master latency timer to 64 PCI clocks */ | 477 | /* Force master latency timer to 64 PCI clocks */ |
539 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); | 478 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); |
540 | 479 | ||
541 | switch (pdev->device) | 480 | switch (pdev->device) { |
542 | { | ||
543 | case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE: | 481 | case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE: |
544 | serverworks_fixup_osb4(pdev); | 482 | serverworks_fixup_osb4(pdev); |
545 | break; | 483 | break; |
546 | case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: | 484 | case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: |
547 | ata_pci_clear_simplex(pdev); | 485 | ata_pci_bmdma_clear_simplex(pdev); |
548 | /* fall through */ | 486 | /* fall through */ |
549 | case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: | 487 | case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: |
550 | case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: | 488 | case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: |
@@ -554,7 +492,9 @@ static int serverworks_reinit_one(struct pci_dev *pdev) | |||
554 | serverworks_fixup_ht1000(pdev); | 492 | serverworks_fixup_ht1000(pdev); |
555 | break; | 493 | break; |
556 | } | 494 | } |
557 | return ata_pci_device_resume(pdev); | 495 | |
496 | ata_host_resume(host); | ||
497 | return 0; | ||
558 | } | 498 | } |
559 | #endif | 499 | #endif |
560 | 500 | ||
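
The resume path is now split in two: the old ata_pci_device_resume() call is replaced by an explicit PCI-level resume, the driver redoes its chipset fixups, and only then is the host asked to recover its links. A sketch of the resulting shape with hypothetical foo_* names:

	#ifdef CONFIG_PM
	static int foo_reinit_one(struct pci_dev *pdev)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_pci_device_do_resume(pdev);	/* PCI state only */
		if (rc)
			return rc;

		/* redo chipset-specific setup here, as the fixups above do */

		ata_host_resume(host);		/* schedule EH to revalidate */
		return 0;
	}
	#endif
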
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 7c5b2dd9a1a1..720b8645f58a 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
@@ -192,54 +192,14 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | static struct scsi_host_template sil680_sht = { | 194 | static struct scsi_host_template sil680_sht = { |
195 | .module = THIS_MODULE, | 195 | ATA_BMDMA_SHT(DRV_NAME), |
196 | .name = DRV_NAME, | ||
197 | .ioctl = ata_scsi_ioctl, | ||
198 | .queuecommand = ata_scsi_queuecmd, | ||
199 | .can_queue = ATA_DEF_QUEUE, | ||
200 | .this_id = ATA_SHT_THIS_ID, | ||
201 | .sg_tablesize = LIBATA_MAX_PRD, | ||
202 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
203 | .emulated = ATA_SHT_EMULATED, | ||
204 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
205 | .proc_name = DRV_NAME, | ||
206 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
207 | .slave_configure = ata_scsi_slave_config, | ||
208 | .slave_destroy = ata_scsi_slave_destroy, | ||
209 | .bios_param = ata_std_bios_param, | ||
210 | }; | 196 | }; |
211 | 197 | ||
212 | static struct ata_port_operations sil680_port_ops = { | 198 | static struct ata_port_operations sil680_port_ops = { |
199 | .inherits = &ata_bmdma_port_ops, | ||
200 | .cable_detect = sil680_cable_detect, | ||
213 | .set_piomode = sil680_set_piomode, | 201 | .set_piomode = sil680_set_piomode, |
214 | .set_dmamode = sil680_set_dmamode, | 202 | .set_dmamode = sil680_set_dmamode, |
215 | .mode_filter = ata_pci_default_filter, | ||
216 | .tf_load = ata_tf_load, | ||
217 | .tf_read = ata_tf_read, | ||
218 | .check_status = ata_check_status, | ||
219 | .exec_command = ata_exec_command, | ||
220 | .dev_select = ata_std_dev_select, | ||
221 | |||
222 | .freeze = ata_bmdma_freeze, | ||
223 | .thaw = ata_bmdma_thaw, | ||
224 | .error_handler = ata_bmdma_error_handler, | ||
225 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
226 | .cable_detect = sil680_cable_detect, | ||
227 | |||
228 | .bmdma_setup = ata_bmdma_setup, | ||
229 | .bmdma_start = ata_bmdma_start, | ||
230 | .bmdma_stop = ata_bmdma_stop, | ||
231 | .bmdma_status = ata_bmdma_status, | ||
232 | |||
233 | .qc_prep = ata_qc_prep, | ||
234 | .qc_issue = ata_qc_issue_prot, | ||
235 | |||
236 | .data_xfer = ata_data_xfer, | ||
237 | |||
238 | .irq_handler = ata_interrupt, | ||
239 | .irq_clear = ata_bmdma_irq_clear, | ||
240 | .irq_on = ata_irq_on, | ||
241 | |||
242 | .port_start = ata_sff_port_start, | ||
243 | }; | 203 | }; |
244 | 204 | ||
245 | /** | 205 | /** |
@@ -322,7 +282,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
322 | const struct pci_device_id *id) | 282 | const struct pci_device_id *id) |
323 | { | 283 | { |
324 | static const struct ata_port_info info = { | 284 | static const struct ata_port_info info = { |
325 | .sht = &sil680_sht, | ||
326 | .flags = ATA_FLAG_SLAVE_POSS, | 285 | .flags = ATA_FLAG_SLAVE_POSS, |
327 | .pio_mask = 0x1f, | 286 | .pio_mask = 0x1f, |
328 | .mwdma_mask = 0x07, | 287 | .mwdma_mask = 0x07, |
@@ -330,7 +289,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
330 | .port_ops = &sil680_port_ops | 289 | .port_ops = &sil680_port_ops |
331 | }; | 290 | }; |
332 | static const struct ata_port_info info_slow = { | 291 | static const struct ata_port_info info_slow = { |
333 | .sht = &sil680_sht, | ||
334 | .flags = ATA_FLAG_SLAVE_POSS, | 292 | .flags = ATA_FLAG_SLAVE_POSS, |
335 | .pio_mask = 0x1f, | 293 | .pio_mask = 0x1f, |
336 | .mwdma_mask = 0x07, | 294 | .mwdma_mask = 0x07, |
@@ -346,6 +304,10 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
346 | if (!printed_version++) | 304 | if (!printed_version++) |
347 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 305 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
348 | 306 | ||
307 | rc = pcim_enable_device(pdev); | ||
308 | if (rc) | ||
309 | return rc; | ||
310 | |||
349 | switch (sil680_init_chip(pdev, &try_mmio)) { | 311 | switch (sil680_init_chip(pdev, &try_mmio)) { |
350 | case 0: | 312 | case 0: |
351 | ppi[0] = &info_slow; | 313 | ppi[0] = &info_slow; |
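
Because the probe path now reads chip registers before handing control to the library, the device is enabled up front with the managed helper; pcim_* resources are released automatically when the driver detaches. The added lines amount to:

	rc = pcim_enable_device(pdev);	/* managed enable, undone on detach */
	if (rc)
		return rc;
	/* PCI config space and BARs may be touched from here on */
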
@@ -388,28 +350,33 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
388 | host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80; | 350 | host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80; |
389 | host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a; | 351 | host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a; |
390 | host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a; | 352 | host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a; |
391 | ata_std_ports(&host->ports[0]->ioaddr); | 353 | ata_sff_std_ports(&host->ports[0]->ioaddr); |
392 | host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08; | 354 | host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08; |
393 | host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0; | 355 | host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0; |
394 | host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca; | 356 | host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca; |
395 | host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca; | 357 | host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca; |
396 | ata_std_ports(&host->ports[1]->ioaddr); | 358 | ata_sff_std_ports(&host->ports[1]->ioaddr); |
397 | 359 | ||
398 | /* Register & activate */ | 360 | /* Register & activate */ |
399 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 361 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
400 | &sil680_sht); | 362 | IRQF_SHARED, &sil680_sht); |
401 | 363 | ||
402 | use_ioports: | 364 | use_ioports: |
403 | return ata_pci_init_one(pdev, ppi); | 365 | return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL); |
404 | } | 366 | } |
405 | 367 | ||
406 | #ifdef CONFIG_PM | 368 | #ifdef CONFIG_PM |
407 | static int sil680_reinit_one(struct pci_dev *pdev) | 369 | static int sil680_reinit_one(struct pci_dev *pdev) |
408 | { | 370 | { |
409 | int try_mmio; | 371 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
372 | int try_mmio, rc; | ||
410 | 373 | ||
374 | rc = ata_pci_device_do_resume(pdev); | ||
375 | if (rc) | ||
376 | return rc; | ||
411 | sil680_init_chip(pdev, &try_mmio); | 377 | sil680_init_chip(pdev, &try_mmio); |
412 | return ata_pci_device_resume(pdev); | 378 | ata_host_resume(host); |
379 | return 0; | ||
413 | } | 380 | } |
414 | #endif | 381 | #endif |
415 | 382 | ||
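
For the MMIO variant the driver still fills in the command, control and BMDMA base addresses itself and lets the renamed ata_sff_std_ports() derive the individual taskfile register pointers from cmd_addr. A condensed sketch; the offsets are illustrative, loosely based on the hunk above:

	struct ata_ioports *ioaddr = &host->ports[0]->ioaddr;

	ioaddr->bmdma_addr     = mmio_base + 0x00;
	ioaddr->cmd_addr       = mmio_base + 0x80;
	ioaddr->ctl_addr       = mmio_base + 0x8a;
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_sff_std_ports(ioaddr);	/* data..command derived from cmd_addr */
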
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index dc7e91562e43..e82c66e8d31b 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c | |||
@@ -156,24 +156,11 @@ static int sis_pre_reset(struct ata_link *link, unsigned long deadline) | |||
156 | /* Clear the FIFO settings. We can't enable the FIFO until | 156 | /* Clear the FIFO settings. We can't enable the FIFO until |
157 | we know we are poking at a disk */ | 157 | we know we are poking at a disk */ |
158 | pci_write_config_byte(pdev, 0x4B, 0); | 158 | pci_write_config_byte(pdev, 0x4B, 0); |
159 | return ata_std_prereset(link, deadline); | 159 | return ata_sff_prereset(link, deadline); |
160 | } | 160 | } |
161 | 161 | ||
162 | 162 | ||
163 | /** | 163 | /** |
164 | * sis_error_handler - Probe specified port on PATA host controller | ||
165 | * @ap: Port to probe | ||
166 | * | ||
167 | * LOCKING: | ||
168 | * None (inherited from caller). | ||
169 | */ | ||
170 | |||
171 | static void sis_error_handler(struct ata_port *ap) | ||
172 | { | ||
173 | ata_bmdma_drive_eh(ap, sis_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * sis_set_fifo - Set RWP fifo bits for this device | 164 | * sis_set_fifo - Set RWP fifo bits for this device |
178 | * @ap: Port | 165 | * @ap: Port |
179 | * @adev: Device | 166 | * @adev: Device |
@@ -514,217 +501,57 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
514 | } | 501 | } |
515 | 502 | ||
516 | static struct scsi_host_template sis_sht = { | 503 | static struct scsi_host_template sis_sht = { |
517 | .module = THIS_MODULE, | 504 | ATA_BMDMA_SHT(DRV_NAME), |
518 | .name = DRV_NAME, | ||
519 | .ioctl = ata_scsi_ioctl, | ||
520 | .queuecommand = ata_scsi_queuecmd, | ||
521 | .can_queue = ATA_DEF_QUEUE, | ||
522 | .this_id = ATA_SHT_THIS_ID, | ||
523 | .sg_tablesize = LIBATA_MAX_PRD, | ||
524 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
525 | .emulated = ATA_SHT_EMULATED, | ||
526 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
527 | .proc_name = DRV_NAME, | ||
528 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
529 | .slave_configure = ata_scsi_slave_config, | ||
530 | .slave_destroy = ata_scsi_slave_destroy, | ||
531 | .bios_param = ata_std_bios_param, | ||
532 | }; | 505 | }; |
533 | 506 | ||
534 | static const struct ata_port_operations sis_133_ops = { | 507 | static struct ata_port_operations sis_133_for_sata_ops = { |
508 | .inherits = &ata_bmdma_port_ops, | ||
535 | .set_piomode = sis_133_set_piomode, | 509 | .set_piomode = sis_133_set_piomode, |
536 | .set_dmamode = sis_133_set_dmamode, | 510 | .set_dmamode = sis_133_set_dmamode, |
537 | .mode_filter = ata_pci_default_filter, | ||
538 | |||
539 | .tf_load = ata_tf_load, | ||
540 | .tf_read = ata_tf_read, | ||
541 | .check_status = ata_check_status, | ||
542 | .exec_command = ata_exec_command, | ||
543 | .dev_select = ata_std_dev_select, | ||
544 | |||
545 | .freeze = ata_bmdma_freeze, | ||
546 | .thaw = ata_bmdma_thaw, | ||
547 | .error_handler = sis_error_handler, | ||
548 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
549 | .cable_detect = sis_133_cable_detect, | 511 | .cable_detect = sis_133_cable_detect, |
512 | }; | ||
550 | 513 | ||
551 | .bmdma_setup = ata_bmdma_setup, | 514 | static struct ata_port_operations sis_base_ops = { |
552 | .bmdma_start = ata_bmdma_start, | 515 | .inherits = &ata_bmdma_port_ops, |
553 | .bmdma_stop = ata_bmdma_stop, | 516 | .prereset = sis_pre_reset, |
554 | .bmdma_status = ata_bmdma_status, | ||
555 | .qc_prep = ata_qc_prep, | ||
556 | .qc_issue = ata_qc_issue_prot, | ||
557 | .data_xfer = ata_data_xfer, | ||
558 | |||
559 | .irq_handler = ata_interrupt, | ||
560 | .irq_clear = ata_bmdma_irq_clear, | ||
561 | .irq_on = ata_irq_on, | ||
562 | |||
563 | .port_start = ata_sff_port_start, | ||
564 | }; | 517 | }; |
565 | 518 | ||
566 | static const struct ata_port_operations sis_133_for_sata_ops = { | 519 | static struct ata_port_operations sis_133_ops = { |
520 | .inherits = &sis_base_ops, | ||
567 | .set_piomode = sis_133_set_piomode, | 521 | .set_piomode = sis_133_set_piomode, |
568 | .set_dmamode = sis_133_set_dmamode, | 522 | .set_dmamode = sis_133_set_dmamode, |
569 | .mode_filter = ata_pci_default_filter, | ||
570 | |||
571 | .tf_load = ata_tf_load, | ||
572 | .tf_read = ata_tf_read, | ||
573 | .check_status = ata_check_status, | ||
574 | .exec_command = ata_exec_command, | ||
575 | .dev_select = ata_std_dev_select, | ||
576 | |||
577 | .freeze = ata_bmdma_freeze, | ||
578 | .thaw = ata_bmdma_thaw, | ||
579 | .error_handler = ata_bmdma_error_handler, | ||
580 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
581 | .cable_detect = sis_133_cable_detect, | 523 | .cable_detect = sis_133_cable_detect, |
582 | |||
583 | .bmdma_setup = ata_bmdma_setup, | ||
584 | .bmdma_start = ata_bmdma_start, | ||
585 | .bmdma_stop = ata_bmdma_stop, | ||
586 | .bmdma_status = ata_bmdma_status, | ||
587 | .qc_prep = ata_qc_prep, | ||
588 | .qc_issue = ata_qc_issue_prot, | ||
589 | .data_xfer = ata_data_xfer, | ||
590 | |||
591 | .irq_handler = ata_interrupt, | ||
592 | .irq_clear = ata_bmdma_irq_clear, | ||
593 | .irq_on = ata_irq_on, | ||
594 | |||
595 | .port_start = ata_sff_port_start, | ||
596 | }; | 524 | }; |
597 | 525 | ||
598 | static const struct ata_port_operations sis_133_early_ops = { | 526 | static struct ata_port_operations sis_133_early_ops = { |
527 | .inherits = &sis_base_ops, | ||
599 | .set_piomode = sis_100_set_piomode, | 528 | .set_piomode = sis_100_set_piomode, |
600 | .set_dmamode = sis_133_early_set_dmamode, | 529 | .set_dmamode = sis_133_early_set_dmamode, |
601 | .mode_filter = ata_pci_default_filter, | ||
602 | |||
603 | .tf_load = ata_tf_load, | ||
604 | .tf_read = ata_tf_read, | ||
605 | .check_status = ata_check_status, | ||
606 | .exec_command = ata_exec_command, | ||
607 | .dev_select = ata_std_dev_select, | ||
608 | |||
609 | .freeze = ata_bmdma_freeze, | ||
610 | .thaw = ata_bmdma_thaw, | ||
611 | .error_handler = sis_error_handler, | ||
612 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
613 | .cable_detect = sis_66_cable_detect, | 530 | .cable_detect = sis_66_cable_detect, |
614 | |||
615 | .bmdma_setup = ata_bmdma_setup, | ||
616 | .bmdma_start = ata_bmdma_start, | ||
617 | .bmdma_stop = ata_bmdma_stop, | ||
618 | .bmdma_status = ata_bmdma_status, | ||
619 | .qc_prep = ata_qc_prep, | ||
620 | .qc_issue = ata_qc_issue_prot, | ||
621 | .data_xfer = ata_data_xfer, | ||
622 | |||
623 | .irq_handler = ata_interrupt, | ||
624 | .irq_clear = ata_bmdma_irq_clear, | ||
625 | .irq_on = ata_irq_on, | ||
626 | |||
627 | .port_start = ata_sff_port_start, | ||
628 | }; | 531 | }; |
629 | 532 | ||
630 | static const struct ata_port_operations sis_100_ops = { | 533 | static struct ata_port_operations sis_100_ops = { |
534 | .inherits = &sis_base_ops, | ||
631 | .set_piomode = sis_100_set_piomode, | 535 | .set_piomode = sis_100_set_piomode, |
632 | .set_dmamode = sis_100_set_dmamode, | 536 | .set_dmamode = sis_100_set_dmamode, |
633 | .mode_filter = ata_pci_default_filter, | ||
634 | |||
635 | .tf_load = ata_tf_load, | ||
636 | .tf_read = ata_tf_read, | ||
637 | .check_status = ata_check_status, | ||
638 | .exec_command = ata_exec_command, | ||
639 | .dev_select = ata_std_dev_select, | ||
640 | |||
641 | .freeze = ata_bmdma_freeze, | ||
642 | .thaw = ata_bmdma_thaw, | ||
643 | .error_handler = sis_error_handler, | ||
644 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
645 | .cable_detect = sis_66_cable_detect, | 537 | .cable_detect = sis_66_cable_detect, |
646 | |||
647 | .bmdma_setup = ata_bmdma_setup, | ||
648 | .bmdma_start = ata_bmdma_start, | ||
649 | .bmdma_stop = ata_bmdma_stop, | ||
650 | .bmdma_status = ata_bmdma_status, | ||
651 | .qc_prep = ata_qc_prep, | ||
652 | .qc_issue = ata_qc_issue_prot, | ||
653 | .data_xfer = ata_data_xfer, | ||
654 | |||
655 | .irq_handler = ata_interrupt, | ||
656 | .irq_clear = ata_bmdma_irq_clear, | ||
657 | .irq_on = ata_irq_on, | ||
658 | |||
659 | .port_start = ata_sff_port_start, | ||
660 | }; | 538 | }; |
661 | 539 | ||
662 | static const struct ata_port_operations sis_66_ops = { | 540 | static struct ata_port_operations sis_66_ops = { |
541 | .inherits = &sis_base_ops, | ||
663 | .set_piomode = sis_old_set_piomode, | 542 | .set_piomode = sis_old_set_piomode, |
664 | .set_dmamode = sis_66_set_dmamode, | 543 | .set_dmamode = sis_66_set_dmamode, |
665 | .mode_filter = ata_pci_default_filter, | ||
666 | |||
667 | .tf_load = ata_tf_load, | ||
668 | .tf_read = ata_tf_read, | ||
669 | .check_status = ata_check_status, | ||
670 | .exec_command = ata_exec_command, | ||
671 | .dev_select = ata_std_dev_select, | ||
672 | .cable_detect = sis_66_cable_detect, | 544 | .cable_detect = sis_66_cable_detect, |
673 | |||
674 | .freeze = ata_bmdma_freeze, | ||
675 | .thaw = ata_bmdma_thaw, | ||
676 | .error_handler = sis_error_handler, | ||
677 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
678 | |||
679 | .bmdma_setup = ata_bmdma_setup, | ||
680 | .bmdma_start = ata_bmdma_start, | ||
681 | .bmdma_stop = ata_bmdma_stop, | ||
682 | .bmdma_status = ata_bmdma_status, | ||
683 | .qc_prep = ata_qc_prep, | ||
684 | .qc_issue = ata_qc_issue_prot, | ||
685 | .data_xfer = ata_data_xfer, | ||
686 | |||
687 | .irq_handler = ata_interrupt, | ||
688 | .irq_clear = ata_bmdma_irq_clear, | ||
689 | .irq_on = ata_irq_on, | ||
690 | |||
691 | .port_start = ata_sff_port_start, | ||
692 | }; | 545 | }; |
693 | 546 | ||
694 | static const struct ata_port_operations sis_old_ops = { | 547 | static struct ata_port_operations sis_old_ops = { |
548 | .inherits = &sis_base_ops, | ||
695 | .set_piomode = sis_old_set_piomode, | 549 | .set_piomode = sis_old_set_piomode, |
696 | .set_dmamode = sis_old_set_dmamode, | 550 | .set_dmamode = sis_old_set_dmamode, |
697 | .mode_filter = ata_pci_default_filter, | ||
698 | |||
699 | .tf_load = ata_tf_load, | ||
700 | .tf_read = ata_tf_read, | ||
701 | .check_status = ata_check_status, | ||
702 | .exec_command = ata_exec_command, | ||
703 | .dev_select = ata_std_dev_select, | ||
704 | |||
705 | .freeze = ata_bmdma_freeze, | ||
706 | .thaw = ata_bmdma_thaw, | ||
707 | .error_handler = sis_error_handler, | ||
708 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
709 | .cable_detect = ata_cable_40wire, | 551 | .cable_detect = ata_cable_40wire, |
710 | |||
711 | .bmdma_setup = ata_bmdma_setup, | ||
712 | .bmdma_start = ata_bmdma_start, | ||
713 | .bmdma_stop = ata_bmdma_stop, | ||
714 | .bmdma_status = ata_bmdma_status, | ||
715 | .qc_prep = ata_qc_prep, | ||
716 | .qc_issue = ata_qc_issue_prot, | ||
717 | .data_xfer = ata_data_xfer, | ||
718 | |||
719 | .irq_handler = ata_interrupt, | ||
720 | .irq_clear = ata_bmdma_irq_clear, | ||
721 | .irq_on = ata_irq_on, | ||
722 | |||
723 | .port_start = ata_sff_port_start, | ||
724 | }; | 552 | }; |
725 | 553 | ||
726 | static const struct ata_port_info sis_info = { | 554 | static const struct ata_port_info sis_info = { |
727 | .sht = &sis_sht, | ||
728 | .flags = ATA_FLAG_SLAVE_POSS, | 555 | .flags = ATA_FLAG_SLAVE_POSS, |
729 | .pio_mask = 0x1f, /* pio0-4 */ | 556 | .pio_mask = 0x1f, /* pio0-4 */ |
730 | .mwdma_mask = 0x07, | 557 | .mwdma_mask = 0x07, |
@@ -732,7 +559,6 @@ static const struct ata_port_info sis_info = { | |||
732 | .port_ops = &sis_old_ops, | 559 | .port_ops = &sis_old_ops, |
733 | }; | 560 | }; |
734 | static const struct ata_port_info sis_info33 = { | 561 | static const struct ata_port_info sis_info33 = { |
735 | .sht = &sis_sht, | ||
736 | .flags = ATA_FLAG_SLAVE_POSS, | 562 | .flags = ATA_FLAG_SLAVE_POSS, |
737 | .pio_mask = 0x1f, /* pio0-4 */ | 563 | .pio_mask = 0x1f, /* pio0-4 */ |
738 | .mwdma_mask = 0x07, | 564 | .mwdma_mask = 0x07, |
@@ -740,42 +566,36 @@ static const struct ata_port_info sis_info33 = { | |||
740 | .port_ops = &sis_old_ops, | 566 | .port_ops = &sis_old_ops, |
741 | }; | 567 | }; |
742 | static const struct ata_port_info sis_info66 = { | 568 | static const struct ata_port_info sis_info66 = { |
743 | .sht = &sis_sht, | ||
744 | .flags = ATA_FLAG_SLAVE_POSS, | 569 | .flags = ATA_FLAG_SLAVE_POSS, |
745 | .pio_mask = 0x1f, /* pio0-4 */ | 570 | .pio_mask = 0x1f, /* pio0-4 */ |
746 | .udma_mask = ATA_UDMA4, /* UDMA 66 */ | 571 | .udma_mask = ATA_UDMA4, /* UDMA 66 */ |
747 | .port_ops = &sis_66_ops, | 572 | .port_ops = &sis_66_ops, |
748 | }; | 573 | }; |
749 | static const struct ata_port_info sis_info100 = { | 574 | static const struct ata_port_info sis_info100 = { |
750 | .sht = &sis_sht, | ||
751 | .flags = ATA_FLAG_SLAVE_POSS, | 575 | .flags = ATA_FLAG_SLAVE_POSS, |
752 | .pio_mask = 0x1f, /* pio0-4 */ | 576 | .pio_mask = 0x1f, /* pio0-4 */ |
753 | .udma_mask = ATA_UDMA5, | 577 | .udma_mask = ATA_UDMA5, |
754 | .port_ops = &sis_100_ops, | 578 | .port_ops = &sis_100_ops, |
755 | }; | 579 | }; |
756 | static const struct ata_port_info sis_info100_early = { | 580 | static const struct ata_port_info sis_info100_early = { |
757 | .sht = &sis_sht, | ||
758 | .flags = ATA_FLAG_SLAVE_POSS, | 581 | .flags = ATA_FLAG_SLAVE_POSS, |
759 | .udma_mask = ATA_UDMA5, | 582 | .udma_mask = ATA_UDMA5, |
760 | .pio_mask = 0x1f, /* pio0-4 */ | 583 | .pio_mask = 0x1f, /* pio0-4 */ |
761 | .port_ops = &sis_66_ops, | 584 | .port_ops = &sis_66_ops, |
762 | }; | 585 | }; |
763 | static const struct ata_port_info sis_info133 = { | 586 | static const struct ata_port_info sis_info133 = { |
764 | .sht = &sis_sht, | ||
765 | .flags = ATA_FLAG_SLAVE_POSS, | 587 | .flags = ATA_FLAG_SLAVE_POSS, |
766 | .pio_mask = 0x1f, /* pio0-4 */ | 588 | .pio_mask = 0x1f, /* pio0-4 */ |
767 | .udma_mask = ATA_UDMA6, | 589 | .udma_mask = ATA_UDMA6, |
768 | .port_ops = &sis_133_ops, | 590 | .port_ops = &sis_133_ops, |
769 | }; | 591 | }; |
770 | const struct ata_port_info sis_info133_for_sata = { | 592 | const struct ata_port_info sis_info133_for_sata = { |
771 | .sht = &sis_sht, | ||
772 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, | 593 | .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, |
773 | .pio_mask = 0x1f, /* pio0-4 */ | 594 | .pio_mask = 0x1f, /* pio0-4 */ |
774 | .udma_mask = ATA_UDMA6, | 595 | .udma_mask = ATA_UDMA6, |
775 | .port_ops = &sis_133_for_sata_ops, | 596 | .port_ops = &sis_133_for_sata_ops, |
776 | }; | 597 | }; |
777 | static const struct ata_port_info sis_info133_early = { | 598 | static const struct ata_port_info sis_info133_early = { |
778 | .sht = &sis_sht, | ||
779 | .flags = ATA_FLAG_SLAVE_POSS, | 599 | .flags = ATA_FLAG_SLAVE_POSS, |
780 | .pio_mask = 0x1f, /* pio0-4 */ | 600 | .pio_mask = 0x1f, /* pio0-4 */ |
781 | .udma_mask = ATA_UDMA6, | 601 | .udma_mask = ATA_UDMA6, |
@@ -857,11 +677,11 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis) | |||
857 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 677 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
858 | { | 678 | { |
859 | static int printed_version; | 679 | static int printed_version; |
860 | struct ata_port_info port; | 680 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
861 | const struct ata_port_info *ppi[] = { &port, NULL }; | ||
862 | struct pci_dev *host = NULL; | 681 | struct pci_dev *host = NULL; |
863 | struct sis_chipset *chipset = NULL; | 682 | struct sis_chipset *chipset = NULL; |
864 | struct sis_chipset *sets; | 683 | struct sis_chipset *sets; |
684 | int rc; | ||
865 | 685 | ||
866 | static struct sis_chipset sis_chipsets[] = { | 686 | static struct sis_chipset sis_chipsets[] = { |
867 | 687 | ||
@@ -914,8 +734,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
914 | dev_printk(KERN_DEBUG, &pdev->dev, | 734 | dev_printk(KERN_DEBUG, &pdev->dev, |
915 | "version " DRV_VERSION "\n"); | 735 | "version " DRV_VERSION "\n"); |
916 | 736 | ||
917 | /* We have to find the bridge first */ | 737 | rc = pcim_enable_device(pdev); |
738 | if (rc) | ||
739 | return rc; | ||
918 | 740 | ||
741 | /* We have to find the bridge first */ | ||
919 | for (sets = &sis_chipsets[0]; sets->device; sets++) { | 742 | for (sets = &sis_chipsets[0]; sets->device; sets++) { |
920 | host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL); | 743 | host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL); |
921 | if (host != NULL) { | 744 | if (host != NULL) { |
@@ -994,12 +817,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
994 | if (chipset == NULL) | 817 | if (chipset == NULL) |
995 | return -ENODEV; | 818 | return -ENODEV; |
996 | 819 | ||
997 | port = *chipset->info; | 820 | ppi[0] = chipset->info; |
998 | port.private_data = chipset; | ||
999 | 821 | ||
1000 | sis_fixup(pdev, chipset); | 822 | sis_fixup(pdev, chipset); |
1001 | 823 | ||
1002 | return ata_pci_init_one(pdev, ppi); | 824 | return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset); |
1003 | } | 825 | } |
1004 | 826 | ||
1005 | static const struct pci_device_id sis_pci_tbl[] = { | 827 | static const struct pci_device_id sis_pci_tbl[] = { |
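
The probe no longer copies an ata_port_info onto the stack just to smuggle chipset data through port.private_data; it points ppi[0] at the const table entry and hands the per-chipset pointer to ata_pci_sff_init_one(), which stores it in host->private_data. Reusing the foo_* placeholders from the earlier sketches:

	static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		static const struct ata_port_info info = {
			.flags		= ATA_FLAG_SLAVE_POSS,
			.pio_mask	= 0x1f,
			.mwdma_mask	= 0x07,
			.udma_mask	= ATA_UDMA6,
			.port_ops	= &foo_base_ops,
		};
		const struct ata_port_info *ppi[] = { &info, NULL };
		void *priv = NULL;	/* placeholder for per-chipset data */
		int rc;

		rc = pcim_enable_device(pdev);
		if (rc)
			return rc;

		/* priv becomes host->private_data, as the VIA resume path shows */
		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, priv);
	}
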
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 81ef207f8265..70d94fb28a5f 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c | |||
@@ -60,13 +60,7 @@ static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline) | |||
60 | 60 | ||
61 | if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) | 61 | if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) |
62 | return -ENOENT; | 62 | return -ENOENT; |
63 | return ata_std_prereset(link, deadline); | 63 | return ata_sff_prereset(link, deadline); |
64 | } | ||
65 | |||
66 | |||
67 | static void sl82c105_error_handler(struct ata_port *ap) | ||
68 | { | ||
69 | ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
70 | } | 64 | } |
71 | 65 | ||
72 | 66 | ||
@@ -235,55 +229,17 @@ static int sl82c105_qc_defer(struct ata_queued_cmd *qc) | |||
235 | } | 229 | } |
236 | 230 | ||
237 | static struct scsi_host_template sl82c105_sht = { | 231 | static struct scsi_host_template sl82c105_sht = { |
238 | .module = THIS_MODULE, | 232 | ATA_BMDMA_SHT(DRV_NAME), |
239 | .name = DRV_NAME, | ||
240 | .ioctl = ata_scsi_ioctl, | ||
241 | .queuecommand = ata_scsi_queuecmd, | ||
242 | .can_queue = ATA_DEF_QUEUE, | ||
243 | .this_id = ATA_SHT_THIS_ID, | ||
244 | .sg_tablesize = LIBATA_MAX_PRD, | ||
245 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
246 | .emulated = ATA_SHT_EMULATED, | ||
247 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
248 | .proc_name = DRV_NAME, | ||
249 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
250 | .slave_configure = ata_scsi_slave_config, | ||
251 | .slave_destroy = ata_scsi_slave_destroy, | ||
252 | .bios_param = ata_std_bios_param, | ||
253 | }; | 233 | }; |
254 | 234 | ||
255 | static struct ata_port_operations sl82c105_port_ops = { | 235 | static struct ata_port_operations sl82c105_port_ops = { |
256 | .set_piomode = sl82c105_set_piomode, | 236 | .inherits = &ata_bmdma_port_ops, |
257 | .mode_filter = ata_pci_default_filter, | 237 | .qc_defer = sl82c105_qc_defer, |
258 | |||
259 | .tf_load = ata_tf_load, | ||
260 | .tf_read = ata_tf_read, | ||
261 | .check_status = ata_check_status, | ||
262 | .exec_command = ata_exec_command, | ||
263 | .dev_select = ata_std_dev_select, | ||
264 | |||
265 | .freeze = ata_bmdma_freeze, | ||
266 | .thaw = ata_bmdma_thaw, | ||
267 | .error_handler = sl82c105_error_handler, | ||
268 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
269 | .cable_detect = ata_cable_40wire, | ||
270 | |||
271 | .bmdma_setup = ata_bmdma_setup, | ||
272 | .bmdma_start = sl82c105_bmdma_start, | 238 | .bmdma_start = sl82c105_bmdma_start, |
273 | .bmdma_stop = sl82c105_bmdma_stop, | 239 | .bmdma_stop = sl82c105_bmdma_stop, |
274 | .bmdma_status = ata_bmdma_status, | 240 | .cable_detect = ata_cable_40wire, |
275 | 241 | .set_piomode = sl82c105_set_piomode, | |
276 | .qc_defer = sl82c105_qc_defer, | 242 | .prereset = sl82c105_pre_reset, |
277 | .qc_prep = ata_qc_prep, | ||
278 | .qc_issue = ata_qc_issue_prot, | ||
279 | |||
280 | .data_xfer = ata_data_xfer, | ||
281 | |||
282 | .irq_handler = ata_interrupt, | ||
283 | .irq_clear = ata_bmdma_irq_clear, | ||
284 | .irq_on = ata_irq_on, | ||
285 | |||
286 | .port_start = ata_sff_port_start, | ||
287 | }; | 243 | }; |
288 | 244 | ||
289 | /** | 245 | /** |
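
The slimmed-down sl82c105 ops keep only the hooks that genuinely differ, including .qc_defer, which serializes work on hardware where the two channels contend. A hedged, generic sketch of such a defer hook (the shared-DMA-engine assumption is mine, not a statement about this chip):

	static int foo_qc_defer(struct ata_queued_cmd *qc)
	{
		struct ata_host *host = qc->ap->host;
		struct ata_port *alt = host->ports[1 - qc->ap->port_no];
		int rc;

		/* apply the standard per-link/per-port rules first */
		rc = ata_std_qc_defer(qc);
		if (rc)
			return rc;

		/* assumed constraint: one DMA engine shared by both channels */
		if (alt && alt->qc_active)
			return ATA_DEFER_PORT;

		return 0;
	}
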
@@ -327,14 +283,12 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev) | |||
327 | static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 283 | static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
328 | { | 284 | { |
329 | static const struct ata_port_info info_dma = { | 285 | static const struct ata_port_info info_dma = { |
330 | .sht = &sl82c105_sht, | ||
331 | .flags = ATA_FLAG_SLAVE_POSS, | 286 | .flags = ATA_FLAG_SLAVE_POSS, |
332 | .pio_mask = 0x1f, | 287 | .pio_mask = 0x1f, |
333 | .mwdma_mask = 0x07, | 288 | .mwdma_mask = 0x07, |
334 | .port_ops = &sl82c105_port_ops | 289 | .port_ops = &sl82c105_port_ops |
335 | }; | 290 | }; |
336 | static const struct ata_port_info info_early = { | 291 | static const struct ata_port_info info_early = { |
337 | .sht = &sl82c105_sht, | ||
338 | .flags = ATA_FLAG_SLAVE_POSS, | 292 | .flags = ATA_FLAG_SLAVE_POSS, |
339 | .pio_mask = 0x1f, | 293 | .pio_mask = 0x1f, |
340 | .port_ops = &sl82c105_port_ops | 294 | .port_ops = &sl82c105_port_ops |
@@ -344,6 +298,11 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
344 | NULL }; | 298 | NULL }; |
345 | u32 val; | 299 | u32 val; |
346 | int rev; | 300 | int rev; |
301 | int rc; | ||
302 | |||
303 | rc = pcim_enable_device(dev); | ||
304 | if (rc) | ||
305 | return rc; | ||
347 | 306 | ||
348 | rev = sl82c105_bridge_revision(dev); | 307 | rev = sl82c105_bridge_revision(dev); |
349 | 308 | ||
@@ -358,7 +317,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
358 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; | 317 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; |
359 | pci_write_config_dword(dev, 0x40, val); | 318 | pci_write_config_dword(dev, 0x40, val); |
360 | 319 | ||
361 | return ata_pci_init_one(dev, ppi); | 320 | return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL); |
362 | } | 321 | } |
363 | 322 | ||
364 | static const struct pci_device_id sl82c105[] = { | 323 | static const struct pci_device_id sl82c105[] = { |
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 403eafcffe12..b181261f2743 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c | |||
@@ -66,16 +66,11 @@ static int triflex_prereset(struct ata_link *link, unsigned long deadline) | |||
66 | if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) | 66 | if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) |
67 | return -ENOENT; | 67 | return -ENOENT; |
68 | 68 | ||
69 | return ata_std_prereset(link, deadline); | 69 | return ata_sff_prereset(link, deadline); |
70 | } | 70 | } |
71 | 71 | ||
72 | 72 | ||
73 | 73 | ||
74 | static void triflex_error_handler(struct ata_port *ap) | ||
75 | { | ||
76 | ata_bmdma_drive_eh(ap, triflex_prereset, ata_std_softreset, NULL, ata_std_postreset); | ||
77 | } | ||
78 | |||
79 | /** | 74 | /** |
80 | * triflex_load_timing - timing configuration | 75 | * triflex_load_timing - timing configuration |
81 | * @ap: ATA interface | 76 | * @ap: ATA interface |
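
As in the earlier drivers, the per-driver error handler disappears and the prereset hook is wired straight into the ops table; the interesting part here is the -ENOENT return, which tells EH that the channel is disabled rather than broken. A sketch with hypothetical enable-bit offsets:

	static int foo_enable_prereset(struct ata_link *link,
				       unsigned long deadline)
	{
		struct ata_port *ap = link->ap;
		struct pci_dev *pdev = to_pci_dev(ap->host->dev);
		/* register/bit layout below is made up for illustration */
		static const struct pci_bits foo_enable_bits[] = {
			{ 0x41, 1, 0x08, 0x08 },	/* port 0 */
			{ 0x43, 1, 0x08, 0x08 },	/* port 1 */
		};

		if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
			return -ENOENT;	/* channel switched off in firmware */

		return ata_sff_prereset(link, deadline);
	}
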
@@ -180,60 +175,21 @@ static void triflex_bmdma_stop(struct ata_queued_cmd *qc) | |||
180 | } | 175 | } |
181 | 176 | ||
182 | static struct scsi_host_template triflex_sht = { | 177 | static struct scsi_host_template triflex_sht = { |
183 | .module = THIS_MODULE, | 178 | ATA_BMDMA_SHT(DRV_NAME), |
184 | .name = DRV_NAME, | ||
185 | .ioctl = ata_scsi_ioctl, | ||
186 | .queuecommand = ata_scsi_queuecmd, | ||
187 | .can_queue = ATA_DEF_QUEUE, | ||
188 | .this_id = ATA_SHT_THIS_ID, | ||
189 | .sg_tablesize = LIBATA_MAX_PRD, | ||
190 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
191 | .emulated = ATA_SHT_EMULATED, | ||
192 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
193 | .proc_name = DRV_NAME, | ||
194 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
195 | .slave_configure = ata_scsi_slave_config, | ||
196 | .slave_destroy = ata_scsi_slave_destroy, | ||
197 | .bios_param = ata_std_bios_param, | ||
198 | }; | 179 | }; |
199 | 180 | ||
200 | static struct ata_port_operations triflex_port_ops = { | 181 | static struct ata_port_operations triflex_port_ops = { |
201 | .set_piomode = triflex_set_piomode, | 182 | .inherits = &ata_bmdma_port_ops, |
202 | .mode_filter = ata_pci_default_filter, | ||
203 | |||
204 | .tf_load = ata_tf_load, | ||
205 | .tf_read = ata_tf_read, | ||
206 | .check_status = ata_check_status, | ||
207 | .exec_command = ata_exec_command, | ||
208 | .dev_select = ata_std_dev_select, | ||
209 | |||
210 | .freeze = ata_bmdma_freeze, | ||
211 | .thaw = ata_bmdma_thaw, | ||
212 | .error_handler = triflex_error_handler, | ||
213 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
214 | .cable_detect = ata_cable_40wire, | ||
215 | |||
216 | .bmdma_setup = ata_bmdma_setup, | ||
217 | .bmdma_start = triflex_bmdma_start, | 183 | .bmdma_start = triflex_bmdma_start, |
218 | .bmdma_stop = triflex_bmdma_stop, | 184 | .bmdma_stop = triflex_bmdma_stop, |
219 | .bmdma_status = ata_bmdma_status, | 185 | .cable_detect = ata_cable_40wire, |
220 | 186 | .set_piomode = triflex_set_piomode, | |
221 | .qc_prep = ata_qc_prep, | 187 | .prereset = triflex_prereset, |
222 | .qc_issue = ata_qc_issue_prot, | ||
223 | |||
224 | .data_xfer = ata_data_xfer, | ||
225 | |||
226 | .irq_handler = ata_interrupt, | ||
227 | .irq_clear = ata_bmdma_irq_clear, | ||
228 | .irq_on = ata_irq_on, | ||
229 | |||
230 | .port_start = ata_sff_port_start, | ||
231 | }; | 188 | }; |
232 | 189 | ||
233 | static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 190 | static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
234 | { | 191 | { |
235 | static const struct ata_port_info info = { | 192 | static const struct ata_port_info info = { |
236 | .sht = &triflex_sht, | ||
237 | .flags = ATA_FLAG_SLAVE_POSS, | 193 | .flags = ATA_FLAG_SLAVE_POSS, |
238 | .pio_mask = 0x1f, | 194 | .pio_mask = 0x1f, |
239 | .mwdma_mask = 0x07, | 195 | .mwdma_mask = 0x07, |
@@ -245,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
245 | if (!printed_version++) | 201 | if (!printed_version++) |
246 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); | 202 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); |
247 | 203 | ||
248 | return ata_pci_init_one(dev, ppi); | 204 | return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL); |
249 | } | 205 | } |
250 | 206 | ||
251 | static const struct pci_device_id triflex[] = { | 207 | static const struct pci_device_id triflex[] = { |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index d119a68c388f..d4840748fb5c 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -210,23 +210,11 @@ static int via_pre_reset(struct ata_link *link, unsigned long deadline) | |||
210 | return -ENOENT; | 210 | return -ENOENT; |
211 | } | 211 | } |
212 | 212 | ||
213 | return ata_std_prereset(link, deadline); | 213 | return ata_sff_prereset(link, deadline); |
214 | } | 214 | } |
215 | 215 | ||
216 | 216 | ||
217 | /** | 217 | /** |
218 | * via_error_handler - reset for VIA chips | ||
219 | * @ap: ATA port | ||
220 | * | ||
221 | * Handle the reset callback for the later chips with cable detect | ||
222 | */ | ||
223 | |||
224 | static void via_error_handler(struct ata_port *ap) | ||
225 | { | ||
226 | ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset); | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * via_do_set_mode - set initial PIO mode data | 218 | * via_do_set_mode - set initial PIO mode data |
231 | * @ap: ATA interface | 219 | * @ap: ATA interface |
232 | * @adev: ATA device | 220 | * @adev: ATA device |
@@ -335,89 +323,20 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
335 | } | 323 | } |
336 | 324 | ||
337 | static struct scsi_host_template via_sht = { | 325 | static struct scsi_host_template via_sht = { |
338 | .module = THIS_MODULE, | 326 | ATA_BMDMA_SHT(DRV_NAME), |
339 | .name = DRV_NAME, | ||
340 | .ioctl = ata_scsi_ioctl, | ||
341 | .queuecommand = ata_scsi_queuecmd, | ||
342 | .can_queue = ATA_DEF_QUEUE, | ||
343 | .this_id = ATA_SHT_THIS_ID, | ||
344 | .sg_tablesize = LIBATA_MAX_PRD, | ||
345 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
346 | .emulated = ATA_SHT_EMULATED, | ||
347 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
348 | .proc_name = DRV_NAME, | ||
349 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
350 | .slave_configure = ata_scsi_slave_config, | ||
351 | .slave_destroy = ata_scsi_slave_destroy, | ||
352 | .bios_param = ata_std_bios_param, | ||
353 | }; | 327 | }; |
354 | 328 | ||
355 | static struct ata_port_operations via_port_ops = { | 329 | static struct ata_port_operations via_port_ops = { |
330 | .inherits = &ata_bmdma_port_ops, | ||
331 | .cable_detect = via_cable_detect, | ||
356 | .set_piomode = via_set_piomode, | 332 | .set_piomode = via_set_piomode, |
357 | .set_dmamode = via_set_dmamode, | 333 | .set_dmamode = via_set_dmamode, |
358 | .mode_filter = ata_pci_default_filter, | 334 | .prereset = via_pre_reset, |
359 | |||
360 | .tf_load = ata_tf_load, | ||
361 | .tf_read = ata_tf_read, | ||
362 | .check_status = ata_check_status, | ||
363 | .exec_command = ata_exec_command, | ||
364 | .dev_select = ata_std_dev_select, | ||
365 | |||
366 | .freeze = ata_bmdma_freeze, | ||
367 | .thaw = ata_bmdma_thaw, | ||
368 | .error_handler = via_error_handler, | ||
369 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
370 | .cable_detect = via_cable_detect, | ||
371 | |||
372 | .bmdma_setup = ata_bmdma_setup, | ||
373 | .bmdma_start = ata_bmdma_start, | ||
374 | .bmdma_stop = ata_bmdma_stop, | ||
375 | .bmdma_status = ata_bmdma_status, | ||
376 | |||
377 | .qc_prep = ata_qc_prep, | ||
378 | .qc_issue = ata_qc_issue_prot, | ||
379 | |||
380 | .data_xfer = ata_data_xfer, | ||
381 | |||
382 | .irq_handler = ata_interrupt, | ||
383 | .irq_clear = ata_bmdma_irq_clear, | ||
384 | .irq_on = ata_irq_on, | ||
385 | |||
386 | .port_start = ata_sff_port_start, | ||
387 | }; | 335 | }; |
388 | 336 | ||
389 | static struct ata_port_operations via_port_ops_noirq = { | 337 | static struct ata_port_operations via_port_ops_noirq = { |
390 | .set_piomode = via_set_piomode, | 338 | .inherits = &via_port_ops, |
391 | .set_dmamode = via_set_dmamode, | 339 | .sff_data_xfer = ata_sff_data_xfer_noirq, |
392 | .mode_filter = ata_pci_default_filter, | ||
393 | |||
394 | .tf_load = ata_tf_load, | ||
395 | .tf_read = ata_tf_read, | ||
396 | .check_status = ata_check_status, | ||
397 | .exec_command = ata_exec_command, | ||
398 | .dev_select = ata_std_dev_select, | ||
399 | |||
400 | .freeze = ata_bmdma_freeze, | ||
401 | .thaw = ata_bmdma_thaw, | ||
402 | .error_handler = via_error_handler, | ||
403 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
404 | .cable_detect = via_cable_detect, | ||
405 | |||
406 | .bmdma_setup = ata_bmdma_setup, | ||
407 | .bmdma_start = ata_bmdma_start, | ||
408 | .bmdma_stop = ata_bmdma_stop, | ||
409 | .bmdma_status = ata_bmdma_status, | ||
410 | |||
411 | .qc_prep = ata_qc_prep, | ||
412 | .qc_issue = ata_qc_issue_prot, | ||
413 | |||
414 | .data_xfer = ata_data_xfer_noirq, | ||
415 | |||
416 | .irq_handler = ata_interrupt, | ||
417 | .irq_clear = ata_bmdma_irq_clear, | ||
418 | .irq_on = ata_irq_on, | ||
419 | |||
420 | .port_start = ata_sff_port_start, | ||
421 | }; | 340 | }; |
422 | 341 | ||
423 | /** | 342 | /** |
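
The no-IRQ variant shrinks to a single override: it inherits everything from via_port_ops and swaps in the polling-safe data transfer routine. The same one-line specialization, expressed with the placeholder ops from the sketches above:

	static struct ata_port_operations foo_port_ops_noirq = {
		.inherits	= &foo_base_ops,
		/* boards that must keep the IRQ masked during PIO transfers */
		.sff_data_xfer	= ata_sff_data_xfer_noirq,
	};
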
@@ -467,7 +386,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
467 | { | 386 | { |
468 | /* Early VIA without UDMA support */ | 387 | /* Early VIA without UDMA support */ |
469 | static const struct ata_port_info via_mwdma_info = { | 388 | static const struct ata_port_info via_mwdma_info = { |
470 | .sht = &via_sht, | ||
471 | .flags = ATA_FLAG_SLAVE_POSS, | 389 | .flags = ATA_FLAG_SLAVE_POSS, |
472 | .pio_mask = 0x1f, | 390 | .pio_mask = 0x1f, |
473 | .mwdma_mask = 0x07, | 391 | .mwdma_mask = 0x07, |
@@ -475,7 +393,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
475 | }; | 393 | }; |
476 | /* Ditto with IRQ masking required */ | 394 | /* Ditto with IRQ masking required */ |
477 | static const struct ata_port_info via_mwdma_info_borked = { | 395 | static const struct ata_port_info via_mwdma_info_borked = { |
478 | .sht = &via_sht, | ||
479 | .flags = ATA_FLAG_SLAVE_POSS, | 396 | .flags = ATA_FLAG_SLAVE_POSS, |
480 | .pio_mask = 0x1f, | 397 | .pio_mask = 0x1f, |
481 | .mwdma_mask = 0x07, | 398 | .mwdma_mask = 0x07, |
@@ -483,7 +400,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
483 | }; | 400 | }; |
484 | /* VIA UDMA 33 devices (and borked 66) */ | 401 | /* VIA UDMA 33 devices (and borked 66) */ |
485 | static const struct ata_port_info via_udma33_info = { | 402 | static const struct ata_port_info via_udma33_info = { |
486 | .sht = &via_sht, | ||
487 | .flags = ATA_FLAG_SLAVE_POSS, | 403 | .flags = ATA_FLAG_SLAVE_POSS, |
488 | .pio_mask = 0x1f, | 404 | .pio_mask = 0x1f, |
489 | .mwdma_mask = 0x07, | 405 | .mwdma_mask = 0x07, |
@@ -492,7 +408,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
492 | }; | 408 | }; |
493 | /* VIA UDMA 66 devices */ | 409 | /* VIA UDMA 66 devices */ |
494 | static const struct ata_port_info via_udma66_info = { | 410 | static const struct ata_port_info via_udma66_info = { |
495 | .sht = &via_sht, | ||
496 | .flags = ATA_FLAG_SLAVE_POSS, | 411 | .flags = ATA_FLAG_SLAVE_POSS, |
497 | .pio_mask = 0x1f, | 412 | .pio_mask = 0x1f, |
498 | .mwdma_mask = 0x07, | 413 | .mwdma_mask = 0x07, |
@@ -501,7 +416,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
501 | }; | 416 | }; |
502 | /* VIA UDMA 100 devices */ | 417 | /* VIA UDMA 100 devices */ |
503 | static const struct ata_port_info via_udma100_info = { | 418 | static const struct ata_port_info via_udma100_info = { |
504 | .sht = &via_sht, | ||
505 | .flags = ATA_FLAG_SLAVE_POSS, | 419 | .flags = ATA_FLAG_SLAVE_POSS, |
506 | .pio_mask = 0x1f, | 420 | .pio_mask = 0x1f, |
507 | .mwdma_mask = 0x07, | 421 | .mwdma_mask = 0x07, |
@@ -510,24 +424,27 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
510 | }; | 424 | }; |
511 | /* UDMA133 with bad AST (All current 133) */ | 425 | /* UDMA133 with bad AST (All current 133) */ |
512 | static const struct ata_port_info via_udma133_info = { | 426 | static const struct ata_port_info via_udma133_info = { |
513 | .sht = &via_sht, | ||
514 | .flags = ATA_FLAG_SLAVE_POSS, | 427 | .flags = ATA_FLAG_SLAVE_POSS, |
515 | .pio_mask = 0x1f, | 428 | .pio_mask = 0x1f, |
516 | .mwdma_mask = 0x07, | 429 | .mwdma_mask = 0x07, |
517 | .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */ | 430 | .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */ |
518 | .port_ops = &via_port_ops | 431 | .port_ops = &via_port_ops |
519 | }; | 432 | }; |
520 | struct ata_port_info type; | 433 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
521 | const struct ata_port_info *ppi[] = { &type, NULL }; | ||
522 | struct pci_dev *isa = NULL; | 434 | struct pci_dev *isa = NULL; |
523 | const struct via_isa_bridge *config; | 435 | const struct via_isa_bridge *config; |
524 | static int printed_version; | 436 | static int printed_version; |
525 | u8 enable; | 437 | u8 enable; |
526 | u32 timing; | 438 | u32 timing; |
439 | int rc; | ||
527 | 440 | ||
528 | if (!printed_version++) | 441 | if (!printed_version++) |
529 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 442 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
530 | 443 | ||
444 | rc = pcim_enable_device(pdev); | ||
445 | if (rc) | ||
446 | return rc; | ||
447 | |||
531 | /* To find out how the IDE will behave and what features we | 448 | /* To find out how the IDE will behave and what features we |
532 | actually have to look at the bridge not the IDE controller */ | 449 | actually have to look at the bridge not the IDE controller */ |
533 | for (config = via_isa_bridges; config->id; config++) | 450 | for (config = via_isa_bridges; config->id; config++) |
@@ -561,25 +478,25 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
561 | switch(config->flags & VIA_UDMA) { | 478 | switch(config->flags & VIA_UDMA) { |
562 | case VIA_UDMA_NONE: | 479 | case VIA_UDMA_NONE: |
563 | if (config->flags & VIA_NO_UNMASK) | 480 | if (config->flags & VIA_NO_UNMASK) |
564 | type = via_mwdma_info_borked; | 481 | ppi[0] = &via_mwdma_info_borked; |
565 | else | 482 | else |
566 | type = via_mwdma_info; | 483 | ppi[0] = &via_mwdma_info; |
567 | break; | 484 | break; |
568 | case VIA_UDMA_33: | 485 | case VIA_UDMA_33: |
569 | type = via_udma33_info; | 486 | ppi[0] = &via_udma33_info; |
570 | break; | 487 | break; |
571 | case VIA_UDMA_66: | 488 | case VIA_UDMA_66: |
572 | type = via_udma66_info; | 489 | ppi[0] = &via_udma66_info; |
573 | /* The 66 MHz devices require we enable the clock */ | 490 | /* The 66 MHz devices require we enable the clock */ |
574 | pci_read_config_dword(pdev, 0x50, &timing); | 491 | pci_read_config_dword(pdev, 0x50, &timing); |
575 | timing |= 0x80008; | 492 | timing |= 0x80008; |
576 | pci_write_config_dword(pdev, 0x50, timing); | 493 | pci_write_config_dword(pdev, 0x50, timing); |
577 | break; | 494 | break; |
578 | case VIA_UDMA_100: | 495 | case VIA_UDMA_100: |
579 | type = via_udma100_info; | 496 | ppi[0] = &via_udma100_info; |
580 | break; | 497 | break; |
581 | case VIA_UDMA_133: | 498 | case VIA_UDMA_133: |
582 | type = via_udma133_info; | 499 | ppi[0] = &via_udma133_info; |
583 | break; | 500 | break; |
584 | default: | 501 | default: |
585 | WARN_ON(1); | 502 | WARN_ON(1); |
@@ -594,9 +511,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
594 | } | 511 | } |
595 | 512 | ||
596 | /* We have established the device type, now fire it up */ | 513 | /* We have established the device type, now fire it up */ |
597 | type.private_data = (void *)config; | 514 | return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config); |
598 | |||
599 | return ata_pci_init_one(pdev, ppi); | ||
600 | } | 515 | } |
601 | 516 | ||
602 | #ifdef CONFIG_PM | 517 | #ifdef CONFIG_PM |
@@ -615,6 +530,11 @@ static int via_reinit_one(struct pci_dev *pdev) | |||
615 | u32 timing; | 530 | u32 timing; |
616 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | 531 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
617 | const struct via_isa_bridge *config = host->private_data; | 532 | const struct via_isa_bridge *config = host->private_data; |
533 | int rc; | ||
534 | |||
535 | rc = ata_pci_device_do_resume(pdev); | ||
536 | if (rc) | ||
537 | return rc; | ||
618 | 538 | ||
619 | via_config_fifo(pdev, config->flags); | 539 | via_config_fifo(pdev, config->flags); |
620 | 540 | ||
@@ -630,7 +550,9 @@ static int via_reinit_one(struct pci_dev *pdev) | |||
630 | timing &= ~0x80008; | 550 | timing &= ~0x80008; |
631 | pci_write_config_dword(pdev, 0x50, timing); | 551 | pci_write_config_dword(pdev, 0x50, timing); |
632 | } | 552 | } |
633 | return ata_pci_device_resume(pdev); | 553 | |
554 | ata_host_resume(host); | ||
555 | return 0; | ||
634 | } | 556 | } |
635 | #endif | 557 | #endif |
636 | 558 | ||
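As a hedged aside, the pata_via hunks above show the new probe/resume shape for SFF PCI drivers: select a const ata_port_info pointer into ppi[], hand the SHT and host-private data to ata_pci_sff_init_one(), and split resume into ata_pci_device_do_resume() before any register writes and ata_host_resume() at the end. Below is a minimal sketch of that shape with hypothetical foo_* names; only the libata/PCI helpers visible in the hunks are assumed.

/* Hedged sketch, not part of the patch: the "foo" driver is hypothetical;
 * only pcim_enable_device, ata_pci_sff_init_one, ata_pci_device_do_resume
 * and ata_host_resume are taken from the hunks above. */
#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT("pata_foo"),
};

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &foo_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	int rc;

	rc = pcim_enable_device(pdev);		/* managed enable, as in the patch */
	if (rc)
		return rc;

	/* the SHT and per-host private data are now arguments of the init helper */
	return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL);
}

#ifdef CONFIG_PM
static int foo_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);	/* restore PCI state before touching HW */
	if (rc)
		return rc;

	/* ...re-program controller-specific registers here... */

	ata_host_resume(host);			/* then let EH revalidate the devices */
	return 0;
}
#endif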
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c index 99c92eda217b..6e52a3573fbf 100644 --- a/drivers/ata/pata_winbond.c +++ b/drivers/ata/pata_winbond.c | |||
@@ -116,53 +116,20 @@ static unsigned int winbond_data_xfer(struct ata_device *dev, | |||
116 | buflen += 4 - slop; | 116 | buflen += 4 - slop; |
117 | } | 117 | } |
118 | } else | 118 | } else |
119 | buflen = ata_data_xfer(dev, buf, buflen, rw); | 119 | buflen = ata_sff_data_xfer(dev, buf, buflen, rw); |
120 | 120 | ||
121 | return buflen; | 121 | return buflen; |
122 | } | 122 | } |
123 | 123 | ||
124 | static struct scsi_host_template winbond_sht = { | 124 | static struct scsi_host_template winbond_sht = { |
125 | .module = THIS_MODULE, | 125 | ATA_PIO_SHT(DRV_NAME), |
126 | .name = DRV_NAME, | ||
127 | .ioctl = ata_scsi_ioctl, | ||
128 | .queuecommand = ata_scsi_queuecmd, | ||
129 | .can_queue = ATA_DEF_QUEUE, | ||
130 | .this_id = ATA_SHT_THIS_ID, | ||
131 | .sg_tablesize = LIBATA_MAX_PRD, | ||
132 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
133 | .emulated = ATA_SHT_EMULATED, | ||
134 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
135 | .proc_name = DRV_NAME, | ||
136 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
137 | .slave_configure = ata_scsi_slave_config, | ||
138 | .slave_destroy = ata_scsi_slave_destroy, | ||
139 | .bios_param = ata_std_bios_param, | ||
140 | }; | 126 | }; |
141 | 127 | ||
142 | static struct ata_port_operations winbond_port_ops = { | 128 | static struct ata_port_operations winbond_port_ops = { |
143 | .set_piomode = winbond_set_piomode, | 129 | .inherits = &ata_sff_port_ops, |
144 | 130 | .sff_data_xfer = winbond_data_xfer, | |
145 | .tf_load = ata_tf_load, | ||
146 | .tf_read = ata_tf_read, | ||
147 | .check_status = ata_check_status, | ||
148 | .exec_command = ata_exec_command, | ||
149 | .dev_select = ata_std_dev_select, | ||
150 | |||
151 | .freeze = ata_bmdma_freeze, | ||
152 | .thaw = ata_bmdma_thaw, | ||
153 | .error_handler = ata_bmdma_error_handler, | ||
154 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
155 | .cable_detect = ata_cable_40wire, | 131 | .cable_detect = ata_cable_40wire, |
156 | 132 | .set_piomode = winbond_set_piomode, | |
157 | .qc_prep = ata_qc_prep, | ||
158 | .qc_issue = ata_qc_issue_prot, | ||
159 | |||
160 | .data_xfer = winbond_data_xfer, | ||
161 | |||
162 | .irq_clear = ata_bmdma_irq_clear, | ||
163 | .irq_on = ata_irq_on, | ||
164 | |||
165 | .port_start = ata_sff_port_start, | ||
166 | }; | 133 | }; |
167 | 134 | ||
168 | /** | 135 | /** |
@@ -231,7 +198,7 @@ static __init int winbond_init_one(unsigned long port) | |||
231 | ap->ioaddr.cmd_addr = cmd_addr; | 198 | ap->ioaddr.cmd_addr = cmd_addr; |
232 | ap->ioaddr.altstatus_addr = ctl_addr; | 199 | ap->ioaddr.altstatus_addr = ctl_addr; |
233 | ap->ioaddr.ctl_addr = ctl_addr; | 200 | ap->ioaddr.ctl_addr = ctl_addr; |
234 | ata_std_ports(&ap->ioaddr); | 201 | ata_sff_std_ports(&ap->ioaddr); |
235 | 202 | ||
236 | /* hook in a private data structure per channel */ | 203 | /* hook in a private data structure per channel */ |
237 | host->private_data = &winbond_data[nr_winbond_host]; | 204 | host->private_data = &winbond_data[nr_winbond_host]; |
@@ -239,7 +206,7 @@ static __init int winbond_init_one(unsigned long port) | |||
239 | winbond_data[nr_winbond_host].platform_dev = pdev; | 206 | winbond_data[nr_winbond_host].platform_dev = pdev; |
240 | 207 | ||
241 | /* activate */ | 208 | /* activate */ |
242 | rc = ata_host_activate(host, 14 + i, ata_interrupt, 0, | 209 | rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0, |
243 | &winbond_sht); | 210 | &winbond_sht); |
244 | if (rc) | 211 | if (rc) |
245 | goto err_unregister; | 212 | goto err_unregister; |
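As a hedged aside, the pata_winbond hunks above collapse the old boilerplate into ATA_PIO_SHT() and an ops table that inherits from ata_sff_port_ops, keeping only the overrides. A minimal sketch of that skeleton, with hypothetical foo_* names:

/* Hedged sketch, not part of the patch: all foo_* names are hypothetical. */
#include <linux/libata.h>

static struct scsi_host_template foo_sht = {
	ATA_PIO_SHT("pata_foo"),		/* replaces the hand-rolled SHT fields */
};

static unsigned int foo_data_xfer(struct ata_device *dev, unsigned char *buf,
				  unsigned int buflen, int rw)
{
	/* a real driver would special-case odd transfer lengths here */
	return ata_sff_data_xfer(dev, buf, buflen, rw);
}

static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* controller-specific PIO timing setup would go here */
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_sff_port_ops,	/* taskfile access, EH and IRQ defaults */
	.sff_data_xfer	= foo_data_xfer,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= foo_set_piomode,
};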
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 8e1b7e9c0ae4..be53545c9f64 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c | |||
@@ -131,56 +131,33 @@ struct adma_port_priv { | |||
131 | static int adma_ata_init_one(struct pci_dev *pdev, | 131 | static int adma_ata_init_one(struct pci_dev *pdev, |
132 | const struct pci_device_id *ent); | 132 | const struct pci_device_id *ent); |
133 | static int adma_port_start(struct ata_port *ap); | 133 | static int adma_port_start(struct ata_port *ap); |
134 | static void adma_host_stop(struct ata_host *host); | ||
135 | static void adma_port_stop(struct ata_port *ap); | 134 | static void adma_port_stop(struct ata_port *ap); |
136 | static void adma_qc_prep(struct ata_queued_cmd *qc); | 135 | static void adma_qc_prep(struct ata_queued_cmd *qc); |
137 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); | 136 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); |
138 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); | 137 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); |
139 | static void adma_bmdma_stop(struct ata_queued_cmd *qc); | ||
140 | static u8 adma_bmdma_status(struct ata_port *ap); | ||
141 | static void adma_irq_clear(struct ata_port *ap); | ||
142 | static void adma_freeze(struct ata_port *ap); | 138 | static void adma_freeze(struct ata_port *ap); |
143 | static void adma_thaw(struct ata_port *ap); | 139 | static void adma_thaw(struct ata_port *ap); |
144 | static void adma_error_handler(struct ata_port *ap); | 140 | static int adma_prereset(struct ata_link *link, unsigned long deadline); |
145 | 141 | ||
146 | static struct scsi_host_template adma_ata_sht = { | 142 | static struct scsi_host_template adma_ata_sht = { |
147 | .module = THIS_MODULE, | 143 | ATA_BASE_SHT(DRV_NAME), |
148 | .name = DRV_NAME, | ||
149 | .ioctl = ata_scsi_ioctl, | ||
150 | .queuecommand = ata_scsi_queuecmd, | ||
151 | .slave_configure = ata_scsi_slave_config, | ||
152 | .slave_destroy = ata_scsi_slave_destroy, | ||
153 | .bios_param = ata_std_bios_param, | ||
154 | .proc_name = DRV_NAME, | ||
155 | .can_queue = ATA_DEF_QUEUE, | ||
156 | .this_id = ATA_SHT_THIS_ID, | ||
157 | .sg_tablesize = LIBATA_MAX_PRD, | 144 | .sg_tablesize = LIBATA_MAX_PRD, |
158 | .dma_boundary = ADMA_DMA_BOUNDARY, | 145 | .dma_boundary = ADMA_DMA_BOUNDARY, |
159 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
160 | .use_clustering = ENABLE_CLUSTERING, | ||
161 | .emulated = ATA_SHT_EMULATED, | ||
162 | }; | 146 | }; |
163 | 147 | ||
164 | static const struct ata_port_operations adma_ata_ops = { | 148 | static struct ata_port_operations adma_ata_ops = { |
165 | .tf_load = ata_tf_load, | 149 | .inherits = &ata_sff_port_ops, |
166 | .tf_read = ata_tf_read, | 150 | |
167 | .exec_command = ata_exec_command, | ||
168 | .check_status = ata_check_status, | ||
169 | .dev_select = ata_std_dev_select, | ||
170 | .check_atapi_dma = adma_check_atapi_dma, | 151 | .check_atapi_dma = adma_check_atapi_dma, |
171 | .data_xfer = ata_data_xfer, | ||
172 | .qc_prep = adma_qc_prep, | 152 | .qc_prep = adma_qc_prep, |
173 | .qc_issue = adma_qc_issue, | 153 | .qc_issue = adma_qc_issue, |
154 | |||
174 | .freeze = adma_freeze, | 155 | .freeze = adma_freeze, |
175 | .thaw = adma_thaw, | 156 | .thaw = adma_thaw, |
176 | .error_handler = adma_error_handler, | 157 | .prereset = adma_prereset, |
177 | .irq_clear = adma_irq_clear, | 158 | |
178 | .irq_on = ata_irq_on, | ||
179 | .port_start = adma_port_start, | 159 | .port_start = adma_port_start, |
180 | .port_stop = adma_port_stop, | 160 | .port_stop = adma_port_stop, |
181 | .host_stop = adma_host_stop, | ||
182 | .bmdma_stop = adma_bmdma_stop, | ||
183 | .bmdma_status = adma_bmdma_status, | ||
184 | }; | 161 | }; |
185 | 162 | ||
186 | static struct ata_port_info adma_port_info[] = { | 163 | static struct ata_port_info adma_port_info[] = { |
@@ -213,21 +190,6 @@ static int adma_check_atapi_dma(struct ata_queued_cmd *qc) | |||
213 | return 1; /* ATAPI DMA not yet supported */ | 190 | return 1; /* ATAPI DMA not yet supported */ |
214 | } | 191 | } |
215 | 192 | ||
216 | static void adma_bmdma_stop(struct ata_queued_cmd *qc) | ||
217 | { | ||
218 | /* nothing */ | ||
219 | } | ||
220 | |||
221 | static u8 adma_bmdma_status(struct ata_port *ap) | ||
222 | { | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static void adma_irq_clear(struct ata_port *ap) | ||
227 | { | ||
228 | /* nothing */ | ||
229 | } | ||
230 | |||
231 | static void adma_reset_engine(struct ata_port *ap) | 193 | static void adma_reset_engine(struct ata_port *ap) |
232 | { | 194 | { |
233 | void __iomem *chan = ADMA_PORT_REGS(ap); | 195 | void __iomem *chan = ADMA_PORT_REGS(ap); |
@@ -246,7 +208,7 @@ static void adma_reinit_engine(struct ata_port *ap) | |||
246 | 208 | ||
247 | /* mask/clear ATA interrupts */ | 209 | /* mask/clear ATA interrupts */ |
248 | writeb(ATA_NIEN, ap->ioaddr.ctl_addr); | 210 | writeb(ATA_NIEN, ap->ioaddr.ctl_addr); |
249 | ata_check_status(ap); | 211 | ata_sff_check_status(ap); |
250 | 212 | ||
251 | /* reset the ADMA engine */ | 213 | /* reset the ADMA engine */ |
252 | adma_reset_engine(ap); | 214 | adma_reset_engine(ap); |
@@ -281,7 +243,7 @@ static void adma_freeze(struct ata_port *ap) | |||
281 | 243 | ||
282 | /* mask/clear ATA interrupts */ | 244 | /* mask/clear ATA interrupts */ |
283 | writeb(ATA_NIEN, ap->ioaddr.ctl_addr); | 245 | writeb(ATA_NIEN, ap->ioaddr.ctl_addr); |
284 | ata_check_status(ap); | 246 | ata_sff_check_status(ap); |
285 | 247 | ||
286 | /* reset ADMA to idle state */ | 248 | /* reset ADMA to idle state */ |
287 | writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); | 249 | writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); |
@@ -304,13 +266,7 @@ static int adma_prereset(struct ata_link *link, unsigned long deadline) | |||
304 | pp->state = adma_state_mmio; | 266 | pp->state = adma_state_mmio; |
305 | adma_reinit_engine(ap); | 267 | adma_reinit_engine(ap); |
306 | 268 | ||
307 | return ata_std_prereset(link, deadline); | 269 | return ata_sff_prereset(link, deadline); |
308 | } | ||
309 | |||
310 | static void adma_error_handler(struct ata_port *ap) | ||
311 | { | ||
312 | ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL, | ||
313 | ata_std_postreset); | ||
314 | } | 270 | } |
315 | 271 | ||
316 | static int adma_fill_sg(struct ata_queued_cmd *qc) | 272 | static int adma_fill_sg(struct ata_queued_cmd *qc) |
@@ -366,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) | |||
366 | 322 | ||
367 | adma_enter_reg_mode(qc->ap); | 323 | adma_enter_reg_mode(qc->ap); |
368 | if (qc->tf.protocol != ATA_PROT_DMA) { | 324 | if (qc->tf.protocol != ATA_PROT_DMA) { |
369 | ata_qc_prep(qc); | 325 | ata_sff_qc_prep(qc); |
370 | return; | 326 | return; |
371 | } | 327 | } |
372 | 328 | ||
@@ -465,7 +421,7 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc) | |||
465 | } | 421 | } |
466 | 422 | ||
467 | pp->state = adma_state_mmio; | 423 | pp->state = adma_state_mmio; |
468 | return ata_qc_issue_prot(qc); | 424 | return ata_sff_qc_issue(qc); |
469 | } | 425 | } |
470 | 426 | ||
471 | static inline unsigned int adma_intr_pkt(struct ata_host *host) | 427 | static inline unsigned int adma_intr_pkt(struct ata_host *host) |
@@ -536,7 +492,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) | |||
536 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 492 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
537 | 493 | ||
538 | /* check main status, clearing INTRQ */ | 494 | /* check main status, clearing INTRQ */ |
539 | u8 status = ata_check_status(ap); | 495 | u8 status = ata_sff_check_status(ap); |
540 | if ((status & ATA_BUSY)) | 496 | if ((status & ATA_BUSY)) |
541 | continue; | 497 | continue; |
542 | DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", | 498 | DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", |
@@ -633,14 +589,6 @@ static void adma_port_stop(struct ata_port *ap) | |||
633 | adma_reset_engine(ap); | 589 | adma_reset_engine(ap); |
634 | } | 590 | } |
635 | 591 | ||
636 | static void adma_host_stop(struct ata_host *host) | ||
637 | { | ||
638 | unsigned int port_no; | ||
639 | |||
640 | for (port_no = 0; port_no < ADMA_PORTS; ++port_no) | ||
641 | adma_reset_engine(host->ports[port_no]); | ||
642 | } | ||
643 | |||
644 | static void adma_host_init(struct ata_host *host, unsigned int chip_id) | 592 | static void adma_host_init(struct ata_host *host, unsigned int chip_id) |
645 | { | 593 | { |
646 | unsigned int port_no; | 594 | unsigned int port_no; |
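As a hedged aside, the pdc_adma hunks above replace a hand-rolled error_handler (which wrapped ata_do_eh()) with a single .prereset override that the inherited handler invokes. A sketch of that pattern, with hypothetical foo_* names:

/* Hedged sketch, not part of the patch: foo_* is hypothetical.  With ops
 * inheritance a driver overrides only the reset hooks it needs; the inherited
 * error_handler calls them, so the old ata_do_eh() wrapper disappears. */
#include <linux/libata.h>

static int foo_prereset(struct ata_link *link, unsigned long deadline)
{
	/* driver-specific engine re-initialisation would go here */
	return ata_sff_prereset(link, deadline);
}

static struct ata_port_operations foo_ops = {
	.inherits	= &ata_sff_port_ops,	/* supplies error_handler and softreset */
	.prereset	= foo_prereset,		/* picked up automatically by EH */
};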
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 9d1e3cad4aa9..fddd346b1d57 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -35,7 +35,6 @@ enum { | |||
35 | SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 35 | SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
36 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 36 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
37 | ATA_FLAG_NCQ), | 37 | ATA_FLAG_NCQ), |
38 | SATA_FSL_HOST_LFLAGS = ATA_LFLAG_SKIP_D2H_BSY, | ||
39 | 38 | ||
40 | SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, | 39 | SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, |
41 | SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ | 40 | SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ |
@@ -245,17 +244,6 @@ struct sata_fsl_port_priv { | |||
245 | dma_addr_t cmdslot_paddr; | 244 | dma_addr_t cmdslot_paddr; |
246 | struct command_desc *cmdentry; | 245 | struct command_desc *cmdentry; |
247 | dma_addr_t cmdentry_paddr; | 246 | dma_addr_t cmdentry_paddr; |
248 | |||
249 | /* | ||
250 | * SATA FSL controller has a Status FIS which should contain the | ||
251 | * received D2H FIS & taskfile registers. This SFIS is present in | ||
252 | * the command descriptor, and to have a ready reference to it, | ||
253 | * we are caching it here, quite similar to what is done in H/W on | ||
254 | * AHCI compliant devices by copying taskfile fields to a 32-bit | ||
255 | * register. | ||
256 | */ | ||
257 | |||
258 | struct ata_taskfile tf; | ||
259 | }; | 247 | }; |
260 | 248 | ||
261 | /* | 249 | /* |
@@ -465,6 +453,20 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) | |||
465 | return 0; | 453 | return 0; |
466 | } | 454 | } |
467 | 455 | ||
456 | static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc) | ||
457 | { | ||
458 | struct sata_fsl_port_priv *pp = qc->ap->private_data; | ||
459 | struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; | ||
460 | void __iomem *hcr_base = host_priv->hcr_base; | ||
461 | unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); | ||
462 | struct command_desc *cd; | ||
463 | |||
464 | cd = pp->cmdentry + tag; | ||
465 | |||
466 | ata_tf_from_fis(cd->sfis, &qc->result_tf); | ||
467 | return true; | ||
468 | } | ||
469 | |||
468 | static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in, | 470 | static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in, |
469 | u32 val) | 471 | u32 val) |
470 | { | 472 | { |
@@ -556,38 +558,6 @@ static void sata_fsl_thaw(struct ata_port *ap) | |||
556 | ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); | 558 | ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); |
557 | } | 559 | } |
558 | 560 | ||
559 | /* | ||
560 | * NOTE : 1st D2H FIS from device does not update sfis in command descriptor. | ||
561 | */ | ||
562 | static inline void sata_fsl_cache_taskfile_from_d2h_fis(struct ata_queued_cmd | ||
563 | *qc, | ||
564 | struct ata_port *ap) | ||
565 | { | ||
566 | struct sata_fsl_port_priv *pp = ap->private_data; | ||
567 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | ||
568 | void __iomem *hcr_base = host_priv->hcr_base; | ||
569 | unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); | ||
570 | struct command_desc *cd; | ||
571 | |||
572 | cd = pp->cmdentry + tag; | ||
573 | |||
574 | ata_tf_from_fis(cd->sfis, &pp->tf); | ||
575 | } | ||
576 | |||
577 | static u8 sata_fsl_check_status(struct ata_port *ap) | ||
578 | { | ||
579 | struct sata_fsl_port_priv *pp = ap->private_data; | ||
580 | |||
581 | return pp->tf.command; | ||
582 | } | ||
583 | |||
584 | static void sata_fsl_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
585 | { | ||
586 | struct sata_fsl_port_priv *pp = ap->private_data; | ||
587 | |||
588 | *tf = pp->tf; | ||
589 | } | ||
590 | |||
591 | static int sata_fsl_port_start(struct ata_port *ap) | 561 | static int sata_fsl_port_start(struct ata_port *ap) |
592 | { | 562 | { |
593 | struct device *dev = ap->host->dev; | 563 | struct device *dev = ap->host->dev; |
@@ -708,6 +678,15 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap) | |||
708 | return ata_dev_classify(&tf); | 678 | return ata_dev_classify(&tf); |
709 | } | 679 | } |
710 | 680 | ||
681 | static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline) | ||

682 | { | ||
683 | /* FIXME: Never skip softreset, sata_fsl_softreset() is a | ||
684 | * combination of soft and hard resets. sata_fsl_softreset() | ||
685 | * needs to be split into soft and hard resets. | ||
686 | */ | ||
687 | return 0; | ||
688 | } | ||
689 | |||
711 | static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, | 690 | static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, |
712 | unsigned long deadline) | 691 | unsigned long deadline) |
713 | { | 692 | { |
@@ -913,16 +892,6 @@ err: | |||
913 | return -EIO; | 892 | return -EIO; |
914 | } | 893 | } |
915 | 894 | ||
916 | static void sata_fsl_error_handler(struct ata_port *ap) | ||
917 | { | ||
918 | |||
919 | DPRINTK("in xx_error_handler\n"); | ||
920 | |||
921 | /* perform recovery */ | ||
922 | ata_do_eh(ap, ata_std_prereset, sata_fsl_softreset, sata_std_hardreset, | ||
923 | ata_std_postreset); | ||
924 | } | ||
925 | |||
926 | static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) | 895 | static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) |
927 | { | 896 | { |
928 | if (qc->flags & ATA_QCFLAG_FAILED) | 897 | if (qc->flags & ATA_QCFLAG_FAILED) |
@@ -934,11 +903,6 @@ static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) | |||
934 | } | 903 | } |
935 | } | 904 | } |
936 | 905 | ||
937 | static void sata_fsl_irq_clear(struct ata_port *ap) | ||
938 | { | ||
939 | /* unused */ | ||
940 | } | ||
941 | |||
942 | static void sata_fsl_error_intr(struct ata_port *ap) | 906 | static void sata_fsl_error_intr(struct ata_port *ap) |
943 | { | 907 | { |
944 | struct ata_link *link = &ap->link; | 908 | struct ata_link *link = &ap->link; |
@@ -996,7 +960,7 @@ static void sata_fsl_error_intr(struct ata_port *ap) | |||
996 | /* handle fatal errors */ | 960 | /* handle fatal errors */ |
997 | if (hstatus & FATAL_ERROR_DECODE) { | 961 | if (hstatus & FATAL_ERROR_DECODE) { |
998 | err_mask |= AC_ERR_ATA_BUS; | 962 | err_mask |= AC_ERR_ATA_BUS; |
999 | action |= ATA_EH_SOFTRESET; | 963 | action |= ATA_EH_RESET; |
1000 | /* how will fatal error interrupts be completed ?? */ | 964 | /* how will fatal error interrupts be completed ?? */ |
1001 | freeze = 1; | 965 | freeze = 1; |
1002 | } | 966 | } |
@@ -1013,10 +977,9 @@ static void sata_fsl_error_intr(struct ata_port *ap) | |||
1013 | /* record error info */ | 977 | /* record error info */ |
1014 | qc = ata_qc_from_tag(ap, link->active_tag); | 978 | qc = ata_qc_from_tag(ap, link->active_tag); |
1015 | 979 | ||
1016 | if (qc) { | 980 | if (qc) |
1017 | sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap); | ||
1018 | qc->err_mask |= err_mask; | 981 | qc->err_mask |= err_mask; |
1019 | } else | 982 | else |
1020 | ehi->err_mask |= err_mask; | 983 | ehi->err_mask |= err_mask; |
1021 | 984 | ||
1022 | ehi->action |= action; | 985 | ehi->action |= action; |
@@ -1029,14 +992,6 @@ static void sata_fsl_error_intr(struct ata_port *ap) | |||
1029 | ata_port_abort(ap); | 992 | ata_port_abort(ap); |
1030 | } | 993 | } |
1031 | 994 | ||
1032 | static void sata_fsl_qc_complete(struct ata_queued_cmd *qc) | ||
1033 | { | ||
1034 | if (qc->flags & ATA_QCFLAG_RESULT_TF) { | ||
1035 | DPRINTK("xx_qc_complete called\n"); | ||
1036 | sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap); | ||
1037 | } | ||
1038 | } | ||
1039 | |||
1040 | static void sata_fsl_host_intr(struct ata_port *ap) | 995 | static void sata_fsl_host_intr(struct ata_port *ap) |
1041 | { | 996 | { |
1042 | struct ata_link *link = &ap->link; | 997 | struct ata_link *link = &ap->link; |
@@ -1077,10 +1032,8 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1077 | for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { | 1032 | for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { |
1078 | if (qc_active & (1 << i)) { | 1033 | if (qc_active & (1 << i)) { |
1079 | qc = ata_qc_from_tag(ap, i); | 1034 | qc = ata_qc_from_tag(ap, i); |
1080 | if (qc) { | 1035 | if (qc) |
1081 | sata_fsl_qc_complete(qc); | ||
1082 | ata_qc_complete(qc); | 1036 | ata_qc_complete(qc); |
1083 | } | ||
1084 | DPRINTK | 1037 | DPRINTK |
1085 | ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", | 1038 | ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", |
1086 | i, ioread32(hcr_base + CC), | 1039 | i, ioread32(hcr_base + CC), |
@@ -1096,10 +1049,8 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1096 | DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n", | 1049 | DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n", |
1097 | link->active_tag, ioread32(hcr_base + CC)); | 1050 | link->active_tag, ioread32(hcr_base + CC)); |
1098 | 1051 | ||
1099 | if (qc) { | 1052 | if (qc) |
1100 | sata_fsl_qc_complete(qc); | ||
1101 | ata_qc_complete(qc); | 1053 | ata_qc_complete(qc); |
1102 | } | ||
1103 | } else { | 1054 | } else { |
1104 | /* Spurious Interrupt!! */ | 1055 | /* Spurious Interrupt!! */ |
1105 | DPRINTK("spurious interrupt!!, CC = 0x%x\n", | 1056 | DPRINTK("spurious interrupt!!, CC = 0x%x\n", |
@@ -1197,41 +1148,26 @@ static int sata_fsl_init_controller(struct ata_host *host) | |||
1197 | * scsi mid-layer and libata interface structures | 1148 | * scsi mid-layer and libata interface structures |
1198 | */ | 1149 | */ |
1199 | static struct scsi_host_template sata_fsl_sht = { | 1150 | static struct scsi_host_template sata_fsl_sht = { |
1200 | .module = THIS_MODULE, | 1151 | ATA_NCQ_SHT("sata_fsl"), |
1201 | .name = "sata_fsl", | ||
1202 | .ioctl = ata_scsi_ioctl, | ||
1203 | .queuecommand = ata_scsi_queuecmd, | ||
1204 | .change_queue_depth = ata_scsi_change_queue_depth, | ||
1205 | .can_queue = SATA_FSL_QUEUE_DEPTH, | 1152 | .can_queue = SATA_FSL_QUEUE_DEPTH, |
1206 | .this_id = ATA_SHT_THIS_ID, | ||
1207 | .sg_tablesize = SATA_FSL_MAX_PRD_USABLE, | 1153 | .sg_tablesize = SATA_FSL_MAX_PRD_USABLE, |
1208 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
1209 | .emulated = ATA_SHT_EMULATED, | ||
1210 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
1211 | .proc_name = "sata_fsl", | ||
1212 | .dma_boundary = ATA_DMA_BOUNDARY, | 1154 | .dma_boundary = ATA_DMA_BOUNDARY, |
1213 | .slave_configure = ata_scsi_slave_config, | ||
1214 | .slave_destroy = ata_scsi_slave_destroy, | ||
1215 | .bios_param = ata_std_bios_param, | ||
1216 | }; | 1155 | }; |
1217 | 1156 | ||
1218 | static const struct ata_port_operations sata_fsl_ops = { | 1157 | static const struct ata_port_operations sata_fsl_ops = { |
1219 | .check_status = sata_fsl_check_status, | 1158 | .inherits = &sata_port_ops, |
1220 | .check_altstatus = sata_fsl_check_status, | ||
1221 | .dev_select = ata_noop_dev_select, | ||
1222 | |||
1223 | .tf_read = sata_fsl_tf_read, | ||
1224 | 1159 | ||
1225 | .qc_prep = sata_fsl_qc_prep, | 1160 | .qc_prep = sata_fsl_qc_prep, |
1226 | .qc_issue = sata_fsl_qc_issue, | 1161 | .qc_issue = sata_fsl_qc_issue, |
1227 | .irq_clear = sata_fsl_irq_clear, | 1162 | .qc_fill_rtf = sata_fsl_qc_fill_rtf, |
1228 | 1163 | ||
1229 | .scr_read = sata_fsl_scr_read, | 1164 | .scr_read = sata_fsl_scr_read, |
1230 | .scr_write = sata_fsl_scr_write, | 1165 | .scr_write = sata_fsl_scr_write, |
1231 | 1166 | ||
1232 | .freeze = sata_fsl_freeze, | 1167 | .freeze = sata_fsl_freeze, |
1233 | .thaw = sata_fsl_thaw, | 1168 | .thaw = sata_fsl_thaw, |
1234 | .error_handler = sata_fsl_error_handler, | 1169 | .prereset = sata_fsl_prereset, |
1170 | .softreset = sata_fsl_softreset, | ||
1235 | .post_internal_cmd = sata_fsl_post_internal_cmd, | 1171 | .post_internal_cmd = sata_fsl_post_internal_cmd, |
1236 | 1172 | ||
1237 | .port_start = sata_fsl_port_start, | 1173 | .port_start = sata_fsl_port_start, |
@@ -1241,7 +1177,6 @@ static const struct ata_port_operations sata_fsl_ops = { | |||
1241 | static const struct ata_port_info sata_fsl_port_info[] = { | 1177 | static const struct ata_port_info sata_fsl_port_info[] = { |
1242 | { | 1178 | { |
1243 | .flags = SATA_FSL_HOST_FLAGS, | 1179 | .flags = SATA_FSL_HOST_FLAGS, |
1244 | .link_flags = SATA_FSL_HOST_LFLAGS, | ||
1245 | .pio_mask = 0x1f, /* pio 0-4 */ | 1180 | .pio_mask = 0x1f, /* pio 0-4 */ |
1246 | .udma_mask = 0x7f, /* udma 0-6 */ | 1181 | .udma_mask = 0x7f, /* udma 0-6 */ |
1247 | .port_ops = &sata_fsl_ops, | 1182 | .port_ops = &sata_fsl_ops, |
@@ -1297,11 +1232,6 @@ static int sata_fsl_probe(struct of_device *ofdev, | |||
1297 | /* host->iomap is not used currently */ | 1232 | /* host->iomap is not used currently */ |
1298 | host->private_data = host_priv; | 1233 | host->private_data = host_priv; |
1299 | 1234 | ||
1300 | /* setup port(s) */ | ||
1301 | |||
1302 | host->ports[0]->ioaddr.cmd_addr = host_priv->hcr_base; | ||
1303 | host->ports[0]->ioaddr.scr_addr = host_priv->ssr_base; | ||
1304 | |||
1305 | /* initialize host controller */ | 1235 | /* initialize host controller */ |
1306 | sata_fsl_init_controller(host); | 1236 | sata_fsl_init_controller(host); |
1307 | 1237 | ||
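As a hedged aside, the sata_fsl hunks above drop the cached taskfile plus check_status/tf_read emulation in favour of a ->qc_fill_rtf() hook that converts the captured D2H FIS on demand. A sketch of that hook, with hypothetical foo_* names:

/* Hedged sketch, not part of the patch: the foo_* structure is hypothetical;
 * only ata_tf_from_fis() and the qc fields come from libata. */
#include <linux/libata.h>

struct foo_port_priv {
	u8 rx_fis[24];			/* hypothetical copy of the received D2H FIS */
};

static bool foo_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct foo_port_priv *pp = qc->ap->private_data;

	/* translate the hardware-captured FIS into the result taskfile */
	ata_tf_from_fis(pp->rx_fis, &qc->result_tf);
	return true;			/* result_tf is now valid */
}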
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 59e65edc5820..d27bb9a2568f 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -109,21 +109,7 @@ struct inic_port_priv { | |||
109 | }; | 109 | }; |
110 | 110 | ||
111 | static struct scsi_host_template inic_sht = { | 111 | static struct scsi_host_template inic_sht = { |
112 | .module = THIS_MODULE, | 112 | ATA_BMDMA_SHT(DRV_NAME), |
113 | .name = DRV_NAME, | ||
114 | .ioctl = ata_scsi_ioctl, | ||
115 | .queuecommand = ata_scsi_queuecmd, | ||
116 | .can_queue = ATA_DEF_QUEUE, | ||
117 | .this_id = ATA_SHT_THIS_ID, | ||
118 | .sg_tablesize = LIBATA_MAX_PRD, | ||
119 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
120 | .emulated = ATA_SHT_EMULATED, | ||
121 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
122 | .proc_name = DRV_NAME, | ||
123 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
124 | .slave_configure = ata_scsi_slave_config, | ||
125 | .slave_destroy = ata_scsi_slave_destroy, | ||
126 | .bios_param = ata_std_bios_param, | ||
127 | }; | 113 | }; |
128 | 114 | ||
129 | static const int scr_map[] = { | 115 | static const int scr_map[] = { |
@@ -236,7 +222,7 @@ static void inic_bmdma_setup(struct ata_queued_cmd *qc) | |||
236 | writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); | 222 | writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); |
237 | 223 | ||
238 | /* issue r/w command */ | 224 | /* issue r/w command */ |
239 | ap->ops->exec_command(ap, &qc->tf); | 225 | ap->ops->sff_exec_command(ap, &qc->tf); |
240 | } | 226 | } |
241 | 227 | ||
242 | static void inic_bmdma_start(struct ata_queued_cmd *qc) | 228 | static void inic_bmdma_start(struct ata_queued_cmd *qc) |
@@ -266,11 +252,6 @@ static u8 inic_bmdma_status(struct ata_port *ap) | |||
266 | return ATA_DMA_INTR; | 252 | return ATA_DMA_INTR; |
267 | } | 253 | } |
268 | 254 | ||
269 | static void inic_irq_clear(struct ata_port *ap) | ||
270 | { | ||
271 | /* noop */ | ||
272 | } | ||
273 | |||
274 | static void inic_host_intr(struct ata_port *ap) | 255 | static void inic_host_intr(struct ata_port *ap) |
275 | { | 256 | { |
276 | void __iomem *port_base = inic_port_base(ap); | 257 | void __iomem *port_base = inic_port_base(ap); |
@@ -286,14 +267,14 @@ static void inic_host_intr(struct ata_port *ap) | |||
286 | ata_qc_from_tag(ap, ap->link.active_tag); | 267 | ata_qc_from_tag(ap, ap->link.active_tag); |
287 | 268 | ||
288 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | 269 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { |
289 | ata_chk_status(ap); /* clear ATA interrupt */ | 270 | ap->ops->sff_check_status(ap); /* clear ATA interrupt */ |
290 | return; | 271 | return; |
291 | } | 272 | } |
292 | 273 | ||
293 | if (likely(ata_host_intr(ap, qc))) | 274 | if (likely(ata_sff_host_intr(ap, qc))) |
294 | return; | 275 | return; |
295 | 276 | ||
296 | ata_chk_status(ap); /* clear ATA interrupt */ | 277 | ap->ops->sff_check_status(ap); /* clear ATA interrupt */ |
297 | ata_port_printk(ap, KERN_WARNING, "unhandled " | 278 | ata_port_printk(ap, KERN_WARNING, "unhandled " |
298 | "interrupt, irq_stat=%x\n", irq_stat); | 279 | "interrupt, irq_stat=%x\n", irq_stat); |
299 | return; | 280 | return; |
@@ -370,12 +351,12 @@ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) | |||
370 | */ | 351 | */ |
371 | if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || | 352 | if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || |
372 | qc->tf.command == ATA_CMD_ID_ATAPI)) { | 353 | qc->tf.command == ATA_CMD_ID_ATAPI)) { |
373 | u8 stat = ata_chk_status(ap); | 354 | u8 stat = ap->ops->sff_check_status(ap); |
374 | if (stat == 0x7f || stat == 0xff) | 355 | if (stat == 0x7f || stat == 0xff) |
375 | return AC_ERR_HSM; | 356 | return AC_ERR_HSM; |
376 | } | 357 | } |
377 | 358 | ||
378 | return ata_qc_issue_prot(qc); | 359 | return ata_sff_qc_issue(qc); |
379 | } | 360 | } |
380 | 361 | ||
381 | static void inic_freeze(struct ata_port *ap) | 362 | static void inic_freeze(struct ata_port *ap) |
@@ -384,7 +365,7 @@ static void inic_freeze(struct ata_port *ap) | |||
384 | 365 | ||
385 | __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); | 366 | __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); |
386 | 367 | ||
387 | ata_chk_status(ap); | 368 | ap->ops->sff_check_status(ap); |
388 | writeb(0xff, port_base + PORT_IRQ_STAT); | 369 | writeb(0xff, port_base + PORT_IRQ_STAT); |
389 | 370 | ||
390 | readb(port_base + PORT_IRQ_STAT); /* flush */ | 371 | readb(port_base + PORT_IRQ_STAT); /* flush */ |
@@ -394,7 +375,7 @@ static void inic_thaw(struct ata_port *ap) | |||
394 | { | 375 | { |
395 | void __iomem *port_base = inic_port_base(ap); | 376 | void __iomem *port_base = inic_port_base(ap); |
396 | 377 | ||
397 | ata_chk_status(ap); | 378 | ap->ops->sff_check_status(ap); |
398 | writeb(0xff, port_base + PORT_IRQ_STAT); | 379 | writeb(0xff, port_base + PORT_IRQ_STAT); |
399 | 380 | ||
400 | __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); | 381 | __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); |
@@ -436,10 +417,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, | |||
436 | if (ata_link_online(link)) { | 417 | if (ata_link_online(link)) { |
437 | struct ata_taskfile tf; | 418 | struct ata_taskfile tf; |
438 | 419 | ||
439 | /* wait a while before checking status */ | 420 | /* wait for link to become ready */ |
440 | ata_wait_after_reset(ap, deadline); | 421 | rc = ata_sff_wait_after_reset(link, 1, deadline); |
441 | |||
442 | rc = ata_wait_ready(ap, deadline); | ||
443 | /* link occupied, -ENODEV too is an error */ | 422 | /* link occupied, -ENODEV too is an error */ |
444 | if (rc) { | 423 | if (rc) { |
445 | ata_link_printk(link, KERN_WARNING, "device not ready " | 424 | ata_link_printk(link, KERN_WARNING, "device not ready " |
@@ -447,10 +426,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, | |||
447 | return rc; | 426 | return rc; |
448 | } | 427 | } |
449 | 428 | ||
450 | ata_tf_read(ap, &tf); | 429 | ata_sff_tf_read(ap, &tf); |
451 | *class = ata_dev_classify(&tf); | 430 | *class = ata_dev_classify(&tf); |
452 | if (*class == ATA_DEV_UNKNOWN) | ||
453 | *class = ATA_DEV_NONE; | ||
454 | } | 431 | } |
455 | 432 | ||
456 | return 0; | 433 | return 0; |
@@ -471,8 +448,7 @@ static void inic_error_handler(struct ata_port *ap) | |||
471 | spin_unlock_irqrestore(ap->lock, flags); | 448 | spin_unlock_irqrestore(ap->lock, flags); |
472 | 449 | ||
473 | /* PIO and DMA engines have been stopped, perform recovery */ | 450 | /* PIO and DMA engines have been stopped, perform recovery */ |
474 | ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset, | 451 | ata_std_error_handler(ap); |
475 | ata_std_postreset); | ||
476 | } | 452 | } |
477 | 453 | ||
478 | static void inic_post_internal_cmd(struct ata_queued_cmd *qc) | 454 | static void inic_post_internal_cmd(struct ata_queued_cmd *qc) |
@@ -541,35 +517,26 @@ static int inic_port_start(struct ata_port *ap) | |||
541 | } | 517 | } |
542 | 518 | ||
543 | static struct ata_port_operations inic_port_ops = { | 519 | static struct ata_port_operations inic_port_ops = { |
544 | .tf_load = ata_tf_load, | 520 | .inherits = &ata_sff_port_ops, |
545 | .tf_read = ata_tf_read, | ||
546 | .check_status = ata_check_status, | ||
547 | .exec_command = ata_exec_command, | ||
548 | .dev_select = ata_std_dev_select, | ||
549 | |||
550 | .scr_read = inic_scr_read, | ||
551 | .scr_write = inic_scr_write, | ||
552 | 521 | ||
553 | .bmdma_setup = inic_bmdma_setup, | 522 | .bmdma_setup = inic_bmdma_setup, |
554 | .bmdma_start = inic_bmdma_start, | 523 | .bmdma_start = inic_bmdma_start, |
555 | .bmdma_stop = inic_bmdma_stop, | 524 | .bmdma_stop = inic_bmdma_stop, |
556 | .bmdma_status = inic_bmdma_status, | 525 | .bmdma_status = inic_bmdma_status, |
557 | |||
558 | .irq_clear = inic_irq_clear, | ||
559 | .irq_on = ata_irq_on, | ||
560 | |||
561 | .qc_prep = ata_qc_prep, | ||
562 | .qc_issue = inic_qc_issue, | 526 | .qc_issue = inic_qc_issue, |
563 | .data_xfer = ata_data_xfer, | ||
564 | 527 | ||
565 | .freeze = inic_freeze, | 528 | .freeze = inic_freeze, |
566 | .thaw = inic_thaw, | 529 | .thaw = inic_thaw, |
530 | .softreset = ATA_OP_NULL, /* softreset is broken */ | ||
531 | .hardreset = inic_hardreset, | ||
567 | .error_handler = inic_error_handler, | 532 | .error_handler = inic_error_handler, |
568 | .post_internal_cmd = inic_post_internal_cmd, | 533 | .post_internal_cmd = inic_post_internal_cmd, |
569 | .dev_config = inic_dev_config, | 534 | .dev_config = inic_dev_config, |
570 | 535 | ||
571 | .port_resume = inic_port_resume, | 536 | .scr_read = inic_scr_read, |
537 | .scr_write = inic_scr_write, | ||
572 | 538 | ||
539 | .port_resume = inic_port_resume, | ||
573 | .port_start = inic_port_start, | 540 | .port_start = inic_port_start, |
574 | }; | 541 | }; |
575 | 542 | ||
@@ -692,7 +659,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
692 | ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); | 659 | ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); |
693 | port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR; | 660 | port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR; |
694 | 661 | ||
695 | ata_std_ports(port); | 662 | ata_sff_std_ports(port); |
696 | 663 | ||
697 | ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio"); | 664 | ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio"); |
698 | ata_port_pbar_desc(ap, MMIO_BAR, offset, "port"); | 665 | ata_port_pbar_desc(ap, MMIO_BAR, offset, "port"); |
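As a hedged aside, the sata_inic162x hunks above express "softreset is broken" declaratively (ATA_OP_NULL) and let the standard error handler drive the remaining reset methods. A sketch of that ops layout, with a hypothetical foo_ops:

/* Hedged sketch, not part of the patch: foo_ops is hypothetical; ATA_OP_NULL,
 * sata_std_hardreset and ata_std_error_handler are the symbols used above. */
#include <linux/libata.h>

static struct ata_port_operations foo_ops = {
	.inherits	= &ata_sff_port_ops,
	.softreset	= ATA_OP_NULL,		/* never attempt SRST on this chip */
	.hardreset	= sata_std_hardreset,	/* or a controller-specific reset */
	.error_handler	= ata_std_error_handler,
};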
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 6ebebde8454a..05ff8c776497 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * sata_mv.c - Marvell SATA support | 2 | * sata_mv.c - Marvell SATA support |
3 | * | 3 | * |
4 | * Copyright 2008: Marvell Corporation, all rights reserved. | ||
4 | * Copyright 2005: EMC Corporation, all rights reserved. | 5 | * Copyright 2005: EMC Corporation, all rights reserved. |
5 | * Copyright 2005 Red Hat, Inc. All rights reserved. | 6 | * Copyright 2005 Red Hat, Inc. All rights reserved. |
6 | * | 7 | * |
@@ -39,7 +40,9 @@ | |||
39 | 40 | ||
40 | 5) Investigate problems with PCI Message Signalled Interrupts (MSI). | 41 | 5) Investigate problems with PCI Message Signalled Interrupts (MSI). |
41 | 42 | ||
42 | 6) Add port multiplier support (intermediate) | 43 | 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead. |
44 | |||
45 | 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above). | ||
43 | 46 | ||
44 | 8) Develop a low-power-consumption strategy, and implement it. | 47 | 8) Develop a low-power-consumption strategy, and implement it. |
45 | 48 | ||
@@ -61,7 +64,6 @@ | |||
61 | 64 | ||
62 | */ | 65 | */ |
63 | 66 | ||
64 | |||
65 | #include <linux/kernel.h> | 67 | #include <linux/kernel.h> |
66 | #include <linux/module.h> | 68 | #include <linux/module.h> |
67 | #include <linux/pci.h> | 69 | #include <linux/pci.h> |
@@ -131,7 +133,7 @@ enum { | |||
131 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 133 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
132 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 134 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
133 | /* SoC integrated controllers, no PCI interface */ | 135 | /* SoC integrated controllers, no PCI interface */ |
134 | MV_FLAG_SOC = (1 << 28), | 136 | MV_FLAG_SOC = (1 << 28), |
135 | 137 | ||
136 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 138 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
137 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | | 139 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
@@ -141,6 +143,7 @@ enum { | |||
141 | CRQB_FLAG_READ = (1 << 0), | 143 | CRQB_FLAG_READ = (1 << 0), |
142 | CRQB_TAG_SHIFT = 1, | 144 | CRQB_TAG_SHIFT = 1, |
143 | CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ | 145 | CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ |
146 | CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ | ||
144 | CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ | 147 | CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ |
145 | CRQB_CMD_ADDR_SHIFT = 8, | 148 | CRQB_CMD_ADDR_SHIFT = 8, |
146 | CRQB_CMD_CS = (0x2 << 11), | 149 | CRQB_CMD_CS = (0x2 << 11), |
@@ -199,7 +202,7 @@ enum { | |||
199 | TWSI_INT = (1 << 24), | 202 | TWSI_INT = (1 << 24), |
200 | HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ | 203 | HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ |
201 | HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ | 204 | HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ |
202 | HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ | 205 | HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ |
203 | HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | | 206 | HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | |
204 | PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | | 207 | PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | |
205 | HC_MAIN_RSVD), | 208 | HC_MAIN_RSVD), |
@@ -223,13 +226,24 @@ enum { | |||
223 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ | 226 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ |
224 | SATA_ACTIVE_OFS = 0x350, | 227 | SATA_ACTIVE_OFS = 0x350, |
225 | SATA_FIS_IRQ_CAUSE_OFS = 0x364, | 228 | SATA_FIS_IRQ_CAUSE_OFS = 0x364, |
229 | |||
230 | LTMODE_OFS = 0x30c, | ||
231 | LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ | ||
232 | |||
226 | PHY_MODE3 = 0x310, | 233 | PHY_MODE3 = 0x310, |
227 | PHY_MODE4 = 0x314, | 234 | PHY_MODE4 = 0x314, |
228 | PHY_MODE2 = 0x330, | 235 | PHY_MODE2 = 0x330, |
236 | SATA_IFCTL_OFS = 0x344, | ||
237 | SATA_IFSTAT_OFS = 0x34c, | ||
238 | VENDOR_UNIQUE_FIS_OFS = 0x35c, | ||
239 | |||
240 | FIS_CFG_OFS = 0x360, | ||
241 | FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ | ||
242 | |||
229 | MV5_PHY_MODE = 0x74, | 243 | MV5_PHY_MODE = 0x74, |
230 | MV5_LT_MODE = 0x30, | 244 | MV5_LT_MODE = 0x30, |
231 | MV5_PHY_CTL = 0x0C, | 245 | MV5_PHY_CTL = 0x0C, |
232 | SATA_INTERFACE_CTL = 0x050, | 246 | SATA_INTERFACE_CFG = 0x050, |
233 | 247 | ||
234 | MV_M2_PREAMP_MASK = 0x7e0, | 248 | MV_M2_PREAMP_MASK = 0x7e0, |
235 | 249 | ||
@@ -240,6 +254,8 @@ enum { | |||
240 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ | 254 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ |
241 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ | 255 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ |
242 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ | 256 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ |
257 | EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ | ||
258 | EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ | ||
243 | 259 | ||
244 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, | 260 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, |
245 | EDMA_ERR_IRQ_MASK_OFS = 0xc, | 261 | EDMA_ERR_IRQ_MASK_OFS = 0xc, |
@@ -282,7 +298,9 @@ enum { | |||
282 | EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | | 298 | EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | |
283 | EDMA_ERR_LNK_CTRL_RX_1 | | 299 | EDMA_ERR_LNK_CTRL_RX_1 | |
284 | EDMA_ERR_LNK_CTRL_RX_3 | | 300 | EDMA_ERR_LNK_CTRL_RX_3 | |
285 | EDMA_ERR_LNK_CTRL_TX, | 301 | EDMA_ERR_LNK_CTRL_TX | |
302 | /* temporary, until we fix hotplug: */ | ||
303 | (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON), | ||
286 | 304 | ||
287 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | | 305 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | |
288 | EDMA_ERR_PRD_PAR | | 306 | EDMA_ERR_PRD_PAR | |
@@ -298,6 +316,7 @@ enum { | |||
298 | EDMA_ERR_LNK_DATA_RX | | 316 | EDMA_ERR_LNK_DATA_RX | |
299 | EDMA_ERR_LNK_DATA_TX | | 317 | EDMA_ERR_LNK_DATA_TX | |
300 | EDMA_ERR_TRANS_PROTO, | 318 | EDMA_ERR_TRANS_PROTO, |
319 | |||
301 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | | 320 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | |
302 | EDMA_ERR_PRD_PAR | | 321 | EDMA_ERR_PRD_PAR | |
303 | EDMA_ERR_DEV_DCON | | 322 | EDMA_ERR_DEV_DCON | |
@@ -344,7 +363,6 @@ enum { | |||
344 | /* Port private flags (pp_flags) */ | 363 | /* Port private flags (pp_flags) */ |
345 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 364 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
346 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 365 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
347 | MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ | ||
348 | }; | 366 | }; |
349 | 367 | ||
350 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 368 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
@@ -461,7 +479,6 @@ struct mv_hw_ops { | |||
461 | void (*reset_bus)(struct ata_host *host, void __iomem *mmio); | 479 | void (*reset_bus)(struct ata_host *host, void __iomem *mmio); |
462 | }; | 480 | }; |
463 | 481 | ||
464 | static void mv_irq_clear(struct ata_port *ap); | ||
465 | static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); | 482 | static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); |
466 | static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 483 | static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
467 | static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); | 484 | static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); |
@@ -471,7 +488,8 @@ static void mv_port_stop(struct ata_port *ap); | |||
471 | static void mv_qc_prep(struct ata_queued_cmd *qc); | 488 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
472 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); | 489 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
473 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); | 490 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
474 | static void mv_error_handler(struct ata_port *ap); | 491 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
492 | unsigned long deadline); | ||
475 | static void mv_eh_freeze(struct ata_port *ap); | 493 | static void mv_eh_freeze(struct ata_port *ap); |
476 | static void mv_eh_thaw(struct ata_port *ap); | 494 | static void mv_eh_thaw(struct ata_port *ap); |
477 | static void mv6_dev_config(struct ata_device *dev); | 495 | static void mv6_dev_config(struct ata_device *dev); |
@@ -504,72 +522,46 @@ static void mv_soc_reset_flash(struct mv_host_priv *hpriv, | |||
504 | void __iomem *mmio); | 522 | void __iomem *mmio); |
505 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); | 523 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); |
506 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); | 524 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); |
507 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | 525 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
508 | unsigned int port_no); | 526 | unsigned int port_no); |
509 | static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, | 527 | static int mv_stop_edma(struct ata_port *ap); |
510 | void __iomem *port_mmio, int want_ncq); | 528 | static int mv_stop_edma_engine(void __iomem *port_mmio); |
511 | static int __mv_stop_dma(struct ata_port *ap); | 529 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq); |
530 | |||
531 | static void mv_pmp_select(struct ata_port *ap, int pmp); | ||
532 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | ||
533 | unsigned long deadline); | ||
534 | static int mv_softreset(struct ata_link *link, unsigned int *class, | ||
535 | unsigned long deadline); | ||
512 | 536 | ||
513 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 537 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
514 | * because we have to allow room for worst case splitting of | 538 | * because we have to allow room for worst case splitting of |
515 | * PRDs for 64K boundaries in mv_fill_sg(). | 539 | * PRDs for 64K boundaries in mv_fill_sg(). |
516 | */ | 540 | */ |
517 | static struct scsi_host_template mv5_sht = { | 541 | static struct scsi_host_template mv5_sht = { |
518 | .module = THIS_MODULE, | 542 | ATA_BASE_SHT(DRV_NAME), |
519 | .name = DRV_NAME, | ||
520 | .ioctl = ata_scsi_ioctl, | ||
521 | .queuecommand = ata_scsi_queuecmd, | ||
522 | .can_queue = ATA_DEF_QUEUE, | ||
523 | .this_id = ATA_SHT_THIS_ID, | ||
524 | .sg_tablesize = MV_MAX_SG_CT / 2, | 543 | .sg_tablesize = MV_MAX_SG_CT / 2, |
525 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
526 | .emulated = ATA_SHT_EMULATED, | ||
527 | .use_clustering = 1, | ||
528 | .proc_name = DRV_NAME, | ||
529 | .dma_boundary = MV_DMA_BOUNDARY, | 544 | .dma_boundary = MV_DMA_BOUNDARY, |
530 | .slave_configure = ata_scsi_slave_config, | ||
531 | .slave_destroy = ata_scsi_slave_destroy, | ||
532 | .bios_param = ata_std_bios_param, | ||
533 | }; | 545 | }; |
534 | 546 | ||
535 | static struct scsi_host_template mv6_sht = { | 547 | static struct scsi_host_template mv6_sht = { |
536 | .module = THIS_MODULE, | 548 | ATA_NCQ_SHT(DRV_NAME), |
537 | .name = DRV_NAME, | ||
538 | .ioctl = ata_scsi_ioctl, | ||
539 | .queuecommand = ata_scsi_queuecmd, | ||
540 | .change_queue_depth = ata_scsi_change_queue_depth, | ||
541 | .can_queue = MV_MAX_Q_DEPTH - 1, | 549 | .can_queue = MV_MAX_Q_DEPTH - 1, |
542 | .this_id = ATA_SHT_THIS_ID, | ||
543 | .sg_tablesize = MV_MAX_SG_CT / 2, | 550 | .sg_tablesize = MV_MAX_SG_CT / 2, |
544 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
545 | .emulated = ATA_SHT_EMULATED, | ||
546 | .use_clustering = 1, | ||
547 | .proc_name = DRV_NAME, | ||
548 | .dma_boundary = MV_DMA_BOUNDARY, | 551 | .dma_boundary = MV_DMA_BOUNDARY, |
549 | .slave_configure = ata_scsi_slave_config, | ||
550 | .slave_destroy = ata_scsi_slave_destroy, | ||
551 | .bios_param = ata_std_bios_param, | ||
552 | }; | 552 | }; |
553 | 553 | ||
554 | static const struct ata_port_operations mv5_ops = { | 554 | static struct ata_port_operations mv5_ops = { |
555 | .tf_load = ata_tf_load, | 555 | .inherits = &ata_sff_port_ops, |
556 | .tf_read = ata_tf_read, | ||
557 | .check_status = ata_check_status, | ||
558 | .exec_command = ata_exec_command, | ||
559 | .dev_select = ata_std_dev_select, | ||
560 | |||
561 | .cable_detect = ata_cable_sata, | ||
562 | 556 | ||
563 | .qc_prep = mv_qc_prep, | 557 | .qc_prep = mv_qc_prep, |
564 | .qc_issue = mv_qc_issue, | 558 | .qc_issue = mv_qc_issue, |
565 | .data_xfer = ata_data_xfer, | ||
566 | |||
567 | .irq_clear = mv_irq_clear, | ||
568 | .irq_on = ata_irq_on, | ||
569 | 559 | ||
570 | .error_handler = mv_error_handler, | ||
571 | .freeze = mv_eh_freeze, | 560 | .freeze = mv_eh_freeze, |
572 | .thaw = mv_eh_thaw, | 561 | .thaw = mv_eh_thaw, |
562 | .hardreset = mv_hardreset, | ||
563 | .error_handler = ata_std_error_handler, /* avoid SFF EH */ | ||
564 | .post_internal_cmd = ATA_OP_NULL, | ||
573 | 565 | ||
574 | .scr_read = mv5_scr_read, | 566 | .scr_read = mv5_scr_read, |
575 | .scr_write = mv5_scr_write, | 567 | .scr_write = mv5_scr_write, |
@@ -578,61 +570,24 @@ static const struct ata_port_operations mv5_ops = { | |||
578 | .port_stop = mv_port_stop, | 570 | .port_stop = mv_port_stop, |
579 | }; | 571 | }; |
580 | 572 | ||
581 | static const struct ata_port_operations mv6_ops = { | 573 | static struct ata_port_operations mv6_ops = { |
574 | .inherits = &mv5_ops, | ||
575 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
582 | .dev_config = mv6_dev_config, | 576 | .dev_config = mv6_dev_config, |
583 | .tf_load = ata_tf_load, | ||
584 | .tf_read = ata_tf_read, | ||
585 | .check_status = ata_check_status, | ||
586 | .exec_command = ata_exec_command, | ||
587 | .dev_select = ata_std_dev_select, | ||
588 | |||
589 | .cable_detect = ata_cable_sata, | ||
590 | |||
591 | .qc_prep = mv_qc_prep, | ||
592 | .qc_issue = mv_qc_issue, | ||
593 | .data_xfer = ata_data_xfer, | ||
594 | |||
595 | .irq_clear = mv_irq_clear, | ||
596 | .irq_on = ata_irq_on, | ||
597 | |||
598 | .error_handler = mv_error_handler, | ||
599 | .freeze = mv_eh_freeze, | ||
600 | .thaw = mv_eh_thaw, | ||
601 | .qc_defer = ata_std_qc_defer, | ||
602 | |||
603 | .scr_read = mv_scr_read, | 577 | .scr_read = mv_scr_read, |
604 | .scr_write = mv_scr_write, | 578 | .scr_write = mv_scr_write, |
605 | 579 | ||
606 | .port_start = mv_port_start, | 580 | .pmp_hardreset = mv_pmp_hardreset, |
607 | .port_stop = mv_port_stop, | 581 | .pmp_softreset = mv_softreset, |
582 | .softreset = mv_softreset, | ||
583 | .error_handler = sata_pmp_error_handler, | ||
608 | }; | 584 | }; |
609 | 585 | ||
610 | static const struct ata_port_operations mv_iie_ops = { | 586 | static struct ata_port_operations mv_iie_ops = { |
611 | .tf_load = ata_tf_load, | 587 | .inherits = &mv6_ops, |
612 | .tf_read = ata_tf_read, | 588 | .qc_defer = ata_std_qc_defer, /* FIS-based switching */ |
613 | .check_status = ata_check_status, | 589 | .dev_config = ATA_OP_NULL, |
614 | .exec_command = ata_exec_command, | ||
615 | .dev_select = ata_std_dev_select, | ||
616 | |||
617 | .cable_detect = ata_cable_sata, | ||
618 | |||
619 | .qc_prep = mv_qc_prep_iie, | 590 | .qc_prep = mv_qc_prep_iie, |
620 | .qc_issue = mv_qc_issue, | ||
621 | .data_xfer = ata_data_xfer, | ||
622 | |||
623 | .irq_clear = mv_irq_clear, | ||
624 | .irq_on = ata_irq_on, | ||
625 | |||
626 | .error_handler = mv_error_handler, | ||
627 | .freeze = mv_eh_freeze, | ||
628 | .thaw = mv_eh_thaw, | ||
629 | .qc_defer = ata_std_qc_defer, | ||
630 | |||
631 | .scr_read = mv_scr_read, | ||
632 | .scr_write = mv_scr_write, | ||
633 | |||
634 | .port_start = mv_port_start, | ||
635 | .port_stop = mv_port_stop, | ||
636 | }; | 591 | }; |
637 | 592 | ||
638 | static const struct ata_port_info mv_port_info[] = { | 593 | static const struct ata_port_info mv_port_info[] = { |
@@ -656,6 +611,7 @@ static const struct ata_port_info mv_port_info[] = { | |||
656 | }, | 611 | }, |
657 | { /* chip_604x */ | 612 | { /* chip_604x */ |
658 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 613 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
614 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
659 | ATA_FLAG_NCQ, | 615 | ATA_FLAG_NCQ, |
660 | .pio_mask = 0x1f, /* pio0-4 */ | 616 | .pio_mask = 0x1f, /* pio0-4 */ |
661 | .udma_mask = ATA_UDMA6, | 617 | .udma_mask = ATA_UDMA6, |
@@ -663,6 +619,7 @@ static const struct ata_port_info mv_port_info[] = { | |||
663 | }, | 619 | }, |
664 | { /* chip_608x */ | 620 | { /* chip_608x */ |
665 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 621 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
622 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
666 | ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, | 623 | ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, |
667 | .pio_mask = 0x1f, /* pio0-4 */ | 624 | .pio_mask = 0x1f, /* pio0-4 */ |
668 | .udma_mask = ATA_UDMA6, | 625 | .udma_mask = ATA_UDMA6, |
@@ -670,6 +627,7 @@ static const struct ata_port_info mv_port_info[] = { | |||
670 | }, | 627 | }, |
671 | { /* chip_6042 */ | 628 | { /* chip_6042 */ |
672 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 629 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
630 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
673 | ATA_FLAG_NCQ, | 631 | ATA_FLAG_NCQ, |
674 | .pio_mask = 0x1f, /* pio0-4 */ | 632 | .pio_mask = 0x1f, /* pio0-4 */ |
675 | .udma_mask = ATA_UDMA6, | 633 | .udma_mask = ATA_UDMA6, |
@@ -677,16 +635,19 @@ static const struct ata_port_info mv_port_info[] = { | |||
677 | }, | 635 | }, |
678 | { /* chip_7042 */ | 636 | { /* chip_7042 */ |
679 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 637 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
638 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
680 | ATA_FLAG_NCQ, | 639 | ATA_FLAG_NCQ, |
681 | .pio_mask = 0x1f, /* pio0-4 */ | 640 | .pio_mask = 0x1f, /* pio0-4 */ |
682 | .udma_mask = ATA_UDMA6, | 641 | .udma_mask = ATA_UDMA6, |
683 | .port_ops = &mv_iie_ops, | 642 | .port_ops = &mv_iie_ops, |
684 | }, | 643 | }, |
685 | { /* chip_soc */ | 644 | { /* chip_soc */ |
686 | .flags = MV_COMMON_FLAGS | MV_FLAG_SOC, | 645 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
687 | .pio_mask = 0x1f, /* pio0-4 */ | 646 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | |
688 | .udma_mask = ATA_UDMA6, | 647 | ATA_FLAG_NCQ | MV_FLAG_SOC, |
689 | .port_ops = &mv_iie_ops, | 648 | .pio_mask = 0x1f, /* pio0-4 */ |
649 | .udma_mask = ATA_UDMA6, | ||
650 | .port_ops = &mv_iie_ops, | ||
690 | }, | 651 | }, |
691 | }; | 652 | }; |
692 | 653 | ||
@@ -785,6 +746,14 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) | |||
785 | (mv_hardport_from_port(port) * MV_PORT_REG_SZ); | 746 | (mv_hardport_from_port(port) * MV_PORT_REG_SZ); |
786 | } | 747 | } |
787 | 748 | ||
749 | static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) | ||
750 | { | ||
751 | void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); | ||
752 | unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; | ||
753 | |||
754 | return hc_mmio + ofs; | ||
755 | } | ||
756 | |||
788 | static inline void __iomem *mv_host_base(struct ata_host *host) | 757 | static inline void __iomem *mv_host_base(struct ata_host *host) |
789 | { | 758 | { |
790 | struct mv_host_priv *hpriv = host->private_data; | 759 | struct mv_host_priv *hpriv = host->private_data; |
@@ -801,10 +770,6 @@ static inline int mv_get_hc_count(unsigned long port_flags) | |||
801 | return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); | 770 | return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); |
802 | } | 771 | } |
803 | 772 | ||
804 | static void mv_irq_clear(struct ata_port *ap) | ||
805 | { | ||
806 | } | ||
807 | |||
808 | static void mv_set_edma_ptrs(void __iomem *port_mmio, | 773 | static void mv_set_edma_ptrs(void __iomem *port_mmio, |
809 | struct mv_host_priv *hpriv, | 774 | struct mv_host_priv *hpriv, |
810 | struct mv_port_priv *pp) | 775 | struct mv_port_priv *pp) |
@@ -864,7 +829,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
864 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 829 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
865 | int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); | 830 | int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); |
866 | if (want_ncq != using_ncq) | 831 | if (want_ncq != using_ncq) |
867 | __mv_stop_dma(ap); | 832 | mv_stop_edma(ap); |
868 | } | 833 | } |
869 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | 834 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
870 | struct mv_host_priv *hpriv = ap->host->private_data; | 835 | struct mv_host_priv *hpriv = ap->host->private_data; |
@@ -885,7 +850,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
885 | hc_mmio + HC_IRQ_CAUSE_OFS); | 850 | hc_mmio + HC_IRQ_CAUSE_OFS); |
886 | } | 851 | } |
887 | 852 | ||
888 | mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); | 853 | mv_edma_cfg(ap, want_ncq); |
889 | 854 | ||
890 | /* clear FIS IRQ Cause */ | 855 | /* clear FIS IRQ Cause */ |
891 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | 856 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); |
@@ -899,58 +864,42 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
899 | } | 864 | } |
900 | 865 | ||
901 | /** | 866 | /** |
902 | * __mv_stop_dma - Disable eDMA engine | 867 | * mv_stop_edma_engine - Disable eDMA engine |
903 | * @ap: ATA channel to manipulate | 868 | * @port_mmio: io base address |
904 | * | ||
905 | * Verify the local cache of the eDMA state is accurate with a | ||
906 | * WARN_ON. | ||
907 | * | 869 | * |
908 | * LOCKING: | 870 | * LOCKING: |
909 | * Inherited from caller. | 871 | * Inherited from caller. |
910 | */ | 872 | */ |
911 | static int __mv_stop_dma(struct ata_port *ap) | 873 | static int mv_stop_edma_engine(void __iomem *port_mmio) |
912 | { | 874 | { |
913 | void __iomem *port_mmio = mv_ap_base(ap); | 875 | int i; |
914 | struct mv_port_priv *pp = ap->private_data; | ||
915 | u32 reg; | ||
916 | int i, err = 0; | ||
917 | 876 | ||
918 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 877 | /* Disable eDMA. The disable bit auto clears. */ |
919 | /* Disable EDMA if active. The disable bit auto clears. | 878 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); |
920 | */ | ||
921 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | ||
922 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
923 | } else { | ||
924 | WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); | ||
925 | } | ||
926 | 879 | ||
927 | /* now properly wait for the eDMA to stop */ | 880 | /* Wait for the chip to confirm eDMA is off. */ |
928 | for (i = 1000; i > 0; i--) { | 881 | for (i = 10000; i > 0; i--) { |
929 | reg = readl(port_mmio + EDMA_CMD_OFS); | 882 | u32 reg = readl(port_mmio + EDMA_CMD_OFS); |
930 | if (!(reg & EDMA_EN)) | 883 | if (!(reg & EDMA_EN)) |
931 | break; | 884 | return 0; |
932 | 885 | udelay(10); | |
933 | udelay(100); | ||
934 | } | ||
935 | |||
936 | if (reg & EDMA_EN) { | ||
937 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | ||
938 | err = -EIO; | ||
939 | } | 886 | } |
940 | 887 | return -EIO; | |
941 | return err; | ||
942 | } | 888 | } |
943 | 889 | ||
944 | static int mv_stop_dma(struct ata_port *ap) | 890 | static int mv_stop_edma(struct ata_port *ap) |
945 | { | 891 | { |
946 | unsigned long flags; | 892 | void __iomem *port_mmio = mv_ap_base(ap); |
947 | int rc; | 893 | struct mv_port_priv *pp = ap->private_data; |
948 | |||
949 | spin_lock_irqsave(&ap->host->lock, flags); | ||
950 | rc = __mv_stop_dma(ap); | ||
951 | spin_unlock_irqrestore(&ap->host->lock, flags); | ||
952 | 894 | ||
953 | return rc; | 895 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
896 | return 0; | ||
897 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
898 | if (mv_stop_edma_engine(port_mmio)) { | ||
899 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | ||
900 | return -EIO; | ||
901 | } | ||
902 | return 0; | ||
954 | } | 903 | } |
955 | 904 | ||
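The rewritten stop path is a plain poll-with-timeout: write the disable bit once, then re-read EDMA_CMD until EDMA_EN drops. The worst-case wait is 10000 iterations of 10 us, about 100 ms, the same budget as the old 1000 x 100 us loop but sampled ten times more often. A minimal userspace model of the pattern (the register is mocked; the names are illustrative, not taken from the driver):

    #include <stdio.h>

    #define DEMO_EDMA_EN  (1u << 0)      /* assumed bit position, for this model only */

    static unsigned int demo_edma_cmd = DEMO_EDMA_EN;   /* stands in for readl() */

    static int poll_edma_off(void)
    {
            int i;

            for (i = 10000; i > 0; i--) {
                    if (!(demo_edma_cmd & DEMO_EDMA_EN))
                            return 0;                    /* engine confirmed idle */
                    /* the driver does udelay(10) here */
                    if (i == 9990)
                            demo_edma_cmd &= ~DEMO_EDMA_EN;  /* pretend the chip stops */
            }
            return -1;                                   /* timeout: -EIO in the driver */
    }

    int main(void)
    {
            printf("poll result: %d\n", poll_edma_off());
            return 0;
    }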
956 | #ifdef ATA_DEBUG | 905 | #ifdef ATA_DEBUG |
@@ -1074,18 +1023,50 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | |||
1074 | static void mv6_dev_config(struct ata_device *adev) | 1023 | static void mv6_dev_config(struct ata_device *adev) |
1075 | { | 1024 | { |
1076 | /* | 1025 | /* |
1026 | * Deal with Gen-II ("mv6") hardware quirks/restrictions: | ||
1027 | * | ||
1028 | * Gen-II does not support NCQ over a port multiplier | ||
1029 | * (no FIS-based switching). | ||
1030 | * | ||
1077 | * We don't have hob_nsect when doing NCQ commands on Gen-II. | 1031 | * We don't have hob_nsect when doing NCQ commands on Gen-II. |
1078 | * See mv_qc_prep() for more info. | 1032 | * See mv_qc_prep() for more info. |
1079 | */ | 1033 | */ |
1080 | if (adev->flags & ATA_DFLAG_NCQ) | 1034 | if (adev->flags & ATA_DFLAG_NCQ) { |
1081 | if (adev->max_sectors > ATA_MAX_SECTORS) | 1035 | if (sata_pmp_attached(adev->link->ap)) |
1036 | adev->flags &= ~ATA_DFLAG_NCQ; | ||
1037 | else if (adev->max_sectors > ATA_MAX_SECTORS) | ||
1082 | adev->max_sectors = ATA_MAX_SECTORS; | 1038 | adev->max_sectors = ATA_MAX_SECTORS; |
1039 | } | ||
1083 | } | 1040 | } |
1084 | 1041 | ||
1085 | static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, | 1042 | static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) |
1086 | void __iomem *port_mmio, int want_ncq) | 1043 | { |
1044 | u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; | ||
1045 | /* | ||
1046 | * Various bit settings required for operation | ||
1047 | * in FIS-based switching (fbs) mode on GenIIe: | ||
1048 | */ | ||
1049 | old_fcfg = readl(port_mmio + FIS_CFG_OFS); | ||
1050 | old_ltmode = readl(port_mmio + LTMODE_OFS); | ||
1051 | if (enable_fbs) { | ||
1052 | new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; | ||
1053 | new_ltmode = old_ltmode | LTMODE_BIT8; | ||
1054 | } else { /* disable fbs */ | ||
1055 | new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; | ||
1056 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | ||
1057 | } | ||
1058 | if (new_fcfg != old_fcfg) | ||
1059 | writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); | ||
1060 | if (new_ltmode != old_ltmode) | ||
1061 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); | ||
1062 | } | ||
1063 | |||
1064 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | ||
1087 | { | 1065 | { |
1088 | u32 cfg; | 1066 | u32 cfg; |
1067 | struct mv_port_priv *pp = ap->private_data; | ||
1068 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1069 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1089 | 1070 | ||
1090 | /* set up non-NCQ EDMA configuration */ | 1071 | /* set up non-NCQ EDMA configuration */ |
1091 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ | 1072 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
@@ -1101,6 +1082,13 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, | |||
1101 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | 1082 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ |
1102 | cfg |= (1 << 18); /* enab early completion */ | 1083 | cfg |= (1 << 18); /* enab early completion */ |
1103 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ | 1084 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ |
1085 | |||
1086 | if (want_ncq && sata_pmp_attached(ap)) { | ||
1087 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ | ||
1088 | mv_config_fbs(port_mmio, 1); | ||
1089 | } else { | ||
1090 | mv_config_fbs(port_mmio, 0); | ||
1091 | } | ||
1104 | } | 1092 | } |
1105 | 1093 | ||
1106 | if (want_ncq) { | 1094 | if (want_ncq) { |
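Put another way, FIS-based switching is wanted only when both conditions hold: the command stream is NCQ and a port multiplier is attached; every other combination explicitly disables it so stale FIS_CFG/LTMODE bits cannot survive a mode change. Note also that mv_config_fbs() writes a register back only when its value actually changed, a common way to avoid redundant flushing MMIO writes. A self-contained sketch of that write-only-if-changed idiom, with made-up names:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_reg = 0x10;     /* stands in for readl(port_mmio + ...) */

    /* Set or clear one bit, writing back only on a real change. */
    static void update_bit(uint32_t bit, int enable)
    {
            uint32_t old = demo_reg;
            uint32_t new = enable ? (old | bit) : (old & ~bit);

            if (new != old) {
                    demo_reg = new;      /* writelfl() in the driver */
                    printf("wrote 0x%08x\n", new);
            }
    }

    int main(void)
    {
            update_bit(1u << 8, 1);      /* value changes: write happens */
            update_bit(1u << 8, 1);      /* already set:   write skipped */
            return 0;
    }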
@@ -1156,8 +1144,6 @@ static int mv_port_start(struct ata_port *ap) | |||
1156 | struct device *dev = ap->host->dev; | 1144 | struct device *dev = ap->host->dev; |
1157 | struct mv_host_priv *hpriv = ap->host->private_data; | 1145 | struct mv_host_priv *hpriv = ap->host->private_data; |
1158 | struct mv_port_priv *pp; | 1146 | struct mv_port_priv *pp; |
1159 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1160 | unsigned long flags; | ||
1161 | int tag; | 1147 | int tag; |
1162 | 1148 | ||
1163 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); | 1149 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); |
@@ -1190,18 +1176,6 @@ static int mv_port_start(struct ata_port *ap) | |||
1190 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; | 1176 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; |
1191 | } | 1177 | } |
1192 | } | 1178 | } |
1193 | |||
1194 | spin_lock_irqsave(&ap->host->lock, flags); | ||
1195 | |||
1196 | mv_edma_cfg(pp, hpriv, port_mmio, 0); | ||
1197 | mv_set_edma_ptrs(port_mmio, hpriv, pp); | ||
1198 | |||
1199 | spin_unlock_irqrestore(&ap->host->lock, flags); | ||
1200 | |||
1201 | /* Don't turn on EDMA here...do it before DMA commands only. Else | ||
1202 | * we'll be unable to send non-data, PIO, etc due to restricted access | ||
1203 | * to shadow regs. | ||
1204 | */ | ||
1205 | return 0; | 1179 | return 0; |
1206 | 1180 | ||
1207 | out_port_free_dma_mem: | 1181 | out_port_free_dma_mem: |
@@ -1220,7 +1194,7 @@ out_port_free_dma_mem: | |||
1220 | */ | 1194 | */ |
1221 | static void mv_port_stop(struct ata_port *ap) | 1195 | static void mv_port_stop(struct ata_port *ap) |
1222 | { | 1196 | { |
1223 | mv_stop_dma(ap); | 1197 | mv_stop_edma(ap); |
1224 | mv_port_free_dma_mem(ap); | 1198 | mv_port_free_dma_mem(ap); |
1225 | } | 1199 | } |
1226 | 1200 | ||
@@ -1306,6 +1280,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1306 | flags |= CRQB_FLAG_READ; | 1280 | flags |= CRQB_FLAG_READ; |
1307 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1281 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1308 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1282 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1283 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; | ||
1309 | 1284 | ||
1310 | /* get current queue index from software */ | 1285 | /* get current queue index from software */ |
1311 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; | 1286 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
@@ -1390,14 +1365,14 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1390 | (qc->tf.protocol != ATA_PROT_NCQ)) | 1365 | (qc->tf.protocol != ATA_PROT_NCQ)) |
1391 | return; | 1366 | return; |
1392 | 1367 | ||
1393 | /* Fill in Gen IIE command request block | 1368 | /* Fill in Gen IIE command request block */ |
1394 | */ | ||
1395 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1369 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
1396 | flags |= CRQB_FLAG_READ; | 1370 | flags |= CRQB_FLAG_READ; |
1397 | 1371 | ||
1398 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1372 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1399 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1373 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1400 | flags |= qc->tag << CRQB_HOSTQ_SHIFT; | 1374 | flags |= qc->tag << CRQB_HOSTQ_SHIFT; |
1375 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; | ||
1401 | 1376 | ||
1402 | /* get current queue index from software */ | 1377 | /* get current queue index from software */ |
1403 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; | 1378 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
@@ -1455,12 +1430,14 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1455 | 1430 | ||
1456 | if ((qc->tf.protocol != ATA_PROT_DMA) && | 1431 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1457 | (qc->tf.protocol != ATA_PROT_NCQ)) { | 1432 | (qc->tf.protocol != ATA_PROT_NCQ)) { |
1458 | /* We're about to send a non-EDMA capable command to the | 1433 | /* |
1434 | * We're about to send a non-EDMA capable command to the | ||
1459 | * port. Turn off EDMA so there won't be problems accessing | 1435 | * port. Turn off EDMA so there won't be problems accessing |
1460 | * shadow block, etc registers. | 1436 | * shadow block, etc registers. |
1461 | */ | 1437 | */ |
1462 | __mv_stop_dma(ap); | 1438 | mv_stop_edma(ap); |
1463 | return ata_qc_issue_prot(qc); | 1439 | mv_pmp_select(ap, qc->dev->link->pmp); |
1440 | return ata_sff_qc_issue(qc); | ||
1464 | } | 1441 | } |
1465 | 1442 | ||
1466 | mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); | 1443 | mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); |
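The issue path now splits purely on protocol: ATA_PROT_DMA and ATA_PROT_NCQ commands stay on the EDMA queue via mv_start_dma(), while everything else (PIO, non-data, ATAPI) first shuts EDMA down and selects the right PMP target before falling through to the common SFF issue routine, because the shadow registers are only reachable with EDMA off. The split as a toy classifier (the enum values are stand-ins, not the libata protocol constants):

    #include <stdio.h>

    enum demo_prot { DEMO_PROT_PIO, DEMO_PROT_NODATA, DEMO_PROT_DMA, DEMO_PROT_NCQ };

    /* 1 = stays on the EDMA queue, 0 = takes the EDMA-off SFF path */
    static int uses_edma(enum demo_prot p)
    {
            return p == DEMO_PROT_DMA || p == DEMO_PROT_NCQ;
    }

    int main(void)
    {
            printf("PIO via EDMA? %d\n", uses_edma(DEMO_PROT_PIO));   /* 0 */
            printf("NCQ via EDMA? %d\n", uses_edma(DEMO_PROT_NCQ));   /* 1 */
            return 0;
    }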
@@ -1482,10 +1459,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1482 | * @reset_allowed: bool: 0 == don't trigger from reset here | 1459 | * @reset_allowed: bool: 0 == don't trigger from reset here |
1483 | * | 1460 | * |
1484 | * In most cases, just clear the interrupt and move on. However, | 1461 | * In most cases, just clear the interrupt and move on. However, |
1485 | * some cases require an eDMA reset, which is done right before | 1462 | * some cases require an eDMA reset, which also performs a COMRESET. |
1486 | * the COMRESET in mv_phy_reset(). The SERR case requires a | 1463 | * The SERR case requires a clear of pending errors in the SATA |
1487 | * clear of pending errors in the SATA SERROR register. Finally, | 1464 | * SERROR register. Finally, if the port disabled DMA, |
1488 | * if the port disabled DMA, update our cached copy to match. | 1465 | * update our cached copy to match. |
1489 | * | 1466 | * |
1490 | * LOCKING: | 1467 | * LOCKING: |
1491 | * Inherited from caller. | 1468 | * Inherited from caller. |
@@ -1524,14 +1501,14 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1524 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | | 1501 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
1525 | EDMA_ERR_INTRL_PAR)) { | 1502 | EDMA_ERR_INTRL_PAR)) { |
1526 | err_mask |= AC_ERR_ATA_BUS; | 1503 | err_mask |= AC_ERR_ATA_BUS; |
1527 | action |= ATA_EH_HARDRESET; | 1504 | action |= ATA_EH_RESET; |
1528 | ata_ehi_push_desc(ehi, "parity error"); | 1505 | ata_ehi_push_desc(ehi, "parity error"); |
1529 | } | 1506 | } |
1530 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | 1507 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { |
1531 | ata_ehi_hotplugged(ehi); | 1508 | ata_ehi_hotplugged(ehi); |
1532 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | 1509 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? |
1533 | "dev disconnect" : "dev connect"); | 1510 | "dev disconnect" : "dev connect"); |
1534 | action |= ATA_EH_HARDRESET; | 1511 | action |= ATA_EH_RESET; |
1535 | } | 1512 | } |
1536 | 1513 | ||
1537 | if (IS_GEN_I(hpriv)) { | 1514 | if (IS_GEN_I(hpriv)) { |
@@ -1555,7 +1532,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1555 | sata_scr_read(&ap->link, SCR_ERROR, &serr); | 1532 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
1556 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | 1533 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); |
1557 | err_mask = AC_ERR_ATA_BUS; | 1534 | err_mask = AC_ERR_ATA_BUS; |
1558 | action |= ATA_EH_HARDRESET; | 1535 | action |= ATA_EH_RESET; |
1559 | } | 1536 | } |
1560 | } | 1537 | } |
1561 | 1538 | ||
@@ -1564,7 +1541,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1564 | 1541 | ||
1565 | if (!err_mask) { | 1542 | if (!err_mask) { |
1566 | err_mask = AC_ERR_OTHER; | 1543 | err_mask = AC_ERR_OTHER; |
1567 | action |= ATA_EH_HARDRESET; | 1544 | action |= ATA_EH_RESET; |
1568 | } | 1545 | } |
1569 | 1546 | ||
1570 | ehi->serror |= serr; | 1547 | ehi->serror |= serr; |
@@ -1723,9 +1700,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1723 | pp = ap->private_data; | 1700 | pp = ap->private_data; |
1724 | 1701 | ||
1725 | shift = port << 1; /* (port * 2) */ | 1702 | shift = port << 1; /* (port * 2) */ |
1726 | if (port >= MV_PORTS_PER_HC) { | 1703 | if (port >= MV_PORTS_PER_HC) |
1727 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1704 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
1728 | } | 1705 | |
1729 | have_err_bits = ((PORT0_ERR << shift) & relevant); | 1706 | have_err_bits = ((PORT0_ERR << shift) & relevant); |
1730 | 1707 | ||
1731 | if (unlikely(have_err_bits)) { | 1708 | if (unlikely(have_err_bits)) { |
@@ -1780,7 +1757,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio) | |||
1780 | ata_ehi_push_desc(ehi, | 1757 | ata_ehi_push_desc(ehi, |
1781 | "PCI err cause 0x%08x", err_cause); | 1758 | "PCI err cause 0x%08x", err_cause); |
1782 | err_mask = AC_ERR_HOST_BUS; | 1759 | err_mask = AC_ERR_HOST_BUS; |
1783 | ehi->action = ATA_EH_HARDRESET; | 1760 | ehi->action = ATA_EH_RESET; |
1784 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1761 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1785 | if (qc) | 1762 | if (qc) |
1786 | qc->err_mask |= err_mask; | 1763 | qc->err_mask |= err_mask; |
@@ -1814,6 +1791,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
1814 | void __iomem *mmio = hpriv->base; | 1791 | void __iomem *mmio = hpriv->base; |
1815 | u32 irq_stat, irq_mask; | 1792 | u32 irq_stat, irq_mask; |
1816 | 1793 | ||
1794 | /* Note to self: &host->lock == &ap->host->lock == ap->lock */ | ||
1817 | spin_lock(&host->lock); | 1795 | spin_lock(&host->lock); |
1818 | 1796 | ||
1819 | irq_stat = readl(hpriv->main_cause_reg_addr); | 1797 | irq_stat = readl(hpriv->main_cause_reg_addr); |
@@ -1847,14 +1825,6 @@ out_unlock: | |||
1847 | return IRQ_RETVAL(handled); | 1825 | return IRQ_RETVAL(handled); |
1848 | } | 1826 | } |
1849 | 1827 | ||
1850 | static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) | ||
1851 | { | ||
1852 | void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); | ||
1853 | unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; | ||
1854 | |||
1855 | return hc_mmio + ofs; | ||
1856 | } | ||
1857 | |||
1858 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) | 1828 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) |
1859 | { | 1829 | { |
1860 | unsigned int ofs; | 1830 | unsigned int ofs; |
@@ -1980,9 +1950,12 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1980 | { | 1950 | { |
1981 | void __iomem *port_mmio = mv_port_base(mmio, port); | 1951 | void __iomem *port_mmio = mv_port_base(mmio, port); |
1982 | 1952 | ||
1983 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | 1953 | /* |
1984 | 1954 | * The datasheet warns against setting ATA_RST when EDMA is active | |
1985 | mv_channel_reset(hpriv, mmio, port); | 1955 | * (but doesn't say what the problem might be). So we first try |
1956 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
1957 | */ | ||
1958 | mv_reset_channel(hpriv, mmio, port); | ||
1986 | 1959 | ||
1987 | ZERO(0x028); /* command */ | 1960 | ZERO(0x028); /* command */ |
1988 | writel(0x11f, port_mmio + EDMA_CFG_OFS); | 1961 | writel(0x11f, port_mmio + EDMA_CFG_OFS); |
@@ -2132,6 +2105,13 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2132 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); | 2105 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); |
2133 | rc = 1; | 2106 | rc = 1; |
2134 | } | 2107 | } |
2108 | /* | ||
2109 | * Temporary: wait 3 seconds before port-probing can happen, | ||
2110 | * so that we don't miss finding sleepy SilXXXX port-multipliers. | ||
2111 | * This can go away once hotplug is fully/correctly implemented. | ||
2112 | */ | ||
2113 | if (rc == 0) | ||
2114 | msleep(3000); | ||
2135 | done: | 2115 | done: |
2136 | return rc; | 2116 | return rc; |
2137 | } | 2117 | } |
@@ -2200,14 +2180,15 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2200 | m4 = readl(port_mmio + PHY_MODE4); | 2180 | m4 = readl(port_mmio + PHY_MODE4); |
2201 | 2181 | ||
2202 | if (hp_flags & MV_HP_ERRATA_60X1B2) | 2182 | if (hp_flags & MV_HP_ERRATA_60X1B2) |
2203 | tmp = readl(port_mmio + 0x310); | 2183 | tmp = readl(port_mmio + PHY_MODE3); |
2204 | 2184 | ||
2185 | /* workaround for errata FEr SATA#10 (part 1) */ | ||
2205 | m4 = (m4 & ~(1 << 1)) | (1 << 0); | 2186 | m4 = (m4 & ~(1 << 1)) | (1 << 0); |
2206 | 2187 | ||
2207 | writel(m4, port_mmio + PHY_MODE4); | 2188 | writel(m4, port_mmio + PHY_MODE4); |
2208 | 2189 | ||
2209 | if (hp_flags & MV_HP_ERRATA_60X1B2) | 2190 | if (hp_flags & MV_HP_ERRATA_60X1B2) |
2210 | writel(tmp, port_mmio + 0x310); | 2191 | writel(tmp, port_mmio + PHY_MODE3); |
2211 | } | 2192 | } |
2212 | 2193 | ||
2213 | /* Revert values of pre-emphasis and signal amps to the saved ones */ | 2194 | /* Revert values of pre-emphasis and signal amps to the saved ones */ |
@@ -2255,9 +2236,12 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2255 | { | 2236 | { |
2256 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2237 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2257 | 2238 | ||
2258 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | 2239 | /* |
2259 | 2240 | * The datasheet warns against setting ATA_RST when EDMA is active | |
2260 | mv_channel_reset(hpriv, mmio, port); | 2241 | * (but doesn't say what the problem might be). So we first try |
2242 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
2243 | */ | ||
2244 | mv_reset_channel(hpriv, mmio, port); | ||
2261 | 2245 | ||
2262 | ZERO(0x028); /* command */ | 2246 | ZERO(0x028); /* command */ |
2263 | writel(0x101f, port_mmio + EDMA_CFG_OFS); | 2247 | writel(0x101f, port_mmio + EDMA_CFG_OFS); |
@@ -2314,25 +2298,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
2314 | return; | 2298 | return; |
2315 | } | 2299 | } |
2316 | 2300 | ||
2317 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | 2301 | static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) |
2302 | { | ||
2303 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); | ||
2304 | |||
2305 | ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ | ||
2306 | if (want_gen2i) | ||
2307 | ifctl |= (1 << 7); /* enable gen2i speed */ | ||
2308 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); | ||
2309 | } | ||
2310 | |||
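The SATA_INTERFACE_CFG update is pure bit arithmetic: keep only the bits selected by mask 0xf7f, OR in the chip-spec constant 0x9b1000, and set bit 7 when gen2i signalling is wanted. Worked through for a hypothetical starting register value of 0x00000040:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ifctl = 0x00000040;       /* example value, not from hardware */

            ifctl = (ifctl & 0xf7f) | 0x9b1000;    /* -> 0x009b1040 */
            printf("base  : 0x%08x\n", ifctl);

            ifctl |= (1u << 7);                    /* want_gen2i -> 0x009b10c0 */
            printf("gen2i : 0x%08x\n", ifctl);
            return 0;
    }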
2311 | /* | ||
2312 | * Caller must ensure that EDMA is not active, | ||
2313 | * by first doing mv_stop_edma() where needed. | ||
2314 | */ | ||
2315 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | ||
2318 | unsigned int port_no) | 2316 | unsigned int port_no) |
2319 | { | 2317 | { |
2320 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | 2318 | void __iomem *port_mmio = mv_port_base(mmio, port_no); |
2321 | 2319 | ||
2320 | mv_stop_edma_engine(port_mmio); | ||
2322 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2321 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
2323 | 2322 | ||
2324 | if (IS_GEN_II(hpriv)) { | 2323 | if (!IS_GEN_I(hpriv)) { |
2325 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2324 | /* Enable 3.0gb/s link speed */ |
2326 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2325 | mv_setup_ifctl(port_mmio, 1); |
2327 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
2328 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | ||
2329 | } | 2326 | } |
2330 | 2327 | /* | |
2331 | udelay(25); /* allow reset propagation */ | 2328 | * Strobing ATA_RST here causes a hard reset of the SATA transport, |
2332 | 2329 | * link, and physical layers. It resets all SATA interface registers | |
2333 | /* Spec never mentions clearing the bit. Marvell's driver does | 2330 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. |
2334 | * clear the bit, however. | ||
2335 | */ | 2331 | */ |
2332 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | ||
2333 | udelay(25); /* allow reset propagation */ | ||
2336 | writelfl(0, port_mmio + EDMA_CMD_OFS); | 2334 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
2337 | 2335 | ||
2338 | hpriv->ops->phy_errata(hpriv, mmio, port_no); | 2336 | hpriv->ops->phy_errata(hpriv, mmio, port_no); |
@@ -2341,136 +2339,32 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2341 | mdelay(1); | 2339 | mdelay(1); |
2342 | } | 2340 | } |
2343 | 2341 | ||
2344 | /** | 2342 | static void mv_pmp_select(struct ata_port *ap, int pmp) |
2345 | * mv_phy_reset - Perform eDMA reset followed by COMRESET | ||
2346 | * @ap: ATA channel to manipulate | ||
2347 | * | ||
2348 | * Part of this is taken from __sata_phy_reset and modified to | ||
2349 | * not sleep since this routine gets called from interrupt level. | ||
2350 | * | ||
2351 | * LOCKING: | ||
2352 | * Inherited from caller. This is coded to safe to call at | ||
2353 | * interrupt level, i.e. it does not sleep. | ||
2354 | */ | ||
2355 | static void mv_phy_reset(struct ata_port *ap, unsigned int *class, | ||
2356 | unsigned long deadline) | ||
2357 | { | 2343 | { |
2358 | struct mv_port_priv *pp = ap->private_data; | 2344 | if (sata_pmp_supported(ap)) { |
2359 | struct mv_host_priv *hpriv = ap->host->private_data; | 2345 | void __iomem *port_mmio = mv_ap_base(ap); |
2360 | void __iomem *port_mmio = mv_ap_base(ap); | 2346 | u32 reg = readl(port_mmio + SATA_IFCTL_OFS); |
2361 | int retry = 5; | 2347 | int old = reg & 0xf; |
2362 | u32 sstatus; | ||
2363 | |||
2364 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); | ||
2365 | |||
2366 | #ifdef DEBUG | ||
2367 | { | ||
2368 | u32 sstatus, serror, scontrol; | ||
2369 | |||
2370 | mv_scr_read(ap, SCR_STATUS, &sstatus); | ||
2371 | mv_scr_read(ap, SCR_ERROR, &serror); | ||
2372 | mv_scr_read(ap, SCR_CONTROL, &scontrol); | ||
2373 | DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " | ||
2374 | "SCtrl 0x%08x\n", sstatus, serror, scontrol); | ||
2375 | } | ||
2376 | #endif | ||
2377 | |||
2378 | /* Issue COMRESET via SControl */ | ||
2379 | comreset_retry: | ||
2380 | sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301); | ||
2381 | msleep(1); | ||
2382 | |||
2383 | sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300); | ||
2384 | msleep(20); | ||
2385 | 2348 | ||
2386 | do { | 2349 | if (old != pmp) { |
2387 | sata_scr_read(&ap->link, SCR_STATUS, &sstatus); | 2350 | reg = (reg & ~0xf) | pmp; |
2388 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) | 2351 | writelfl(reg, port_mmio + SATA_IFCTL_OFS); |
2389 | break; | 2352 | } |
2390 | |||
2391 | msleep(1); | ||
2392 | } while (time_before(jiffies, deadline)); | ||
2393 | |||
2394 | /* work around errata */ | ||
2395 | if (IS_GEN_II(hpriv) && | ||
2396 | (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) && | ||
2397 | (retry-- > 0)) | ||
2398 | goto comreset_retry; | ||
2399 | |||
2400 | #ifdef DEBUG | ||
2401 | { | ||
2402 | u32 sstatus, serror, scontrol; | ||
2403 | |||
2404 | mv_scr_read(ap, SCR_STATUS, &sstatus); | ||
2405 | mv_scr_read(ap, SCR_ERROR, &serror); | ||
2406 | mv_scr_read(ap, SCR_CONTROL, &scontrol); | ||
2407 | DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " | ||
2408 | "SCtrl 0x%08x\n", sstatus, serror, scontrol); | ||
2409 | } | ||
2410 | #endif | ||
2411 | |||
2412 | if (ata_link_offline(&ap->link)) { | ||
2413 | *class = ATA_DEV_NONE; | ||
2414 | return; | ||
2415 | } | ||
2416 | |||
2417 | /* even after SStatus reflects that device is ready, | ||
2418 | * it seems to take a while for link to be fully | ||
2419 | * established (and thus Status no longer 0x80/0x7F), | ||
2420 | * so we poll a bit for that, here. | ||
2421 | */ | ||
2422 | retry = 20; | ||
2423 | while (1) { | ||
2424 | u8 drv_stat = ata_check_status(ap); | ||
2425 | if ((drv_stat != 0x80) && (drv_stat != 0x7f)) | ||
2426 | break; | ||
2427 | msleep(500); | ||
2428 | if (retry-- <= 0) | ||
2429 | break; | ||
2430 | if (time_after(jiffies, deadline)) | ||
2431 | break; | ||
2432 | } | 2353 | } |
2433 | |||
2434 | /* FIXME: if we passed the deadline, the following | ||
2435 | * code probably produces an invalid result | ||
2436 | */ | ||
2437 | |||
2438 | /* finally, read device signature from TF registers */ | ||
2439 | *class = ata_dev_try_classify(ap->link.device, 1, NULL); | ||
2440 | |||
2441 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
2442 | |||
2443 | WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN); | ||
2444 | |||
2445 | VPRINTK("EXIT\n"); | ||
2446 | } | 2354 | } |
2447 | 2355 | ||
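Port-multiplier selection on this controller is simply the low nibble of SATA_IFCTL: mv_pmp_select() reads the register and, only if the currently addressed target differs from the requested PMP link, rewrites bits 3:0. The same nibble swap in a standalone model (register mocked, names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_ifctl = 0x9b1003;   /* currently addressing PMP port 3 */

    static void select_pmp(int pmp)
    {
            int old = demo_ifctl & 0xf;

            if (old != pmp) {
                    demo_ifctl = (demo_ifctl & ~0xfu) | (uint32_t)pmp;
                    printf("ifctl now 0x%08x (PMP %d)\n", demo_ifctl, pmp);
            }
    }

    int main(void)
    {
            select_pmp(3);   /* already selected: nothing written   */
            select_pmp(5);   /* low nibble rewritten -> 0x009b1005  */
            return 0;
    }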
2448 | static int mv_prereset(struct ata_link *link, unsigned long deadline) | 2356 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, |
2357 | unsigned long deadline) | ||
2449 | { | 2358 | { |
2450 | struct ata_port *ap = link->ap; | 2359 | mv_pmp_select(link->ap, sata_srst_pmp(link)); |
2451 | struct mv_port_priv *pp = ap->private_data; | 2360 | return sata_std_hardreset(link, class, deadline); |
2452 | struct ata_eh_context *ehc = &link->eh_context; | 2361 | } |
2453 | int rc; | ||
2454 | |||
2455 | rc = mv_stop_dma(ap); | ||
2456 | if (rc) | ||
2457 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2458 | |||
2459 | if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) { | ||
2460 | pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET; | ||
2461 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2462 | } | ||
2463 | |||
2464 | /* if we're about to do hardreset, nothing more to do */ | ||
2465 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
2466 | return 0; | ||
2467 | |||
2468 | if (ata_link_online(link)) | ||
2469 | rc = ata_wait_ready(ap, deadline); | ||
2470 | else | ||
2471 | rc = -ENODEV; | ||
2472 | 2362 | ||
2473 | return rc; | 2363 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
2364 | unsigned long deadline) | ||
2365 | { | ||
2366 | mv_pmp_select(link->ap, sata_srst_pmp(link)); | ||
2367 | return ata_sff_softreset(link, class, deadline); | ||
2474 | } | 2368 | } |
2475 | 2369 | ||
2476 | static int mv_hardreset(struct ata_link *link, unsigned int *class, | 2370 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
@@ -2478,43 +2372,34 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2478 | { | 2372 | { |
2479 | struct ata_port *ap = link->ap; | 2373 | struct ata_port *ap = link->ap; |
2480 | struct mv_host_priv *hpriv = ap->host->private_data; | 2374 | struct mv_host_priv *hpriv = ap->host->private_data; |
2375 | struct mv_port_priv *pp = ap->private_data; | ||
2481 | void __iomem *mmio = hpriv->base; | 2376 | void __iomem *mmio = hpriv->base; |
2377 | int rc, attempts = 0, extra = 0; | ||
2378 | u32 sstatus; | ||
2379 | bool online; | ||
2482 | 2380 | ||
2483 | mv_stop_dma(ap); | 2381 | mv_reset_channel(hpriv, mmio, ap->port_no); |
2484 | 2382 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | |
2485 | mv_channel_reset(hpriv, mmio, ap->port_no); | ||
2486 | |||
2487 | mv_phy_reset(ap, class, deadline); | ||
2488 | |||
2489 | return 0; | ||
2490 | } | ||
2491 | |||
2492 | static void mv_postreset(struct ata_link *link, unsigned int *classes) | ||
2493 | { | ||
2494 | struct ata_port *ap = link->ap; | ||
2495 | u32 serr; | ||
2496 | |||
2497 | /* print link status */ | ||
2498 | sata_print_link_status(link); | ||
2499 | |||
2500 | /* clear SError */ | ||
2501 | sata_scr_read(link, SCR_ERROR, &serr); | ||
2502 | sata_scr_write_flush(link, SCR_ERROR, serr); | ||
2503 | 2383 | ||
2504 | /* bail out if no device is present */ | 2384 | /* Workaround for errata FEr SATA#10 (part 2) */ |
2505 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | 2385 | do { |
2506 | DPRINTK("EXIT, no device\n"); | 2386 | const unsigned long *timing = |
2507 | return; | 2387 | sata_ehc_deb_timing(&link->eh_context); |
2508 | } | ||
2509 | 2388 | ||
2510 | /* set up device control */ | 2389 | rc = sata_link_hardreset(link, timing, deadline + extra, |
2511 | iowrite8(ap->ctl, ap->ioaddr.ctl_addr); | 2390 | &online, NULL); |
2512 | } | 2391 | if (rc) |
2392 | return rc; | ||
2393 | sata_scr_read(link, SCR_STATUS, &sstatus); | ||
2394 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { | ||
2395 | /* Force 1.5gb/s link speed and try again */ | ||
2396 | mv_setup_ifctl(mv_ap_base(ap), 0); | ||
2397 | if (time_after(jiffies + HZ, deadline)) | ||
2398 | extra = HZ; /* only extend it once, max */ | ||
2399 | } | ||
2400 | } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); | ||
2513 | 2401 | ||
2514 | static void mv_error_handler(struct ata_port *ap) | 2402 | return rc; |
2515 | { | ||
2516 | ata_do_eh(ap, mv_prereset, ata_std_softreset, | ||
2517 | mv_hardreset, mv_postreset); | ||
2518 | } | 2403 | } |
2519 | 2404 | ||
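The retry loop's exit values read more easily once SStatus is split into its standard fields (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8): 0x113 and 0x123 mean a device is present with PHY communication established at Gen1 or Gen2 speed, 0x0 means nothing attached, and the errata case 0x121 means presence was detected at Gen2 signalling without communication ever coming up, which is why after five attempts the link is forced down to 1.5 Gb/s via mv_setup_ifctl(..., 0) and the deadline is extended once by one second. A small decoder:

    #include <stdio.h>

    static void decode_sstatus(unsigned int s)
    {
            printf("0x%03x: DET=%u SPD=%u IPM=%u\n",
                   s, s & 0xf, (s >> 4) & 0xf, (s >> 8) & 0xf);
    }

    int main(void)
    {
            decode_sstatus(0x113);   /* device + phy comm, Gen1                */
            decode_sstatus(0x123);   /* device + phy comm, Gen2                */
            decode_sstatus(0x121);   /* presence only at Gen2: the errata case */
            return 0;
    }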
2520 | static void mv_eh_freeze(struct ata_port *ap) | 2405 | static void mv_eh_freeze(struct ata_port *ap) |
@@ -2808,19 +2693,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
2808 | hpriv->ops->enable_leds(hpriv, mmio); | 2693 | hpriv->ops->enable_leds(hpriv, mmio); |
2809 | 2694 | ||
2810 | for (port = 0; port < host->n_ports; port++) { | 2695 | for (port = 0; port < host->n_ports; port++) { |
2811 | if (IS_GEN_II(hpriv)) { | ||
2812 | void __iomem *port_mmio = mv_port_base(mmio, port); | ||
2813 | |||
2814 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | ||
2815 | ifctl |= (1 << 7); /* enable gen2i speed */ | ||
2816 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
2817 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | ||
2818 | } | ||
2819 | |||
2820 | hpriv->ops->phy_errata(hpriv, mmio, port); | ||
2821 | } | ||
2822 | |||
2823 | for (port = 0; port < host->n_ports; port++) { | ||
2824 | struct ata_port *ap = host->ports[port]; | 2696 | struct ata_port *ap = host->ports[port]; |
2825 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2697 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2826 | 2698 | ||
@@ -3192,7 +3064,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); | |||
3192 | MODULE_LICENSE("GPL"); | 3064 | MODULE_LICENSE("GPL"); |
3193 | MODULE_DEVICE_TABLE(pci, mv_pci_tbl); | 3065 | MODULE_DEVICE_TABLE(pci, mv_pci_tbl); |
3194 | MODULE_VERSION(DRV_VERSION); | 3066 | MODULE_VERSION(DRV_VERSION); |
3195 | MODULE_ALIAS("platform:sata_mv"); | 3067 | MODULE_ALIAS("platform:" DRV_NAME); |
3196 | 3068 | ||
3197 | #ifdef CONFIG_PCI | 3069 | #ifdef CONFIG_PCI |
3198 | module_param(msi, int, 0444); | 3070 | module_param(msi, int, 0444); |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index ed5473bf7a0a..109b07495721 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -309,7 +309,8 @@ static void nv_nf2_freeze(struct ata_port *ap); | |||
309 | static void nv_nf2_thaw(struct ata_port *ap); | 309 | static void nv_nf2_thaw(struct ata_port *ap); |
310 | static void nv_ck804_freeze(struct ata_port *ap); | 310 | static void nv_ck804_freeze(struct ata_port *ap); |
311 | static void nv_ck804_thaw(struct ata_port *ap); | 311 | static void nv_ck804_thaw(struct ata_port *ap); |
312 | static void nv_error_handler(struct ata_port *ap); | 312 | static int nv_hardreset(struct ata_link *link, unsigned int *class, |
313 | unsigned long deadline); | ||
313 | static int nv_adma_slave_config(struct scsi_device *sdev); | 314 | static int nv_adma_slave_config(struct scsi_device *sdev); |
314 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); | 315 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); |
315 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); | 316 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); |
@@ -385,157 +386,60 @@ static struct pci_driver nv_pci_driver = { | |||
385 | }; | 386 | }; |
386 | 387 | ||
387 | static struct scsi_host_template nv_sht = { | 388 | static struct scsi_host_template nv_sht = { |
388 | .module = THIS_MODULE, | 389 | ATA_BMDMA_SHT(DRV_NAME), |
389 | .name = DRV_NAME, | ||
390 | .ioctl = ata_scsi_ioctl, | ||
391 | .queuecommand = ata_scsi_queuecmd, | ||
392 | .can_queue = ATA_DEF_QUEUE, | ||
393 | .this_id = ATA_SHT_THIS_ID, | ||
394 | .sg_tablesize = LIBATA_MAX_PRD, | ||
395 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
396 | .emulated = ATA_SHT_EMULATED, | ||
397 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
398 | .proc_name = DRV_NAME, | ||
399 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
400 | .slave_configure = ata_scsi_slave_config, | ||
401 | .slave_destroy = ata_scsi_slave_destroy, | ||
402 | .bios_param = ata_std_bios_param, | ||
403 | }; | 390 | }; |
404 | 391 | ||
405 | static struct scsi_host_template nv_adma_sht = { | 392 | static struct scsi_host_template nv_adma_sht = { |
406 | .module = THIS_MODULE, | 393 | ATA_NCQ_SHT(DRV_NAME), |
407 | .name = DRV_NAME, | ||
408 | .ioctl = ata_scsi_ioctl, | ||
409 | .queuecommand = ata_scsi_queuecmd, | ||
410 | .change_queue_depth = ata_scsi_change_queue_depth, | ||
411 | .can_queue = NV_ADMA_MAX_CPBS, | 394 | .can_queue = NV_ADMA_MAX_CPBS, |
412 | .this_id = ATA_SHT_THIS_ID, | ||
413 | .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, | 395 | .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, |
414 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
415 | .emulated = ATA_SHT_EMULATED, | ||
416 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
417 | .proc_name = DRV_NAME, | ||
418 | .dma_boundary = NV_ADMA_DMA_BOUNDARY, | 396 | .dma_boundary = NV_ADMA_DMA_BOUNDARY, |
419 | .slave_configure = nv_adma_slave_config, | 397 | .slave_configure = nv_adma_slave_config, |
420 | .slave_destroy = ata_scsi_slave_destroy, | ||
421 | .bios_param = ata_std_bios_param, | ||
422 | }; | 398 | }; |
423 | 399 | ||
424 | static struct scsi_host_template nv_swncq_sht = { | 400 | static struct scsi_host_template nv_swncq_sht = { |
425 | .module = THIS_MODULE, | 401 | ATA_NCQ_SHT(DRV_NAME), |
426 | .name = DRV_NAME, | ||
427 | .ioctl = ata_scsi_ioctl, | ||
428 | .queuecommand = ata_scsi_queuecmd, | ||
429 | .change_queue_depth = ata_scsi_change_queue_depth, | ||
430 | .can_queue = ATA_MAX_QUEUE, | 402 | .can_queue = ATA_MAX_QUEUE, |
431 | .this_id = ATA_SHT_THIS_ID, | ||
432 | .sg_tablesize = LIBATA_MAX_PRD, | 403 | .sg_tablesize = LIBATA_MAX_PRD, |
433 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
434 | .emulated = ATA_SHT_EMULATED, | ||
435 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
436 | .proc_name = DRV_NAME, | ||
437 | .dma_boundary = ATA_DMA_BOUNDARY, | 404 | .dma_boundary = ATA_DMA_BOUNDARY, |
438 | .slave_configure = nv_swncq_slave_config, | 405 | .slave_configure = nv_swncq_slave_config, |
439 | .slave_destroy = ata_scsi_slave_destroy, | ||
440 | .bios_param = ata_std_bios_param, | ||
441 | }; | 406 | }; |
442 | 407 | ||
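All three SCSI host templates now start from the library initializer macros, ATA_BMDMA_SHT for the plain template and ATA_NCQ_SHT for the two queued ones, and spell out only what differs: queue depth, S/G table size, DMA boundary, and the ADMA/SWNCQ slave_configure hooks. The macros are just shared designated-initializer lists, so the pattern can be shown generically; the struct and macro below are invented for illustration and are not the real libata definitions:

    #include <stdio.h>

    struct example_sht {
            const char *name;
            int can_queue;
            int sg_tablesize;
    };

    /* Shared defaults; a later designated initializer overrides an earlier one. */
    #define EXAMPLE_BASE_SHT(drv_name) \
            .name = (drv_name),        \
            .can_queue = 1,            \
            .sg_tablesize = 128

    static struct example_sht demo_ncq_sht = {
            EXAMPLE_BASE_SHT("demo"),
            .can_queue = 32,           /* override just this field */
    };

    int main(void)
    {
            printf("%s: can_queue=%d sg=%d\n", demo_ncq_sht.name,
                   demo_ncq_sht.can_queue, demo_ncq_sht.sg_tablesize);
            return 0;
    }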
443 | static const struct ata_port_operations nv_generic_ops = { | 408 | static struct ata_port_operations nv_generic_ops = { |
444 | .tf_load = ata_tf_load, | 409 | .inherits = &ata_bmdma_port_ops, |
445 | .tf_read = ata_tf_read, | 410 | .hardreset = nv_hardreset, |
446 | .exec_command = ata_exec_command, | ||
447 | .check_status = ata_check_status, | ||
448 | .dev_select = ata_std_dev_select, | ||
449 | .bmdma_setup = ata_bmdma_setup, | ||
450 | .bmdma_start = ata_bmdma_start, | ||
451 | .bmdma_stop = ata_bmdma_stop, | ||
452 | .bmdma_status = ata_bmdma_status, | ||
453 | .qc_prep = ata_qc_prep, | ||
454 | .qc_issue = ata_qc_issue_prot, | ||
455 | .freeze = ata_bmdma_freeze, | ||
456 | .thaw = ata_bmdma_thaw, | ||
457 | .error_handler = nv_error_handler, | ||
458 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
459 | .data_xfer = ata_data_xfer, | ||
460 | .irq_clear = ata_bmdma_irq_clear, | ||
461 | .irq_on = ata_irq_on, | ||
462 | .scr_read = nv_scr_read, | 411 | .scr_read = nv_scr_read, |
463 | .scr_write = nv_scr_write, | 412 | .scr_write = nv_scr_write, |
464 | .port_start = ata_port_start, | ||
465 | }; | 413 | }; |
466 | 414 | ||
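Under the reworked libata ops model a port_operations table names a parent through .inherits and lists only its own overrides; a hook left NULL is filled in from the parent chain when the host is registered (ATA_OP_NULL, as used in mv_iie_ops above, instead marks a hook as deliberately absent), and that in-place finalization is presumably why these tables are no longer const. So the derived tables that follow carry just their freeze/thaw and host_stop differences on top of nv_generic_ops, which itself only adds nv_hardreset and the SCR accessors to ata_bmdma_port_ops. A rough standalone model of the NULL-falls-back-to-parent resolution (simplified, not the libata implementation):

    #include <stdio.h>

    struct demo_ops {
            const struct demo_ops *inherits;
            void (*freeze)(void);
    };

    static void generic_freeze(void) { puts("generic freeze"); }
    static void nf2_freeze(void)     { puts("nf2 freeze");     }

    static const struct demo_ops base_ops    = { NULL,         generic_freeze };
    static const struct demo_ops generic_ops = { &base_ops,    NULL           };
    static const struct demo_ops nf2_ops     = { &generic_ops, nf2_freeze     };

    /* Walk the .inherits chain until some level provides the hook. */
    static void (*resolve_freeze(const struct demo_ops *o))(void)
    {
            while (o && !o->freeze)
                    o = o->inherits;
            return o ? o->freeze : NULL;
    }

    int main(void)
    {
            resolve_freeze(&nf2_ops)();       /* prints "nf2 freeze"     */
            resolve_freeze(&generic_ops)();   /* prints "generic freeze" */
            return 0;
    }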
467 | static const struct ata_port_operations nv_nf2_ops = { | 415 | static struct ata_port_operations nv_nf2_ops = { |
468 | .tf_load = ata_tf_load, | 416 | .inherits = &nv_generic_ops, |
469 | .tf_read = ata_tf_read, | ||
470 | .exec_command = ata_exec_command, | ||
471 | .check_status = ata_check_status, | ||
472 | .dev_select = ata_std_dev_select, | ||
473 | .bmdma_setup = ata_bmdma_setup, | ||
474 | .bmdma_start = ata_bmdma_start, | ||
475 | .bmdma_stop = ata_bmdma_stop, | ||
476 | .bmdma_status = ata_bmdma_status, | ||
477 | .qc_prep = ata_qc_prep, | ||
478 | .qc_issue = ata_qc_issue_prot, | ||
479 | .freeze = nv_nf2_freeze, | 417 | .freeze = nv_nf2_freeze, |
480 | .thaw = nv_nf2_thaw, | 418 | .thaw = nv_nf2_thaw, |
481 | .error_handler = nv_error_handler, | ||
482 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
483 | .data_xfer = ata_data_xfer, | ||
484 | .irq_clear = ata_bmdma_irq_clear, | ||
485 | .irq_on = ata_irq_on, | ||
486 | .scr_read = nv_scr_read, | ||
487 | .scr_write = nv_scr_write, | ||
488 | .port_start = ata_port_start, | ||
489 | }; | 419 | }; |
490 | 420 | ||
491 | static const struct ata_port_operations nv_ck804_ops = { | 421 | static struct ata_port_operations nv_ck804_ops = { |
492 | .tf_load = ata_tf_load, | 422 | .inherits = &nv_generic_ops, |
493 | .tf_read = ata_tf_read, | ||
494 | .exec_command = ata_exec_command, | ||
495 | .check_status = ata_check_status, | ||
496 | .dev_select = ata_std_dev_select, | ||
497 | .bmdma_setup = ata_bmdma_setup, | ||
498 | .bmdma_start = ata_bmdma_start, | ||
499 | .bmdma_stop = ata_bmdma_stop, | ||
500 | .bmdma_status = ata_bmdma_status, | ||
501 | .qc_prep = ata_qc_prep, | ||
502 | .qc_issue = ata_qc_issue_prot, | ||
503 | .freeze = nv_ck804_freeze, | 423 | .freeze = nv_ck804_freeze, |
504 | .thaw = nv_ck804_thaw, | 424 | .thaw = nv_ck804_thaw, |
505 | .error_handler = nv_error_handler, | ||
506 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
507 | .data_xfer = ata_data_xfer, | ||
508 | .irq_clear = ata_bmdma_irq_clear, | ||
509 | .irq_on = ata_irq_on, | ||
510 | .scr_read = nv_scr_read, | ||
511 | .scr_write = nv_scr_write, | ||
512 | .port_start = ata_port_start, | ||
513 | .host_stop = nv_ck804_host_stop, | 425 | .host_stop = nv_ck804_host_stop, |
514 | }; | 426 | }; |
515 | 427 | ||
516 | static const struct ata_port_operations nv_adma_ops = { | 428 | static struct ata_port_operations nv_adma_ops = { |
517 | .tf_load = ata_tf_load, | 429 | .inherits = &nv_generic_ops, |
518 | .tf_read = nv_adma_tf_read, | 430 | |
519 | .check_atapi_dma = nv_adma_check_atapi_dma, | 431 | .check_atapi_dma = nv_adma_check_atapi_dma, |
520 | .exec_command = ata_exec_command, | 432 | .sff_tf_read = nv_adma_tf_read, |
521 | .check_status = ata_check_status, | ||
522 | .dev_select = ata_std_dev_select, | ||
523 | .bmdma_setup = ata_bmdma_setup, | ||
524 | .bmdma_start = ata_bmdma_start, | ||
525 | .bmdma_stop = ata_bmdma_stop, | ||
526 | .bmdma_status = ata_bmdma_status, | ||
527 | .qc_defer = ata_std_qc_defer, | 433 | .qc_defer = ata_std_qc_defer, |
528 | .qc_prep = nv_adma_qc_prep, | 434 | .qc_prep = nv_adma_qc_prep, |
529 | .qc_issue = nv_adma_qc_issue, | 435 | .qc_issue = nv_adma_qc_issue, |
436 | .sff_irq_clear = nv_adma_irq_clear, | ||
437 | |||
530 | .freeze = nv_adma_freeze, | 438 | .freeze = nv_adma_freeze, |
531 | .thaw = nv_adma_thaw, | 439 | .thaw = nv_adma_thaw, |
532 | .error_handler = nv_adma_error_handler, | 440 | .error_handler = nv_adma_error_handler, |
533 | .post_internal_cmd = nv_adma_post_internal_cmd, | 441 | .post_internal_cmd = nv_adma_post_internal_cmd, |
534 | .data_xfer = ata_data_xfer, | 442 | |
535 | .irq_clear = nv_adma_irq_clear, | ||
536 | .irq_on = ata_irq_on, | ||
537 | .scr_read = nv_scr_read, | ||
538 | .scr_write = nv_scr_write, | ||
539 | .port_start = nv_adma_port_start, | 443 | .port_start = nv_adma_port_start, |
540 | .port_stop = nv_adma_port_stop, | 444 | .port_stop = nv_adma_port_stop, |
541 | #ifdef CONFIG_PM | 445 | #ifdef CONFIG_PM |
@@ -545,28 +449,17 @@ static const struct ata_port_operations nv_adma_ops = { | |||
545 | .host_stop = nv_adma_host_stop, | 449 | .host_stop = nv_adma_host_stop, |
546 | }; | 450 | }; |
547 | 451 | ||
548 | static const struct ata_port_operations nv_swncq_ops = { | 452 | static struct ata_port_operations nv_swncq_ops = { |
549 | .tf_load = ata_tf_load, | 453 | .inherits = &nv_generic_ops, |
550 | .tf_read = ata_tf_read, | 454 | |
551 | .exec_command = ata_exec_command, | ||
552 | .check_status = ata_check_status, | ||
553 | .dev_select = ata_std_dev_select, | ||
554 | .bmdma_setup = ata_bmdma_setup, | ||
555 | .bmdma_start = ata_bmdma_start, | ||
556 | .bmdma_stop = ata_bmdma_stop, | ||
557 | .bmdma_status = ata_bmdma_status, | ||
558 | .qc_defer = ata_std_qc_defer, | 455 | .qc_defer = ata_std_qc_defer, |
559 | .qc_prep = nv_swncq_qc_prep, | 456 | .qc_prep = nv_swncq_qc_prep, |
560 | .qc_issue = nv_swncq_qc_issue, | 457 | .qc_issue = nv_swncq_qc_issue, |
458 | |||
561 | .freeze = nv_mcp55_freeze, | 459 | .freeze = nv_mcp55_freeze, |
562 | .thaw = nv_mcp55_thaw, | 460 | .thaw = nv_mcp55_thaw, |
563 | .error_handler = nv_swncq_error_handler, | 461 | .error_handler = nv_swncq_error_handler, |
564 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 462 | |
565 | .data_xfer = ata_data_xfer, | ||
566 | .irq_clear = ata_bmdma_irq_clear, | ||
567 | .irq_on = ata_irq_on, | ||
568 | .scr_read = nv_scr_read, | ||
569 | .scr_write = nv_scr_write, | ||
570 | #ifdef CONFIG_PM | 463 | #ifdef CONFIG_PM |
571 | .port_suspend = nv_swncq_port_suspend, | 464 | .port_suspend = nv_swncq_port_suspend, |
572 | .port_resume = nv_swncq_port_resume, | 465 | .port_resume = nv_swncq_port_resume, |
@@ -574,63 +467,61 @@ static const struct ata_port_operations nv_swncq_ops = { | |||
574 | .port_start = nv_swncq_port_start, | 467 | .port_start = nv_swncq_port_start, |
575 | }; | 468 | }; |
576 | 469 | ||
470 | struct nv_pi_priv { | ||
471 | irq_handler_t irq_handler; | ||
472 | struct scsi_host_template *sht; | ||
473 | }; | ||
474 | |||
475 | #define NV_PI_PRIV(_irq_handler, _sht) \ | ||
476 | &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht } | ||
477 | |||
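NV_PI_PRIV packs each variant's IRQ handler and SCSI host template into a C99 compound literal and takes its address, so a single private_data pointer in ata_port_info can carry both. Used at file scope, as it is in nv_port_info[] below, the compound literal has static storage duration, so handing out its address is safe for the lifetime of the module. The same idiom in miniature, with invented names:

    #include <stdio.h>

    struct demo_pi_priv {
            void (*irq_handler)(void);
            const char *sht_name;
    };

    #define DEMO_PI_PRIV(_handler, _name) \
            &(struct demo_pi_priv){ .irq_handler = _handler, .sht_name = _name }

    static void demo_irq(void) { puts("irq"); }

    /* File-scope use: the anonymous object lives for the whole program. */
    static const struct demo_pi_priv *info_private = DEMO_PI_PRIV(demo_irq, "demo_sht");

    int main(void)
    {
            printf("template: %s\n", info_private->sht_name);
            info_private->irq_handler();
            return 0;
    }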
577 | static const struct ata_port_info nv_port_info[] = { | 478 | static const struct ata_port_info nv_port_info[] = { |
578 | /* generic */ | 479 | /* generic */ |
579 | { | 480 | { |
580 | .sht = &nv_sht, | ||
581 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, | 481 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
582 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
583 | .pio_mask = NV_PIO_MASK, | 482 | .pio_mask = NV_PIO_MASK, |
584 | .mwdma_mask = NV_MWDMA_MASK, | 483 | .mwdma_mask = NV_MWDMA_MASK, |
585 | .udma_mask = NV_UDMA_MASK, | 484 | .udma_mask = NV_UDMA_MASK, |
586 | .port_ops = &nv_generic_ops, | 485 | .port_ops = &nv_generic_ops, |
587 | .irq_handler = nv_generic_interrupt, | 486 | .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), |
588 | }, | 487 | }, |
589 | /* nforce2/3 */ | 488 | /* nforce2/3 */ |
590 | { | 489 | { |
591 | .sht = &nv_sht, | ||
592 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, | 490 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
593 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
594 | .pio_mask = NV_PIO_MASK, | 491 | .pio_mask = NV_PIO_MASK, |
595 | .mwdma_mask = NV_MWDMA_MASK, | 492 | .mwdma_mask = NV_MWDMA_MASK, |
596 | .udma_mask = NV_UDMA_MASK, | 493 | .udma_mask = NV_UDMA_MASK, |
597 | .port_ops = &nv_nf2_ops, | 494 | .port_ops = &nv_nf2_ops, |
598 | .irq_handler = nv_nf2_interrupt, | 495 | .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht), |
599 | }, | 496 | }, |
600 | /* ck804 */ | 497 | /* ck804 */ |
601 | { | 498 | { |
602 | .sht = &nv_sht, | ||
603 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, | 499 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
604 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
605 | .pio_mask = NV_PIO_MASK, | 500 | .pio_mask = NV_PIO_MASK, |
606 | .mwdma_mask = NV_MWDMA_MASK, | 501 | .mwdma_mask = NV_MWDMA_MASK, |
607 | .udma_mask = NV_UDMA_MASK, | 502 | .udma_mask = NV_UDMA_MASK, |
608 | .port_ops = &nv_ck804_ops, | 503 | .port_ops = &nv_ck804_ops, |
609 | .irq_handler = nv_ck804_interrupt, | 504 | .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht), |
610 | }, | 505 | }, |
611 | /* ADMA */ | 506 | /* ADMA */ |
612 | { | 507 | { |
613 | .sht = &nv_adma_sht, | ||
614 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 508 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
615 | ATA_FLAG_MMIO | ATA_FLAG_NCQ, | 509 | ATA_FLAG_MMIO | ATA_FLAG_NCQ, |
616 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
617 | .pio_mask = NV_PIO_MASK, | 510 | .pio_mask = NV_PIO_MASK, |
618 | .mwdma_mask = NV_MWDMA_MASK, | 511 | .mwdma_mask = NV_MWDMA_MASK, |
619 | .udma_mask = NV_UDMA_MASK, | 512 | .udma_mask = NV_UDMA_MASK, |
620 | .port_ops = &nv_adma_ops, | 513 | .port_ops = &nv_adma_ops, |
621 | .irq_handler = nv_adma_interrupt, | 514 | .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht), |
622 | }, | 515 | }, |
623 | /* SWNCQ */ | 516 | /* SWNCQ */ |
624 | { | 517 | { |
625 | .sht = &nv_swncq_sht, | ||
626 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 518 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
627 | ATA_FLAG_NCQ, | 519 | ATA_FLAG_NCQ, |
628 | .link_flags = ATA_LFLAG_HRST_TO_RESUME, | ||
629 | .pio_mask = NV_PIO_MASK, | 520 | .pio_mask = NV_PIO_MASK, |
630 | .mwdma_mask = NV_MWDMA_MASK, | 521 | .mwdma_mask = NV_MWDMA_MASK, |
631 | .udma_mask = NV_UDMA_MASK, | 522 | .udma_mask = NV_UDMA_MASK, |
632 | .port_ops = &nv_swncq_ops, | 523 | .port_ops = &nv_swncq_ops, |
633 | .irq_handler = nv_swncq_interrupt, | 524 | .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht), |
634 | }, | 525 | }, |
635 | }; | 526 | }; |
636 | 527 | ||
@@ -640,8 +531,8 @@ MODULE_LICENSE("GPL"); | |||
640 | MODULE_DEVICE_TABLE(pci, nv_pci_tbl); | 531 | MODULE_DEVICE_TABLE(pci, nv_pci_tbl); |
641 | MODULE_VERSION(DRV_VERSION); | 532 | MODULE_VERSION(DRV_VERSION); |
642 | 533 | ||
643 | static int adma_enabled = 1; | 534 | static int adma_enabled; |
644 | static int swncq_enabled; | 535 | static int swncq_enabled = 1; |
645 | 536 | ||
646 | static void nv_adma_register_mode(struct ata_port *ap) | 537 | static void nv_adma_register_mode(struct ata_port *ap) |
647 | { | 538 | { |
@@ -839,7 +730,7 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
839 | ADMA mode could abort outstanding commands. */ | 730 | ADMA mode could abort outstanding commands. */ |
840 | nv_adma_register_mode(ap); | 731 | nv_adma_register_mode(ap); |
841 | 732 | ||
842 | ata_tf_read(ap, tf); | 733 | ata_sff_tf_read(ap, tf); |
843 | } | 734 | } |
844 | 735 | ||
845 | static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb) | 736 | static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb) |
@@ -929,7 +820,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
929 | "notifier for tag %d with no cmd?\n", | 820 | "notifier for tag %d with no cmd?\n", |
930 | cpb_num); | 821 | cpb_num); |
931 | ehi->err_mask |= AC_ERR_HSM; | 822 | ehi->err_mask |= AC_ERR_HSM; |
932 | ehi->action |= ATA_EH_SOFTRESET; | 823 | ehi->action |= ATA_EH_RESET; |
933 | ata_port_freeze(ap); | 824 | ata_port_freeze(ap); |
934 | return 1; | 825 | return 1; |
935 | } | 826 | } |
@@ -953,12 +844,12 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) | |||
953 | 844 | ||
954 | /* DEV interrupt w/ no active qc? */ | 845 | /* DEV interrupt w/ no active qc? */ |
955 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | 846 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { |
956 | ata_check_status(ap); | 847 | ata_sff_check_status(ap); |
957 | return 1; | 848 | return 1; |
958 | } | 849 | } |
959 | 850 | ||
960 | /* handle interrupt */ | 851 | /* handle interrupt */ |
961 | return ata_host_intr(ap, qc); | 852 | return ata_sff_host_intr(ap, qc); |
962 | } | 853 | } |
963 | 854 | ||
964 | static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | 855 | static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) |
@@ -1137,7 +1028,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) | |||
1137 | u32 notifier_clears[2]; | 1028 | u32 notifier_clears[2]; |
1138 | 1029 | ||
1139 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { | 1030 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { |
1140 | ata_bmdma_irq_clear(ap); | 1031 | ata_sff_irq_clear(ap); |
1141 | return; | 1032 | return; |
1142 | } | 1033 | } |
1143 | 1034 | ||
@@ -1168,7 +1059,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
1168 | struct nv_adma_port_priv *pp = qc->ap->private_data; | 1059 | struct nv_adma_port_priv *pp = qc->ap->private_data; |
1169 | 1060 | ||
1170 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) | 1061 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) |
1171 | ata_bmdma_post_internal_cmd(qc); | 1062 | ata_sff_post_internal_cmd(qc); |
1172 | } | 1063 | } |
1173 | 1064 | ||
1174 | static int nv_adma_port_start(struct ata_port *ap) | 1065 | static int nv_adma_port_start(struct ata_port *ap) |
@@ -1445,7 +1336,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) | |||
1445 | BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && | 1336 | BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && |
1446 | (qc->flags & ATA_QCFLAG_DMAMAP)); | 1337 | (qc->flags & ATA_QCFLAG_DMAMAP)); |
1447 | nv_adma_register_mode(qc->ap); | 1338 | nv_adma_register_mode(qc->ap); |
1448 | ata_qc_prep(qc); | 1339 | ata_sff_qc_prep(qc); |
1449 | return; | 1340 | return; |
1450 | } | 1341 | } |
1451 | 1342 | ||
@@ -1504,7 +1395,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) | |||
1504 | BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && | 1395 | BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && |
1505 | (qc->flags & ATA_QCFLAG_DMAMAP)); | 1396 | (qc->flags & ATA_QCFLAG_DMAMAP)); |
1506 | nv_adma_register_mode(qc->ap); | 1397 | nv_adma_register_mode(qc->ap); |
1507 | return ata_qc_issue_prot(qc); | 1398 | return ata_sff_qc_issue(qc); |
1508 | } else | 1399 | } else |
1509 | nv_adma_mode(qc->ap); | 1400 | nv_adma_mode(qc->ap); |
1510 | 1401 | ||
@@ -1545,11 +1436,11 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) | |||
1545 | 1436 | ||
1546 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1437 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1547 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) | 1438 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
1548 | handled += ata_host_intr(ap, qc); | 1439 | handled += ata_sff_host_intr(ap, qc); |
1549 | else | 1440 | else |
1550 | // No request pending? Clear interrupt status | 1441 | // No request pending? Clear interrupt status |
1551 | // anyway, in case there's one pending. | 1442 | // anyway, in case there's one pending. |
1552 | ap->ops->check_status(ap); | 1443 | ap->ops->sff_check_status(ap); |
1553 | } | 1444 | } |
1554 | 1445 | ||
1555 | } | 1446 | } |
@@ -1680,7 +1571,7 @@ static void nv_mcp55_freeze(struct ata_port *ap) | |||
1680 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); | 1571 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); |
1681 | mask &= ~(NV_INT_ALL_MCP55 << shift); | 1572 | mask &= ~(NV_INT_ALL_MCP55 << shift); |
1682 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); | 1573 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); |
1683 | ata_bmdma_freeze(ap); | 1574 | ata_sff_freeze(ap); |
1684 | } | 1575 | } |
1685 | 1576 | ||
1686 | static void nv_mcp55_thaw(struct ata_port *ap) | 1577 | static void nv_mcp55_thaw(struct ata_port *ap) |
@@ -1694,7 +1585,7 @@ static void nv_mcp55_thaw(struct ata_port *ap) | |||
1694 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); | 1585 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); |
1695 | mask |= (NV_INT_MASK_MCP55 << shift); | 1586 | mask |= (NV_INT_MASK_MCP55 << shift); |
1696 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); | 1587 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); |
1697 | ata_bmdma_thaw(ap); | 1588 | ata_sff_thaw(ap); |
1698 | } | 1589 | } |
1699 | 1590 | ||
1700 | static int nv_hardreset(struct ata_link *link, unsigned int *class, | 1591 | static int nv_hardreset(struct ata_link *link, unsigned int *class, |
@@ -1706,13 +1597,7 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class, | |||
1706 | * some controllers. Don't classify on hardreset. For more | 1597 | * some controllers. Don't classify on hardreset. For more |
1707 | * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352 | 1598 | * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352 |
1708 | */ | 1599 | */ |
1709 | return sata_std_hardreset(link, &dummy, deadline); | 1600 | return sata_sff_hardreset(link, &dummy, deadline); |
1710 | } | ||
1711 | |||
1712 | static void nv_error_handler(struct ata_port *ap) | ||
1713 | { | ||
1714 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, | ||
1715 | nv_hardreset, ata_std_postreset); | ||
1716 | } | 1601 | } |
1717 | 1602 | ||
1718 | static void nv_adma_error_handler(struct ata_port *ap) | 1603 | static void nv_adma_error_handler(struct ata_port *ap) |
@@ -1768,8 +1653,7 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1768 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ | 1653 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1769 | } | 1654 | } |
1770 | 1655 | ||
1771 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, | 1656 | ata_sff_error_handler(ap); |
1772 | nv_hardreset, ata_std_postreset); | ||
1773 | } | 1657 | } |
1774 | 1658 | ||
1775 | static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) | 1659 | static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) |
@@ -1855,7 +1739,7 @@ static void nv_swncq_ncq_stop(struct ata_port *ap) | |||
1855 | pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); | 1739 | pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); |
1856 | 1740 | ||
1857 | ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n", | 1741 | ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n", |
1858 | ap->ops->check_status(ap), | 1742 | ap->ops->sff_check_status(ap), |
1859 | ioread8(ap->ioaddr.error_addr)); | 1743 | ioread8(ap->ioaddr.error_addr)); |
1860 | 1744 | ||
1861 | sactive = readl(pp->sactive_block); | 1745 | sactive = readl(pp->sactive_block); |
@@ -1881,7 +1765,7 @@ static void nv_swncq_ncq_stop(struct ata_port *ap) | |||
1881 | } | 1765 | } |
1882 | 1766 | ||
1883 | nv_swncq_pp_reinit(ap); | 1767 | nv_swncq_pp_reinit(ap); |
1884 | ap->ops->irq_clear(ap); | 1768 | ap->ops->sff_irq_clear(ap); |
1885 | __ata_bmdma_stop(ap); | 1769 | __ata_bmdma_stop(ap); |
1886 | nv_swncq_irq_clear(ap, 0xffff); | 1770 | nv_swncq_irq_clear(ap, 0xffff); |
1887 | } | 1771 | } |
@@ -1892,11 +1776,10 @@ static void nv_swncq_error_handler(struct ata_port *ap) | |||
1892 | 1776 | ||
1893 | if (ap->link.sactive) { | 1777 | if (ap->link.sactive) { |
1894 | nv_swncq_ncq_stop(ap); | 1778 | nv_swncq_ncq_stop(ap); |
1895 | ehc->i.action |= ATA_EH_HARDRESET; | 1779 | ehc->i.action |= ATA_EH_RESET; |
1896 | } | 1780 | } |
1897 | 1781 | ||
1898 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, | 1782 | ata_sff_error_handler(ap); |
1899 | nv_hardreset, ata_std_postreset); | ||
1900 | } | 1783 | } |
1901 | 1784 | ||
1902 | #ifdef CONFIG_PM | 1785 | #ifdef CONFIG_PM |
@@ -2042,7 +1925,7 @@ static int nv_swncq_port_start(struct ata_port *ap) | |||
2042 | static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) | 1925 | static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) |
2043 | { | 1926 | { |
2044 | if (qc->tf.protocol != ATA_PROT_NCQ) { | 1927 | if (qc->tf.protocol != ATA_PROT_NCQ) { |
2045 | ata_qc_prep(qc); | 1928 | ata_sff_qc_prep(qc); |
2046 | return; | 1929 | return; |
2047 | } | 1930 | } |
2048 | 1931 | ||
@@ -2104,8 +1987,8 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, | |||
2104 | pp->dmafis_bits &= ~(1 << qc->tag); | 1987 | pp->dmafis_bits &= ~(1 << qc->tag); |
2105 | pp->qc_active |= (0x1 << qc->tag); | 1988 | pp->qc_active |= (0x1 << qc->tag); |
2106 | 1989 | ||
2107 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 1990 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ |
2108 | ap->ops->exec_command(ap, &qc->tf); | 1991 | ap->ops->sff_exec_command(ap, &qc->tf); |
2109 | 1992 | ||
2110 | DPRINTK("Issued tag %u\n", qc->tag); | 1993 | DPRINTK("Issued tag %u\n", qc->tag); |
2111 | 1994 | ||
@@ -2118,7 +2001,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) | |||
2118 | struct nv_swncq_port_priv *pp = ap->private_data; | 2001 | struct nv_swncq_port_priv *pp = ap->private_data; |
2119 | 2002 | ||
2120 | if (qc->tf.protocol != ATA_PROT_NCQ) | 2003 | if (qc->tf.protocol != ATA_PROT_NCQ) |
2121 | return ata_qc_issue_prot(qc); | 2004 | return ata_sff_qc_issue(qc); |
2122 | 2005 | ||
2123 | DPRINTK("Enter\n"); | 2006 | DPRINTK("Enter\n"); |
2124 | 2007 | ||
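
Two related renames show up in the SWNCQ path above: non-NCQ commands now fall back to ata_sff_qc_issue() (formerly ata_qc_issue_prot()), and hand-issued taskfiles go through the sff_-prefixed port methods. A condensed sketch of the pattern with an invented function name; the rest of the NCQ bookkeeping is elided:

    static unsigned int demo_qc_issue(struct ata_queued_cmd *qc)
    {
            struct ata_port *ap = qc->ap;

            if (qc->tf.protocol != ATA_PROT_NCQ)
                    return ata_sff_qc_issue(qc);     /* was ata_qc_issue_prot() */

            /* NCQ: issue the taskfile by hand via the renamed hooks */
            ap->ops->sff_tf_load(ap, &qc->tf);       /* was ->tf_load */
            ap->ops->sff_exec_command(ap, &qc->tf);  /* was ->exec_command */
            return 0;
    }
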
@@ -2173,11 +2056,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap) | |||
2173 | ata_ehi_clear_desc(ehi); | 2056 | ata_ehi_clear_desc(ehi); |
2174 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | 2057 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); |
2175 | ehi->err_mask |= AC_ERR_HOST_BUS; | 2058 | ehi->err_mask |= AC_ERR_HOST_BUS; |
2176 | ehi->action |= ATA_EH_SOFTRESET; | 2059 | ehi->action |= ATA_EH_RESET; |
2177 | return -EINVAL; | 2060 | return -EINVAL; |
2178 | } | 2061 | } |
2179 | 2062 | ||
2180 | ap->ops->irq_clear(ap); | 2063 | ap->ops->sff_irq_clear(ap); |
2181 | __ata_bmdma_stop(ap); | 2064 | __ata_bmdma_stop(ap); |
2182 | 2065 | ||
2183 | sactive = readl(pp->sactive_block); | 2066 | sactive = readl(pp->sactive_block); |
@@ -2188,7 +2071,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) | |||
2188 | ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" | 2071 | ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" |
2189 | "(%08x->%08x)", pp->qc_active, sactive); | 2072 | "(%08x->%08x)", pp->qc_active, sactive); |
2190 | ehi->err_mask |= AC_ERR_HSM; | 2073 | ehi->err_mask |= AC_ERR_HSM; |
2191 | ehi->action |= ATA_EH_HARDRESET; | 2074 | ehi->action |= ATA_EH_RESET; |
2192 | return -EINVAL; | 2075 | return -EINVAL; |
2193 | } | 2076 | } |
2194 | for (i = 0; i < ATA_MAX_QUEUE; i++) { | 2077 | for (i = 0; i < ATA_MAX_QUEUE; i++) { |
@@ -2299,7 +2182,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) | |||
2299 | u8 ata_stat; | 2182 | u8 ata_stat; |
2300 | int rc = 0; | 2183 | int rc = 0; |
2301 | 2184 | ||
2302 | ata_stat = ap->ops->check_status(ap); | 2185 | ata_stat = ap->ops->sff_check_status(ap); |
2303 | nv_swncq_irq_clear(ap, fis); | 2186 | nv_swncq_irq_clear(ap, fis); |
2304 | if (!fis) | 2187 | if (!fis) |
2305 | return; | 2188 | return; |
@@ -2324,7 +2207,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) | |||
2324 | ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis); | 2207 | ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis); |
2325 | ehi->err_mask |= AC_ERR_DEV; | 2208 | ehi->err_mask |= AC_ERR_DEV; |
2326 | ehi->serror |= serror; | 2209 | ehi->serror |= serror; |
2327 | ehi->action |= ATA_EH_SOFTRESET; | 2210 | ehi->action |= ATA_EH_RESET; |
2328 | ata_port_freeze(ap); | 2211 | ata_port_freeze(ap); |
2329 | return; | 2212 | return; |
2330 | } | 2213 | } |
@@ -2356,13 +2239,13 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) | |||
2356 | if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { | 2239 | if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { |
2357 | ata_ehi_push_desc(ehi, "illegal fis transaction"); | 2240 | ata_ehi_push_desc(ehi, "illegal fis transaction"); |
2358 | ehi->err_mask |= AC_ERR_HSM; | 2241 | ehi->err_mask |= AC_ERR_HSM; |
2359 | ehi->action |= ATA_EH_HARDRESET; | 2242 | ehi->action |= ATA_EH_RESET; |
2360 | goto irq_error; | 2243 | goto irq_error; |
2361 | } | 2244 | } |
2362 | 2245 | ||
2363 | if (!(fis & NV_SWNCQ_IRQ_DMASETUP) && | 2246 | if (!(fis & NV_SWNCQ_IRQ_DMASETUP) && |
2364 | !(pp->ncq_flags & ncq_saw_dmas)) { | 2247 | !(pp->ncq_flags & ncq_saw_dmas)) { |
2365 | ata_stat = ap->ops->check_status(ap); | 2248 | ata_stat = ap->ops->sff_check_status(ap); |
2366 | if (ata_stat & ATA_BUSY) | 2249 | if (ata_stat & ATA_BUSY) |
2367 | goto irq_exit; | 2250 | goto irq_exit; |
2368 | 2251 | ||
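
Throughout these hunks the explicit ATA_EH_SOFTRESET / ATA_EH_HARDRESET requests collapse into a single ATA_EH_RESET: the driver only records that a reset is needed and the EH core decides which kind to perform. A sketch of an error path using the unified flag; the function name is invented and the eh_info lookup follows the usual libata convention rather than this driver's surrounding (unshown) code:

    static void demo_flag_hsm_error(struct ata_port *ap)
    {
            struct ata_eh_info *ehi = &ap->link.eh_info;

            ata_ehi_push_desc(ehi, "illegal fis transaction");
            ehi->err_mask |= AC_ERR_HSM;
            ehi->action |= ATA_EH_RESET;   /* EH core picks soft vs. hard */
            ata_port_freeze(ap);
    }
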
@@ -2429,6 +2312,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2429 | { | 2312 | { |
2430 | static int printed_version; | 2313 | static int printed_version; |
2431 | const struct ata_port_info *ppi[] = { NULL, NULL }; | 2314 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
2315 | struct nv_pi_priv *ipriv; | ||
2432 | struct ata_host *host; | 2316 | struct ata_host *host; |
2433 | struct nv_host_priv *hpriv; | 2317 | struct nv_host_priv *hpriv; |
2434 | int rc; | 2318 | int rc; |
@@ -2465,7 +2349,8 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2465 | } | 2349 | } |
2466 | 2350 | ||
2467 | ppi[0] = &nv_port_info[type]; | 2351 | ppi[0] = &nv_port_info[type]; |
2468 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 2352 | ipriv = ppi[0]->private_data; |
2353 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | ||
2469 | if (rc) | 2354 | if (rc) |
2470 | return rc; | 2355 | return rc; |
2471 | 2356 | ||
@@ -2503,8 +2388,8 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2503 | nv_swncq_host_init(host); | 2388 | nv_swncq_host_init(host); |
2504 | 2389 | ||
2505 | pci_set_master(pdev); | 2390 | pci_set_master(pdev); |
2506 | return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler, | 2391 | return ata_host_activate(host, pdev->irq, ipriv->irq_handler, |
2507 | IRQF_SHARED, ppi[0]->sht); | 2392 | IRQF_SHARED, ipriv->sht); |
2508 | } | 2393 | } |
2509 | 2394 | ||
2510 | #ifdef CONFIG_PM | 2395 | #ifdef CONFIG_PM |
@@ -2600,5 +2485,5 @@ module_exit(nv_exit); | |||
2600 | module_param_named(adma, adma_enabled, bool, 0444); | 2485 | module_param_named(adma, adma_enabled, bool, 0444); |
2601 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)"); | 2486 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)"); |
2602 | module_param_named(swncq, swncq_enabled, bool, 0444); | 2487 | module_param_named(swncq, swncq_enabled, bool, 0444); |
2603 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)"); | 2488 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); |
2604 | 2489 | ||
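
End of the sata_nv.c conversion. Note the last hunk of nv_init_one(): the interrupt handler and scsi_host_template used to be read straight out of ata_port_info (ppi[0]->irq_handler, ppi[0]->sht); those fields are gone, so per-flavour data now travels in a driver-private structure reached through ->private_data. The layout below is inferred from how ipriv is used above (irq_handler and sht members) and is an assumption, not copied from the driver:

    #include <linux/interrupt.h>
    #include <linux/pci.h>
    #include <linux/libata.h>

    struct demo_pi_priv {
            irq_handler_t                   irq_handler;
            struct scsi_host_template       *sht;
    };

    /* registration tail: fetch the per-flavour bits from private_data */
    static int demo_activate(struct ata_host *host, struct pci_dev *pdev,
                             const struct ata_port_info *pi)
    {
            struct demo_pi_priv *ipriv = pi->private_data;

            return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
                                     IRQF_SHARED, ipriv->sht);
    }
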
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 11c1afea2db2..5a10dc5048ad 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -143,103 +143,57 @@ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile | |||
143 | static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); | 143 | static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); |
144 | static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc); | 144 | static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc); |
145 | static void pdc_irq_clear(struct ata_port *ap); | 145 | static void pdc_irq_clear(struct ata_port *ap); |
146 | static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); | 146 | static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc); |
147 | static void pdc_freeze(struct ata_port *ap); | 147 | static void pdc_freeze(struct ata_port *ap); |
148 | static void pdc_sata_freeze(struct ata_port *ap); | 148 | static void pdc_sata_freeze(struct ata_port *ap); |
149 | static void pdc_thaw(struct ata_port *ap); | 149 | static void pdc_thaw(struct ata_port *ap); |
150 | static void pdc_sata_thaw(struct ata_port *ap); | 150 | static void pdc_sata_thaw(struct ata_port *ap); |
151 | static void pdc_pata_error_handler(struct ata_port *ap); | 151 | static void pdc_error_handler(struct ata_port *ap); |
152 | static void pdc_sata_error_handler(struct ata_port *ap); | ||
153 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); | 152 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); |
154 | static int pdc_pata_cable_detect(struct ata_port *ap); | 153 | static int pdc_pata_cable_detect(struct ata_port *ap); |
155 | static int pdc_sata_cable_detect(struct ata_port *ap); | 154 | static int pdc_sata_cable_detect(struct ata_port *ap); |
156 | 155 | ||
157 | static struct scsi_host_template pdc_ata_sht = { | 156 | static struct scsi_host_template pdc_ata_sht = { |
158 | .module = THIS_MODULE, | 157 | ATA_BASE_SHT(DRV_NAME), |
159 | .name = DRV_NAME, | ||
160 | .ioctl = ata_scsi_ioctl, | ||
161 | .queuecommand = ata_scsi_queuecmd, | ||
162 | .can_queue = ATA_DEF_QUEUE, | ||
163 | .this_id = ATA_SHT_THIS_ID, | ||
164 | .sg_tablesize = PDC_MAX_PRD, | 158 | .sg_tablesize = PDC_MAX_PRD, |
165 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
166 | .emulated = ATA_SHT_EMULATED, | ||
167 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
168 | .proc_name = DRV_NAME, | ||
169 | .dma_boundary = ATA_DMA_BOUNDARY, | 159 | .dma_boundary = ATA_DMA_BOUNDARY, |
170 | .slave_configure = ata_scsi_slave_config, | ||
171 | .slave_destroy = ata_scsi_slave_destroy, | ||
172 | .bios_param = ata_std_bios_param, | ||
173 | }; | 160 | }; |
174 | 161 | ||
175 | static const struct ata_port_operations pdc_sata_ops = { | 162 | static const struct ata_port_operations pdc_common_ops = { |
176 | .tf_load = pdc_tf_load_mmio, | 163 | .inherits = &ata_sff_port_ops, |
177 | .tf_read = ata_tf_read, | ||
178 | .check_status = ata_check_status, | ||
179 | .exec_command = pdc_exec_command_mmio, | ||
180 | .dev_select = ata_std_dev_select, | ||
181 | .check_atapi_dma = pdc_check_atapi_dma, | ||
182 | 164 | ||
165 | .sff_tf_load = pdc_tf_load_mmio, | ||
166 | .sff_exec_command = pdc_exec_command_mmio, | ||
167 | .check_atapi_dma = pdc_check_atapi_dma, | ||
183 | .qc_prep = pdc_qc_prep, | 168 | .qc_prep = pdc_qc_prep, |
184 | .qc_issue = pdc_qc_issue_prot, | 169 | .qc_issue = pdc_qc_issue, |
185 | .freeze = pdc_sata_freeze, | 170 | .sff_irq_clear = pdc_irq_clear, |
186 | .thaw = pdc_sata_thaw, | ||
187 | .error_handler = pdc_sata_error_handler, | ||
188 | .post_internal_cmd = pdc_post_internal_cmd, | ||
189 | .cable_detect = pdc_sata_cable_detect, | ||
190 | .data_xfer = ata_data_xfer, | ||
191 | .irq_clear = pdc_irq_clear, | ||
192 | .irq_on = ata_irq_on, | ||
193 | 171 | ||
194 | .scr_read = pdc_sata_scr_read, | 172 | .post_internal_cmd = pdc_post_internal_cmd, |
195 | .scr_write = pdc_sata_scr_write, | 173 | .error_handler = pdc_error_handler, |
196 | .port_start = pdc_sata_port_start, | ||
197 | }; | 174 | }; |
198 | 175 | ||
199 | /* First-generation chips need a more restrictive ->check_atapi_dma op */ | 176 | static struct ata_port_operations pdc_sata_ops = { |
200 | static const struct ata_port_operations pdc_old_sata_ops = { | 177 | .inherits = &pdc_common_ops, |
201 | .tf_load = pdc_tf_load_mmio, | 178 | .cable_detect = pdc_sata_cable_detect, |
202 | .tf_read = ata_tf_read, | ||
203 | .check_status = ata_check_status, | ||
204 | .exec_command = pdc_exec_command_mmio, | ||
205 | .dev_select = ata_std_dev_select, | ||
206 | .check_atapi_dma = pdc_old_sata_check_atapi_dma, | ||
207 | |||
208 | .qc_prep = pdc_qc_prep, | ||
209 | .qc_issue = pdc_qc_issue_prot, | ||
210 | .freeze = pdc_sata_freeze, | 179 | .freeze = pdc_sata_freeze, |
211 | .thaw = pdc_sata_thaw, | 180 | .thaw = pdc_sata_thaw, |
212 | .error_handler = pdc_sata_error_handler, | ||
213 | .post_internal_cmd = pdc_post_internal_cmd, | ||
214 | .cable_detect = pdc_sata_cable_detect, | ||
215 | .data_xfer = ata_data_xfer, | ||
216 | .irq_clear = pdc_irq_clear, | ||
217 | .irq_on = ata_irq_on, | ||
218 | |||
219 | .scr_read = pdc_sata_scr_read, | 181 | .scr_read = pdc_sata_scr_read, |
220 | .scr_write = pdc_sata_scr_write, | 182 | .scr_write = pdc_sata_scr_write, |
221 | .port_start = pdc_sata_port_start, | 183 | .port_start = pdc_sata_port_start, |
222 | }; | 184 | }; |
223 | 185 | ||
224 | static const struct ata_port_operations pdc_pata_ops = { | 186 | /* First-generation chips need a more restrictive ->check_atapi_dma op */ |
225 | .tf_load = pdc_tf_load_mmio, | 187 | static struct ata_port_operations pdc_old_sata_ops = { |
226 | .tf_read = ata_tf_read, | 188 | .inherits = &pdc_sata_ops, |
227 | .check_status = ata_check_status, | 189 | .check_atapi_dma = pdc_old_sata_check_atapi_dma, |
228 | .exec_command = pdc_exec_command_mmio, | 190 | }; |
229 | .dev_select = ata_std_dev_select, | ||
230 | .check_atapi_dma = pdc_check_atapi_dma, | ||
231 | 191 | ||
232 | .qc_prep = pdc_qc_prep, | 192 | static struct ata_port_operations pdc_pata_ops = { |
233 | .qc_issue = pdc_qc_issue_prot, | 193 | .inherits = &pdc_common_ops, |
194 | .cable_detect = pdc_pata_cable_detect, | ||
234 | .freeze = pdc_freeze, | 195 | .freeze = pdc_freeze, |
235 | .thaw = pdc_thaw, | 196 | .thaw = pdc_thaw, |
236 | .error_handler = pdc_pata_error_handler, | ||
237 | .post_internal_cmd = pdc_post_internal_cmd, | ||
238 | .cable_detect = pdc_pata_cable_detect, | ||
239 | .data_xfer = ata_data_xfer, | ||
240 | .irq_clear = pdc_irq_clear, | ||
241 | .irq_on = ata_irq_on, | ||
242 | |||
243 | .port_start = pdc_common_port_start, | 197 | .port_start = pdc_common_port_start, |
244 | }; | 198 | }; |
245 | 199 | ||
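
This is the biggest structural change in the file: three fully spelled-out ata_port_operations tables become a small inheritance chain, with only the hooks a driver actually overrides listed; everything else is copied from the parent named in .inherits when the host is registered. A self-contained sketch of the same pattern with invented names and stub bodies:

    #include <linux/libata.h>

    static void demo_qc_prep(struct ata_queued_cmd *qc) { /* build PRDs etc. */ }
    static int demo_strict_check_atapi_dma(struct ata_queued_cmd *qc) { return 1; }

    /* base table: inherits the whole SFF implementation, overrides one hook */
    static struct ata_port_operations demo_common_ops = {
            .inherits       = &ata_sff_port_ops,
            .qc_prep        = demo_qc_prep,
    };

    /* derived table: everything from demo_common_ops plus a stricter ATAPI
     * DMA check, mirroring pdc_old_sata_ops above */
    static struct ata_port_operations demo_old_ops = {
            .inherits        = &demo_common_ops,
            .check_atapi_dma = demo_strict_check_atapi_dma,
    };
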
@@ -451,7 +405,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc) | |||
451 | u8 *cdb = qc->cdb; | 405 | u8 *cdb = qc->cdb; |
452 | struct pdc_port_priv *pp = ap->private_data; | 406 | struct pdc_port_priv *pp = ap->private_data; |
453 | u8 *buf = pp->pkt; | 407 | u8 *buf = pp->pkt; |
454 | u32 *buf32 = (u32 *) buf; | 408 | __le32 *buf32 = (__le32 *) buf; |
455 | unsigned int dev_sel, feature; | 409 | unsigned int dev_sel, feature; |
456 | 410 | ||
457 | /* set control bits (byte 0), zero delay seq id (byte 3), | 411 | /* set control bits (byte 0), zero delay seq id (byte 3), |
@@ -738,24 +692,12 @@ static void pdc_sata_thaw(struct ata_port *ap) | |||
738 | readl(host_mmio + hotplug_offset); /* flush */ | 692 | readl(host_mmio + hotplug_offset); /* flush */ |
739 | } | 693 | } |
740 | 694 | ||
741 | static void pdc_common_error_handler(struct ata_port *ap, ata_reset_fn_t hardreset) | 695 | static void pdc_error_handler(struct ata_port *ap) |
742 | { | 696 | { |
743 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) | 697 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) |
744 | pdc_reset_port(ap); | 698 | pdc_reset_port(ap); |
745 | 699 | ||
746 | /* perform recovery */ | 700 | ata_std_error_handler(ap); |
747 | ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, | ||
748 | ata_std_postreset); | ||
749 | } | ||
750 | |||
751 | static void pdc_pata_error_handler(struct ata_port *ap) | ||
752 | { | ||
753 | pdc_common_error_handler(ap, NULL); | ||
754 | } | ||
755 | |||
756 | static void pdc_sata_error_handler(struct ata_port *ap) | ||
757 | { | ||
758 | pdc_common_error_handler(ap, sata_std_hardreset); | ||
759 | } | 701 | } |
760 | 702 | ||
761 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) | 703 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) |
@@ -952,7 +894,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc) | |||
952 | readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ | 894 | readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ |
953 | } | 895 | } |
954 | 896 | ||
955 | static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) | 897 | static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) |
956 | { | 898 | { |
957 | switch (qc->tf.protocol) { | 899 | switch (qc->tf.protocol) { |
958 | case ATAPI_PROT_NODATA: | 900 | case ATAPI_PROT_NODATA: |
@@ -972,20 +914,20 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) | |||
972 | break; | 914 | break; |
973 | } | 915 | } |
974 | 916 | ||
975 | return ata_qc_issue_prot(qc); | 917 | return ata_sff_qc_issue(qc); |
976 | } | 918 | } |
977 | 919 | ||
978 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 920 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) |
979 | { | 921 | { |
980 | WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); | 922 | WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); |
981 | ata_tf_load(ap, tf); | 923 | ata_sff_tf_load(ap, tf); |
982 | } | 924 | } |
983 | 925 | ||
984 | static void pdc_exec_command_mmio(struct ata_port *ap, | 926 | static void pdc_exec_command_mmio(struct ata_port *ap, |
985 | const struct ata_taskfile *tf) | 927 | const struct ata_taskfile *tf) |
986 | { | 928 | { |
987 | WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); | 929 | WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); |
988 | ata_exec_command(ap, tf); | 930 | ata_sff_exec_command(ap, tf); |
989 | } | 931 | } |
990 | 932 | ||
991 | static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) | 933 | static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) |
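
End of the sata_promise.c conversion. Apart from the renames, note the small type fix earlier in this file's diff: the ATAPI packet buffer pointer went from u32 * to __le32 *, documenting that the controller consumes the words in little-endian order and letting sparse check the stores. A one-line illustration of what the annotation implies for writers of the buffer (helper name invented):

    /* values stored through an __le32 pointer go through cpu_to_le32() */
    static void demo_fill_le_word(__le32 *buf32, unsigned int idx, u32 val)
    {
            buf32[idx] = cpu_to_le32(val);
    }
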
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 91cc12c82040..1600107047cf 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -121,50 +121,38 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); | |||
121 | static int qs_check_atapi_dma(struct ata_queued_cmd *qc); | 121 | static int qs_check_atapi_dma(struct ata_queued_cmd *qc); |
122 | static void qs_bmdma_stop(struct ata_queued_cmd *qc); | 122 | static void qs_bmdma_stop(struct ata_queued_cmd *qc); |
123 | static u8 qs_bmdma_status(struct ata_port *ap); | 123 | static u8 qs_bmdma_status(struct ata_port *ap); |
124 | static void qs_irq_clear(struct ata_port *ap); | ||
125 | static void qs_freeze(struct ata_port *ap); | 124 | static void qs_freeze(struct ata_port *ap); |
126 | static void qs_thaw(struct ata_port *ap); | 125 | static void qs_thaw(struct ata_port *ap); |
126 | static int qs_prereset(struct ata_link *link, unsigned long deadline); | ||
127 | static void qs_error_handler(struct ata_port *ap); | 127 | static void qs_error_handler(struct ata_port *ap); |
128 | 128 | ||
129 | static struct scsi_host_template qs_ata_sht = { | 129 | static struct scsi_host_template qs_ata_sht = { |
130 | .module = THIS_MODULE, | 130 | ATA_BASE_SHT(DRV_NAME), |
131 | .name = DRV_NAME, | ||
132 | .ioctl = ata_scsi_ioctl, | ||
133 | .queuecommand = ata_scsi_queuecmd, | ||
134 | .can_queue = ATA_DEF_QUEUE, | ||
135 | .this_id = ATA_SHT_THIS_ID, | ||
136 | .sg_tablesize = QS_MAX_PRD, | 131 | .sg_tablesize = QS_MAX_PRD, |
137 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
138 | .emulated = ATA_SHT_EMULATED, | ||
139 | .use_clustering = ENABLE_CLUSTERING, | ||
140 | .proc_name = DRV_NAME, | ||
141 | .dma_boundary = QS_DMA_BOUNDARY, | 132 | .dma_boundary = QS_DMA_BOUNDARY, |
142 | .slave_configure = ata_scsi_slave_config, | ||
143 | .slave_destroy = ata_scsi_slave_destroy, | ||
144 | .bios_param = ata_std_bios_param, | ||
145 | }; | 133 | }; |
146 | 134 | ||
147 | static const struct ata_port_operations qs_ata_ops = { | 135 | static struct ata_port_operations qs_ata_ops = { |
148 | .tf_load = ata_tf_load, | 136 | .inherits = &ata_sff_port_ops, |
149 | .tf_read = ata_tf_read, | 137 | |
150 | .check_status = ata_check_status, | ||
151 | .check_atapi_dma = qs_check_atapi_dma, | 138 | .check_atapi_dma = qs_check_atapi_dma, |
152 | .exec_command = ata_exec_command, | 139 | .bmdma_stop = qs_bmdma_stop, |
153 | .dev_select = ata_std_dev_select, | 140 | .bmdma_status = qs_bmdma_status, |
154 | .qc_prep = qs_qc_prep, | 141 | .qc_prep = qs_qc_prep, |
155 | .qc_issue = qs_qc_issue, | 142 | .qc_issue = qs_qc_issue, |
156 | .data_xfer = ata_data_xfer, | 143 | |
157 | .freeze = qs_freeze, | 144 | .freeze = qs_freeze, |
158 | .thaw = qs_thaw, | 145 | .thaw = qs_thaw, |
146 | .prereset = qs_prereset, | ||
147 | .softreset = ATA_OP_NULL, | ||
159 | .error_handler = qs_error_handler, | 148 | .error_handler = qs_error_handler, |
160 | .irq_clear = qs_irq_clear, | 149 | .post_internal_cmd = ATA_OP_NULL, |
161 | .irq_on = ata_irq_on, | 150 | |
162 | .scr_read = qs_scr_read, | 151 | .scr_read = qs_scr_read, |
163 | .scr_write = qs_scr_write, | 152 | .scr_write = qs_scr_write, |
153 | |||
164 | .port_start = qs_port_start, | 154 | .port_start = qs_port_start, |
165 | .host_stop = qs_host_stop, | 155 | .host_stop = qs_host_stop, |
166 | .bmdma_stop = qs_bmdma_stop, | ||
167 | .bmdma_status = qs_bmdma_status, | ||
168 | }; | 156 | }; |
169 | 157 | ||
170 | static const struct ata_port_info qs_port_info[] = { | 158 | static const struct ata_port_info qs_port_info[] = { |
@@ -211,11 +199,6 @@ static u8 qs_bmdma_status(struct ata_port *ap) | |||
211 | return 0; | 199 | return 0; |
212 | } | 200 | } |
213 | 201 | ||
214 | static void qs_irq_clear(struct ata_port *ap) | ||
215 | { | ||
216 | /* nothing */ | ||
217 | } | ||
218 | |||
219 | static inline void qs_enter_reg_mode(struct ata_port *ap) | 202 | static inline void qs_enter_reg_mode(struct ata_port *ap) |
220 | { | 203 | { |
221 | u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); | 204 | u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); |
@@ -256,7 +239,7 @@ static int qs_prereset(struct ata_link *link, unsigned long deadline) | |||
256 | struct ata_port *ap = link->ap; | 239 | struct ata_port *ap = link->ap; |
257 | 240 | ||
258 | qs_reset_channel_logic(ap); | 241 | qs_reset_channel_logic(ap); |
259 | return ata_std_prereset(link, deadline); | 242 | return ata_sff_prereset(link, deadline); |
260 | } | 243 | } |
261 | 244 | ||
262 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | 245 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
@@ -270,8 +253,7 @@ static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | |||
270 | static void qs_error_handler(struct ata_port *ap) | 253 | static void qs_error_handler(struct ata_port *ap) |
271 | { | 254 | { |
272 | qs_enter_reg_mode(ap); | 255 | qs_enter_reg_mode(ap); |
273 | ata_do_eh(ap, qs_prereset, NULL, sata_std_hardreset, | 256 | ata_std_error_handler(ap); |
274 | ata_std_postreset); | ||
275 | } | 257 | } |
276 | 258 | ||
277 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) | 259 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
@@ -321,7 +303,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) | |||
321 | 303 | ||
322 | qs_enter_reg_mode(qc->ap); | 304 | qs_enter_reg_mode(qc->ap); |
323 | if (qc->tf.protocol != ATA_PROT_DMA) { | 305 | if (qc->tf.protocol != ATA_PROT_DMA) { |
324 | ata_qc_prep(qc); | 306 | ata_sff_qc_prep(qc); |
325 | return; | 307 | return; |
326 | } | 308 | } |
327 | 309 | ||
@@ -380,7 +362,7 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc) | |||
380 | } | 362 | } |
381 | 363 | ||
382 | pp->state = qs_state_mmio; | 364 | pp->state = qs_state_mmio; |
383 | return ata_qc_issue_prot(qc); | 365 | return ata_sff_qc_issue(qc); |
384 | } | 366 | } |
385 | 367 | ||
386 | static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status) | 368 | static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status) |
@@ -469,7 +451,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) | |||
469 | * and pretend we knew it was ours.. (ugh). | 451 | * and pretend we knew it was ours.. (ugh). |
470 | * This does not affect packet mode. | 452 | * This does not affect packet mode. |
471 | */ | 453 | */ |
472 | ata_check_status(ap); | 454 | ata_sff_check_status(ap); |
473 | handled = 1; | 455 | handled = 1; |
474 | continue; | 456 | continue; |
475 | } | 457 | } |
@@ -477,7 +459,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) | |||
477 | if (!pp || pp->state != qs_state_mmio) | 459 | if (!pp || pp->state != qs_state_mmio) |
478 | continue; | 460 | continue; |
479 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) | 461 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) |
480 | handled |= ata_host_intr(ap, qc); | 462 | handled |= ata_sff_host_intr(ap, qc); |
481 | } | 463 | } |
482 | } | 464 | } |
483 | return handled; | 465 | return handled; |
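
End of the sata_qstor.c conversion. The scsi_host_template shrinks the same way the ops table does: ATA_BASE_SHT(DRV_NAME) supplies the standard libata boilerplate (queuecommand, slave_configure, and so on) and the driver lists only the limits that differ. A sketch of the resulting template, reusing the QS_* limits from above with an invented driver name:

    static struct scsi_host_template demo_sht = {
            ATA_BASE_SHT("demo"),
            .sg_tablesize   = QS_MAX_PRD,        /* controller's PRD limit */
            .dma_boundary   = QS_DMA_BOUNDARY,
    };
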
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 0b8191b52f97..88bf4212590f 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -60,7 +60,6 @@ enum { | |||
60 | 60 | ||
61 | SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 61 | SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
62 | ATA_FLAG_MMIO, | 62 | ATA_FLAG_MMIO, |
63 | SIL_DFL_LINK_FLAGS = ATA_LFLAG_HRST_TO_RESUME, | ||
64 | 63 | ||
65 | /* | 64 | /* |
66 | * Controller IDs | 65 | * Controller IDs |
@@ -168,54 +167,23 @@ static struct pci_driver sil_pci_driver = { | |||
168 | }; | 167 | }; |
169 | 168 | ||
170 | static struct scsi_host_template sil_sht = { | 169 | static struct scsi_host_template sil_sht = { |
171 | .module = THIS_MODULE, | 170 | ATA_BMDMA_SHT(DRV_NAME), |
172 | .name = DRV_NAME, | ||
173 | .ioctl = ata_scsi_ioctl, | ||
174 | .queuecommand = ata_scsi_queuecmd, | ||
175 | .can_queue = ATA_DEF_QUEUE, | ||
176 | .this_id = ATA_SHT_THIS_ID, | ||
177 | .sg_tablesize = LIBATA_MAX_PRD, | ||
178 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
179 | .emulated = ATA_SHT_EMULATED, | ||
180 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
181 | .proc_name = DRV_NAME, | ||
182 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
183 | .slave_configure = ata_scsi_slave_config, | ||
184 | .slave_destroy = ata_scsi_slave_destroy, | ||
185 | .bios_param = ata_std_bios_param, | ||
186 | }; | 171 | }; |
187 | 172 | ||
188 | static const struct ata_port_operations sil_ops = { | 173 | static struct ata_port_operations sil_ops = { |
174 | .inherits = &ata_bmdma_port_ops, | ||
189 | .dev_config = sil_dev_config, | 175 | .dev_config = sil_dev_config, |
190 | .tf_load = ata_tf_load, | ||
191 | .tf_read = ata_tf_read, | ||
192 | .check_status = ata_check_status, | ||
193 | .exec_command = ata_exec_command, | ||
194 | .dev_select = ata_std_dev_select, | ||
195 | .set_mode = sil_set_mode, | 176 | .set_mode = sil_set_mode, |
196 | .bmdma_setup = ata_bmdma_setup, | ||
197 | .bmdma_start = ata_bmdma_start, | ||
198 | .bmdma_stop = ata_bmdma_stop, | ||
199 | .bmdma_status = ata_bmdma_status, | ||
200 | .qc_prep = ata_qc_prep, | ||
201 | .qc_issue = ata_qc_issue_prot, | ||
202 | .data_xfer = ata_data_xfer, | ||
203 | .freeze = sil_freeze, | 177 | .freeze = sil_freeze, |
204 | .thaw = sil_thaw, | 178 | .thaw = sil_thaw, |
205 | .error_handler = ata_bmdma_error_handler, | ||
206 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
207 | .irq_clear = ata_bmdma_irq_clear, | ||
208 | .irq_on = ata_irq_on, | ||
209 | .scr_read = sil_scr_read, | 179 | .scr_read = sil_scr_read, |
210 | .scr_write = sil_scr_write, | 180 | .scr_write = sil_scr_write, |
211 | .port_start = ata_port_start, | ||
212 | }; | 181 | }; |
213 | 182 | ||
214 | static const struct ata_port_info sil_port_info[] = { | 183 | static const struct ata_port_info sil_port_info[] = { |
215 | /* sil_3112 */ | 184 | /* sil_3112 */ |
216 | { | 185 | { |
217 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, | 186 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, |
218 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
219 | .pio_mask = 0x1f, /* pio0-4 */ | 187 | .pio_mask = 0x1f, /* pio0-4 */ |
220 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 188 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
221 | .udma_mask = ATA_UDMA5, | 189 | .udma_mask = ATA_UDMA5, |
@@ -225,7 +193,6 @@ static const struct ata_port_info sil_port_info[] = { | |||
225 | { | 193 | { |
226 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | | 194 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | |
227 | SIL_FLAG_NO_SATA_IRQ, | 195 | SIL_FLAG_NO_SATA_IRQ, |
228 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
229 | .pio_mask = 0x1f, /* pio0-4 */ | 196 | .pio_mask = 0x1f, /* pio0-4 */ |
230 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 197 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
231 | .udma_mask = ATA_UDMA5, | 198 | .udma_mask = ATA_UDMA5, |
@@ -234,7 +201,6 @@ static const struct ata_port_info sil_port_info[] = { | |||
234 | /* sil_3512 */ | 201 | /* sil_3512 */ |
235 | { | 202 | { |
236 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, | 203 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, |
237 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
238 | .pio_mask = 0x1f, /* pio0-4 */ | 204 | .pio_mask = 0x1f, /* pio0-4 */ |
239 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 205 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
240 | .udma_mask = ATA_UDMA5, | 206 | .udma_mask = ATA_UDMA5, |
@@ -243,7 +209,6 @@ static const struct ata_port_info sil_port_info[] = { | |||
243 | /* sil_3114 */ | 209 | /* sil_3114 */ |
244 | { | 210 | { |
245 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, | 211 | .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, |
246 | .link_flags = SIL_DFL_LINK_FLAGS, | ||
247 | .pio_mask = 0x1f, /* pio0-4 */ | 212 | .pio_mask = 0x1f, /* pio0-4 */ |
248 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 213 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
249 | .udma_mask = ATA_UDMA5, | 214 | .udma_mask = ATA_UDMA5, |
@@ -404,7 +369,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
404 | 369 | ||
405 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | 370 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { |
406 | /* this sometimes happens, just clear IRQ */ | 371 | /* this sometimes happens, just clear IRQ */ |
407 | ata_chk_status(ap); | 372 | ap->ops->sff_check_status(ap); |
408 | return; | 373 | return; |
409 | } | 374 | } |
410 | 375 | ||
@@ -440,15 +405,15 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
440 | } | 405 | } |
441 | 406 | ||
442 | /* check main status, clearing INTRQ */ | 407 | /* check main status, clearing INTRQ */ |
443 | status = ata_chk_status(ap); | 408 | status = ap->ops->sff_check_status(ap); |
444 | if (unlikely(status & ATA_BUSY)) | 409 | if (unlikely(status & ATA_BUSY)) |
445 | goto err_hsm; | 410 | goto err_hsm; |
446 | 411 | ||
447 | /* ack bmdma irq events */ | 412 | /* ack bmdma irq events */ |
448 | ata_bmdma_irq_clear(ap); | 413 | ata_sff_irq_clear(ap); |
449 | 414 | ||
450 | /* kick HSM in the ass */ | 415 | /* kick HSM in the ass */ |
451 | ata_hsm_move(ap, qc, status, 0); | 416 | ata_sff_hsm_move(ap, qc, status, 0); |
452 | 417 | ||
453 | if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) | 418 | if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) |
454 | ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2); | 419 | ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2); |
@@ -515,8 +480,8 @@ static void sil_thaw(struct ata_port *ap) | |||
515 | u32 tmp; | 480 | u32 tmp; |
516 | 481 | ||
517 | /* clear IRQ */ | 482 | /* clear IRQ */ |
518 | ata_chk_status(ap); | 483 | ap->ops->sff_check_status(ap); |
519 | ata_bmdma_irq_clear(ap); | 484 | ata_sff_irq_clear(ap); |
520 | 485 | ||
521 | /* turn on SATA IRQ if supported */ | 486 | /* turn on SATA IRQ if supported */ |
522 | if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) | 487 | if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) |
@@ -690,7 +655,7 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
690 | ioaddr->ctl_addr = mmio_base + sil_port[i].ctl; | 655 | ioaddr->ctl_addr = mmio_base + sil_port[i].ctl; |
691 | ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; | 656 | ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; |
692 | ioaddr->scr_addr = mmio_base + sil_port[i].scr; | 657 | ioaddr->scr_addr = mmio_base + sil_port[i].scr; |
693 | ata_std_ports(ioaddr); | 658 | ata_sff_std_ports(ioaddr); |
694 | 659 | ||
695 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); | 660 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); |
696 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); | 661 | ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); |
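
End of the sata_sil.c conversion; with ATA_BMDMA_SHT() and .inherits = &ata_bmdma_port_ops the driver keeps only its chip-specific hooks. The probe path also switches to the renamed ata_sff_std_ports(), which fills in the individual taskfile register addresses from cmd_addr. A sketch of that setup step; the register offsets here are placeholders, not the sil ones:

    static void demo_setup_ioaddr(struct ata_ioports *ioaddr,
                                  void __iomem *mmio_base)
    {
            ioaddr->cmd_addr   = mmio_base + 0x80;  /* placeholder offsets */
            ioaddr->ctl_addr   = mmio_base + 0x8a;
            ioaddr->bmdma_addr = mmio_base + 0x00;
            ioaddr->scr_addr   = mmio_base + 0x100;

            /* formerly ata_std_ports(): derives data_addr, error_addr,
             * ..., command_addr from cmd_addr */
            ata_sff_std_ports(ioaddr);
    }
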
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index df7988df7908..27a110110077 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -254,7 +254,6 @@ enum { | |||
254 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 254 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
255 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | | 255 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | |
256 | ATA_FLAG_AN | ATA_FLAG_PMP, | 256 | ATA_FLAG_AN | ATA_FLAG_PMP, |
257 | SIL24_COMMON_LFLAGS = ATA_LFLAG_SKIP_D2H_BSY, | ||
258 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ | 257 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ |
259 | 258 | ||
260 | IRQ_STAT_4PORTS = 0xf, | 259 | IRQ_STAT_4PORTS = 0xf, |
@@ -286,45 +285,45 @@ static struct sil24_cerr_info { | |||
286 | "device error via D2H FIS" }, | 285 | "device error via D2H FIS" }, |
287 | [PORT_CERR_SDB] = { AC_ERR_DEV, 0, | 286 | [PORT_CERR_SDB] = { AC_ERR_DEV, 0, |
288 | "device error via SDB FIS" }, | 287 | "device error via SDB FIS" }, |
289 | [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET, | 288 | [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET, |
290 | "error in data FIS" }, | 289 | "error in data FIS" }, |
291 | [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET, | 290 | [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET, |
292 | "failed to transmit command FIS" }, | 291 | "failed to transmit command FIS" }, |
293 | [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 292 | [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, |
294 | "protocol mismatch" }, | 293 | "protocol mismatch" }, |
295 | [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 294 | [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, |
296 | "data directon mismatch" }, | 295 | "data directon mismatch" }, |
297 | [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 296 | [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, |
298 | "ran out of SGEs while writing" }, | 297 | "ran out of SGEs while writing" }, |
299 | [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 298 | [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, |
300 | "ran out of SGEs while reading" }, | 299 | "ran out of SGEs while reading" }, |
301 | [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 300 | [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, |
302 | "invalid data directon for ATAPI CDB" }, | 301 | "invalid data directon for ATAPI CDB" }, |
303 | [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET, | 302 | [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, |
304 | "SGT not on qword boundary" }, | 303 | "SGT not on qword boundary" }, |
305 | [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 304 | [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
306 | "PCI target abort while fetching SGT" }, | 305 | "PCI target abort while fetching SGT" }, |
307 | [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 306 | [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
308 | "PCI master abort while fetching SGT" }, | 307 | "PCI master abort while fetching SGT" }, |
309 | [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 308 | [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
310 | "PCI parity error while fetching SGT" }, | 309 | "PCI parity error while fetching SGT" }, |
311 | [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET, | 310 | [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, |
312 | "PRB not on qword boundary" }, | 311 | "PRB not on qword boundary" }, |
313 | [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 312 | [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
314 | "PCI target abort while fetching PRB" }, | 313 | "PCI target abort while fetching PRB" }, |
315 | [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 314 | [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
316 | "PCI master abort while fetching PRB" }, | 315 | "PCI master abort while fetching PRB" }, |
317 | [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 316 | [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
318 | "PCI parity error while fetching PRB" }, | 317 | "PCI parity error while fetching PRB" }, |
319 | [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 318 | [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
320 | "undefined error while transferring data" }, | 319 | "undefined error while transferring data" }, |
321 | [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 320 | [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
322 | "PCI target abort while transferring data" }, | 321 | "PCI target abort while transferring data" }, |
323 | [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 322 | [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
324 | "PCI master abort while transferring data" }, | 323 | "PCI master abort while transferring data" }, |
325 | [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, | 324 | [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
326 | "PCI parity error while transferring data" }, | 325 | "PCI parity error while transferring data" }, |
327 | [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET, | 326 | [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET, |
328 | "FIS received while sending service FIS" }, | 327 | "FIS received while sending service FIS" }, |
329 | }; | 328 | }; |
330 | 329 | ||
@@ -337,23 +336,26 @@ static struct sil24_cerr_info { | |||
337 | struct sil24_port_priv { | 336 | struct sil24_port_priv { |
338 | union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ | 337 | union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ |
339 | dma_addr_t cmd_block_dma; /* DMA base addr for them */ | 338 | dma_addr_t cmd_block_dma; /* DMA base addr for them */ |
340 | struct ata_taskfile tf; /* Cached taskfile registers */ | ||
341 | int do_port_rst; | 339 | int do_port_rst; |
342 | }; | 340 | }; |
343 | 341 | ||
344 | static void sil24_dev_config(struct ata_device *dev); | 342 | static void sil24_dev_config(struct ata_device *dev); |
345 | static u8 sil24_check_status(struct ata_port *ap); | ||
346 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); | 343 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); |
347 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); | 344 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); |
348 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
349 | static int sil24_qc_defer(struct ata_queued_cmd *qc); | 345 | static int sil24_qc_defer(struct ata_queued_cmd *qc); |
350 | static void sil24_qc_prep(struct ata_queued_cmd *qc); | 346 | static void sil24_qc_prep(struct ata_queued_cmd *qc); |
351 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); | 347 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); |
352 | static void sil24_irq_clear(struct ata_port *ap); | 348 | static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); |
353 | static void sil24_pmp_attach(struct ata_port *ap); | 349 | static void sil24_pmp_attach(struct ata_port *ap); |
354 | static void sil24_pmp_detach(struct ata_port *ap); | 350 | static void sil24_pmp_detach(struct ata_port *ap); |
355 | static void sil24_freeze(struct ata_port *ap); | 351 | static void sil24_freeze(struct ata_port *ap); |
356 | static void sil24_thaw(struct ata_port *ap); | 352 | static void sil24_thaw(struct ata_port *ap); |
353 | static int sil24_softreset(struct ata_link *link, unsigned int *class, | ||
354 | unsigned long deadline); | ||
355 | static int sil24_hardreset(struct ata_link *link, unsigned int *class, | ||
356 | unsigned long deadline); | ||
357 | static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, | ||
358 | unsigned long deadline); | ||
357 | static void sil24_error_handler(struct ata_port *ap); | 359 | static void sil24_error_handler(struct ata_port *ap); |
358 | static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); | 360 | static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); |
359 | static int sil24_port_start(struct ata_port *ap); | 361 | static int sil24_port_start(struct ata_port *ap); |
@@ -386,52 +388,36 @@ static struct pci_driver sil24_pci_driver = { | |||
386 | }; | 388 | }; |
387 | 389 | ||
388 | static struct scsi_host_template sil24_sht = { | 390 | static struct scsi_host_template sil24_sht = { |
389 | .module = THIS_MODULE, | 391 | ATA_NCQ_SHT(DRV_NAME), |
390 | .name = DRV_NAME, | ||
391 | .ioctl = ata_scsi_ioctl, | ||
392 | .queuecommand = ata_scsi_queuecmd, | ||
393 | .change_queue_depth = ata_scsi_change_queue_depth, | ||
394 | .can_queue = SIL24_MAX_CMDS, | 392 | .can_queue = SIL24_MAX_CMDS, |
395 | .this_id = ATA_SHT_THIS_ID, | ||
396 | .sg_tablesize = SIL24_MAX_SGE, | 393 | .sg_tablesize = SIL24_MAX_SGE, |
397 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
398 | .emulated = ATA_SHT_EMULATED, | ||
399 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
400 | .proc_name = DRV_NAME, | ||
401 | .dma_boundary = ATA_DMA_BOUNDARY, | 394 | .dma_boundary = ATA_DMA_BOUNDARY, |
402 | .slave_configure = ata_scsi_slave_config, | ||
403 | .slave_destroy = ata_scsi_slave_destroy, | ||
404 | .bios_param = ata_std_bios_param, | ||
405 | }; | 395 | }; |
406 | 396 | ||
407 | static const struct ata_port_operations sil24_ops = { | 397 | static struct ata_port_operations sil24_ops = { |
408 | .dev_config = sil24_dev_config, | 398 | .inherits = &sata_pmp_port_ops, |
409 | |||
410 | .check_status = sil24_check_status, | ||
411 | .check_altstatus = sil24_check_status, | ||
412 | .dev_select = ata_noop_dev_select, | ||
413 | |||
414 | .tf_read = sil24_tf_read, | ||
415 | 399 | ||
416 | .qc_defer = sil24_qc_defer, | 400 | .qc_defer = sil24_qc_defer, |
417 | .qc_prep = sil24_qc_prep, | 401 | .qc_prep = sil24_qc_prep, |
418 | .qc_issue = sil24_qc_issue, | 402 | .qc_issue = sil24_qc_issue, |
403 | .qc_fill_rtf = sil24_qc_fill_rtf, | ||
419 | 404 | ||
420 | .irq_clear = sil24_irq_clear, | 405 | .freeze = sil24_freeze, |
406 | .thaw = sil24_thaw, | ||
407 | .softreset = sil24_softreset, | ||
408 | .hardreset = sil24_hardreset, | ||
409 | .pmp_softreset = sil24_softreset, | ||
410 | .pmp_hardreset = sil24_pmp_hardreset, | ||
411 | .error_handler = sil24_error_handler, | ||
412 | .post_internal_cmd = sil24_post_internal_cmd, | ||
413 | .dev_config = sil24_dev_config, | ||
421 | 414 | ||
422 | .scr_read = sil24_scr_read, | 415 | .scr_read = sil24_scr_read, |
423 | .scr_write = sil24_scr_write, | 416 | .scr_write = sil24_scr_write, |
424 | |||
425 | .pmp_attach = sil24_pmp_attach, | 417 | .pmp_attach = sil24_pmp_attach, |
426 | .pmp_detach = sil24_pmp_detach, | 418 | .pmp_detach = sil24_pmp_detach, |
427 | 419 | ||
428 | .freeze = sil24_freeze, | ||
429 | .thaw = sil24_thaw, | ||
430 | .error_handler = sil24_error_handler, | ||
431 | .post_internal_cmd = sil24_post_internal_cmd, | ||
432 | |||
433 | .port_start = sil24_port_start, | 420 | .port_start = sil24_port_start, |
434 | |||
435 | #ifdef CONFIG_PM | 421 | #ifdef CONFIG_PM |
436 | .port_resume = sil24_port_resume, | 422 | .port_resume = sil24_port_resume, |
437 | #endif | 423 | #endif |
@@ -449,7 +435,6 @@ static const struct ata_port_info sil24_port_info[] = { | |||
449 | { | 435 | { |
450 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | | 436 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | |
451 | SIL24_FLAG_PCIX_IRQ_WOC, | 437 | SIL24_FLAG_PCIX_IRQ_WOC, |
452 | .link_flags = SIL24_COMMON_LFLAGS, | ||
453 | .pio_mask = 0x1f, /* pio0-4 */ | 438 | .pio_mask = 0x1f, /* pio0-4 */ |
454 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 439 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
455 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 440 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -458,7 +443,6 @@ static const struct ata_port_info sil24_port_info[] = { | |||
458 | /* sil_3132 */ | 443 | /* sil_3132 */ |
459 | { | 444 | { |
460 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), | 445 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), |
461 | .link_flags = SIL24_COMMON_LFLAGS, | ||
462 | .pio_mask = 0x1f, /* pio0-4 */ | 446 | .pio_mask = 0x1f, /* pio0-4 */ |
463 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 447 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
464 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 448 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -467,7 +451,6 @@ static const struct ata_port_info sil24_port_info[] = { | |||
467 | /* sil_3131/sil_3531 */ | 451 | /* sil_3131/sil_3531 */ |
468 | { | 452 | { |
469 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), | 453 | .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), |
470 | .link_flags = SIL24_COMMON_LFLAGS, | ||
471 | .pio_mask = 0x1f, /* pio0-4 */ | 454 | .pio_mask = 0x1f, /* pio0-4 */ |
472 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 455 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
473 | .udma_mask = ATA_UDMA5, /* udma0-5 */ | 456 | .udma_mask = ATA_UDMA5, /* udma0-5 */ |
@@ -482,9 +465,19 @@ static int sil24_tag(int tag) | |||
482 | return tag; | 465 | return tag; |
483 | } | 466 | } |
484 | 467 | ||
468 | static unsigned long sil24_port_offset(struct ata_port *ap) | ||
469 | { | ||
470 | return ap->port_no * PORT_REGS_SIZE; | ||
471 | } | ||
472 | |||
473 | static void __iomem *sil24_port_base(struct ata_port *ap) | ||
474 | { | ||
475 | return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap); | ||
476 | } | ||
477 | |||
485 | static void sil24_dev_config(struct ata_device *dev) | 478 | static void sil24_dev_config(struct ata_device *dev) |
486 | { | 479 | { |
487 | void __iomem *port = dev->link->ap->ioaddr.cmd_addr; | 480 | void __iomem *port = sil24_port_base(dev->link->ap); |
488 | 481 | ||
489 | if (dev->cdb_len == 16) | 482 | if (dev->cdb_len == 16) |
490 | writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); | 483 | writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); |
@@ -494,7 +487,7 @@ static void sil24_dev_config(struct ata_device *dev) | |||
494 | 487 | ||
495 | static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) | 488 | static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) |
496 | { | 489 | { |
497 | void __iomem *port = ap->ioaddr.cmd_addr; | 490 | void __iomem *port = sil24_port_base(ap); |
498 | struct sil24_prb __iomem *prb; | 491 | struct sil24_prb __iomem *prb; |
499 | u8 fis[6 * 4]; | 492 | u8 fis[6 * 4]; |
500 | 493 | ||
@@ -503,12 +496,6 @@ static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) | |||
503 | ata_tf_from_fis(fis, tf); | 496 | ata_tf_from_fis(fis, tf); |
504 | } | 497 | } |
505 | 498 | ||
506 | static u8 sil24_check_status(struct ata_port *ap) | ||
507 | { | ||
508 | struct sil24_port_priv *pp = ap->private_data; | ||
509 | return pp->tf.command; | ||
510 | } | ||
511 | |||
512 | static int sil24_scr_map[] = { | 499 | static int sil24_scr_map[] = { |
513 | [SCR_CONTROL] = 0, | 500 | [SCR_CONTROL] = 0, |
514 | [SCR_STATUS] = 1, | 501 | [SCR_STATUS] = 1, |
@@ -518,7 +505,7 @@ static int sil24_scr_map[] = { | |||
518 | 505 | ||
519 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) | 506 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) |
520 | { | 507 | { |
521 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 508 | void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; |
522 | 509 | ||
523 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | 510 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { |
524 | void __iomem *addr; | 511 | void __iomem *addr; |
@@ -531,7 +518,7 @@ static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) | |||
531 | 518 | ||
532 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | 519 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) |
533 | { | 520 | { |
534 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 521 | void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; |
535 | 522 | ||
536 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | 523 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { |
537 | void __iomem *addr; | 524 | void __iomem *addr; |
@@ -542,15 +529,9 @@ static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | |||
542 | return -EINVAL; | 529 | return -EINVAL; |
543 | } | 530 | } |
544 | 531 | ||
545 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
546 | { | ||
547 | struct sil24_port_priv *pp = ap->private_data; | ||
548 | *tf = pp->tf; | ||
549 | } | ||
550 | |||
551 | static void sil24_config_port(struct ata_port *ap) | 532 | static void sil24_config_port(struct ata_port *ap) |
552 | { | 533 | { |
553 | void __iomem *port = ap->ioaddr.cmd_addr; | 534 | void __iomem *port = sil24_port_base(ap); |
554 | 535 | ||
555 | /* configure IRQ WoC */ | 536 | /* configure IRQ WoC */ |
556 | if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) | 537 | if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) |
@@ -575,7 +556,7 @@ static void sil24_config_port(struct ata_port *ap) | |||
575 | 556 | ||
576 | static void sil24_config_pmp(struct ata_port *ap, int attached) | 557 | static void sil24_config_pmp(struct ata_port *ap, int attached) |
577 | { | 558 | { |
578 | void __iomem *port = ap->ioaddr.cmd_addr; | 559 | void __iomem *port = sil24_port_base(ap); |
579 | 560 | ||
580 | if (attached) | 561 | if (attached) |
581 | writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT); | 562 | writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT); |
@@ -585,7 +566,7 @@ static void sil24_config_pmp(struct ata_port *ap, int attached) | |||
585 | 566 | ||
586 | static void sil24_clear_pmp(struct ata_port *ap) | 567 | static void sil24_clear_pmp(struct ata_port *ap) |
587 | { | 568 | { |
588 | void __iomem *port = ap->ioaddr.cmd_addr; | 569 | void __iomem *port = sil24_port_base(ap); |
589 | int i; | 570 | int i; |
590 | 571 | ||
591 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); | 572 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); |
@@ -600,12 +581,12 @@ static void sil24_clear_pmp(struct ata_port *ap) | |||
600 | 581 | ||
601 | static int sil24_init_port(struct ata_port *ap) | 582 | static int sil24_init_port(struct ata_port *ap) |
602 | { | 583 | { |
603 | void __iomem *port = ap->ioaddr.cmd_addr; | 584 | void __iomem *port = sil24_port_base(ap); |
604 | struct sil24_port_priv *pp = ap->private_data; | 585 | struct sil24_port_priv *pp = ap->private_data; |
605 | u32 tmp; | 586 | u32 tmp; |
606 | 587 | ||
607 | /* clear PMP error status */ | 588 | /* clear PMP error status */ |
608 | if (ap->nr_pmp_links) | 589 | if (sata_pmp_attached(ap)) |
609 | sil24_clear_pmp(ap); | 590 | sil24_clear_pmp(ap); |
610 | 591 | ||
611 | writel(PORT_CS_INIT, port + PORT_CTRL_STAT); | 592 | writel(PORT_CS_INIT, port + PORT_CTRL_STAT); |
@@ -616,7 +597,7 @@ static int sil24_init_port(struct ata_port *ap) | |||
616 | 597 | ||
617 | if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { | 598 | if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { |
618 | pp->do_port_rst = 1; | 599 | pp->do_port_rst = 1; |
619 | ap->link.eh_context.i.action |= ATA_EH_HARDRESET; | 600 | ap->link.eh_context.i.action |= ATA_EH_RESET; |
620 | return -EIO; | 601 | return -EIO; |
621 | } | 602 | } |
622 | 603 | ||
@@ -628,7 +609,7 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, | |||
628 | int is_cmd, u32 ctrl, | 609 | int is_cmd, u32 ctrl, |
629 | unsigned long timeout_msec) | 610 | unsigned long timeout_msec) |
630 | { | 611 | { |
631 | void __iomem *port = ap->ioaddr.cmd_addr; | 612 | void __iomem *port = sil24_port_base(ap); |
632 | struct sil24_port_priv *pp = ap->private_data; | 613 | struct sil24_port_priv *pp = ap->private_data; |
633 | struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; | 614 | struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; |
634 | dma_addr_t paddr = pp->cmd_block_dma; | 615 | dma_addr_t paddr = pp->cmd_block_dma; |
@@ -670,10 +651,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, | |||
670 | return rc; | 651 | return rc; |
671 | } | 652 | } |
672 | 653 | ||
673 | static int sil24_do_softreset(struct ata_link *link, unsigned int *class, | 654 | static int sil24_softreset(struct ata_link *link, unsigned int *class, |
674 | int pmp, unsigned long deadline) | 655 | unsigned long deadline) |
675 | { | 656 | { |
676 | struct ata_port *ap = link->ap; | 657 | struct ata_port *ap = link->ap; |
658 | int pmp = sata_srst_pmp(link); | ||
677 | unsigned long timeout_msec = 0; | 659 | unsigned long timeout_msec = 0; |
678 | struct ata_taskfile tf; | 660 | struct ata_taskfile tf; |
679 | const char *reason; | 661 | const char *reason; |
@@ -681,12 +663,6 @@ static int sil24_do_softreset(struct ata_link *link, unsigned int *class, | |||
681 | 663 | ||
682 | DPRINTK("ENTER\n"); | 664 | DPRINTK("ENTER\n"); |
683 | 665 | ||
684 | if (ata_link_offline(link)) { | ||
685 | DPRINTK("PHY reports no device\n"); | ||
686 | *class = ATA_DEV_NONE; | ||
687 | goto out; | ||
688 | } | ||
689 | |||
690 | /* put the port into known state */ | 666 | /* put the port into known state */ |
691 | if (sil24_init_port(ap)) { | 667 | if (sil24_init_port(ap)) { |
692 | reason = "port not ready"; | 668 | reason = "port not ready"; |
@@ -711,10 +687,6 @@ static int sil24_do_softreset(struct ata_link *link, unsigned int *class, | |||
711 | sil24_read_tf(ap, 0, &tf); | 687 | sil24_read_tf(ap, 0, &tf); |
712 | *class = ata_dev_classify(&tf); | 688 | *class = ata_dev_classify(&tf); |
713 | 689 | ||
714 | if (*class == ATA_DEV_UNKNOWN) | ||
715 | *class = ATA_DEV_NONE; | ||
716 | |||
717 | out: | ||
718 | DPRINTK("EXIT, class=%u\n", *class); | 690 | DPRINTK("EXIT, class=%u\n", *class); |
719 | return 0; | 691 | return 0; |
720 | 692 | ||
@@ -723,17 +695,11 @@ static int sil24_do_softreset(struct ata_link *link, unsigned int *class, | |||
723 | return -EIO; | 695 | return -EIO; |
724 | } | 696 | } |
725 | 697 | ||
726 | static int sil24_softreset(struct ata_link *link, unsigned int *class, | ||
727 | unsigned long deadline) | ||
728 | { | ||
729 | return sil24_do_softreset(link, class, SATA_PMP_CTRL_PORT, deadline); | ||
730 | } | ||
731 | |||
732 | static int sil24_hardreset(struct ata_link *link, unsigned int *class, | 698 | static int sil24_hardreset(struct ata_link *link, unsigned int *class, |
733 | unsigned long deadline) | 699 | unsigned long deadline) |
734 | { | 700 | { |
735 | struct ata_port *ap = link->ap; | 701 | struct ata_port *ap = link->ap; |
736 | void __iomem *port = ap->ioaddr.cmd_addr; | 702 | void __iomem *port = sil24_port_base(ap); |
737 | struct sil24_port_priv *pp = ap->private_data; | 703 | struct sil24_port_priv *pp = ap->private_data; |
738 | int did_port_rst = 0; | 704 | int did_port_rst = 0; |
739 | const char *reason; | 705 | const char *reason; |
@@ -911,7 +877,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) | |||
911 | { | 877 | { |
912 | struct ata_port *ap = qc->ap; | 878 | struct ata_port *ap = qc->ap; |
913 | struct sil24_port_priv *pp = ap->private_data; | 879 | struct sil24_port_priv *pp = ap->private_data; |
914 | void __iomem *port = ap->ioaddr.cmd_addr; | 880 | void __iomem *port = sil24_port_base(ap); |
915 | unsigned int tag = sil24_tag(qc->tag); | 881 | unsigned int tag = sil24_tag(qc->tag); |
916 | dma_addr_t paddr; | 882 | dma_addr_t paddr; |
917 | void __iomem *activate; | 883 | void __iomem *activate; |
@@ -925,9 +891,10 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) | |||
925 | return 0; | 891 | return 0; |
926 | } | 892 | } |
927 | 893 | ||
928 | static void sil24_irq_clear(struct ata_port *ap) | 894 | static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc) |
929 | { | 895 | { |
930 | /* unused */ | 896 | sil24_read_tf(qc->ap, qc->tag, &qc->result_tf); |
897 | return true; | ||
931 | } | 898 | } |
932 | 899 | ||
933 | static void sil24_pmp_attach(struct ata_port *ap) | 900 | static void sil24_pmp_attach(struct ata_port *ap) |
@@ -942,12 +909,6 @@ static void sil24_pmp_detach(struct ata_port *ap) | |||
942 | sil24_config_pmp(ap, 0); | 909 | sil24_config_pmp(ap, 0); |
943 | } | 910 | } |
944 | 911 | ||
945 | static int sil24_pmp_softreset(struct ata_link *link, unsigned int *class, | ||
946 | unsigned long deadline) | ||
947 | { | ||
948 | return sil24_do_softreset(link, class, link->pmp, deadline); | ||
949 | } | ||
950 | |||
951 | static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, | 912 | static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, |
952 | unsigned long deadline) | 913 | unsigned long deadline) |
953 | { | 914 | { |
@@ -960,12 +921,12 @@ static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, | |||
960 | return rc; | 921 | return rc; |
961 | } | 922 | } |
962 | 923 | ||
963 | return sata_pmp_std_hardreset(link, class, deadline); | 924 | return sata_std_hardreset(link, class, deadline); |
964 | } | 925 | } |
965 | 926 | ||
966 | static void sil24_freeze(struct ata_port *ap) | 927 | static void sil24_freeze(struct ata_port *ap) |
967 | { | 928 | { |
968 | void __iomem *port = ap->ioaddr.cmd_addr; | 929 | void __iomem *port = sil24_port_base(ap); |
969 | 930 | ||
970 | /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear | 931 | /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear |
971 | * PORT_IRQ_ENABLE instead. | 932 | * PORT_IRQ_ENABLE instead. |
@@ -975,7 +936,7 @@ static void sil24_freeze(struct ata_port *ap) | |||
975 | 936 | ||
976 | static void sil24_thaw(struct ata_port *ap) | 937 | static void sil24_thaw(struct ata_port *ap) |
977 | { | 938 | { |
978 | void __iomem *port = ap->ioaddr.cmd_addr; | 939 | void __iomem *port = sil24_port_base(ap); |
979 | u32 tmp; | 940 | u32 tmp; |
980 | 941 | ||
981 | /* clear IRQ */ | 942 | /* clear IRQ */ |
@@ -988,7 +949,7 @@ static void sil24_thaw(struct ata_port *ap) | |||
988 | 949 | ||
989 | static void sil24_error_intr(struct ata_port *ap) | 950 | static void sil24_error_intr(struct ata_port *ap) |
990 | { | 951 | { |
991 | void __iomem *port = ap->ioaddr.cmd_addr; | 952 | void __iomem *port = sil24_port_base(ap); |
992 | struct sil24_port_priv *pp = ap->private_data; | 953 | struct sil24_port_priv *pp = ap->private_data; |
993 | struct ata_queued_cmd *qc = NULL; | 954 | struct ata_queued_cmd *qc = NULL; |
994 | struct ata_link *link; | 955 | struct ata_link *link; |
@@ -1022,7 +983,7 @@ static void sil24_error_intr(struct ata_port *ap) | |||
1022 | 983 | ||
1023 | if (irq_stat & PORT_IRQ_UNK_FIS) { | 984 | if (irq_stat & PORT_IRQ_UNK_FIS) { |
1024 | ehi->err_mask |= AC_ERR_HSM; | 985 | ehi->err_mask |= AC_ERR_HSM; |
1025 | ehi->action |= ATA_EH_SOFTRESET; | 986 | ehi->action |= ATA_EH_RESET; |
1026 | ata_ehi_push_desc(ehi, "unknown FIS"); | 987 | ata_ehi_push_desc(ehi, "unknown FIS"); |
1027 | freeze = 1; | 988 | freeze = 1; |
1028 | } | 989 | } |
@@ -1043,14 +1004,14 @@ static void sil24_error_intr(struct ata_port *ap) | |||
1043 | */ | 1004 | */ |
1044 | if (ap->nr_active_links >= 3) { | 1005 | if (ap->nr_active_links >= 3) { |
1045 | ehi->err_mask |= AC_ERR_OTHER; | 1006 | ehi->err_mask |= AC_ERR_OTHER; |
1046 | ehi->action |= ATA_EH_HARDRESET; | 1007 | ehi->action |= ATA_EH_RESET; |
1047 | ata_ehi_push_desc(ehi, "PMP DMA CS errata"); | 1008 | ata_ehi_push_desc(ehi, "PMP DMA CS errata"); |
1048 | pp->do_port_rst = 1; | 1009 | pp->do_port_rst = 1; |
1049 | freeze = 1; | 1010 | freeze = 1; |
1050 | } | 1011 | } |
1051 | 1012 | ||
1052 | /* find out the offending link and qc */ | 1013 | /* find out the offending link and qc */ |
1053 | if (ap->nr_pmp_links) { | 1014 | if (sata_pmp_attached(ap)) { |
1054 | context = readl(port + PORT_CONTEXT); | 1015 | context = readl(port + PORT_CONTEXT); |
1055 | pmp = (context >> 5) & 0xf; | 1016 | pmp = (context >> 5) & 0xf; |
1056 | 1017 | ||
@@ -1064,7 +1025,7 @@ static void sil24_error_intr(struct ata_port *ap) | |||
1064 | irq_stat); | 1025 | irq_stat); |
1065 | } else { | 1026 | } else { |
1066 | err_mask |= AC_ERR_HSM; | 1027 | err_mask |= AC_ERR_HSM; |
1067 | action |= ATA_EH_HARDRESET; | 1028 | action |= ATA_EH_RESET; |
1068 | freeze = 1; | 1029 | freeze = 1; |
1069 | } | 1030 | } |
1070 | } else | 1031 | } else |
@@ -1078,28 +1039,27 @@ static void sil24_error_intr(struct ata_port *ap) | |||
1078 | if (ci && ci->desc) { | 1039 | if (ci && ci->desc) { |
1079 | err_mask |= ci->err_mask; | 1040 | err_mask |= ci->err_mask; |
1080 | action |= ci->action; | 1041 | action |= ci->action; |
1081 | if (action & ATA_EH_RESET_MASK) | 1042 | if (action & ATA_EH_RESET) |
1082 | freeze = 1; | 1043 | freeze = 1; |
1083 | ata_ehi_push_desc(ehi, "%s", ci->desc); | 1044 | ata_ehi_push_desc(ehi, "%s", ci->desc); |
1084 | } else { | 1045 | } else { |
1085 | err_mask |= AC_ERR_OTHER; | 1046 | err_mask |= AC_ERR_OTHER; |
1086 | action |= ATA_EH_SOFTRESET; | 1047 | action |= ATA_EH_RESET; |
1087 | freeze = 1; | 1048 | freeze = 1; |
1088 | ata_ehi_push_desc(ehi, "unknown command error %d", | 1049 | ata_ehi_push_desc(ehi, "unknown command error %d", |
1089 | cerr); | 1050 | cerr); |
1090 | } | 1051 | } |
1091 | 1052 | ||
1092 | /* record error info */ | 1053 | /* record error info */ |
1093 | if (qc) { | 1054 | if (qc) |
1094 | sil24_read_tf(ap, qc->tag, &pp->tf); | ||
1095 | qc->err_mask |= err_mask; | 1055 | qc->err_mask |= err_mask; |
1096 | } else | 1056 | else |
1097 | ehi->err_mask |= err_mask; | 1057 | ehi->err_mask |= err_mask; |
1098 | 1058 | ||
1099 | ehi->action |= action; | 1059 | ehi->action |= action; |
1100 | 1060 | ||
1101 | /* if PMP, resume */ | 1061 | /* if PMP, resume */ |
1102 | if (ap->nr_pmp_links) | 1062 | if (sata_pmp_attached(ap)) |
1103 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT); | 1063 | writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT); |
1104 | } | 1064 | } |
1105 | 1065 | ||
@@ -1114,18 +1074,9 @@ static void sil24_error_intr(struct ata_port *ap) | |||
1114 | } | 1074 | } |
1115 | } | 1075 | } |
1116 | 1076 | ||
1117 | static void sil24_finish_qc(struct ata_queued_cmd *qc) | ||
1118 | { | ||
1119 | struct ata_port *ap = qc->ap; | ||
1120 | struct sil24_port_priv *pp = ap->private_data; | ||
1121 | |||
1122 | if (qc->flags & ATA_QCFLAG_RESULT_TF) | ||
1123 | sil24_read_tf(ap, qc->tag, &pp->tf); | ||
1124 | } | ||
1125 | |||
1126 | static inline void sil24_host_intr(struct ata_port *ap) | 1077 | static inline void sil24_host_intr(struct ata_port *ap) |
1127 | { | 1078 | { |
1128 | void __iomem *port = ap->ioaddr.cmd_addr; | 1079 | void __iomem *port = sil24_port_base(ap); |
1129 | u32 slot_stat, qc_active; | 1080 | u32 slot_stat, qc_active; |
1130 | int rc; | 1081 | int rc; |
1131 | 1082 | ||
@@ -1147,13 +1098,13 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
1147 | } | 1098 | } |
1148 | 1099 | ||
1149 | qc_active = slot_stat & ~HOST_SSTAT_ATTN; | 1100 | qc_active = slot_stat & ~HOST_SSTAT_ATTN; |
1150 | rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); | 1101 | rc = ata_qc_complete_multiple(ap, qc_active); |
1151 | if (rc > 0) | 1102 | if (rc > 0) |
1152 | return; | 1103 | return; |
1153 | if (rc < 0) { | 1104 | if (rc < 0) { |
1154 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1105 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1155 | ehi->err_mask |= AC_ERR_HSM; | 1106 | ehi->err_mask |= AC_ERR_HSM; |
1156 | ehi->action |= ATA_EH_SOFTRESET; | 1107 | ehi->action |= ATA_EH_RESET; |
1157 | ata_port_freeze(ap); | 1108 | ata_port_freeze(ap); |
1158 | return; | 1109 | return; |
1159 | } | 1110 | } |
@@ -1209,11 +1160,7 @@ static void sil24_error_handler(struct ata_port *ap) | |||
1209 | if (sil24_init_port(ap)) | 1160 | if (sil24_init_port(ap)) |
1210 | ata_eh_freeze_port(ap); | 1161 | ata_eh_freeze_port(ap); |
1211 | 1162 | ||
1212 | /* perform recovery */ | 1163 | sata_pmp_error_handler(ap); |
1213 | sata_pmp_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset, | ||
1214 | ata_std_postreset, sata_pmp_std_prereset, | ||
1215 | sil24_pmp_softreset, sil24_pmp_hardreset, | ||
1216 | sata_pmp_std_postreset); | ||
1217 | 1164 | ||
1218 | pp->do_port_rst = 0; | 1165 | pp->do_port_rst = 0; |
1219 | } | 1166 | } |
@@ -1239,8 +1186,6 @@ static int sil24_port_start(struct ata_port *ap) | |||
1239 | if (!pp) | 1186 | if (!pp) |
1240 | return -ENOMEM; | 1187 | return -ENOMEM; |
1241 | 1188 | ||
1242 | pp->tf.command = ATA_DRDY; | ||
1243 | |||
1244 | cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); | 1189 | cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); |
1245 | if (!cb) | 1190 | if (!cb) |
1246 | return -ENOMEM; | 1191 | return -ENOMEM; |
@@ -1251,6 +1196,9 @@ static int sil24_port_start(struct ata_port *ap) | |||
1251 | 1196 | ||
1252 | ap->private_data = pp; | 1197 | ap->private_data = pp; |
1253 | 1198 | ||
1199 | ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); | ||
1200 | ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port"); | ||
1201 | |||
1254 | return 0; | 1202 | return 0; |
1255 | } | 1203 | } |
1256 | 1204 | ||
@@ -1269,7 +1217,8 @@ static void sil24_init_controller(struct ata_host *host) | |||
1269 | /* init ports */ | 1217 | /* init ports */ |
1270 | for (i = 0; i < host->n_ports; i++) { | 1218 | for (i = 0; i < host->n_ports; i++) { |
1271 | struct ata_port *ap = host->ports[i]; | 1219 | struct ata_port *ap = host->ports[i]; |
1272 | void __iomem *port = ap->ioaddr.cmd_addr; | 1220 | void __iomem *port = sil24_port_base(ap); |
1221 | |||
1273 | 1222 | ||
1274 | /* Initial PHY setting */ | 1223 | /* Initial PHY setting */ |
1275 | writel(0x20c, port + PORT_PHY_CFG); | 1224 | writel(0x20c, port + PORT_PHY_CFG); |
@@ -1302,7 +1251,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1302 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 1251 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
1303 | void __iomem * const *iomap; | 1252 | void __iomem * const *iomap; |
1304 | struct ata_host *host; | 1253 | struct ata_host *host; |
1305 | int i, rc; | 1254 | int rc; |
1306 | u32 tmp; | 1255 | u32 tmp; |
1307 | 1256 | ||
1308 | /* cause link error if sil24_cmd_block is sized wrongly */ | 1257 | /* cause link error if sil24_cmd_block is sized wrongly */ |
@@ -1342,18 +1291,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1342 | return -ENOMEM; | 1291 | return -ENOMEM; |
1343 | host->iomap = iomap; | 1292 | host->iomap = iomap; |
1344 | 1293 | ||
1345 | for (i = 0; i < host->n_ports; i++) { | ||
1346 | struct ata_port *ap = host->ports[i]; | ||
1347 | size_t offset = ap->port_no * PORT_REGS_SIZE; | ||
1348 | void __iomem *port = iomap[SIL24_PORT_BAR] + offset; | ||
1349 | |||
1350 | host->ports[i]->ioaddr.cmd_addr = port; | ||
1351 | host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL; | ||
1352 | |||
1353 | ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); | ||
1354 | ata_port_pbar_desc(ap, SIL24_PORT_BAR, offset, "port"); | ||
1355 | } | ||
1356 | |||
1357 | /* configure and activate the device */ | 1294 | /* configure and activate the device */ |
1358 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | 1295 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { |
1359 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | 1296 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); |
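The sil24 hunks above replace every ap->ioaddr.cmd_addr lookup with sil24_port_base() and move the per-port PBAR descriptions into sil24_port_start(), which is what lets the whole per-port loop in sil24_init_one() disappear. The helper bodies are outside this excerpt; a minimal sketch, assuming they simply recompute the offset arithmetic deleted from sil24_init_one(), looks like this:

        /* Sketch only -- the real helpers are not shown in this diff.  They
         * rederive the per-port MMIO base from the port number, mirroring the
         * "port_no * PORT_REGS_SIZE" math deleted from sil24_init_one().
         */
        static unsigned long sil24_port_offset(struct ata_port *ap)
        {
                return ap->port_no * PORT_REGS_SIZE;
        }

        static void __iomem *sil24_port_base(struct ata_port *ap)
        {
                return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
        }

With the base derived on demand, the driver no longer has to park a non-SFF register window in the SFF ioaddr fields.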
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index a01260a56432..6b8e45ba32e8 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -86,45 +86,13 @@ static struct pci_driver sis_pci_driver = { | |||
86 | }; | 86 | }; |
87 | 87 | ||
88 | static struct scsi_host_template sis_sht = { | 88 | static struct scsi_host_template sis_sht = { |
89 | .module = THIS_MODULE, | 89 | ATA_BMDMA_SHT(DRV_NAME), |
90 | .name = DRV_NAME, | ||
91 | .ioctl = ata_scsi_ioctl, | ||
92 | .queuecommand = ata_scsi_queuecmd, | ||
93 | .can_queue = ATA_DEF_QUEUE, | ||
94 | .this_id = ATA_SHT_THIS_ID, | ||
95 | .sg_tablesize = LIBATA_MAX_PRD, | ||
96 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
97 | .emulated = ATA_SHT_EMULATED, | ||
98 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
99 | .proc_name = DRV_NAME, | ||
100 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
101 | .slave_configure = ata_scsi_slave_config, | ||
102 | .slave_destroy = ata_scsi_slave_destroy, | ||
103 | .bios_param = ata_std_bios_param, | ||
104 | }; | 90 | }; |
105 | 91 | ||
106 | static const struct ata_port_operations sis_ops = { | 92 | static struct ata_port_operations sis_ops = { |
107 | .tf_load = ata_tf_load, | 93 | .inherits = &ata_bmdma_port_ops, |
108 | .tf_read = ata_tf_read, | ||
109 | .check_status = ata_check_status, | ||
110 | .exec_command = ata_exec_command, | ||
111 | .dev_select = ata_std_dev_select, | ||
112 | .bmdma_setup = ata_bmdma_setup, | ||
113 | .bmdma_start = ata_bmdma_start, | ||
114 | .bmdma_stop = ata_bmdma_stop, | ||
115 | .bmdma_status = ata_bmdma_status, | ||
116 | .qc_prep = ata_qc_prep, | ||
117 | .qc_issue = ata_qc_issue_prot, | ||
118 | .data_xfer = ata_data_xfer, | ||
119 | .freeze = ata_bmdma_freeze, | ||
120 | .thaw = ata_bmdma_thaw, | ||
121 | .error_handler = ata_bmdma_error_handler, | ||
122 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
123 | .irq_clear = ata_bmdma_irq_clear, | ||
124 | .irq_on = ata_irq_on, | ||
125 | .scr_read = sis_scr_read, | 94 | .scr_read = sis_scr_read, |
126 | .scr_write = sis_scr_write, | 95 | .scr_write = sis_scr_write, |
127 | .port_start = ata_port_start, | ||
128 | }; | 96 | }; |
129 | 97 | ||
130 | static const struct ata_port_info sis_port_info = { | 98 | static const struct ata_port_info sis_port_info = { |
@@ -341,7 +309,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
341 | break; | 309 | break; |
342 | } | 310 | } |
343 | 311 | ||
344 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 312 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
345 | if (rc) | 313 | if (rc) |
346 | return rc; | 314 | return rc; |
347 | 315 | ||
@@ -359,8 +327,8 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
359 | 327 | ||
360 | pci_set_master(pdev); | 328 | pci_set_master(pdev); |
361 | pci_intx(pdev, 1); | 329 | pci_intx(pdev, 1); |
362 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 330 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
363 | &sis_sht); | 331 | IRQF_SHARED, &sis_sht); |
364 | } | 332 | } |
365 | 333 | ||
366 | static int __init sis_init(void) | 334 | static int __init sis_init(void) |
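The sis conversion above is the template the remaining SFF drivers in this series follow: the hand-rolled scsi_host_template collapses into ATA_BMDMA_SHT(), and the ops table keeps only the chip-specific hooks (scr_read/scr_write here) while picking up everything else through .inherits = &ata_bmdma_port_ops. Inheritance is resolved slot by slot when the host is registered: any hook the driver leaves NULL is filled from the parent chain, which is also why the ops tables lose their const in these hunks. A rough illustration of that resolution, simplified and not the actual libata-core code:

        /* Illustrative sketch of slot-by-slot ops inheritance.  Treats the
         * ops structure as an array of hook pointers; the real code walks the
         * ->inherits chain and is more careful, but the idea is the same.
         */
        static void inherit_ops(void **child, void * const *parent, size_t nslots)
        {
                size_t i;

                for (i = 0; i < nslots; i++)
                        if (!child[i])          /* hook not overridden by the driver */
                                child[i] = parent[i];
        }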
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 019e367b59fc..16aa6839aa5a 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -233,7 +233,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc) | |||
233 | 233 | ||
234 | /* issue r/w command if this is not a ATA DMA command*/ | 234 | /* issue r/w command if this is not a ATA DMA command*/ |
235 | if (qc->tf.protocol != ATA_PROT_DMA) | 235 | if (qc->tf.protocol != ATA_PROT_DMA) |
236 | ap->ops->exec_command(ap, &qc->tf); | 236 | ap->ops->sff_exec_command(ap, &qc->tf); |
237 | } | 237 | } |
238 | 238 | ||
239 | /** | 239 | /** |
@@ -269,7 +269,7 @@ static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) | |||
269 | and the start command. */ | 269 | and the start command. */ |
270 | /* issue r/w command if the access is to ATA*/ | 270 | /* issue r/w command if the access is to ATA*/ |
271 | if (qc->tf.protocol == ATA_PROT_DMA) | 271 | if (qc->tf.protocol == ATA_PROT_DMA) |
272 | ap->ops->exec_command(ap, &qc->tf); | 272 | ap->ops->sff_exec_command(ap, &qc->tf); |
273 | } | 273 | } |
274 | 274 | ||
275 | 275 | ||
@@ -327,50 +327,23 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start, | |||
327 | 327 | ||
328 | 328 | ||
329 | static struct scsi_host_template k2_sata_sht = { | 329 | static struct scsi_host_template k2_sata_sht = { |
330 | .module = THIS_MODULE, | 330 | ATA_BMDMA_SHT(DRV_NAME), |
331 | .name = DRV_NAME, | ||
332 | .ioctl = ata_scsi_ioctl, | ||
333 | .queuecommand = ata_scsi_queuecmd, | ||
334 | .can_queue = ATA_DEF_QUEUE, | ||
335 | .this_id = ATA_SHT_THIS_ID, | ||
336 | .sg_tablesize = LIBATA_MAX_PRD, | ||
337 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
338 | .emulated = ATA_SHT_EMULATED, | ||
339 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
340 | .proc_name = DRV_NAME, | ||
341 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
342 | .slave_configure = ata_scsi_slave_config, | ||
343 | .slave_destroy = ata_scsi_slave_destroy, | ||
344 | #ifdef CONFIG_PPC_OF | 331 | #ifdef CONFIG_PPC_OF |
345 | .proc_info = k2_sata_proc_info, | 332 | .proc_info = k2_sata_proc_info, |
346 | #endif | 333 | #endif |
347 | .bios_param = ata_std_bios_param, | ||
348 | }; | 334 | }; |
349 | 335 | ||
350 | 336 | ||
351 | static const struct ata_port_operations k2_sata_ops = { | 337 | static struct ata_port_operations k2_sata_ops = { |
352 | .tf_load = k2_sata_tf_load, | 338 | .inherits = &ata_bmdma_port_ops, |
353 | .tf_read = k2_sata_tf_read, | 339 | .sff_tf_load = k2_sata_tf_load, |
354 | .check_status = k2_stat_check_status, | 340 | .sff_tf_read = k2_sata_tf_read, |
355 | .exec_command = ata_exec_command, | 341 | .sff_check_status = k2_stat_check_status, |
356 | .dev_select = ata_std_dev_select, | ||
357 | .check_atapi_dma = k2_sata_check_atapi_dma, | 342 | .check_atapi_dma = k2_sata_check_atapi_dma, |
358 | .bmdma_setup = k2_bmdma_setup_mmio, | 343 | .bmdma_setup = k2_bmdma_setup_mmio, |
359 | .bmdma_start = k2_bmdma_start_mmio, | 344 | .bmdma_start = k2_bmdma_start_mmio, |
360 | .bmdma_stop = ata_bmdma_stop, | ||
361 | .bmdma_status = ata_bmdma_status, | ||
362 | .qc_prep = ata_qc_prep, | ||
363 | .qc_issue = ata_qc_issue_prot, | ||
364 | .data_xfer = ata_data_xfer, | ||
365 | .freeze = ata_bmdma_freeze, | ||
366 | .thaw = ata_bmdma_thaw, | ||
367 | .error_handler = ata_bmdma_error_handler, | ||
368 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
369 | .irq_clear = ata_bmdma_irq_clear, | ||
370 | .irq_on = ata_irq_on, | ||
371 | .scr_read = k2_sata_scr_read, | 345 | .scr_read = k2_sata_scr_read, |
372 | .scr_write = k2_sata_scr_write, | 346 | .scr_write = k2_sata_scr_write, |
373 | .port_start = ata_port_start, | ||
374 | }; | 347 | }; |
375 | 348 | ||
376 | static const struct ata_port_info k2_port_info[] = { | 349 | static const struct ata_port_info k2_port_info[] = { |
@@ -519,8 +492,8 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
519 | writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); | 492 | writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); |
520 | 493 | ||
521 | pci_set_master(pdev); | 494 | pci_set_master(pdev); |
522 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 495 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
523 | &k2_sata_sht); | 496 | IRQF_SHARED, &k2_sata_sht); |
524 | } | 497 | } |
525 | 498 | ||
526 | /* 0x240 is device ID for Apple K2 device | 499 | /* 0x240 is device ID for Apple K2 device |
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index e3d56bc6726d..ec04b8d3c791 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c | |||
@@ -232,40 +232,30 @@ static void pdc20621_get_from_dimm(struct ata_host *host, | |||
232 | static void pdc20621_put_to_dimm(struct ata_host *host, | 232 | static void pdc20621_put_to_dimm(struct ata_host *host, |
233 | void *psource, u32 offset, u32 size); | 233 | void *psource, u32 offset, u32 size); |
234 | static void pdc20621_irq_clear(struct ata_port *ap); | 234 | static void pdc20621_irq_clear(struct ata_port *ap); |
235 | static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); | 235 | static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc); |
236 | 236 | ||
237 | 237 | ||
238 | static struct scsi_host_template pdc_sata_sht = { | 238 | static struct scsi_host_template pdc_sata_sht = { |
239 | .module = THIS_MODULE, | 239 | ATA_BASE_SHT(DRV_NAME), |
240 | .name = DRV_NAME, | ||
241 | .ioctl = ata_scsi_ioctl, | ||
242 | .queuecommand = ata_scsi_queuecmd, | ||
243 | .can_queue = ATA_DEF_QUEUE, | ||
244 | .this_id = ATA_SHT_THIS_ID, | ||
245 | .sg_tablesize = LIBATA_MAX_PRD, | 240 | .sg_tablesize = LIBATA_MAX_PRD, |
246 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
247 | .emulated = ATA_SHT_EMULATED, | ||
248 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
249 | .proc_name = DRV_NAME, | ||
250 | .dma_boundary = ATA_DMA_BOUNDARY, | 241 | .dma_boundary = ATA_DMA_BOUNDARY, |
251 | .slave_configure = ata_scsi_slave_config, | ||
252 | .slave_destroy = ata_scsi_slave_destroy, | ||
253 | .bios_param = ata_std_bios_param, | ||
254 | }; | 242 | }; |
255 | 243 | ||
256 | static const struct ata_port_operations pdc_20621_ops = { | 244 | /* TODO: inherit from base port_ops after converting to new EH */ |
257 | .tf_load = pdc_tf_load_mmio, | 245 | static struct ata_port_operations pdc_20621_ops = { |
258 | .tf_read = ata_tf_read, | 246 | .sff_tf_load = pdc_tf_load_mmio, |
259 | .check_status = ata_check_status, | 247 | .sff_tf_read = ata_sff_tf_read, |
260 | .exec_command = pdc_exec_command_mmio, | 248 | .sff_check_status = ata_sff_check_status, |
261 | .dev_select = ata_std_dev_select, | 249 | .sff_exec_command = pdc_exec_command_mmio, |
250 | .sff_dev_select = ata_sff_dev_select, | ||
262 | .phy_reset = pdc_20621_phy_reset, | 251 | .phy_reset = pdc_20621_phy_reset, |
263 | .qc_prep = pdc20621_qc_prep, | 252 | .qc_prep = pdc20621_qc_prep, |
264 | .qc_issue = pdc20621_qc_issue_prot, | 253 | .qc_issue = pdc20621_qc_issue, |
265 | .data_xfer = ata_data_xfer, | 254 | .qc_fill_rtf = ata_sff_qc_fill_rtf, |
255 | .sff_data_xfer = ata_sff_data_xfer, | ||
266 | .eng_timeout = pdc_eng_timeout, | 256 | .eng_timeout = pdc_eng_timeout, |
267 | .irq_clear = pdc20621_irq_clear, | 257 | .sff_irq_clear = pdc20621_irq_clear, |
268 | .irq_on = ata_irq_on, | 258 | .sff_irq_on = ata_sff_irq_on, |
269 | .port_start = pdc_port_start, | 259 | .port_start = pdc_port_start, |
270 | }; | 260 | }; |
271 | 261 | ||
@@ -475,7 +465,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) | |||
475 | void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; | 465 | void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; |
476 | unsigned int portno = ap->port_no; | 466 | unsigned int portno = ap->port_no; |
477 | unsigned int i, si, idx, total_len = 0, sgt_len; | 467 | unsigned int i, si, idx, total_len = 0, sgt_len; |
478 | u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; | 468 | __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; |
479 | 469 | ||
480 | WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); | 470 | WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); |
481 | 471 | ||
@@ -693,7 +683,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) | |||
693 | } | 683 | } |
694 | } | 684 | } |
695 | 685 | ||
696 | static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) | 686 | static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) |
697 | { | 687 | { |
698 | switch (qc->tf.protocol) { | 688 | switch (qc->tf.protocol) { |
699 | case ATA_PROT_DMA: | 689 | case ATA_PROT_DMA: |
@@ -709,7 +699,7 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) | |||
709 | break; | 699 | break; |
710 | } | 700 | } |
711 | 701 | ||
712 | return ata_qc_issue_prot(qc); | 702 | return ata_sff_qc_issue(qc); |
713 | } | 703 | } |
714 | 704 | ||
715 | static inline unsigned int pdc20621_host_intr(struct ata_port *ap, | 705 | static inline unsigned int pdc20621_host_intr(struct ata_port *ap, |
@@ -781,7 +771,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, | |||
781 | /* command completion, but no data xfer */ | 771 | /* command completion, but no data xfer */ |
782 | } else if (qc->tf.protocol == ATA_PROT_NODATA) { | 772 | } else if (qc->tf.protocol == ATA_PROT_NODATA) { |
783 | 773 | ||
784 | status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | 774 | status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); |
785 | DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); | 775 | DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); |
786 | qc->err_mask |= ac_err_mask(status); | 776 | qc->err_mask |= ac_err_mask(status); |
787 | ata_qc_complete(qc); | 777 | ata_qc_complete(qc); |
@@ -890,7 +880,7 @@ static void pdc_eng_timeout(struct ata_port *ap) | |||
890 | break; | 880 | break; |
891 | 881 | ||
892 | default: | 882 | default: |
893 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | 883 | drv_stat = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); |
894 | 884 | ||
895 | ata_port_printk(ap, KERN_ERR, | 885 | ata_port_printk(ap, KERN_ERR, |
896 | "unknown timeout, cmd 0x%x stat 0x%x\n", | 886 | "unknown timeout, cmd 0x%x stat 0x%x\n", |
@@ -909,7 +899,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | |||
909 | { | 899 | { |
910 | WARN_ON(tf->protocol == ATA_PROT_DMA || | 900 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
911 | tf->protocol == ATA_PROT_NODATA); | 901 | tf->protocol == ATA_PROT_NODATA); |
912 | ata_tf_load(ap, tf); | 902 | ata_sff_tf_load(ap, tf); |
913 | } | 903 | } |
914 | 904 | ||
915 | 905 | ||
@@ -917,7 +907,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile | |||
917 | { | 907 | { |
918 | WARN_ON(tf->protocol == ATA_PROT_DMA || | 908 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
919 | tf->protocol == ATA_PROT_NODATA); | 909 | tf->protocol == ATA_PROT_NODATA); |
920 | ata_exec_command(ap, tf); | 910 | ata_sff_exec_command(ap, tf); |
921 | } | 911 | } |
922 | 912 | ||
923 | 913 | ||
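Besides the sff_ renames, one sx4 change worth calling out is the u32 * to __le32 * switch for the DIMM packet buffer: everything stored through that pointer must already be in little-endian device order, and the annotation lets sparse catch a store that skips the conversion. A small illustration (the val/idx names are invented for the sketch, not taken from the driver):

        __le32 *buf = (__le32 *)&pp->dimm_buf[PDC_DIMM_HEADER_SZ];

        buf[idx++] = cpu_to_le32(val);  /* fine: byte-swapped on big-endian hosts */
        buf[idx++] = val;               /* sparse now warns: different base types */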
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index e710e71b7b92..f277cea904ce 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
@@ -76,50 +76,13 @@ static struct pci_driver uli_pci_driver = { | |||
76 | }; | 76 | }; |
77 | 77 | ||
78 | static struct scsi_host_template uli_sht = { | 78 | static struct scsi_host_template uli_sht = { |
79 | .module = THIS_MODULE, | 79 | ATA_BMDMA_SHT(DRV_NAME), |
80 | .name = DRV_NAME, | ||
81 | .ioctl = ata_scsi_ioctl, | ||
82 | .queuecommand = ata_scsi_queuecmd, | ||
83 | .can_queue = ATA_DEF_QUEUE, | ||
84 | .this_id = ATA_SHT_THIS_ID, | ||
85 | .sg_tablesize = LIBATA_MAX_PRD, | ||
86 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
87 | .emulated = ATA_SHT_EMULATED, | ||
88 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
89 | .proc_name = DRV_NAME, | ||
90 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
91 | .slave_configure = ata_scsi_slave_config, | ||
92 | .slave_destroy = ata_scsi_slave_destroy, | ||
93 | .bios_param = ata_std_bios_param, | ||
94 | }; | 80 | }; |
95 | 81 | ||
96 | static const struct ata_port_operations uli_ops = { | 82 | static struct ata_port_operations uli_ops = { |
97 | .tf_load = ata_tf_load, | 83 | .inherits = &ata_bmdma_port_ops, |
98 | .tf_read = ata_tf_read, | ||
99 | .check_status = ata_check_status, | ||
100 | .exec_command = ata_exec_command, | ||
101 | .dev_select = ata_std_dev_select, | ||
102 | |||
103 | .bmdma_setup = ata_bmdma_setup, | ||
104 | .bmdma_start = ata_bmdma_start, | ||
105 | .bmdma_stop = ata_bmdma_stop, | ||
106 | .bmdma_status = ata_bmdma_status, | ||
107 | .qc_prep = ata_qc_prep, | ||
108 | .qc_issue = ata_qc_issue_prot, | ||
109 | .data_xfer = ata_data_xfer, | ||
110 | |||
111 | .freeze = ata_bmdma_freeze, | ||
112 | .thaw = ata_bmdma_thaw, | ||
113 | .error_handler = ata_bmdma_error_handler, | ||
114 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
115 | |||
116 | .irq_clear = ata_bmdma_irq_clear, | ||
117 | .irq_on = ata_irq_on, | ||
118 | |||
119 | .scr_read = uli_scr_read, | 84 | .scr_read = uli_scr_read, |
120 | .scr_write = uli_scr_write, | 85 | .scr_write = uli_scr_write, |
121 | |||
122 | .port_start = ata_port_start, | ||
123 | }; | 86 | }; |
124 | 87 | ||
125 | static const struct ata_port_info uli_port_info = { | 88 | static const struct ata_port_info uli_port_info = { |
@@ -212,11 +175,11 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
212 | host->private_data = hpriv; | 175 | host->private_data = hpriv; |
213 | 176 | ||
214 | /* the first two ports are standard SFF */ | 177 | /* the first two ports are standard SFF */ |
215 | rc = ata_pci_init_sff_host(host); | 178 | rc = ata_pci_sff_init_host(host); |
216 | if (rc) | 179 | if (rc) |
217 | return rc; | 180 | return rc; |
218 | 181 | ||
219 | rc = ata_pci_init_bmdma(host); | 182 | rc = ata_pci_bmdma_init(host); |
220 | if (rc) | 183 | if (rc) |
221 | return rc; | 184 | return rc; |
222 | 185 | ||
@@ -237,7 +200,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
237 | ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4; | 200 | ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4; |
238 | ioaddr->bmdma_addr = iomap[4] + 16; | 201 | ioaddr->bmdma_addr = iomap[4] + 16; |
239 | hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; | 202 | hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; |
240 | ata_std_ports(ioaddr); | 203 | ata_sff_std_ports(ioaddr); |
241 | 204 | ||
242 | ata_port_desc(host->ports[2], | 205 | ata_port_desc(host->ports[2], |
243 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", | 206 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", |
@@ -252,7 +215,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
252 | ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4; | 215 | ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4; |
253 | ioaddr->bmdma_addr = iomap[4] + 24; | 216 | ioaddr->bmdma_addr = iomap[4] + 24; |
254 | hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; | 217 | hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; |
255 | ata_std_ports(ioaddr); | 218 | ata_sff_std_ports(ioaddr); |
256 | 219 | ||
257 | ata_port_desc(host->ports[2], | 220 | ata_port_desc(host->ports[2], |
258 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", | 221 | "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", |
@@ -279,8 +242,8 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
279 | 242 | ||
280 | pci_set_master(pdev); | 243 | pci_set_master(pdev); |
281 | pci_intx(pdev, 1); | 244 | pci_intx(pdev, 1); |
282 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 245 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
283 | &uli_sht); | 246 | IRQF_SHARED, &uli_sht); |
284 | } | 247 | } |
285 | 248 | ||
286 | static int __init uli_init(void) | 249 | static int __init uli_init(void) |
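The uli hunks also rename the SFF library entry points the driver calls during probe (ata_pci_init_sff_host -> ata_pci_sff_init_host, ata_pci_init_bmdma -> ata_pci_bmdma_init, ata_std_ports -> ata_sff_std_ports, ata_interrupt -> ata_sff_interrupt). Condensed from the hunks above, with error paths and the third/fourth-port setup omitted, the new-style probe sequence for a controller with a non-standard BAR layout runs roughly as follows; this is a sketch, not a drop-in replacement for uli_init_one():

        rc = ata_pci_sff_init_host(host);       /* map BARs, set up ports 0-1 */
        if (rc)
                return rc;

        rc = ata_pci_bmdma_init(host);          /* wire up BMDMA registers */
        if (rc)
                return rc;

        ata_sff_std_ports(ioaddr);              /* fill taskfile offsets for the extra ports */

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
                                 IRQF_SHARED, &uli_sht);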
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 0d03f44824fb..96deeb354e16 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -71,7 +71,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | |||
71 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 71 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
72 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); | 72 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
73 | static void svia_noop_freeze(struct ata_port *ap); | 73 | static void svia_noop_freeze(struct ata_port *ap); |
74 | static void vt6420_error_handler(struct ata_port *ap); | 74 | static int vt6420_prereset(struct ata_link *link, unsigned long deadline); |
75 | static int vt6421_pata_cable_detect(struct ata_port *ap); | 75 | static int vt6421_pata_cable_detect(struct ata_port *ap); |
76 | static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); | 76 | static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); |
77 | static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); | 77 | static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); |
@@ -100,110 +100,26 @@ static struct pci_driver svia_pci_driver = { | |||
100 | }; | 100 | }; |
101 | 101 | ||
102 | static struct scsi_host_template svia_sht = { | 102 | static struct scsi_host_template svia_sht = { |
103 | .module = THIS_MODULE, | 103 | ATA_BMDMA_SHT(DRV_NAME), |
104 | .name = DRV_NAME, | ||
105 | .ioctl = ata_scsi_ioctl, | ||
106 | .queuecommand = ata_scsi_queuecmd, | ||
107 | .can_queue = ATA_DEF_QUEUE, | ||
108 | .this_id = ATA_SHT_THIS_ID, | ||
109 | .sg_tablesize = LIBATA_MAX_PRD, | ||
110 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
111 | .emulated = ATA_SHT_EMULATED, | ||
112 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
113 | .proc_name = DRV_NAME, | ||
114 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
115 | .slave_configure = ata_scsi_slave_config, | ||
116 | .slave_destroy = ata_scsi_slave_destroy, | ||
117 | .bios_param = ata_std_bios_param, | ||
118 | }; | 104 | }; |
119 | 105 | ||
120 | static const struct ata_port_operations vt6420_sata_ops = { | 106 | static struct ata_port_operations vt6420_sata_ops = { |
121 | .tf_load = ata_tf_load, | 107 | .inherits = &ata_bmdma_port_ops, |
122 | .tf_read = ata_tf_read, | ||
123 | .check_status = ata_check_status, | ||
124 | .exec_command = ata_exec_command, | ||
125 | .dev_select = ata_std_dev_select, | ||
126 | |||
127 | .bmdma_setup = ata_bmdma_setup, | ||
128 | .bmdma_start = ata_bmdma_start, | ||
129 | .bmdma_stop = ata_bmdma_stop, | ||
130 | .bmdma_status = ata_bmdma_status, | ||
131 | |||
132 | .qc_prep = ata_qc_prep, | ||
133 | .qc_issue = ata_qc_issue_prot, | ||
134 | .data_xfer = ata_data_xfer, | ||
135 | |||
136 | .freeze = svia_noop_freeze, | 108 | .freeze = svia_noop_freeze, |
137 | .thaw = ata_bmdma_thaw, | 109 | .prereset = vt6420_prereset, |
138 | .error_handler = vt6420_error_handler, | ||
139 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
140 | |||
141 | .irq_clear = ata_bmdma_irq_clear, | ||
142 | .irq_on = ata_irq_on, | ||
143 | |||
144 | .port_start = ata_port_start, | ||
145 | }; | 110 | }; |
146 | 111 | ||
147 | static const struct ata_port_operations vt6421_pata_ops = { | 112 | static struct ata_port_operations vt6421_pata_ops = { |
113 | .inherits = &ata_bmdma_port_ops, | ||
114 | .cable_detect = vt6421_pata_cable_detect, | ||
148 | .set_piomode = vt6421_set_pio_mode, | 115 | .set_piomode = vt6421_set_pio_mode, |
149 | .set_dmamode = vt6421_set_dma_mode, | 116 | .set_dmamode = vt6421_set_dma_mode, |
150 | |||
151 | .tf_load = ata_tf_load, | ||
152 | .tf_read = ata_tf_read, | ||
153 | .check_status = ata_check_status, | ||
154 | .exec_command = ata_exec_command, | ||
155 | .dev_select = ata_std_dev_select, | ||
156 | |||
157 | .bmdma_setup = ata_bmdma_setup, | ||
158 | .bmdma_start = ata_bmdma_start, | ||
159 | .bmdma_stop = ata_bmdma_stop, | ||
160 | .bmdma_status = ata_bmdma_status, | ||
161 | |||
162 | .qc_prep = ata_qc_prep, | ||
163 | .qc_issue = ata_qc_issue_prot, | ||
164 | .data_xfer = ata_data_xfer, | ||
165 | |||
166 | .freeze = ata_bmdma_freeze, | ||
167 | .thaw = ata_bmdma_thaw, | ||
168 | .error_handler = ata_bmdma_error_handler, | ||
169 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
170 | .cable_detect = vt6421_pata_cable_detect, | ||
171 | |||
172 | .irq_clear = ata_bmdma_irq_clear, | ||
173 | .irq_on = ata_irq_on, | ||
174 | |||
175 | .port_start = ata_port_start, | ||
176 | }; | 117 | }; |
177 | 118 | ||
178 | static const struct ata_port_operations vt6421_sata_ops = { | 119 | static struct ata_port_operations vt6421_sata_ops = { |
179 | .tf_load = ata_tf_load, | 120 | .inherits = &ata_bmdma_port_ops, |
180 | .tf_read = ata_tf_read, | ||
181 | .check_status = ata_check_status, | ||
182 | .exec_command = ata_exec_command, | ||
183 | .dev_select = ata_std_dev_select, | ||
184 | |||
185 | .bmdma_setup = ata_bmdma_setup, | ||
186 | .bmdma_start = ata_bmdma_start, | ||
187 | .bmdma_stop = ata_bmdma_stop, | ||
188 | .bmdma_status = ata_bmdma_status, | ||
189 | |||
190 | .qc_prep = ata_qc_prep, | ||
191 | .qc_issue = ata_qc_issue_prot, | ||
192 | .data_xfer = ata_data_xfer, | ||
193 | |||
194 | .freeze = ata_bmdma_freeze, | ||
195 | .thaw = ata_bmdma_thaw, | ||
196 | .error_handler = ata_bmdma_error_handler, | ||
197 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
198 | .cable_detect = ata_cable_sata, | ||
199 | |||
200 | .irq_clear = ata_bmdma_irq_clear, | ||
201 | .irq_on = ata_irq_on, | ||
202 | |||
203 | .scr_read = svia_scr_read, | 121 | .scr_read = svia_scr_read, |
204 | .scr_write = svia_scr_write, | 122 | .scr_write = svia_scr_write, |
205 | |||
206 | .port_start = ata_port_start, | ||
207 | }; | 123 | }; |
208 | 124 | ||
209 | static const struct ata_port_info vt6420_port_info = { | 125 | static const struct ata_port_info vt6420_port_info = { |
@@ -257,8 +173,8 @@ static void svia_noop_freeze(struct ata_port *ap) | |||
257 | /* Some VIA controllers choke if ATA_NIEN is manipulated in | 173 | /* Some VIA controllers choke if ATA_NIEN is manipulated in |
258 | * certain way. Leave it alone and just clear pending IRQ. | 174 | * certain way. Leave it alone and just clear pending IRQ. |
259 | */ | 175 | */ |
260 | ata_chk_status(ap); | 176 | ap->ops->sff_check_status(ap); |
261 | ata_bmdma_irq_clear(ap); | 177 | ata_sff_irq_clear(ap); |
262 | } | 178 | } |
263 | 179 | ||
264 | /** | 180 | /** |
@@ -320,23 +236,17 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline) | |||
320 | 236 | ||
321 | if (!online) { | 237 | if (!online) { |
322 | /* tell EH to bail */ | 238 | /* tell EH to bail */ |
323 | ehc->i.action &= ~ATA_EH_RESET_MASK; | 239 | ehc->i.action &= ~ATA_EH_RESET; |
324 | return 0; | 240 | return 0; |
325 | } | 241 | } |
326 | 242 | ||
327 | skip_scr: | 243 | skip_scr: |
328 | /* wait for !BSY */ | 244 | /* wait for !BSY */ |
329 | ata_wait_ready(ap, deadline); | 245 | ata_sff_wait_ready(link, deadline); |
330 | 246 | ||
331 | return 0; | 247 | return 0; |
332 | } | 248 | } |
333 | 249 | ||
334 | static void vt6420_error_handler(struct ata_port *ap) | ||
335 | { | ||
336 | ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, NULL, | ||
337 | ata_std_postreset); | ||
338 | } | ||
339 | |||
340 | static int vt6421_pata_cable_detect(struct ata_port *ap) | 250 | static int vt6421_pata_cable_detect(struct ata_port *ap) |
341 | { | 251 | { |
342 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 252 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
@@ -394,7 +304,7 @@ static void vt6421_init_addrs(struct ata_port *ap) | |||
394 | ioaddr->bmdma_addr = bmdma_addr; | 304 | ioaddr->bmdma_addr = bmdma_addr; |
395 | ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); | 305 | ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); |
396 | 306 | ||
397 | ata_std_ports(ioaddr); | 307 | ata_sff_std_ports(ioaddr); |
398 | 308 | ||
399 | ata_port_pbar_desc(ap, ap->port_no, -1, "port"); | 309 | ata_port_pbar_desc(ap, ap->port_no, -1, "port"); |
400 | ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma"); | 310 | ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma"); |
@@ -406,7 +316,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | |||
406 | struct ata_host *host; | 316 | struct ata_host *host; |
407 | int rc; | 317 | int rc; |
408 | 318 | ||
409 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 319 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
410 | if (rc) | 320 | if (rc) |
411 | return rc; | 321 | return rc; |
412 | *r_host = host; | 322 | *r_host = host; |
@@ -538,8 +448,8 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
538 | svia_configure(pdev); | 448 | svia_configure(pdev); |
539 | 449 | ||
540 | pci_set_master(pdev); | 450 | pci_set_master(pdev); |
541 | return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, | 451 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, |
542 | &svia_sht); | 452 | IRQF_SHARED, &svia_sht); |
543 | } | 453 | } |
544 | 454 | ||
545 | static int __init svia_init(void) | 455 | static int __init svia_init(void) |
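The via conversion shows the EH side of the ops-table change: vt6420 drops its private vt6420_error_handler() wrapper and only sets .prereset, relying on the inherited BMDMA error handler to pick the reset hooks out of the ops table. Conceptually the inherited handler ends up making the call the wrapper used to hard-wire by hand (simplified sketch; not the exact libata-sff code):

        /* Simplified sketch: the inherited error handler fetches the reset
         * hooks from ap->ops instead of having each driver hard-wire them.
         */
        ata_do_eh(ap, ap->ops->prereset, ap->ops->softreset,
                  ap->ops->hardreset, ap->ops->postreset);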
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 95ae3ed24a9d..f3d635c0a2e9 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
@@ -200,7 +200,7 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
200 | struct ata_ioports *ioaddr = &ap->ioaddr; | 200 | struct ata_ioports *ioaddr = &ap->ioaddr; |
201 | u16 nsect, lbal, lbam, lbah, feature; | 201 | u16 nsect, lbal, lbam, lbah, feature; |
202 | 202 | ||
203 | tf->command = ata_check_status(ap); | 203 | tf->command = ata_sff_check_status(ap); |
204 | tf->device = readw(ioaddr->device_addr); | 204 | tf->device = readw(ioaddr->device_addr); |
205 | feature = readw(ioaddr->error_addr); | 205 | feature = readw(ioaddr->error_addr); |
206 | nsect = readw(ioaddr->nsect_addr); | 206 | nsect = readw(ioaddr->nsect_addr); |
@@ -243,7 +243,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) | |||
243 | 243 | ||
244 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 244 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
245 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) | 245 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) |
246 | handled = ata_host_intr(ap, qc); | 246 | handled = ata_sff_host_intr(ap, qc); |
247 | 247 | ||
248 | /* We received an interrupt during a polled command, | 248 | /* We received an interrupt during a polled command, |
249 | * or some other spurious condition. Interrupt reporting | 249 | * or some other spurious condition. Interrupt reporting |
@@ -251,7 +251,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) | |||
251 | * simply clear the interrupt | 251 | * simply clear the interrupt |
252 | */ | 252 | */ |
253 | if (unlikely(!handled)) | 253 | if (unlikely(!handled)) |
254 | ata_chk_status(ap); | 254 | ap->ops->sff_check_status(ap); |
255 | } | 255 | } |
256 | 256 | ||
257 | /* | 257 | /* |
@@ -300,46 +300,18 @@ out: | |||
300 | 300 | ||
301 | 301 | ||
302 | static struct scsi_host_template vsc_sata_sht = { | 302 | static struct scsi_host_template vsc_sata_sht = { |
303 | .module = THIS_MODULE, | 303 | ATA_BMDMA_SHT(DRV_NAME), |
304 | .name = DRV_NAME, | ||
305 | .ioctl = ata_scsi_ioctl, | ||
306 | .queuecommand = ata_scsi_queuecmd, | ||
307 | .can_queue = ATA_DEF_QUEUE, | ||
308 | .this_id = ATA_SHT_THIS_ID, | ||
309 | .sg_tablesize = LIBATA_MAX_PRD, | ||
310 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
311 | .emulated = ATA_SHT_EMULATED, | ||
312 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
313 | .proc_name = DRV_NAME, | ||
314 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
315 | .slave_configure = ata_scsi_slave_config, | ||
316 | .slave_destroy = ata_scsi_slave_destroy, | ||
317 | .bios_param = ata_std_bios_param, | ||
318 | }; | 304 | }; |
319 | 305 | ||
320 | 306 | ||
321 | static const struct ata_port_operations vsc_sata_ops = { | 307 | static struct ata_port_operations vsc_sata_ops = { |
322 | .tf_load = vsc_sata_tf_load, | 308 | .inherits = &ata_bmdma_port_ops, |
323 | .tf_read = vsc_sata_tf_read, | 309 | .sff_tf_load = vsc_sata_tf_load, |
324 | .exec_command = ata_exec_command, | 310 | .sff_tf_read = vsc_sata_tf_read, |
325 | .check_status = ata_check_status, | ||
326 | .dev_select = ata_std_dev_select, | ||
327 | .bmdma_setup = ata_bmdma_setup, | ||
328 | .bmdma_start = ata_bmdma_start, | ||
329 | .bmdma_stop = ata_bmdma_stop, | ||
330 | .bmdma_status = ata_bmdma_status, | ||
331 | .qc_prep = ata_qc_prep, | ||
332 | .qc_issue = ata_qc_issue_prot, | ||
333 | .data_xfer = ata_data_xfer, | ||
334 | .freeze = vsc_freeze, | 311 | .freeze = vsc_freeze, |
335 | .thaw = vsc_thaw, | 312 | .thaw = vsc_thaw, |
336 | .error_handler = ata_bmdma_error_handler, | ||
337 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
338 | .irq_clear = ata_bmdma_irq_clear, | ||
339 | .irq_on = ata_irq_on, | ||
340 | .scr_read = vsc_sata_scr_read, | 313 | .scr_read = vsc_sata_scr_read, |
341 | .scr_write = vsc_sata_scr_write, | 314 | .scr_write = vsc_sata_scr_write, |
342 | .port_start = ata_port_start, | ||
343 | }; | 315 | }; |
344 | 316 | ||
345 | static void __devinit vsc_sata_setup_port(struct ata_ioports *port, | 317 | static void __devinit vsc_sata_setup_port(struct ata_ioports *port, |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index c72014a3e7d4..65dc18dea845 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -3937,7 +3937,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) | |||
3937 | if (ipr_is_gata(res) && res->sata_port) { | 3937 | if (ipr_is_gata(res) && res->sata_port) { |
3938 | ap = res->sata_port->ap; | 3938 | ap = res->sata_port->ap; |
3939 | spin_unlock_irq(scsi_cmd->device->host->host_lock); | 3939 | spin_unlock_irq(scsi_cmd->device->host->host_lock); |
3940 | ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL); | 3940 | ata_std_error_handler(ap); |
3941 | spin_lock_irq(scsi_cmd->device->host->host_lock); | 3941 | spin_lock_irq(scsi_cmd->device->host->host_lock); |
3942 | 3942 | ||
3943 | list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { | 3943 | list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { |
@@ -5041,33 +5041,6 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc) | |||
5041 | } | 5041 | } |
5042 | 5042 | ||
5043 | /** | 5043 | /** |
5044 | * ipr_tf_read - Read the current ATA taskfile for the ATA port | ||
5045 | * @ap: ATA port | ||
5046 | * @tf: destination ATA taskfile | ||
5047 | * | ||
5048 | * Return value: | ||
5049 | * none | ||
5050 | **/ | ||
5051 | static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
5052 | { | ||
5053 | struct ipr_sata_port *sata_port = ap->private_data; | ||
5054 | struct ipr_ioasa_gata *g = &sata_port->ioasa; | ||
5055 | |||
5056 | tf->feature = g->error; | ||
5057 | tf->nsect = g->nsect; | ||
5058 | tf->lbal = g->lbal; | ||
5059 | tf->lbam = g->lbam; | ||
5060 | tf->lbah = g->lbah; | ||
5061 | tf->device = g->device; | ||
5062 | tf->command = g->status; | ||
5063 | tf->hob_nsect = g->hob_nsect; | ||
5064 | tf->hob_lbal = g->hob_lbal; | ||
5065 | tf->hob_lbam = g->hob_lbam; | ||
5066 | tf->hob_lbah = g->hob_lbah; | ||
5067 | tf->ctl = g->alt_status; | ||
5068 | } | ||
5069 | |||
5070 | /** | ||
5071 | * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure | 5044 | * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure |
5072 | * @regs: destination | 5045 | * @regs: destination |
5073 | * @tf: source ATA taskfile | 5046 | * @tf: source ATA taskfile |
@@ -5245,40 +5218,41 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
5245 | } | 5218 | } |
5246 | 5219 | ||
5247 | /** | 5220 | /** |
5248 | * ipr_ata_check_status - Return last ATA status | 5221 | * ipr_qc_fill_rtf - Read result TF |
5249 | * @ap: ATA port | 5222 | * @qc: ATA queued command |
5250 | * | 5223 | * |
5251 | * Return value: | 5224 | * Return value: |
5252 | * ATA status | 5225 | * true |
5253 | **/ | 5226 | **/ |
5254 | static u8 ipr_ata_check_status(struct ata_port *ap) | 5227 | static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) |
5255 | { | 5228 | { |
5256 | struct ipr_sata_port *sata_port = ap->private_data; | 5229 | struct ipr_sata_port *sata_port = qc->ap->private_data; |
5257 | return sata_port->ioasa.status; | 5230 | struct ipr_ioasa_gata *g = &sata_port->ioasa; |
5258 | } | 5231 | struct ata_taskfile *tf = &qc->result_tf; |
5259 | 5232 | ||
5260 | /** | 5233 | tf->feature = g->error; |
5261 | * ipr_ata_check_altstatus - Return last ATA altstatus | 5234 | tf->nsect = g->nsect; |
5262 | * @ap: ATA port | 5235 | tf->lbal = g->lbal; |
5263 | * | 5236 | tf->lbam = g->lbam; |
5264 | * Return value: | 5237 | tf->lbah = g->lbah; |
5265 | * Alt ATA status | 5238 | tf->device = g->device; |
5266 | **/ | 5239 | tf->command = g->status; |
5267 | static u8 ipr_ata_check_altstatus(struct ata_port *ap) | 5240 | tf->hob_nsect = g->hob_nsect; |
5268 | { | 5241 | tf->hob_lbal = g->hob_lbal; |
5269 | struct ipr_sata_port *sata_port = ap->private_data; | 5242 | tf->hob_lbam = g->hob_lbam; |
5270 | return sata_port->ioasa.alt_status; | 5243 | tf->hob_lbah = g->hob_lbah; |
5244 | tf->ctl = g->alt_status; | ||
5245 | |||
5246 | return true; | ||
5271 | } | 5247 | } |
5272 | 5248 | ||
5273 | static struct ata_port_operations ipr_sata_ops = { | 5249 | static struct ata_port_operations ipr_sata_ops = { |
5274 | .check_status = ipr_ata_check_status, | ||
5275 | .check_altstatus = ipr_ata_check_altstatus, | ||
5276 | .dev_select = ata_noop_dev_select, | ||
5277 | .phy_reset = ipr_ata_phy_reset, | 5250 | .phy_reset = ipr_ata_phy_reset, |
5251 | .hardreset = ipr_sata_reset, | ||
5278 | .post_internal_cmd = ipr_ata_post_internal, | 5252 | .post_internal_cmd = ipr_ata_post_internal, |
5279 | .tf_read = ipr_tf_read, | ||
5280 | .qc_prep = ata_noop_qc_prep, | 5253 | .qc_prep = ata_noop_qc_prep, |
5281 | .qc_issue = ipr_qc_issue, | 5254 | .qc_issue = ipr_qc_issue, |
5255 | .qc_fill_rtf = ipr_qc_fill_rtf, | ||
5282 | .port_start = ata_sas_port_start, | 5256 | .port_start = ata_sas_port_start, |
5283 | .port_stop = ata_sas_port_stop | 5257 | .port_stop = ata_sas_port_stop |
5284 | }; | 5258 | }; |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index b0e5ac372a32..a4811e4106df 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -225,10 +225,12 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) | |||
225 | return 0; | 225 | return 0; |
226 | } | 226 | } |
227 | 227 | ||
228 | static u8 sas_ata_check_status(struct ata_port *ap) | 228 | static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) |
229 | { | 229 | { |
230 | struct domain_device *dev = ap->private_data; | 230 | struct domain_device *dev = qc->ap->private_data; |
231 | return dev->sata_dev.tf.command; | 231 | |
232 | memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf)); | ||
233 | return true; | ||
232 | } | 234 | } |
233 | 235 | ||
234 | static void sas_ata_phy_reset(struct ata_port *ap) | 236 | static void sas_ata_phy_reset(struct ata_port *ap) |
@@ -292,12 +294,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc) | |||
292 | } | 294 | } |
293 | } | 295 | } |
294 | 296 | ||
295 | static void sas_ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
296 | { | ||
297 | struct domain_device *dev = ap->private_data; | ||
298 | memcpy(tf, &dev->sata_dev.tf, sizeof (*tf)); | ||
299 | } | ||
300 | |||
301 | static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in, | 297 | static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in, |
302 | u32 val) | 298 | u32 val) |
303 | { | 299 | { |
@@ -348,14 +344,11 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in, | |||
348 | } | 344 | } |
349 | 345 | ||
350 | static struct ata_port_operations sas_sata_ops = { | 346 | static struct ata_port_operations sas_sata_ops = { |
351 | .check_status = sas_ata_check_status, | ||
352 | .check_altstatus = sas_ata_check_status, | ||
353 | .dev_select = ata_noop_dev_select, | ||
354 | .phy_reset = sas_ata_phy_reset, | 347 | .phy_reset = sas_ata_phy_reset, |
355 | .post_internal_cmd = sas_ata_post_internal, | 348 | .post_internal_cmd = sas_ata_post_internal, |
356 | .tf_read = sas_ata_tf_read, | ||
357 | .qc_prep = ata_noop_qc_prep, | 349 | .qc_prep = ata_noop_qc_prep, |
358 | .qc_issue = sas_ata_qc_issue, | 350 | .qc_issue = sas_ata_qc_issue, |
351 | .qc_fill_rtf = sas_ata_qc_fill_rtf, | ||
359 | .port_start = ata_sas_port_start, | 352 | .port_start = ata_sas_port_start, |
360 | .port_stop = ata_sas_port_stop, | 353 | .port_stop = ata_sas_port_stop, |
361 | .scr_read = sas_ata_scr_read, | 354 | .scr_read = sas_ata_scr_read, |
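Both SAS-attached ATA users above (ipr and libsas) drop their check_status/tf_read shims in favour of the single qc_fill_rtf hook, which hands libata the complete result taskfile in one call. On the consuming side this is used roughly as follows when a completed command asked for its result TF (simplified sketch, not the exact libata-core path):

        /* Rough sketch of the consumer: libata asks the driver to fill
         * qc->result_tf through ->qc_fill_rtf() instead of reading status
         * and taskfile registers piecemeal via separate hooks.
         */
        static void fill_result_tf(struct ata_queued_cmd *qc)
        {
                struct ata_port *ap = qc->ap;

                qc->result_tf.flags = qc->tf.flags;
                ap->ops->qc_fill_rtf(qc);
        }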
diff --git a/include/linux/libata.h b/include/linux/libata.h index 37ee881c42ac..165734a2dd47 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -122,6 +122,8 @@ enum { | |||
122 | 122 | ||
123 | ATAPI_MAX_DRAIN = 16 << 10, | 123 | ATAPI_MAX_DRAIN = 16 << 10, |
124 | 124 | ||
125 | ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, | ||
126 | |||
125 | ATA_SHT_EMULATED = 1, | 127 | ATA_SHT_EMULATED = 1, |
126 | ATA_SHT_CMD_PER_LUN = 1, | 128 | ATA_SHT_CMD_PER_LUN = 1, |
127 | ATA_SHT_THIS_ID = -1, | 129 | ATA_SHT_THIS_ID = -1, |
@@ -163,9 +165,6 @@ enum { | |||
163 | ATA_DEV_NONE = 9, /* no device */ | 165 | ATA_DEV_NONE = 9, /* no device */ |
164 | 166 | ||
165 | /* struct ata_link flags */ | 167 | /* struct ata_link flags */ |
166 | ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */ | ||
167 | ATA_LFLAG_SKIP_D2H_BSY = (1 << 1), /* can't wait for the first D2H | ||
168 | * Register FIS clearing BSY */ | ||
169 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ | 168 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ |
170 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ | 169 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ |
171 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ | 170 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ |
@@ -225,6 +224,7 @@ enum { | |||
225 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ | 224 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ |
226 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ | 225 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ |
227 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ | 226 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ |
227 | ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ | ||
228 | 228 | ||
229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ | 229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ |
230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ | 230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ |
@@ -249,6 +249,25 @@ enum { | |||
249 | */ | 249 | */ |
250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, | 250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, |
251 | 251 | ||
252 | /* The spec mandates waiting ">= 2ms" before checking status | ||
253 | * after reset. We wait 150ms, because that was the magic | ||
254 | * delay used for ATAPI devices in Hale Landis's ATADRVR, for | ||
255 | * the period of time between when the ATA command register is | ||
256 | * written, and then status is checked. Because waiting for | ||
257 | * "a while" before checking status is fine, post SRST, we | ||
258 | * perform this magic delay here as well. | ||
259 | * | ||
260 | * Old drivers/ide uses the 2ms rule and then waits for ready. | ||
261 | */ | ||
262 | ATA_WAIT_AFTER_RESET_MSECS = 150, | ||
263 | |||
264 | /* If PMP is supported, we have to do follow-up SRST. As some | ||
265 | * PMPs don't send D2H Reg FIS after hardreset, LLDs are | ||
266 | * advised to wait only for the following duration before | ||
267 | * doing SRST. | ||
268 | */ | ||
269 | ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ, | ||
270 | |||
252 | /* ATA bus states */ | 271 | /* ATA bus states */ |
253 | BUS_UNKNOWN = 0, | 272 | BUS_UNKNOWN = 0, |
254 | BUS_DMA = 1, | 273 | BUS_DMA = 1, |
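
The 150ms constant above is typically consumed right after the reset sequence, before the first status poll. A hedged sketch under that assumption (my_wait_after_srst is a hypothetical helper; ata_sff_wait_ready is declared later in this header):

static int my_wait_after_srst(struct ata_link *link, unsigned long deadline)
{
	/* post-reset grace period described in the comment above */
	msleep(ATA_WAIT_AFTER_RESET_MSECS);
	/* then poll until BSY clears or the EH deadline expires */
	return ata_sff_wait_ready(link, deadline);
}
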
@@ -292,17 +311,16 @@ enum { | |||
292 | 311 | ||
293 | /* reset / recovery action types */ | 312 | /* reset / recovery action types */ |
294 | ATA_EH_REVALIDATE = (1 << 0), | 313 | ATA_EH_REVALIDATE = (1 << 0), |
295 | ATA_EH_SOFTRESET = (1 << 1), | 314 | ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ |
296 | ATA_EH_HARDRESET = (1 << 2), | 315 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ |
316 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
297 | ATA_EH_ENABLE_LINK = (1 << 3), | 317 | ATA_EH_ENABLE_LINK = (1 << 3), |
298 | ATA_EH_LPM = (1 << 4), /* link power management action */ | 318 | ATA_EH_LPM = (1 << 4), /* link power management action */ |
299 | 319 | ||
300 | ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
301 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, | 320 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, |
302 | 321 | ||
303 | /* ata_eh_info->flags */ | 322 | /* ata_eh_info->flags */ |
304 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 323 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
305 | ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ | ||
306 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 324 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
307 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 325 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
308 | 326 | ||
@@ -313,7 +331,6 @@ enum { | |||
313 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ | 331 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ |
314 | 332 | ||
315 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 333 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
316 | ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, | ||
317 | 334 | ||
318 | /* max tries if error condition is still set after ->error_handler */ | 335 | /* max tries if error condition is still set after ->error_handler */ |
319 | ATA_EH_MAX_TRIES = 5, | 336 | ATA_EH_MAX_TRIES = 5, |
@@ -352,6 +369,22 @@ enum { | |||
352 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ | 369 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ |
353 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ | 370 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ |
354 | ATAPI_MISC = 4, /* the rest */ | 371 | ATAPI_MISC = 4, /* the rest */ |
372 | |||
373 | /* Timing constants */ | ||
374 | ATA_TIMING_SETUP = (1 << 0), | ||
375 | ATA_TIMING_ACT8B = (1 << 1), | ||
376 | ATA_TIMING_REC8B = (1 << 2), | ||
377 | ATA_TIMING_CYC8B = (1 << 3), | ||
378 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | ||
379 | ATA_TIMING_CYC8B, | ||
380 | ATA_TIMING_ACTIVE = (1 << 4), | ||
381 | ATA_TIMING_RECOVER = (1 << 5), | ||
382 | ATA_TIMING_CYCLE = (1 << 6), | ||
383 | ATA_TIMING_UDMA = (1 << 7), | ||
384 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
385 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
386 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
387 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
355 | }; | 388 | }; |
356 | 389 | ||
357 | enum ata_xfer_mask { | 390 | enum ata_xfer_mask { |
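
The ATA_TIMING_* bits move from a private enum further down (removed later in this diff) into the public block above; they form the "what" mask for ata_timing_merge(). A hedged ->set_piomode sketch using them (my_set_piomode is hypothetical and the 30ns bus-clock period is an assumption):

static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t, p;
	struct ata_device *pair = ata_dev_pair(adev);

	/* compute timings for this device at its PIO mode; T=30ns assumed */
	ata_timing_compute(adev, adev->pio_mode, &t, 30, 1);

	/* share the 8-bit register-access timing with the cable mate */
	if (pair) {
		ata_timing_compute(pair, pair->pio_mode, &p, 30, 1);
		ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
	}

	/* program t.setup / t.active / t.recover into controller registers */
}
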
@@ -412,6 +445,7 @@ enum link_pm { | |||
412 | }; | 445 | }; |
413 | extern struct class_device_attribute class_device_attr_link_power_management_policy; | 446 | extern struct class_device_attribute class_device_attr_link_power_management_policy; |
414 | 447 | ||
448 | #ifdef CONFIG_ATA_SFF | ||
415 | struct ata_ioports { | 449 | struct ata_ioports { |
416 | void __iomem *cmd_addr; | 450 | void __iomem *cmd_addr; |
417 | void __iomem *data_addr; | 451 | void __iomem *data_addr; |
@@ -429,6 +463,7 @@ struct ata_ioports { | |||
429 | void __iomem *bmdma_addr; | 463 | void __iomem *bmdma_addr; |
430 | void __iomem *scr_addr; | 464 | void __iomem *scr_addr; |
431 | }; | 465 | }; |
466 | #endif /* CONFIG_ATA_SFF */ | ||
432 | 467 | ||
433 | struct ata_host { | 468 | struct ata_host { |
434 | spinlock_t lock; | 469 | spinlock_t lock; |
@@ -436,7 +471,7 @@ struct ata_host { | |||
436 | void __iomem * const *iomap; | 471 | void __iomem * const *iomap; |
437 | unsigned int n_ports; | 472 | unsigned int n_ports; |
438 | void *private_data; | 473 | void *private_data; |
439 | const struct ata_port_operations *ops; | 474 | struct ata_port_operations *ops; |
440 | unsigned long flags; | 475 | unsigned long flags; |
441 | #ifdef CONFIG_ATA_ACPI | 476 | #ifdef CONFIG_ATA_ACPI |
442 | acpi_handle acpi_handle; | 477 | acpi_handle acpi_handle; |
@@ -605,7 +640,7 @@ struct ata_link { | |||
605 | 640 | ||
606 | struct ata_port { | 641 | struct ata_port { |
607 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 642 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
608 | const struct ata_port_operations *ops; | 643 | struct ata_port_operations *ops; |
609 | spinlock_t *lock; | 644 | spinlock_t *lock; |
610 | unsigned long flags; /* ATA_FLAG_xxx */ | 645 | unsigned long flags; /* ATA_FLAG_xxx */ |
611 | unsigned int pflags; /* ATA_PFLAG_xxx */ | 646 | unsigned int pflags; /* ATA_PFLAG_xxx */ |
@@ -615,7 +650,9 @@ struct ata_port { | |||
615 | struct ata_prd *prd; /* our SG list */ | 650 | struct ata_prd *prd; /* our SG list */ |
616 | dma_addr_t prd_dma; /* and its DMA mapping */ | 651 | dma_addr_t prd_dma; /* and its DMA mapping */ |
617 | 652 | ||
653 | #ifdef CONFIG_ATA_SFF | ||
618 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 654 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
655 | #endif /* CONFIG_ATA_SFF */ | ||
619 | 656 | ||
620 | u8 ctl; /* cache of ATA control register */ | 657 | u8 ctl; /* cache of ATA control register */ |
621 | u8 last_ctl; /* Cache last written value */ | 658 | u8 last_ctl; /* Cache last written value */ |
@@ -667,81 +704,108 @@ struct ata_port { | |||
667 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ | 704 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ |
668 | }; | 705 | }; |
669 | 706 | ||
670 | struct ata_port_operations { | 707 | /* The following initializer overrides a method to NULL whether one of |
671 | void (*dev_config) (struct ata_device *); | 708 | * its parents has the method defined or not. This is equivalent to |
672 | 709 | * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant | |
673 | void (*set_piomode) (struct ata_port *, struct ata_device *); | 710 | * expression and thus can't be used as an initializer. |
674 | void (*set_dmamode) (struct ata_port *, struct ata_device *); | 711 | */ |
675 | unsigned long (*mode_filter) (struct ata_device *, unsigned long); | 712 | #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) |
676 | |||
677 | void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); | ||
678 | void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); | ||
679 | |||
680 | void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
681 | u8 (*check_status)(struct ata_port *ap); | ||
682 | u8 (*check_altstatus)(struct ata_port *ap); | ||
683 | void (*dev_select)(struct ata_port *ap, unsigned int device); | ||
684 | |||
685 | void (*phy_reset) (struct ata_port *ap); /* obsolete */ | ||
686 | int (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev); | ||
687 | |||
688 | int (*cable_detect) (struct ata_port *ap); | ||
689 | |||
690 | int (*check_atapi_dma) (struct ata_queued_cmd *qc); | ||
691 | |||
692 | void (*bmdma_setup) (struct ata_queued_cmd *qc); | ||
693 | void (*bmdma_start) (struct ata_queued_cmd *qc); | ||
694 | |||
695 | unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf, | ||
696 | unsigned int buflen, int rw); | ||
697 | |||
698 | int (*qc_defer) (struct ata_queued_cmd *qc); | ||
699 | void (*qc_prep) (struct ata_queued_cmd *qc); | ||
700 | unsigned int (*qc_issue) (struct ata_queued_cmd *qc); | ||
701 | |||
702 | /* port multiplier */ | ||
703 | void (*pmp_attach) (struct ata_port *ap); | ||
704 | void (*pmp_detach) (struct ata_port *ap); | ||
705 | 713 | ||
706 | /* Error handlers. ->error_handler overrides ->eng_timeout and | 714 | struct ata_port_operations { |
707 | * indicates that new-style EH is in place. | 715 | /* |
716 | * Command execution | ||
708 | */ | 717 | */ |
709 | void (*eng_timeout) (struct ata_port *ap); /* obsolete */ | 718 | int (*qc_defer)(struct ata_queued_cmd *qc); |
710 | 719 | int (*check_atapi_dma)(struct ata_queued_cmd *qc); | |
711 | void (*freeze) (struct ata_port *ap); | 720 | void (*qc_prep)(struct ata_queued_cmd *qc); |
712 | void (*thaw) (struct ata_port *ap); | 721 | unsigned int (*qc_issue)(struct ata_queued_cmd *qc); |
713 | void (*error_handler) (struct ata_port *ap); | 722 | bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); |
714 | void (*post_internal_cmd) (struct ata_queued_cmd *qc); | 723 | |
715 | 724 | /* | |
716 | irq_handler_t irq_handler; | 725 | * Configuration and exception handling |
717 | void (*irq_clear) (struct ata_port *); | 726 | */ |
718 | u8 (*irq_on) (struct ata_port *); | 727 | int (*cable_detect)(struct ata_port *ap); |
719 | 728 | unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); | |
720 | int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 729 | void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); |
721 | int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val); | 730 | void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); |
731 | int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); | ||
732 | |||
733 | void (*dev_config)(struct ata_device *dev); | ||
734 | |||
735 | void (*freeze)(struct ata_port *ap); | ||
736 | void (*thaw)(struct ata_port *ap); | ||
737 | ata_prereset_fn_t prereset; | ||
738 | ata_reset_fn_t softreset; | ||
739 | ata_reset_fn_t hardreset; | ||
740 | ata_postreset_fn_t postreset; | ||
741 | ata_prereset_fn_t pmp_prereset; | ||
742 | ata_reset_fn_t pmp_softreset; | ||
743 | ata_reset_fn_t pmp_hardreset; | ||
744 | ata_postreset_fn_t pmp_postreset; | ||
745 | void (*error_handler)(struct ata_port *ap); | ||
746 | void (*post_internal_cmd)(struct ata_queued_cmd *qc); | ||
747 | |||
748 | /* | ||
749 | * Optional features | ||
750 | */ | ||
751 | int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); | ||
752 | int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); | ||
753 | void (*pmp_attach)(struct ata_port *ap); | ||
754 | void (*pmp_detach)(struct ata_port *ap); | ||
755 | int (*enable_pm)(struct ata_port *ap, enum link_pm policy); | ||
756 | void (*disable_pm)(struct ata_port *ap); | ||
757 | |||
758 | /* | ||
759 | * Start, stop, suspend and resume | ||
760 | */ | ||
761 | int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); | ||
762 | int (*port_resume)(struct ata_port *ap); | ||
763 | int (*port_start)(struct ata_port *ap); | ||
764 | void (*port_stop)(struct ata_port *ap); | ||
765 | void (*host_stop)(struct ata_host *host); | ||
766 | |||
767 | #ifdef CONFIG_ATA_SFF | ||
768 | /* | ||
769 | * SFF / taskfile oriented ops | ||
770 | */ | ||
771 | void (*sff_dev_select)(struct ata_port *ap, unsigned int device); | ||
772 | u8 (*sff_check_status)(struct ata_port *ap); | ||
773 | u8 (*sff_check_altstatus)(struct ata_port *ap); | ||
774 | void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
775 | void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); | ||
776 | void (*sff_exec_command)(struct ata_port *ap, | ||
777 | const struct ata_taskfile *tf); | ||
778 | unsigned int (*sff_data_xfer)(struct ata_device *dev, | ||
779 | unsigned char *buf, unsigned int buflen, int rw); | ||
780 | u8 (*sff_irq_on)(struct ata_port *); | ||
781 | void (*sff_irq_clear)(struct ata_port *); | ||
722 | 782 | ||
723 | int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); | 783 | void (*bmdma_setup)(struct ata_queued_cmd *qc); |
724 | int (*port_resume) (struct ata_port *ap); | 784 | void (*bmdma_start)(struct ata_queued_cmd *qc); |
725 | int (*enable_pm) (struct ata_port *ap, enum link_pm policy); | 785 | void (*bmdma_stop)(struct ata_queued_cmd *qc); |
726 | void (*disable_pm) (struct ata_port *ap); | 786 | u8 (*bmdma_status)(struct ata_port *ap); |
727 | int (*port_start) (struct ata_port *ap); | 787 | #endif /* CONFIG_ATA_SFF */ |
728 | void (*port_stop) (struct ata_port *ap); | ||
729 | 788 | ||
730 | void (*host_stop) (struct ata_host *host); | 789 | /* |
790 | * Obsolete | ||
791 | */ | ||
792 | void (*phy_reset)(struct ata_port *ap); | ||
793 | void (*eng_timeout)(struct ata_port *ap); | ||
731 | 794 | ||
732 | void (*bmdma_stop) (struct ata_queued_cmd *qc); | 795 | /* |
733 | u8 (*bmdma_status) (struct ata_port *ap); | 796 | * ->inherits must be the last field and all the preceding |
797 | * fields must be pointers. | ||
798 | */ | ||
799 | const struct ata_port_operations *inherits; | ||
734 | }; | 800 | }; |
735 | 801 | ||
736 | struct ata_port_info { | 802 | struct ata_port_info { |
737 | struct scsi_host_template *sht; | ||
738 | unsigned long flags; | 803 | unsigned long flags; |
739 | unsigned long link_flags; | 804 | unsigned long link_flags; |
740 | unsigned long pio_mask; | 805 | unsigned long pio_mask; |
741 | unsigned long mwdma_mask; | 806 | unsigned long mwdma_mask; |
742 | unsigned long udma_mask; | 807 | unsigned long udma_mask; |
743 | const struct ata_port_operations *port_ops; | 808 | struct ata_port_operations *port_ops; |
744 | irq_handler_t irq_handler; | ||
745 | void *private_data; | 809 | void *private_data; |
746 | }; | 810 | }; |
747 | 811 | ||
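
This is the core of the new model: per-driver ops name a parent through ->inherits and list only their overrides, while ATA_OP_NULL forces an inherited slot back to NULL. Note also that ata_port_info loses its sht and irq_handler fields and that the ops pointers are no longer const. A hedged sketch, with my_* names hypothetical and the masked mode_filter purely illustrative:

static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* SFF + BMDMA defaults, declared below */
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= my_set_piomode,
	.mode_filter	= ATA_OP_NULL,		/* drop the inherited filter entirely */
};

static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,
	.port_ops	= &my_port_ops,		/* no .sht / .irq_handler any more */
};
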
@@ -759,11 +823,14 @@ struct ata_timing { | |||
759 | 823 | ||
760 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) | 824 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) |
761 | 825 | ||
826 | /* | ||
827 | * Core layer - drivers/ata/libata-core.c | ||
828 | */ | ||
762 | extern const unsigned long sata_deb_timing_normal[]; | 829 | extern const unsigned long sata_deb_timing_normal[]; |
763 | extern const unsigned long sata_deb_timing_hotplug[]; | 830 | extern const unsigned long sata_deb_timing_hotplug[]; |
764 | extern const unsigned long sata_deb_timing_long[]; | 831 | extern const unsigned long sata_deb_timing_long[]; |
765 | 832 | ||
766 | extern const struct ata_port_operations ata_dummy_port_ops; | 833 | extern struct ata_port_operations ata_dummy_port_ops; |
767 | extern const struct ata_port_info ata_dummy_port_info; | 834 | extern const struct ata_port_info ata_dummy_port_info; |
768 | 835 | ||
769 | static inline const unsigned long * | 836 | static inline const unsigned long * |
@@ -782,22 +849,21 @@ static inline int ata_port_is_dummy(struct ata_port *ap) | |||
782 | 849 | ||
783 | extern void sata_print_link_status(struct ata_link *link); | 850 | extern void sata_print_link_status(struct ata_link *link); |
784 | extern void ata_port_probe(struct ata_port *); | 851 | extern void ata_port_probe(struct ata_port *); |
785 | extern void ata_bus_reset(struct ata_port *ap); | ||
786 | extern int sata_set_spd(struct ata_link *link); | 852 | extern int sata_set_spd(struct ata_link *link); |
853 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
854 | extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, | ||
855 | int (*check_ready)(struct ata_link *link)); | ||
787 | extern int sata_link_debounce(struct ata_link *link, | 856 | extern int sata_link_debounce(struct ata_link *link, |
788 | const unsigned long *params, unsigned long deadline); | 857 | const unsigned long *params, unsigned long deadline); |
789 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, | 858 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, |
790 | unsigned long deadline); | 859 | unsigned long deadline); |
791 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
792 | extern int ata_std_softreset(struct ata_link *link, unsigned int *classes, | ||
793 | unsigned long deadline); | ||
794 | extern int sata_link_hardreset(struct ata_link *link, | 860 | extern int sata_link_hardreset(struct ata_link *link, |
795 | const unsigned long *timing, unsigned long deadline); | 861 | const unsigned long *timing, unsigned long deadline, |
862 | bool *online, int (*check_ready)(struct ata_link *)); | ||
796 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, | 863 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
797 | unsigned long deadline); | 864 | unsigned long deadline); |
798 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); | 865 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); |
799 | extern void ata_port_disable(struct ata_port *); | 866 | extern void ata_port_disable(struct ata_port *); |
800 | extern void ata_std_ports(struct ata_ioports *ioaddr); | ||
801 | 867 | ||
802 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); | 868 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); |
803 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, | 869 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
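
sata_link_hardreset() now reports the link's online state through a bool and takes an optional check_ready callback, and ata_wait_after_reset() becomes link-based with the same callback. A hedged ->hardreset sketch for a native SATA driver (my_hardreset, my_check_ready and my_classify_device are hypothetical):

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 my_check_ready);
	if (online)
		*class = my_classify_device(link->ap);	/* hypothetical */
	return rc;
}
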
@@ -810,7 +876,7 @@ extern int ata_host_activate(struct ata_host *host, int irq, | |||
810 | struct scsi_host_template *sht); | 876 | struct scsi_host_template *sht); |
811 | extern void ata_host_detach(struct ata_host *host); | 877 | extern void ata_host_detach(struct ata_host *host); |
812 | extern void ata_host_init(struct ata_host *, struct device *, | 878 | extern void ata_host_init(struct ata_host *, struct device *, |
813 | unsigned long, const struct ata_port_operations *); | 879 | unsigned long, struct ata_port_operations *); |
814 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 880 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
815 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 881 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
816 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); | 882 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); |
@@ -823,7 +889,6 @@ extern void ata_sas_port_stop(struct ata_port *ap); | |||
823 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); | 889 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); |
824 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), | 890 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), |
825 | struct ata_port *ap); | 891 | struct ata_port *ap); |
826 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); | ||
827 | extern int sata_scr_valid(struct ata_link *link); | 892 | extern int sata_scr_valid(struct ata_link *link); |
828 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); | 893 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); |
829 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); | 894 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); |
@@ -835,21 +900,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); | |||
835 | extern void ata_host_resume(struct ata_host *host); | 900 | extern void ata_host_resume(struct ata_host *host); |
836 | #endif | 901 | #endif |
837 | extern int ata_ratelimit(void); | 902 | extern int ata_ratelimit(void); |
838 | extern int ata_busy_sleep(struct ata_port *ap, | ||
839 | unsigned long timeout_pat, unsigned long timeout); | ||
840 | extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline); | ||
841 | extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); | ||
842 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 903 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
843 | unsigned long interval_msec, | 904 | unsigned long interval_msec, |
844 | unsigned long timeout_msec); | 905 | unsigned long timeout_msec); |
845 | extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present, | ||
846 | u8 *r_err); | ||
847 | |||
848 | /* | ||
849 | * Default driver ops implementations | ||
850 | */ | ||
851 | extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
852 | extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
853 | extern int atapi_cmd_type(u8 opcode); | 906 | extern int atapi_cmd_type(u8 opcode); |
854 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, | 907 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, |
855 | u8 pmp, int is_cmd, u8 *fis); | 908 | u8 pmp, int is_cmd, u8 *fis); |
@@ -864,23 +917,9 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); | |||
864 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); | 917 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); |
865 | extern const char *ata_mode_string(unsigned long xfer_mask); | 918 | extern const char *ata_mode_string(unsigned long xfer_mask); |
866 | extern unsigned long ata_id_xfermask(const u16 *id); | 919 | extern unsigned long ata_id_xfermask(const u16 *id); |
867 | extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); | ||
868 | extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); | ||
869 | extern u8 ata_check_status(struct ata_port *ap); | ||
870 | extern u8 ata_altstatus(struct ata_port *ap); | ||
871 | extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); | ||
872 | extern int ata_port_start(struct ata_port *ap); | 920 | extern int ata_port_start(struct ata_port *ap); |
873 | extern int ata_sff_port_start(struct ata_port *ap); | ||
874 | extern irqreturn_t ata_interrupt(int irq, void *dev_instance); | ||
875 | extern unsigned int ata_data_xfer(struct ata_device *dev, | ||
876 | unsigned char *buf, unsigned int buflen, int rw); | ||
877 | extern unsigned int ata_data_xfer_noirq(struct ata_device *dev, | ||
878 | unsigned char *buf, unsigned int buflen, int rw); | ||
879 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); | 921 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); |
880 | extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
881 | extern void ata_qc_prep(struct ata_queued_cmd *qc); | ||
882 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); | 922 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); |
883 | extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); | ||
884 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | 923 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
885 | unsigned int n_elem); | 924 | unsigned int n_elem); |
886 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); | 925 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); |
@@ -889,24 +928,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, | |||
889 | unsigned int ofs, unsigned int len); | 928 | unsigned int ofs, unsigned int len); |
890 | extern void ata_id_c_string(const u16 *id, unsigned char *s, | 929 | extern void ata_id_c_string(const u16 *id, unsigned char *s, |
891 | unsigned int ofs, unsigned int len); | 930 | unsigned int ofs, unsigned int len); |
892 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
893 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
894 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
895 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
896 | extern void ata_bmdma_irq_clear(struct ata_port *ap); | ||
897 | extern void ata_bmdma_freeze(struct ata_port *ap); | ||
898 | extern void ata_bmdma_thaw(struct ata_port *ap); | ||
899 | extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | ||
900 | ata_reset_fn_t softreset, | ||
901 | ata_reset_fn_t hardreset, | ||
902 | ata_postreset_fn_t postreset); | ||
903 | extern void ata_bmdma_error_handler(struct ata_port *ap); | ||
904 | extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); | ||
905 | extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
906 | u8 status, int in_wq); | ||
907 | extern void ata_qc_complete(struct ata_queued_cmd *qc); | 931 | extern void ata_qc_complete(struct ata_queued_cmd *qc); |
908 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, | 932 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); |
909 | void (*finish_qc)(struct ata_queued_cmd *)); | ||
910 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | 933 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, |
911 | void (*done)(struct scsi_cmnd *)); | 934 | void (*done)(struct scsi_cmnd *)); |
912 | extern int ata_std_bios_param(struct scsi_device *sdev, | 935 | extern int ata_std_bios_param(struct scsi_device *sdev, |
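
ata_qc_complete_multiple() loses the finish_qc callback; the second argument is still the set of commands the hardware considers active, and a negative return means hardware and libata disagree. A hedged NCQ completion sketch (my_ncq_complete is hypothetical; done_mask would come from a controller-specific register):

static void my_ncq_complete(struct ata_port *ap, u32 done_mask)
{
	if (ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask) < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;

		/* completion bookkeeping went wrong: freeze and reset */
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
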
@@ -918,7 +941,6 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, | |||
918 | int queue_depth); | 941 | int queue_depth); |
919 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); | 942 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); |
920 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); | 943 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
921 | extern u8 ata_irq_on(struct ata_port *ap); | ||
922 | 944 | ||
923 | extern int ata_cable_40wire(struct ata_port *ap); | 945 | extern int ata_cable_40wire(struct ata_port *ap); |
924 | extern int ata_cable_80wire(struct ata_port *ap); | 946 | extern int ata_cable_80wire(struct ata_port *ap); |
@@ -926,10 +948,7 @@ extern int ata_cable_sata(struct ata_port *ap); | |||
926 | extern int ata_cable_ignore(struct ata_port *ap); | 948 | extern int ata_cable_ignore(struct ata_port *ap); |
927 | extern int ata_cable_unknown(struct ata_port *ap); | 949 | extern int ata_cable_unknown(struct ata_port *ap); |
928 | 950 | ||
929 | /* | 951 | /* Timing helpers */ |
930 | * Timing helpers | ||
931 | */ | ||
932 | |||
933 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); | 952 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); |
934 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); | 953 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); |
935 | extern int ata_timing_compute(struct ata_device *, unsigned short, | 954 | extern int ata_timing_compute(struct ata_device *, unsigned short, |
@@ -939,24 +958,31 @@ extern void ata_timing_merge(const struct ata_timing *, | |||
939 | unsigned int); | 958 | unsigned int); |
940 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); | 959 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); |
941 | 960 | ||
942 | enum { | 961 | /* PCI */ |
943 | ATA_TIMING_SETUP = (1 << 0), | 962 | #ifdef CONFIG_PCI |
944 | ATA_TIMING_ACT8B = (1 << 1), | 963 | struct pci_dev; |
945 | ATA_TIMING_REC8B = (1 << 2), | 964 | |
946 | ATA_TIMING_CYC8B = (1 << 3), | 965 | struct pci_bits { |
947 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | 966 | unsigned int reg; /* PCI config register to read */ |
948 | ATA_TIMING_CYC8B, | 967 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ |
949 | ATA_TIMING_ACTIVE = (1 << 4), | 968 | unsigned long mask; |
950 | ATA_TIMING_RECOVER = (1 << 5), | 969 | unsigned long val; |
951 | ATA_TIMING_CYCLE = (1 << 6), | ||
952 | ATA_TIMING_UDMA = (1 << 7), | ||
953 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
954 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
955 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
956 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
957 | }; | 970 | }; |
958 | 971 | ||
959 | /* libata-acpi.c */ | 972 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); |
973 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
974 | |||
975 | #ifdef CONFIG_PM | ||
976 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
977 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
978 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
979 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
980 | #endif /* CONFIG_PM */ | ||
981 | #endif /* CONFIG_PCI */ | ||
982 | |||
983 | /* | ||
984 | * ACPI - drivers/ata/libata-acpi.c | ||
985 | */ | ||
960 | #ifdef CONFIG_ATA_ACPI | 986 | #ifdef CONFIG_ATA_ACPI |
961 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) | 987 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) |
962 | { | 988 | { |
@@ -1000,56 +1026,8 @@ static inline int ata_acpi_cbl_80wire(struct ata_port *ap, | |||
1000 | } | 1026 | } |
1001 | #endif | 1027 | #endif |
1002 | 1028 | ||
1003 | #ifdef CONFIG_PCI | ||
1004 | struct pci_dev; | ||
1005 | |||
1006 | extern int ata_pci_init_one(struct pci_dev *pdev, | ||
1007 | const struct ata_port_info * const * ppi); | ||
1008 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
1009 | #ifdef CONFIG_PM | ||
1010 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1011 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
1012 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1013 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
1014 | #endif | ||
1015 | extern int ata_pci_clear_simplex(struct pci_dev *pdev); | ||
1016 | |||
1017 | struct pci_bits { | ||
1018 | unsigned int reg; /* PCI config register to read */ | ||
1019 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ | ||
1020 | unsigned long mask; | ||
1021 | unsigned long val; | ||
1022 | }; | ||
1023 | |||
1024 | extern int ata_pci_init_sff_host(struct ata_host *host); | ||
1025 | extern int ata_pci_init_bmdma(struct ata_host *host); | ||
1026 | extern int ata_pci_prepare_sff_host(struct pci_dev *pdev, | ||
1027 | const struct ata_port_info * const * ppi, | ||
1028 | struct ata_host **r_host); | ||
1029 | extern int ata_pci_activate_sff_host(struct ata_host *host, | ||
1030 | irq_handler_t irq_handler, | ||
1031 | struct scsi_host_template *sht); | ||
1032 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); | ||
1033 | extern unsigned long ata_pci_default_filter(struct ata_device *dev, | ||
1034 | unsigned long xfer_mask); | ||
1035 | #endif /* CONFIG_PCI */ | ||
1036 | |||
1037 | /* | ||
1038 | * PMP | ||
1039 | */ | ||
1040 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1041 | extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline); | ||
1042 | extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, | ||
1043 | unsigned long deadline); | ||
1044 | extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class); | ||
1045 | extern void sata_pmp_do_eh(struct ata_port *ap, | ||
1046 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1047 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1048 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1049 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset); | ||
1050 | |||
1051 | /* | 1029 | /* |
1052 | * EH | 1030 | * EH - drivers/ata/libata-eh.c |
1053 | */ | 1031 | */ |
1054 | extern void ata_port_schedule_eh(struct ata_port *ap); | 1032 | extern void ata_port_schedule_eh(struct ata_port *ap); |
1055 | extern int ata_link_abort(struct ata_link *link); | 1033 | extern int ata_link_abort(struct ata_link *link); |
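
The eight-argument sata_pmp_do_eh() and the sata_pmp_std_* reset prototypes disappear here; PMP-capable drivers now publish their per-PMP-link reset methods in ata_port_operations and use sata_pmp_error_handler(), both declared near the end of this header. A hedged sketch (my_* names hypothetical; the explicit error_handler line may already be the inherited default):

static struct ata_port_operations my_pmp_ops = {
	.inherits	= &sata_pmp_port_ops,
	.hardreset	= my_hardreset,
	.pmp_hardreset	= my_pmp_hardreset,	/* hypothetical per-PMP-link reset */
	.error_handler	= sata_pmp_error_handler,
};
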
@@ -1066,6 +1044,92 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); | |||
1066 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | 1044 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, |
1067 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 1045 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
1068 | ata_postreset_fn_t postreset); | 1046 | ata_postreset_fn_t postreset); |
1047 | extern void ata_std_error_handler(struct ata_port *ap); | ||
1048 | |||
1049 | /* | ||
1050 | * Base operations to inherit from and initializers for sht | ||
1051 | * | ||
1052 | * Operations | ||
1053 | * | ||
1054 | * base : Common to all libata drivers. | ||
1055 | * sata : SATA controllers w/ native interface. | ||
1056 | * pmp : SATA controllers w/ PMP support. | ||
1057 | * sff : SFF ATA controllers w/o BMDMA support. | ||
1058 | * bmdma : SFF ATA controllers w/ BMDMA support. | ||
1059 | * | ||
1060 | * sht initializers | ||
1061 | * | ||
1062 | * BASE : Common to all libata drivers. The user must set | ||
1063 | * sg_tablesize and dma_boundary. | ||
1064 | * PIO : SFF ATA controllers w/ only PIO support. | ||
1065 | * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and | ||
1066 | * dma_boundary are set to BMDMA limits. | ||
1067 | * NCQ : SATA controllers supporting NCQ. The user must set | ||
1068 | * sg_tablesize, dma_boundary and can_queue. | ||
1069 | */ | ||
1070 | extern const struct ata_port_operations ata_base_port_ops; | ||
1071 | extern const struct ata_port_operations sata_port_ops; | ||
1072 | |||
1073 | #define ATA_BASE_SHT(drv_name) \ | ||
1074 | .module = THIS_MODULE, \ | ||
1075 | .name = drv_name, \ | ||
1076 | .ioctl = ata_scsi_ioctl, \ | ||
1077 | .queuecommand = ata_scsi_queuecmd, \ | ||
1078 | .can_queue = ATA_DEF_QUEUE, \ | ||
1079 | .this_id = ATA_SHT_THIS_ID, \ | ||
1080 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ | ||
1081 | .emulated = ATA_SHT_EMULATED, \ | ||
1082 | .use_clustering = ATA_SHT_USE_CLUSTERING, \ | ||
1083 | .proc_name = drv_name, \ | ||
1084 | .slave_configure = ata_scsi_slave_config, \ | ||
1085 | .slave_destroy = ata_scsi_slave_destroy, \ | ||
1086 | .bios_param = ata_std_bios_param | ||
1087 | |||
1088 | #define ATA_NCQ_SHT(drv_name) \ | ||
1089 | ATA_BASE_SHT(drv_name), \ | ||
1090 | .change_queue_depth = ata_scsi_change_queue_depth | ||
1091 | |||
1092 | /* | ||
1093 | * PMP helpers | ||
1094 | */ | ||
1095 | #ifdef CONFIG_SATA_PMP | ||
1096 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1097 | { | ||
1098 | return ap->flags & ATA_FLAG_PMP; | ||
1099 | } | ||
1100 | |||
1101 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1102 | { | ||
1103 | return ap->nr_pmp_links != 0; | ||
1104 | } | ||
1105 | |||
1106 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1107 | { | ||
1108 | return link == &link->ap->link; | ||
1109 | } | ||
1110 | #else /* CONFIG_SATA_PMP */ | ||
1111 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1112 | { | ||
1113 | return false; | ||
1114 | } | ||
1115 | |||
1116 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1117 | { | ||
1118 | return false; | ||
1119 | } | ||
1120 | |||
1121 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1122 | { | ||
1123 | return 1; | ||
1124 | } | ||
1125 | #endif /* CONFIG_SATA_PMP */ | ||
1126 | |||
1127 | static inline int sata_srst_pmp(struct ata_link *link) | ||
1128 | { | ||
1129 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) | ||
1130 | return SATA_PMP_CTRL_PORT; | ||
1131 | return link->pmp; | ||
1132 | } | ||
1069 | 1133 | ||
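
The SHT boilerplate collapses into these initializer macros; per the comment above, ATA_NCQ_SHT still leaves sg_tablesize, dma_boundary and can_queue to the driver. A hedged sketch (my_driver and the limits are assumptions):

static struct scsi_host_template my_sht = {
	ATA_NCQ_SHT("my_driver"),
	.can_queue	= 31,			/* controller-specific queue depth */
	.sg_tablesize	= 128,
	.dma_boundary	= 0xffffffffUL,
};

sata_srst_pmp() above would typically be called at the top of a driver's ->softreset or ->pmp_softreset to pick the PMP port number placed into the SRST FIS, for example via ata_tf_to_fis()'s pmp argument.
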
1070 | /* | 1134 | /* |
1071 | * printk helpers | 1135 | * printk helpers |
@@ -1074,7 +1138,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1074 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) | 1138 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) |
1075 | 1139 | ||
1076 | #define ata_link_printk(link, lv, fmt, args...) do { \ | 1140 | #define ata_link_printk(link, lv, fmt, args...) do { \ |
1077 | if ((link)->ap->nr_pmp_links) \ | 1141 | if (sata_pmp_attached((link)->ap)) \ |
1078 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ | 1142 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ |
1079 | (link)->pmp , ##args); \ | 1143 | (link)->pmp , ##args); \ |
1080 | else \ | 1144 | else \ |
@@ -1094,18 +1158,11 @@ extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) | |||
1094 | __attribute__ ((format (printf, 2, 3))); | 1158 | __attribute__ ((format (printf, 2, 3))); |
1095 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); | 1159 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); |
1096 | 1160 | ||
1097 | static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi) | ||
1098 | { | ||
1099 | ehi->flags |= ATA_EHI_RESUME_LINK; | ||
1100 | ehi->action |= ATA_EH_SOFTRESET; | ||
1101 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; | ||
1102 | } | ||
1103 | |||
1104 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) | 1161 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) |
1105 | { | 1162 | { |
1106 | ata_ehi_schedule_probe(ehi); | 1163 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; |
1107 | ehi->flags |= ATA_EHI_HOTPLUGGED; | 1164 | ehi->flags |= ATA_EHI_HOTPLUGGED; |
1108 | ehi->action |= ATA_EH_ENABLE_LINK; | 1165 | ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; |
1109 | ehi->err_mask |= AC_ERR_ATA_BUS; | 1166 | ehi->err_mask |= AC_ERR_ATA_BUS; |
1110 | } | 1167 | } |
1111 | 1168 | ||
@@ -1126,7 +1183,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag) | |||
1126 | 1183 | ||
1127 | static inline unsigned int ata_tag_internal(unsigned int tag) | 1184 | static inline unsigned int ata_tag_internal(unsigned int tag) |
1128 | { | 1185 | { |
1129 | return tag == ATA_MAX_QUEUE - 1; | 1186 | return tag == ATA_TAG_INTERNAL; |
1130 | } | 1187 | } |
1131 | 1188 | ||
1132 | /* | 1189 | /* |
@@ -1167,11 +1224,6 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev) | |||
1167 | /* | 1224 | /* |
1168 | * link helpers | 1225 | * link helpers |
1169 | */ | 1226 | */ |
1170 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1171 | { | ||
1172 | return link == &link->ap->link; | ||
1173 | } | ||
1174 | |||
1175 | static inline int ata_link_max_devices(const struct ata_link *link) | 1227 | static inline int ata_link_max_devices(const struct ata_link *link) |
1176 | { | 1228 | { |
1177 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) | 1229 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) |
@@ -1186,7 +1238,7 @@ static inline int ata_link_active(struct ata_link *link) | |||
1186 | 1238 | ||
1187 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) | 1239 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) |
1188 | { | 1240 | { |
1189 | if (ap->nr_pmp_links) | 1241 | if (sata_pmp_attached(ap)) |
1190 | return ap->pmp_link; | 1242 | return ap->pmp_link; |
1191 | return &ap->link; | 1243 | return &ap->link; |
1192 | } | 1244 | } |
@@ -1195,8 +1247,8 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1195 | { | 1247 | { |
1196 | struct ata_port *ap = link->ap; | 1248 | struct ata_port *ap = link->ap; |
1197 | 1249 | ||
1198 | if (link == &ap->link) { | 1250 | if (ata_is_host_link(link)) { |
1199 | if (!ap->nr_pmp_links) | 1251 | if (!sata_pmp_attached(ap)) |
1200 | return NULL; | 1252 | return NULL; |
1201 | return ap->pmp_link; | 1253 | return ap->pmp_link; |
1202 | } | 1254 | } |
@@ -1222,11 +1274,6 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1222 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ | 1274 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ |
1223 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) | 1275 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) |
1224 | 1276 | ||
1225 | static inline u8 ata_chk_status(struct ata_port *ap) | ||
1226 | { | ||
1227 | return ap->ops->check_status(ap); | ||
1228 | } | ||
1229 | |||
1230 | /** | 1277 | /** |
1231 | * ata_ncq_enabled - Test whether NCQ is enabled | 1278 | * ata_ncq_enabled - Test whether NCQ is enabled |
1232 | * @dev: ATA device to test for | 1279 | * @dev: ATA device to test for |
@@ -1243,74 +1290,6 @@ static inline int ata_ncq_enabled(struct ata_device *dev) | |||
1243 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; | 1290 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; |
1244 | } | 1291 | } |
1245 | 1292 | ||
1246 | /** | ||
1247 | * ata_pause - Flush writes and pause 400 nanoseconds. | ||
1248 | * @ap: Port to wait for. | ||
1249 | * | ||
1250 | * LOCKING: | ||
1251 | * Inherited from caller. | ||
1252 | */ | ||
1253 | |||
1254 | static inline void ata_pause(struct ata_port *ap) | ||
1255 | { | ||
1256 | ata_altstatus(ap); | ||
1257 | ndelay(400); | ||
1258 | } | ||
1259 | |||
1260 | |||
1261 | /** | ||
1262 | * ata_busy_wait - Wait for a port status register | ||
1263 | * @ap: Port to wait for. | ||
1264 | * @bits: bits that must be clear | ||
1265 | * @max: number of 10uS waits to perform | ||
1266 | * | ||
1267 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1268 | * status register to be cleared. | ||
1269 | * Returns final value of status register. | ||
1270 | * | ||
1271 | * LOCKING: | ||
1272 | * Inherited from caller. | ||
1273 | */ | ||
1274 | |||
1275 | static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1276 | unsigned int max) | ||
1277 | { | ||
1278 | u8 status; | ||
1279 | |||
1280 | do { | ||
1281 | udelay(10); | ||
1282 | status = ata_chk_status(ap); | ||
1283 | max--; | ||
1284 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1285 | |||
1286 | return status; | ||
1287 | } | ||
1288 | |||
1289 | |||
1290 | /** | ||
1291 | * ata_wait_idle - Wait for a port to be idle. | ||
1292 | * @ap: Port to wait for. | ||
1293 | * | ||
1294 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1295 | * Returns final value of status register. | ||
1296 | * | ||
1297 | * LOCKING: | ||
1298 | * Inherited from caller. | ||
1299 | */ | ||
1300 | |||
1301 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1302 | { | ||
1303 | u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1304 | |||
1305 | #ifdef ATA_DEBUG | ||
1306 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1307 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1308 | status); | ||
1309 | #endif | ||
1310 | |||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) | 1293 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) |
1315 | { | 1294 | { |
1316 | qc->tf.ctl |= ATA_NIEN; | 1295 | qc->tf.ctl |= ATA_NIEN; |
@@ -1403,4 +1382,171 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) | |||
1403 | return *(struct ata_port **)&host->hostdata[0]; | 1382 | return *(struct ata_port **)&host->hostdata[0]; |
1404 | } | 1383 | } |
1405 | 1384 | ||
1385 | |||
1386 | /************************************************************************** | ||
1387 | * PMP - drivers/ata/libata-pmp.c | ||
1388 | */ | ||
1389 | #ifdef CONFIG_SATA_PMP | ||
1390 | |||
1391 | extern const struct ata_port_operations sata_pmp_port_ops; | ||
1392 | |||
1393 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1394 | extern void sata_pmp_error_handler(struct ata_port *ap); | ||
1395 | |||
1396 | #else /* CONFIG_SATA_PMP */ | ||
1397 | |||
1398 | #define sata_pmp_port_ops sata_port_ops | ||
1399 | #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer | ||
1400 | #define sata_pmp_error_handler ata_std_error_handler | ||
1401 | |||
1402 | #endif /* CONFIG_SATA_PMP */ | ||
1403 | |||
1404 | |||
1405 | /************************************************************************** | ||
1406 | * SFF - drivers/ata/libata-sff.c | ||
1407 | */ | ||
1408 | #ifdef CONFIG_ATA_SFF | ||
1409 | |||
1410 | extern const struct ata_port_operations ata_sff_port_ops; | ||
1411 | extern const struct ata_port_operations ata_bmdma_port_ops; | ||
1412 | |||
1413 | /* PIO only, sg_tablesize and dma_boundary limits can be removed */ | ||
1414 | #define ATA_PIO_SHT(drv_name) \ | ||
1415 | ATA_BASE_SHT(drv_name), \ | ||
1416 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1417 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1418 | |||
1419 | #define ATA_BMDMA_SHT(drv_name) \ | ||
1420 | ATA_BASE_SHT(drv_name), \ | ||
1421 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1422 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1423 | |||
1424 | extern void ata_sff_qc_prep(struct ata_queued_cmd *qc); | ||
1425 | extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
1426 | extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); | ||
1427 | extern u8 ata_sff_check_status(struct ata_port *ap); | ||
1428 | extern u8 ata_sff_altstatus(struct ata_port *ap); | ||
1429 | extern int ata_sff_busy_sleep(struct ata_port *ap, | ||
1430 | unsigned long timeout_pat, unsigned long timeout); | ||
1431 | extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); | ||
1432 | extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
1433 | extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
1434 | extern void ata_sff_exec_command(struct ata_port *ap, | ||
1435 | const struct ata_taskfile *tf); | ||
1436 | extern unsigned int ata_sff_data_xfer(struct ata_device *dev, | ||
1437 | unsigned char *buf, unsigned int buflen, int rw); | ||
1438 | extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, | ||
1439 | unsigned char *buf, unsigned int buflen, int rw); | ||
1440 | extern u8 ata_sff_irq_on(struct ata_port *ap); | ||
1441 | extern void ata_sff_irq_clear(struct ata_port *ap); | ||
1442 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
1443 | u8 status, int in_wq); | ||
1444 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | ||
1445 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | ||
1446 | extern unsigned int ata_sff_host_intr(struct ata_port *ap, | ||
1447 | struct ata_queued_cmd *qc); | ||
1448 | extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); | ||
1449 | extern void ata_sff_freeze(struct ata_port *ap); | ||
1450 | extern void ata_sff_thaw(struct ata_port *ap); | ||
1451 | extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); | ||
1452 | extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, | ||
1453 | u8 *r_err); | ||
1454 | extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, | ||
1455 | unsigned long deadline); | ||
1456 | extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, | ||
1457 | unsigned long deadline); | ||
1458 | extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, | ||
1459 | unsigned long deadline); | ||
1460 | extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); | ||
1461 | extern void ata_sff_error_handler(struct ata_port *ap); | ||
1462 | extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); | ||
1463 | extern int ata_sff_port_start(struct ata_port *ap); | ||
1464 | extern void ata_sff_std_ports(struct ata_ioports *ioaddr); | ||
1465 | extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, | ||
1466 | unsigned long xfer_mask); | ||
1467 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
1468 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
1469 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
1470 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
1471 | extern void ata_bus_reset(struct ata_port *ap); | ||
1472 | |||
1473 | #ifdef CONFIG_PCI | ||
1474 | extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); | ||
1475 | extern int ata_pci_bmdma_init(struct ata_host *host); | ||
1476 | extern int ata_pci_sff_init_host(struct ata_host *host); | ||
1477 | extern int ata_pci_sff_prepare_host(struct pci_dev *pdev, | ||
1478 | const struct ata_port_info * const * ppi, | ||
1479 | struct ata_host **r_host); | ||
1480 | extern int ata_pci_sff_activate_host(struct ata_host *host, | ||
1481 | irq_handler_t irq_handler, | ||
1482 | struct scsi_host_template *sht); | ||
1483 | extern int ata_pci_sff_init_one(struct pci_dev *pdev, | ||
1484 | const struct ata_port_info * const * ppi, | ||
1485 | struct scsi_host_template *sht, void *host_priv); | ||
1486 | #endif /* CONFIG_PCI */ | ||
1487 | |||
1488 | /** | ||
1489 | * ata_sff_pause - Flush writes and pause 400 nanoseconds. | ||
1490 | * @ap: Port to wait for. | ||
1491 | * | ||
1492 | * LOCKING: | ||
1493 | * Inherited from caller. | ||
1494 | */ | ||
1495 | static inline void ata_sff_pause(struct ata_port *ap) | ||
1496 | { | ||
1497 | ata_sff_altstatus(ap); | ||
1498 | ndelay(400); | ||
1499 | } | ||
1500 | |||
1501 | /** | ||
1502 | * ata_sff_busy_wait - Wait for a port status register | ||
1503 | * @ap: Port to wait for. | ||
1504 | * @bits: bits that must be clear | ||
1505 | * @max: number of 10uS waits to perform | ||
1506 | * | ||
1507 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1508 | * status register to be cleared. | ||
1509 | * Returns final value of status register. | ||
1510 | * | ||
1511 | * LOCKING: | ||
1512 | * Inherited from caller. | ||
1513 | */ | ||
1514 | static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1515 | unsigned int max) | ||
1516 | { | ||
1517 | u8 status; | ||
1518 | |||
1519 | do { | ||
1520 | udelay(10); | ||
1521 | status = ap->ops->sff_check_status(ap); | ||
1522 | max--; | ||
1523 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1524 | |||
1525 | return status; | ||
1526 | } | ||
1527 | |||
1528 | /** | ||
1529 | * ata_wait_idle - Wait for a port to be idle. | ||
1530 | * @ap: Port to wait for. | ||
1531 | * | ||
1532 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1533 | * Returns final value of status register. | ||
1534 | * | ||
1535 | * LOCKING: | ||
1536 | * Inherited from caller. | ||
1537 | */ | ||
1538 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1539 | { | ||
1540 | u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1541 | |||
1542 | #ifdef ATA_DEBUG | ||
1543 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1544 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1545 | status); | ||
1546 | #endif | ||
1547 | |||
1548 | return status; | ||
1549 | } | ||
1550 | #endif /* CONFIG_ATA_SFF */ | ||
1551 | |||
1406 | #endif /* __LINUX_LIBATA_H__ */ | 1552 | #endif /* __LINUX_LIBATA_H__ */ |
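
With the PCI SFF helpers renamed to ata_pci_sff_* and ata_port_info no longer carrying the sht, a one-call PCI probe passes the scsi_host_template directly. A hedged sketch reusing the hypothetical my_port_ops and my_sht from the notes above:

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,		/* PIO0-4 */
		.mwdma_mask	= 0x07,		/* MWDMA0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &my_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL);
}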